From 4ec214469990394dbca27c6df7b01fc10e0b82ab Mon Sep 17 00:00:00 2001
From: Anna Benke
Date: Thu, 23 Apr 2026 15:39:29 +0200
Subject: [PATCH 1/2] feat(gooddata-pipelines): support tags, description and merge into existing LDM

---
 .../latest/pipelines/ldm_extension/_index.md  |  23 ++
 .../ldm_extension/input_processor.py          |  87 +++++++-
 .../ldm_extension/ldm_extension_manager.py    |  74 +++++-
 .../models/custom_data_object.py              |  18 +-
 .../tests/test_ldm_extension/conftest.py      |  84 +++++++
 .../test_input_processor.py                   | 109 +++------
 .../test_ldm_extension_manager.py             | 211 +++++++++++++++++-
 .../test_ldm_extension/test_merge_ldm.py      |  84 +++++++
 8 files changed, 599 insertions(+), 91 deletions(-)
 create mode 100644 packages/gooddata-pipelines/tests/test_ldm_extension/conftest.py
 create mode 100644 packages/gooddata-pipelines/tests/test_ldm_extension/test_merge_ldm.py

diff --git a/docs/content/en/latest/pipelines/ldm_extension/_index.md b/docs/content/en/latest/pipelines/ldm_extension/_index.md
index 7435b59b8..2251b01a8 100644
--- a/docs/content/en/latest/pipelines/ldm_extension/_index.md
+++ b/docs/content/en/latest/pipelines/ldm_extension/_index.md
@@ -45,6 +45,8 @@ The custom dataset represents a new dataset appended to the child LDM. It is def
 | dataset_reference_source_column_data_type | [ColumnDataType](#columndatatype) | Column data type. |
 | workspace_data_filter_id | string | ID of the workspace data filter to use. |
 | workspace_data_filter_column_name | string | Name of the column in custom dataset used for filtering. |
+| dataset_description | string \| None | Optional declarative description on the custom dataset. |
+| dataset_tags | string[] \| None | Optional tag list; when omitted, defaults to a single tag derived from the dataset display name. |
 
 #### Validity constraints
 
@@ -63,6 +65,8 @@ The custom fields define the individual fields in the custom datasets defined ab
 | custom_field_type | [CustomFieldType](#customfieldtype) | Indicates whether the field represents an attribute, a date, or a fact. |
 | custom_field_source_column | string | Name of the column in the physical data model. |
 | custom_field_source_column_data_type | [ColumnDataType](#columndatatype) | Data type of the field. |
+| description | string \| None | Optional declarative description on the attribute, fact, or date dataset. |
+| tags | string[] \| None | Optional tag list; when omitted, defaults to a single tag derived from the dataset display name. |
 
 #### Validity constraints
 
@@ -128,6 +132,25 @@ ldm_extension_manager.process(
 ```
 
+### Merging into an existing child workspace LDM
+
+By default, `process` **replaces** the child workspace LDM with the declarative fragment built from your inputs. Any prior custom datasets or date instances that aren't in the current call are lost.
+
+Set `merge_into_existing_ldm=True` to switch to an **append / update** behaviour: `process` loads the current workspace LDM first, replaces any dataset or date instance whose `id` matches one in your input, and keeps the rest of the model as is (including previously uploaded custom extensions).
+
+Optional cleanup: when `remove_managed_datasets_missing_from_input=True` and `management_tag` is set, datasets that carry that tag but are **not** in the current `process` call are removed from the merged LDM before the upload. This lets tools such as BCA reliably delete their own obsolete custom datasets without touching anything else.
+
+```python
+ldm_extension_manager.process(
+    custom_datasets=custom_dataset_definitions,
+    custom_fields=custom_field_definitions,
+    check_relations=False,
+    merge_into_existing_ldm=True,
+    remove_managed_datasets_missing_from_input=True,
+    management_tag="bca_tooling_managed",
+)
+```
+
 ## Example
 
 Here is a complete example of extending a child workspace's LDM:

diff --git a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/input_processor.py b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/input_processor.py
index d6f8c2b02..6f43a037c 100644
--- a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/input_processor.py
+++ b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/input_processor.py
@@ -5,6 +5,8 @@
 into objects defined in the GoodData Python SDK.
 """
 
+import copy
+
 from gooddata_sdk.catalog.identifier import (
     CatalogDatasetWorkspaceDataFilterIdentifier,
     CatalogGrainIdentifier,
@@ -36,11 +38,26 @@
 from gooddata_pipelines.ldm_extension.models.custom_data_object import (
     ColumnDataType,
     CustomDataset,
+    CustomDatasetDefinition,
     CustomFieldDefinition,
     CustomFieldType,
 )
 
 
+def _effective_field_tags(
+    dataset_name: str, custom_field: CustomFieldDefinition
+) -> list[str]:
+    if custom_field.tags is not None:
+        return list(custom_field.tags)
+    return [dataset_name]
+
+
+def _effective_dataset_tags(definition: CustomDatasetDefinition) -> list[str]:
+    if definition.dataset_tags is not None:
+        return list(definition.dataset_tags)
+    return [definition.dataset_name]
+
+
 class LdmExtensionDataProcessor:
     """Create GoodData LDM from validated custom datasets and fields."""
 
@@ -77,7 +94,8 @@ def _attribute_from_field(
             source_column=custom_field.custom_field_source_column,
             labels=[],
             source_column_data_type=custom_field.custom_field_source_column_data_type.value,
-            tags=[dataset_name],
+            description=custom_field.description,
+            tags=_effective_field_tags(dataset_name, custom_field),
         )
 
     @staticmethod
@@ -91,7 +109,8 @@ def _fact_from_field(
             title=custom_field.custom_field_name,
             source_column=custom_field.custom_field_source_column,
             source_column_data_type=custom_field.custom_field_source_column_data_type.value,
-            tags=[dataset_name],
+            description=custom_field.description,
+            tags=_effective_field_tags(dataset_name, custom_field),
         )
 
     def _date_from_field(
@@ -109,7 +128,8 @@
                 title_pattern="%titleBase - %granularityTitle",
             ),
             granularities=self.DATE_GRANULARITIES,
-            tags=[dataset_name],
+            description=custom_field.description,
+            tags=_effective_field_tags(dataset_name, custom_field),
         )
 
     @staticmethod
@@ -258,7 +278,7 @@
                     ),
                 ]
                 + date_references,
-                description=None,
+                description=dataset.definition.dataset_description,
                 attributes=attributes,
                 facts=facts,
                 data_source_table_id=dataset_source_table_id,
@@ -278,7 +298,7 @@
                         filter_column_data_type=ColumnDataType.STRING.value,
                     )
                 ],
-                tags=[dataset.definition.dataset_name],
+                tags=_effective_dataset_tags(dataset.definition),
             )
         )
 
@@ -287,3 +307,60 @@
             datasets=declarative_datasets, date_instances=date_instances
         )
         return CatalogDeclarativeModel(ldm=ldm)
+
+    def merge_custom_ldm_into_existing(
+        self,
+        existing: CatalogDeclarativeModel,
+        custom_datasets: dict[DatasetId, CustomDataset],
+        *,
+        remove_managed_datasets_missing_from_input: bool = False,
+        management_tag: str | None = None,
+    ) -> CatalogDeclarativeModel:
+        """Merge datasets produced from ``custom_datasets`` into an existing declarative LDM.
+
+        Custom datasets and date instances that share an ``id`` with the fragment replace
+        their previous definitions. When ``remove_managed_datasets_missing_from_input`` is
+        set, datasets that carry ``management_tag`` but are absent from the incoming
+        fragment are removed first (typical for tooling-owned extension datasets).
+
+        Any other pre-existing LDM objects (previously uploaded extensions whose ids
+        are not in the incoming fragment) are preserved unchanged.
+        """
+        fragment = self.datasets_to_ldm(custom_datasets)
+        fragment_ldm = fragment.ldm or CatalogDeclarativeLdm(
+            datasets=[], date_instances=[]
+        )
+
+        result = copy.deepcopy(existing)
+        result_ldm = result.ldm or CatalogDeclarativeLdm(
+            datasets=[], date_instances=[]
+        )
+        result.ldm = result_ldm
+
+        incoming_dataset_ids = {d.id for d in fragment_ldm.datasets}
+        incoming_date_ids = {d.id for d in fragment_ldm.date_instances}
+
+        datasets = list(result_ldm.datasets)
+        if remove_managed_datasets_missing_from_input and management_tag:
+            datasets = [
+                d
+                for d in datasets
+                if not (
+                    d.tags
+                    and management_tag in d.tags
+                    and d.id not in incoming_dataset_ids
+                )
+            ]
+        datasets = [d for d in datasets if d.id not in incoming_dataset_ids]
+        datasets.extend(fragment_ldm.datasets)
+        result_ldm.datasets = datasets
+
+        date_instances = [
+            d
+            for d in result_ldm.date_instances
+            if d.id not in incoming_date_ids
+        ]
+        date_instances.extend(fragment_ldm.date_instances)
+        result_ldm.date_instances = date_instances
+
+        return result
diff --git a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/ldm_extension_manager.py b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/ldm_extension_manager.py
index f08f017e2..cd5d797f0 100644
--- a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/ldm_extension_manager.py
+++ b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/ldm_extension_manager.py
@@ -3,6 +3,9 @@
 
 from pathlib import Path
 
+from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.ldm import (
+    CatalogDeclarativeModel,
+)
 from gooddata_sdk.sdk import GoodDataSdk
 from gooddata_sdk.utils import PROFILES_FILE_PATH, profile_content
 
@@ -147,9 +150,35 @@ def _new_ldm_does_not_invalidate_relations(
         # If the set of new invalid relations is a subset of the current one,
         return set_new_invalid_relations.issubset(set_current_invalid_relations)
 
+    def _ldm_payload_for_workspace(
+        self,
+        workspace_id: str,
+        datasets: dict[DatasetId, CustomDataset],
+        *,
+        merge_into_existing_ldm: bool,
+        remove_managed_datasets_missing_from_input: bool,
+        management_tag: str | None,
+    ) -> CatalogDeclarativeModel:
+        """Build the declarative LDM payload to upload for one workspace."""
+        if not merge_into_existing_ldm:
+            return self._processor.datasets_to_ldm(datasets)
+        current = self._sdk.catalog_workspace_content.get_declarative_ldm(
+            workspace_id
+        )
+        return self._processor.merge_custom_ldm_into_existing(
+            current,
+            datasets,
+            remove_managed_datasets_missing_from_input=remove_managed_datasets_missing_from_input,
+            management_tag=management_tag,
+        )
+
     def _process_with_relations_check(
         self,
         validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
+        *,
+        merge_into_existing_ldm: bool = False,
+        remove_managed_datasets_missing_from_input: bool = False,
+        management_tag: str | None = None,
     ) -> None:
         """Check whether relations of analytical objects are valid before and
         after updating the LDM in the GoodData workspace.
@@ -173,7 +202,13 @@ def _process_with_relations_check(
 
             # Put the LDM with custom datasets into the GoodData workspace.
             self._sdk.catalog_workspace_content.put_declarative_ldm(
                 workspace_id=workspace_id,
-                ldm=self._processor.datasets_to_ldm(datasets),
+                ldm=self._ldm_payload_for_workspace(
+                    workspace_id,
+                    datasets,
+                    merge_into_existing_ldm=merge_into_existing_ldm,
+                    remove_managed_datasets_missing_from_input=remove_managed_datasets_missing_from_input,
+                    management_tag=management_tag,
+                ),
             )
 
             # Get a set of objects with invalid relations from the new workspace state
@@ -232,13 +267,23 @@ def _log_diff_invalid_relations(
     def _process_without_relations_check(
         self,
         validated_data: dict[WorkspaceId, dict[DatasetId, CustomDataset]],
+        *,
+        merge_into_existing_ldm: bool = False,
+        remove_managed_datasets_missing_from_input: bool = False,
+        management_tag: str | None = None,
     ) -> None:
         """Update the LDM in the GoodData workspace without checking relations."""
         for workspace_id, datasets in validated_data.items():
             # Put the LDM with custom datasets into the GoodData workspace.
             self._sdk.catalog_workspace_content.put_declarative_ldm(
                 workspace_id=workspace_id,
-                ldm=self._processor.datasets_to_ldm(datasets),
+                ldm=self._ldm_payload_for_workspace(
+                    workspace_id,
+                    datasets,
+                    merge_into_existing_ldm=merge_into_existing_ldm,
+                    remove_managed_datasets_missing_from_input=remove_managed_datasets_missing_from_input,
+                    management_tag=management_tag,
+                ),
             )
             self._log_success_message(workspace_id)
 
@@ -251,6 +296,9 @@ def process(
         custom_datasets: list[CustomDatasetDefinition],
         custom_fields: list[CustomFieldDefinition],
         check_relations: bool = True,
+        merge_into_existing_ldm: bool = False,
+        remove_managed_datasets_missing_from_input: bool = False,
+        management_tag: str | None = None,
     ) -> None:
         """Create custom datasets and fields in GoodData workspaces.
 
@@ -266,6 +314,14 @@
             after updating the LDM. If the number of invalid relations increases,
             the LDM will be reverted to its previous state. If False, the check is
             skipped and the LDM is updated directly. Defaults to True.
+            merge_into_existing_ldm (bool): When True, load the workspace LDM first and merge
+                the generated custom datasets and date instances into it instead of uploading
+                only the extension fragment. Defaults to False for backward compatibility.
+            remove_managed_datasets_missing_from_input (bool): When ``merge_into_existing_ldm``
+                is True, remove existing datasets that contain ``management_tag`` but whose
+                dataset id is not present in this ``process`` call (tooling cleanup).
+            management_tag (str | None): Tag value used with
+                ``remove_managed_datasets_missing_from_input``.
 
         Raises:
             ValueError: If there are validation errors in the dataset or field definitions.
 
@@ -278,6 +334,16 @@
         if check_relations:
             # Process the validated data with relations check.
-            self._process_with_relations_check(validated_data)
+            self._process_with_relations_check(
+                validated_data,
+                merge_into_existing_ldm=merge_into_existing_ldm,
+                remove_managed_datasets_missing_from_input=remove_managed_datasets_missing_from_input,
+                management_tag=management_tag,
+            )
         else:
-            self._process_without_relations_check(validated_data)
+            self._process_without_relations_check(
+                validated_data,
+                merge_into_existing_ldm=merge_into_existing_ldm,
+                remove_managed_datasets_missing_from_input=remove_managed_datasets_missing_from_input,
+                management_tag=management_tag,
+            )
diff --git a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/models/custom_data_object.py b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/models/custom_data_object.py
index b241d5e34..9c0dae3a4 100644
--- a/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/models/custom_data_object.py
+++ b/packages/gooddata-pipelines/src/gooddata_pipelines/ldm_extension/models/custom_data_object.py
@@ -7,7 +7,7 @@
 
 from enum import Enum
 
-from pydantic import BaseModel, model_validator
+from pydantic import BaseModel, Field, model_validator
 
 
 class CustomFieldType(str, Enum):
@@ -42,6 +42,14 @@ class CustomFieldDefinition(BaseModel):
     custom_field_type: CustomFieldType
     custom_field_source_column: str
     custom_field_source_column_data_type: ColumnDataType
+    description: str | None = Field(
+        default=None,
+        description="Declarative description on the attribute, fact, or date dataset.",
+    )
+    tags: list[str] | None = Field(
+        default=None,
+        description="If set, replaces the default tag list (dataset display name only).",
+    )
 
     @model_validator(mode="after")
     def check_ids_not_equal(self) -> "CustomFieldDefinition":
@@ -68,6 +76,14 @@ class CustomDatasetDefinition(BaseModel):
     dataset_reference_source_column_data_type: ColumnDataType
     workspace_data_filter_id: str
     workspace_data_filter_column_name: str
+    dataset_description: str | None = Field(
+        default=None,
+        description="Declarative description on the custom dataset.",
+    )
+    dataset_tags: list[str] | None = Field(
+        default=None,
+        description="If set, replaces the default tag list (dataset display name only).",
+    )
 
     @model_validator(mode="after")
     def check_source(self) -> "CustomDatasetDefinition":
diff --git a/packages/gooddata-pipelines/tests/test_ldm_extension/conftest.py b/packages/gooddata-pipelines/tests/test_ldm_extension/conftest.py
new file mode 100644
index 000000000..86754a5dc
--- /dev/null
+++ b/packages/gooddata-pipelines/tests/test_ldm_extension/conftest.py
@@ -0,0 +1,84 @@
+# (C) 2025 GoodData Corporation
+import pytest
+
+from gooddata_pipelines.ldm_extension.models.custom_data_object import (
+    ColumnDataType,
+    CustomDataset,
+    CustomDatasetDefinition,
+    CustomFieldDefinition,
+    CustomFieldType,
+)
+
+
+@pytest.fixture
+def mock_custom_field_attribute():
+    return CustomFieldDefinition(
+        workspace_id="workspace1",
+        dataset_id="ds1",
+        custom_field_id="attr1",
+        custom_field_name="Attribute 1",
+        custom_field_type=CustomFieldType.ATTRIBUTE,
+        custom_field_source_column="col_attr1",
+        custom_field_source_column_data_type=ColumnDataType.STRING,
+    )
+
+
+@pytest.fixture
+def mock_custom_field_fact():
+    return CustomFieldDefinition(
+        workspace_id="workspace1",
+        dataset_id="ds1",
+        custom_field_id="fact1",
+        custom_field_name="Fact 1",
+        custom_field_type=CustomFieldType.FACT,
+        custom_field_source_column="col_fact1",
+        custom_field_source_column_data_type=ColumnDataType.INT,
+    )
+
+
+@pytest.fixture
+def mock_custom_field_date():
+    return CustomFieldDefinition(
+        workspace_id="workspace1",
+        dataset_id="ds1",
+        custom_field_id="date1",
+        custom_field_name="Date 1",
+        custom_field_type=CustomFieldType.DATE,
+        custom_field_source_column="col_date1",
+        custom_field_source_column_data_type=ColumnDataType.DATE,
+    )
+
+
+@pytest.fixture
+def mock_dataset_definition():
+    return CustomDatasetDefinition(
+        workspace_id="workspace1",
+        dataset_id="ds1",
+        dataset_name="Dataset 1",
+        dataset_source_table="table1",
+        dataset_datasource_id="ds_source",
+        dataset_source_sql=None,
+        parent_dataset_reference="parent_ds",
+        parent_dataset_reference_attribute_id="parent_attr",
+        dataset_reference_source_column="ref_col",
+        dataset_reference_source_column_data_type=ColumnDataType.STRING,
+        workspace_data_filter_id="wdf1",
+        workspace_data_filter_column_name="col1",
+    )
+
+
+@pytest.fixture
+def mock_custom_dataset(
+    mock_dataset_definition,
+    mock_custom_field_attribute,
+    mock_custom_field_fact,
+    mock_custom_field_date,
+):
+    return CustomDataset(
+        definition=mock_dataset_definition,
+        custom_fields=[
+            mock_custom_field_attribute,
+            mock_custom_field_fact,
+            mock_custom_field_date,
+        ],
+    )
diff --git a/packages/gooddata-pipelines/tests/test_ldm_extension/test_input_processor.py b/packages/gooddata-pipelines/tests/test_ldm_extension/test_input_processor.py
index 851e903fa..8c50cd571 100644
--- a/packages/gooddata-pipelines/tests/test_ldm_extension/test_input_processor.py
+++ b/packages/gooddata-pipelines/tests/test_ldm_extension/test_input_processor.py
@@ -1,21 +1,29 @@
 # (C) 2025 GoodData Corporation
-import pytest
-
 from gooddata_pipelines.ldm_extension.input_processor import (
     LdmExtensionDataProcessor,
 )
 from gooddata_pipelines.ldm_extension.models.custom_data_object import (
     ColumnDataType,
     CustomDataset,
-    CustomDatasetDefinition,
     CustomFieldDefinition,
     CustomFieldType,
 )
 
 
-@pytest.fixture
-def mock_custom_field_attribute():
-    return CustomFieldDefinition(
+def test_attribute_from_field(mock_custom_field_attribute):
+    attr = LdmExtensionDataProcessor._attribute_from_field(
+        "dataset_name", mock_custom_field_attribute
+    )
+    assert attr.id == "attr1"
+    assert attr.title == "Attribute 1"
+    assert attr.source_column == "col_attr1"
+    assert attr.source_column_data_type == ColumnDataType.STRING.value
+    assert attr.tags == ["dataset_name"]
+    assert attr.description is None
+
+
+def test_attribute_from_field_custom_tags_and_description():
+    field = CustomFieldDefinition(
         workspace_id="workspace1",
         dataset_id="ds1",
         custom_field_id="attr1",
@@ -23,79 +31,14 @@
         custom_field_type=CustomFieldType.ATTRIBUTE,
         custom_field_source_column="col_attr1",
         custom_field_source_column_data_type=ColumnDataType.STRING,
+        tags=["t1", "t2"],
+        description="Attr desc",
     )
-
-
-@pytest.fixture
-def mock_custom_field_fact():
-    return CustomFieldDefinition(
-        workspace_id="workspace1",
-        dataset_id="ds1",
-        custom_field_id="fact1",
-        custom_field_name="Fact 1",
-        custom_field_type=CustomFieldType.FACT,
-        custom_field_source_column="col_fact1",
-        custom_field_source_column_data_type=ColumnDataType.INT,
-    )
-
-
-@pytest.fixture
-def mock_custom_field_date():
-    return CustomFieldDefinition(
-        workspace_id="workspace1",
-        dataset_id="ds1",
-        custom_field_id="date1",
-        custom_field_name="Date 1",
-        custom_field_type=CustomFieldType.DATE,
-        custom_field_source_column="col_date1",
-        custom_field_source_column_data_type=ColumnDataType.DATE,
-    )
-
-
-@pytest.fixture
-def mock_dataset_definition():
-    return CustomDatasetDefinition(
-        workspace_id="workspace1",
-        dataset_id="ds1",
-        dataset_name="Dataset 1",
-        dataset_source_table="table1",
-        dataset_datasource_id="ds_source",
-        dataset_source_sql=None,
-        parent_dataset_reference="parent_ds",
-        parent_dataset_reference_attribute_id="parent_attr",
-        dataset_reference_source_column="ref_col",
-        dataset_reference_source_column_data_type=ColumnDataType.STRING,
-        workspace_data_filter_id="wdf1",
-        workspace_data_filter_column_name="col1",
-    )
-
-
-@pytest.fixture
-def mock_custom_dataset(
-    mock_dataset_definition,
-    mock_custom_field_attribute,
-    mock_custom_field_fact,
-    mock_custom_field_date,
-):
-    return CustomDataset(
-        definition=mock_dataset_definition,
-        custom_fields=[
-            mock_custom_field_attribute,
-            mock_custom_field_fact,
-            mock_custom_field_date,
-        ],
-    )
-
-
-def test_attribute_from_field(mock_custom_field_attribute):
     attr = LdmExtensionDataProcessor._attribute_from_field(
-        "dataset_name", mock_custom_field_attribute
+        "dataset_name", field
     )
-    assert attr.id == "attr1"
-    assert attr.title == "Attribute 1"
-    assert attr.source_column == "col_attr1"
-    assert attr.source_column_data_type == ColumnDataType.STRING.value
-    assert attr.tags == ["dataset_name"]
+    assert attr.tags == ["t1", "t2"]
+    assert attr.description == "Attr desc"
 
 
 def test_fact_from_field(mock_custom_field_fact):
@@ -149,6 +92,20 @@ def test_get_sources_sql_only(mock_dataset_definition):
     assert sql.statement == "SELECT * FROM foo"
 
 
+def test_datasets_to_ldm_dataset_tags_and_description(mock_dataset_definition):
+    mock_dataset_definition.dataset_tags = ["managed", "extra"]
+    mock_dataset_definition.dataset_description = "DS desc"
+    mock_dataset_definition.dataset_source_sql = "SELECT 1"
+    mock_dataset_definition.dataset_source_table = None
+    ds = CustomDataset(definition=mock_dataset_definition, custom_fields=[])
+    processor = LdmExtensionDataProcessor()
+    model = processor.datasets_to_ldm({"ds1": ds})
+    d = model.ldm.datasets[0]
+    assert d.description == "DS desc"
+    assert d.tags == ["managed", "extra"]
+    assert d.sql is not None
+
+
 def test_datasets_to_ldm(mock_custom_dataset):
     print(mock_custom_dataset)
     processor = LdmExtensionDataProcessor()
diff --git a/packages/gooddata-pipelines/tests/test_ldm_extension/test_ldm_extension_manager.py b/packages/gooddata-pipelines/tests/test_ldm_extension/test_ldm_extension_manager.py
index 5fc6cc087..376ad4f2b 100644
--- a/packages/gooddata-pipelines/tests/test_ldm_extension/test_ldm_extension_manager.py
+++ b/packages/gooddata-pipelines/tests/test_ldm_extension/test_ldm_extension_manager.py
@@ -1,7 +1,23 @@
 # (C) 2025 GoodData Corporation
+from unittest.mock import MagicMock
+
 import pytest
 from pytest_mock import MockerFixture
 
+from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.dataset.dataset import (
+    CatalogDeclarativeDataset,
+)
+from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.ldm import (
+    CatalogDeclarativeLdm,
+    CatalogDeclarativeModel,
+)
+
+from gooddata_pipelines.ldm_extension.input_processor import (
+    LdmExtensionDataProcessor,
+)
+from gooddata_pipelines.ldm_extension.input_validator import (
+    LdmExtensionDataValidator,
+)
 from gooddata_pipelines.ldm_extension.ldm_extension_manager import (
     LdmExtensionManager,
 )
@@ -48,7 +64,6 @@ def test_relations_check_success(
     manager, validated_data, mocker: MockerFixture
 ):
     """Relation check passes, workspace layout not reverted."""
-    # Setup mocks
     mocker.patch.object(
         manager._sdk.catalog_workspace,
         "get_declarative_workspace",
@@ -85,7 +100,6 @@
         manager._sdk.catalog_workspace, "put_declarative_workspace"
     )
 
-    # Should print "Workspace workspace_1 LDM updated." and not revert
     manager._process_with_relations_check(validated_data)
     manager._sdk.catalog_workspace_content.put_declarative_ldm.assert_called_once()
     manager._sdk.catalog_workspace.put_declarative_workspace.assert_not_called()
@@ -95,7 +109,6 @@ def test_relations_check_failure_and_revert(
     manager, validated_data, capsys, mocker: MockerFixture
 ):
     """Relation check fails, workspace layout is reverted."""
-    # Setup mocks
     mocker.patch.object(manager._api, "get_workspace_layout")
     obj1 = make_analytical_object("a", "A", "type", False)
     obj2 = make_analytical_object("b", "B", "type", False)
@@ -122,7 +135,6 @@
 
     manager._process_with_relations_check(validated_data)
 
-    # Should revert and print info about invalid relations
     manager._sdk.catalog_workspace.put_declarative_workspace.assert_called_once()
     out = capsys.readouterr().out
     assert (
@@ -136,7 +148,6 @@ def test_relations_check_fewer_invalid_relations(
     manager, validated_data, mocker: MockerFixture
 ):
     """Fewer invalid relations after LDM update, no revert needed."""
-    # Setup mocks
     obj1 = make_analytical_object("a", "A", "type", False)
     mocker.patch.object(
         manager._sdk.catalog_workspace,
@@ -192,3 +203,193 @@ def test_log_diff_invalid_relations(manager, capsys):
     assert "b (type) B" in captured_output
     assert "d (type) D" in captured_output
     assert "c (type) C" not in captured_output
+
+
+def _bare_manager(sdk_mock: MagicMock) -> LdmExtensionManager:
+    """Build a manager with a real ``LdmExtensionDataProcessor`` and a mocked SDK.
+
+    Used by tests that assert the real merge/payload logic end-to-end. Tests that
+    only need to check dispatch or logging use the ``manager`` fixture above,
+    which mocks the processor as well.
+ """ + bare = object.__new__(LdmExtensionManager) + bare._processor = LdmExtensionDataProcessor() + bare._validator = LdmExtensionDataValidator() + bare._sdk = sdk_mock + bare.logger = MagicMock() + return bare + + +def test_ldm_payload_without_merge_returns_fragment_only(mock_custom_dataset): + sdk_mock = MagicMock() + bare = _bare_manager(sdk_mock) + + payload = bare._ldm_payload_for_workspace( + "workspace1", + {"ds1": mock_custom_dataset}, + merge_into_existing_ldm=False, + remove_managed_datasets_missing_from_input=False, + management_tag=None, + ) + + sdk_mock.catalog_workspace_content.get_declarative_ldm.assert_not_called() + assert payload.ldm is not None + assert [d.id for d in payload.ldm.datasets] == ["ds1"] + + +def test_ldm_payload_merges_with_existing_ldm(mock_custom_dataset): + inherited = CatalogDeclarativeDataset( + id="parent_only", + title="Parent", + grain=[], + references=[], + ) + existing = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[inherited], date_instances=[]) + ) + sdk_mock = MagicMock() + sdk_mock.catalog_workspace_content.get_declarative_ldm.return_value = ( + existing + ) + bare = _bare_manager(sdk_mock) + + payload = bare._ldm_payload_for_workspace( + "workspace1", + {"ds1": mock_custom_dataset}, + merge_into_existing_ldm=True, + remove_managed_datasets_missing_from_input=False, + management_tag=None, + ) + + sdk_mock.catalog_workspace_content.get_declarative_ldm.assert_called_once_with( + "workspace1" + ) + assert payload.ldm is not None + assert {d.id for d in payload.ldm.datasets} == {"parent_only", "ds1"} + + +def test_ldm_payload_merge_forwards_cleanup_flags(mock_custom_dataset): + managed_old = CatalogDeclarativeDataset( + id="managed_old", + title="Old", + grain=[], + references=[], + tags=["bca_tooling_managed"], + ) + existing = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[managed_old], date_instances=[]) + ) + sdk_mock = MagicMock() + sdk_mock.catalog_workspace_content.get_declarative_ldm.return_value = ( + existing + ) + bare = _bare_manager(sdk_mock) + + payload = bare._ldm_payload_for_workspace( + "workspace1", + {"ds1": mock_custom_dataset}, + merge_into_existing_ldm=True, + remove_managed_datasets_missing_from_input=True, + management_tag="bca_tooling_managed", + ) + + assert payload.ldm is not None + assert [d.id for d in payload.ldm.datasets] == ["ds1"] + + +def test_process_without_relations_check_forwards_merge_kwargs( + mock_custom_dataset, +): + existing = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[], date_instances=[]) + ) + sdk_mock = MagicMock() + sdk_mock.catalog_workspace_content.get_declarative_ldm.return_value = ( + existing + ) + bare = _bare_manager(sdk_mock) + + bare._process_without_relations_check( + {"workspace1": {"ds1": mock_custom_dataset}}, + merge_into_existing_ldm=True, + remove_managed_datasets_missing_from_input=False, + management_tag=None, + ) + + sdk_mock.catalog_workspace_content.get_declarative_ldm.assert_called_once_with( + "workspace1" + ) + put_call = sdk_mock.catalog_workspace_content.put_declarative_ldm + put_call.assert_called_once() + kwargs = put_call.call_args.kwargs + assert kwargs["workspace_id"] == "workspace1" + assert [d.id for d in kwargs["ldm"].ldm.datasets] == ["ds1"] + + +def test_process_with_relations_check_happy_path(mock_custom_dataset): + sdk_mock = MagicMock() + bare = _bare_manager(sdk_mock) + bare._get_objects_with_invalid_relations = MagicMock(return_value=[]) + + bare._process_with_relations_check( + {"workspace1": {"ds1": 
mock_custom_dataset}}, + merge_into_existing_ldm=False, + remove_managed_datasets_missing_from_input=False, + management_tag=None, + ) + + sdk_mock.catalog_workspace.get_declarative_workspace.assert_called_once_with( + "workspace1" + ) + put_call = sdk_mock.catalog_workspace_content.put_declarative_ldm + put_call.assert_called_once() + assert put_call.call_args.kwargs["workspace_id"] == "workspace1" + sdk_mock.catalog_workspace.put_declarative_workspace.assert_not_called() + + +def test_process_dispatches_with_relations_check_by_default(): + sdk_mock = MagicMock() + bare = _bare_manager(sdk_mock) + bare._validator = MagicMock() + bare._validator.validate.return_value = {"workspace1": {}} + bare._process_with_relations_check = MagicMock() + bare._process_without_relations_check = MagicMock() + + bare.process( + custom_datasets=[], + custom_fields=[], + merge_into_existing_ldm=True, + remove_managed_datasets_missing_from_input=True, + management_tag="bca_tooling_managed", + ) + + bare._process_with_relations_check.assert_called_once_with( + {"workspace1": {}}, + merge_into_existing_ldm=True, + remove_managed_datasets_missing_from_input=True, + management_tag="bca_tooling_managed", + ) + bare._process_without_relations_check.assert_not_called() + + +def test_process_skips_relations_check_when_flag_is_false(): + sdk_mock = MagicMock() + bare = _bare_manager(sdk_mock) + bare._validator = MagicMock() + bare._validator.validate.return_value = {"workspace1": {}} + bare._process_with_relations_check = MagicMock() + bare._process_without_relations_check = MagicMock() + + bare.process( + custom_datasets=[], + custom_fields=[], + check_relations=False, + ) + + bare._process_without_relations_check.assert_called_once_with( + {"workspace1": {}}, + merge_into_existing_ldm=False, + remove_managed_datasets_missing_from_input=False, + management_tag=None, + ) + bare._process_with_relations_check.assert_not_called() diff --git a/packages/gooddata-pipelines/tests/test_ldm_extension/test_merge_ldm.py b/packages/gooddata-pipelines/tests/test_ldm_extension/test_merge_ldm.py new file mode 100644 index 000000000..45d0777e6 --- /dev/null +++ b/packages/gooddata-pipelines/tests/test_ldm_extension/test_merge_ldm.py @@ -0,0 +1,84 @@ +# (C) 2025 GoodData Corporation +from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.dataset.dataset import ( + CatalogDeclarativeDataset, +) +from gooddata_sdk.catalog.workspace.declarative_model.workspace.logical_model.ldm import ( + CatalogDeclarativeLdm, + CatalogDeclarativeModel, +) + +from gooddata_pipelines.ldm_extension.input_processor import ( + LdmExtensionDataProcessor, +) +from gooddata_pipelines.ldm_extension.models.custom_data_object import ( + ColumnDataType, + CustomDataset, + CustomDatasetDefinition, +) + + +def test_merge_into_empty_ldm(mock_custom_dataset): + processor = LdmExtensionDataProcessor() + empty = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[], date_instances=[]) + ) + merged = processor.merge_custom_ldm_into_existing( + empty, {"ds1": mock_custom_dataset} + ) + assert len(merged.ldm.datasets) == 1 + assert merged.ldm.datasets[0].id == "ds1" + assert len(merged.ldm.date_instances) == 1 + + +def test_merge_preserves_other_datasets(mock_custom_dataset): + inherited = CatalogDeclarativeDataset( + id="parent_only", + title="Parent DS", + grain=[], + references=[], + ) + existing = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[inherited], date_instances=[]) + ) + processor = LdmExtensionDataProcessor() + 
merged = processor.merge_custom_ldm_into_existing( + existing, {"ds1": mock_custom_dataset} + ) + ids = {d.id for d in merged.ldm.datasets} + assert ids == {"parent_only", "ds1"} + + +def test_merge_removes_managed_dataset_not_in_input(): + managed = CatalogDeclarativeDataset( + id="managed_old", + title="Old", + grain=[], + references=[], + tags=["bca_tooling_managed"], + ) + existing = CatalogDeclarativeModel( + ldm=CatalogDeclarativeLdm(datasets=[managed], date_instances=[]) + ) + definition = CustomDatasetDefinition( + workspace_id="workspace1", + dataset_id="managed_new", + dataset_name="Dataset New", + dataset_datasource_id="dsrc1", + dataset_source_table="table1", + dataset_source_sql=None, + parent_dataset_reference="parent_ds", + parent_dataset_reference_attribute_id="parent_attr", + dataset_reference_source_column="ref_col", + dataset_reference_source_column_data_type=ColumnDataType.STRING, + workspace_data_filter_id="wdf1", + workspace_data_filter_column_name="col1", + ) + incoming = CustomDataset(definition=definition, custom_fields=[]) + processor = LdmExtensionDataProcessor() + merged = processor.merge_custom_ldm_into_existing( + existing, + {"managed_new": incoming}, + remove_managed_datasets_missing_from_input=True, + management_tag="bca_tooling_managed", + ) + assert [d.id for d in merged.ldm.datasets] == ["managed_new"] From e3d82a09ecc1beb351a1c1914eea5ab5165789b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nikola=20=C4=8Cech?= Date: Tue, 28 Apr 2026 10:52:12 +0200 Subject: [PATCH 2/2] WEB-4550 gooddata.ai domain migration --- .github/workflows/netlify-deploy.yaml | 2 +- README.md | 8 +-- docs/assets/scss/typography.scss | 2 +- docs/content/en/latest/_index.md | 6 +-- .../administration/organization/_index.md | 2 +- .../administration/permissions/_index.md | 2 +- .../administration/user-groups/_index.md | 2 +- .../users-and-user-groups/_index.md | 2 +- .../en/latest/administration/users/_index.md | 2 +- .../en/latest/data/data-source/_index.md | 2 +- .../latest/data/physical-data-model/_index.md | 2 +- docs/content/en/latest/execution/ai/_index.md | 2 +- docs/content/en/latest/getting-started.md | 4 +- docs/content/en/latest/installation.md | 6 +-- docs/content/en/latest/pipelines-overview.md | 2 +- .../latest/pipelines/ldm_extension/_index.md | 4 +- .../provisioning/user_data_filters.md | 4 +- .../pipelines/provisioning/user_groups.md | 2 +- .../en/latest/pipelines/provisioning/users.md | 2 +- .../provisioning/workspace-permissions.md | 4 +- .../pipelines/provisioning/workspaces.md | 6 +-- .../logical-data-model/_index.md | 2 +- .../workspace-data-filters/_index.md | 2 +- docs/layouts/404.html | 4 +- docs/layouts/partials/favicons.html | 4 +- docs/layouts/partials/footer.html | 4 +- docs/layouts/partials/head-css.html | 4 +- docs/layouts/partials/header-menu.html | 6 +-- docs/layouts/partials/hooks/body-end.html | 2 +- docs/layouts/partials/navbar-breadcrumb.html | 4 +- .../structureddata.api-ref.html | 44 ++++++++-------- .../structureddata.categories.html | 42 +++++++-------- .../structureddata.introduction.html | 52 +++++++++---------- .../gooddata-dbt/tests/gooddata_example.yml | 2 +- packages/gooddata-fdw/README.md | 4 +- packages/gooddata-fdw/docs/installation.rst | 2 +- packages/gooddata-pandas/README.md | 2 +- packages/gooddata-sdk/README.md | 4 +- packages/gooddata-sdk/pyproject.toml | 2 +- .../gooddata-sdk/src/gooddata_sdk/utils.py | 4 +- .../aac_tests/dashboards/dashboard_1.yaml | 2 +- .../aac_tests/datasets/customer.yaml | 2 +- 
 .../aac_tests/datasets/order_date.yaml        |  2 +-
 .../unit_tests/aac_tests/datasets/orders.yaml |  2 +-
 .../aac_tests/metrics/top_products.yaml       |  2 +-
 .../aac_tests/visualisations/ratings.yaml     |  2 +-
 .../visualisations/ratings_per_category.yaml  |  2 +-
 pyproject.toml                                |  4 +-
 48 files changed, 138 insertions(+), 138 deletions(-)

diff --git a/.github/workflows/netlify-deploy.yaml b/.github/workflows/netlify-deploy.yaml
index 35be13ed2..e516085bb 100644
--- a/.github/workflows/netlify-deploy.yaml
+++ b/.github/workflows/netlify-deploy.yaml
@@ -13,7 +13,7 @@ jobs:
       - name: Hugo Build
         uses: gooddata/gooddata-python-sdk/.github/actions/hugo-build-versioned-action@master
         with:
-          base-url: https://www.gooddata.com/docs/python-sdk
+          base-url: https://www.gooddata.ai/docs/python-sdk
       - name: Publish
         uses: netlify/actions/cli@master
         with:
diff --git a/README.md b/README.md
index 81bfcac11..55e407e8e 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 # GoodData Cloud Python Foundations
 
-This repository contains Python packages useful for integration with [GoodData Cloud](https://www.gooddata.com/docs/cloud/).
+This repository contains Python packages useful for integration with [GoodData Cloud](https://www.gooddata.ai/docs/cloud/).
 
 ## Available packages
 
@@ -18,7 +18,7 @@ Python. Learn more about the clients in their [dedicated readme](./clients_READM
 
 The [gooddata-sdk](./packages/gooddata-sdk) package provides a clean and convenient Python API to interact with GoodData.CN.
 
-Check out the GoodData Python SDK [documentation](https://www.gooddata.com/docs/python-sdk) to learn more and get started.
+Check out the GoodData Python SDK [documentation](https://www.gooddata.ai/docs/python-sdk) to learn more and get started.
 
 ### GoodData Pandas
 
@@ -31,13 +31,13 @@ Check out the GoodData Pandas [documentation](https://gooddata-pandas.readthedoc
 
 The [gooddata-pipelines](./packages/gooddata-pipelines/) package provides easy ways to manage the lifecycle of GoodData Cloud.
 
-Check out the GoodData Pipelines [documentation](https://www.gooddata.com/docs/python-sdk/latest/pipelines-overview/) to learn more and get started.
+Check out the GoodData Pipelines [documentation](https://www.gooddata.ai/docs/python-sdk/latest/pipelines-overview/) to learn more and get started.
 
 ### GoodData FlexConnect
 
 The [gooddata-flexconnect](./packages/gooddata-flexconnect) package is the foundation for writing custom FlexConnect data sources.
 
-Check out the GoodData FlexConnect [documentation](https://www.gooddata.com/docs/cloud/connect-data/create-data-sources/flexconnect/) to learn more and get started.
+Check out the GoodData FlexConnect [documentation](https://www.gooddata.ai/docs/cloud/connect-data/create-data-sources/flexconnect/) to learn more and get started.
 ### GoodData Foreign Data Wrapper
 
diff --git a/docs/assets/scss/typography.scss b/docs/assets/scss/typography.scss
index 53161e102..d57ea3a94 100644
--- a/docs/assets/scss/typography.scss
+++ b/docs/assets/scss/typography.scss
@@ -1,6 +1,6 @@
 @import "variables/variables";
 
-$font-path-base: "https://www.gooddata.com";
+$font-path-base: "https://www.gooddata.ai";
 
 @font-face {
     font-family: "dc8ebcd096d8d65d80200bbe8e045d86";
diff --git a/docs/content/en/latest/_index.md b/docs/content/en/latest/_index.md
index c1bdb8f8c..af9d45e0d 100644
--- a/docs/content/en/latest/_index.md
+++ b/docs/content/en/latest/_index.md
@@ -10,7 +10,7 @@ cascade:
     path: "/*/**"
 ---
 
-GoodData Python SDK provides a clean and convenient way to interact with the [GoodData API](https://www.gooddata.com/docs/cloud/api-and-sdk/api/) in Python applications.
+GoodData Python SDK provides a clean and convenient way to interact with the [GoodData API](https://www.gooddata.ai/docs/cloud/api-and-sdk/api/) in Python applications.
 
 Python is a popular language for working with large amounts of data and data analytics; it is for this reason that we are actively developing this SDK to let Python developers integrate the GoodData analytical engine into their own applications as seamlessly as possible, or to automate their administrative workflow.
 
@@ -111,8 +111,8 @@ You can also perform certain administration tasks:
 
 Get started with Python SDK right now by following the [Quick Start]({{< relref "getting-started#quick-start" >}}) guide.
 
-New to GoodData? Follow the [Getting Started](https://www.gooddata.com/docs/cloud/getting-started/) series of articles that include Python SDK code examples.
+New to GoodData? Follow the [Getting Started](https://www.gooddata.ai/docs/cloud/getting-started/) series of articles that include Python SDK code examples.
 
 ### Troubleshooting
 
-In case of any issues with Python SDK, feel free to reach out to us on our [community slack](https://www.gooddata.com/slack/) or create a [GitHub issue](https://github.com/gooddata/gooddata-python-sdk/issues).
+In case of any issues with Python SDK, feel free to reach out to us on our [community slack](https://www.gooddata.ai/slack/) or create a [GitHub issue](https://github.com/gooddata/gooddata-python-sdk/issues).
diff --git a/docs/content/en/latest/administration/organization/_index.md b/docs/content/en/latest/administration/organization/_index.md
index bd113486f..0cda7693c 100644
--- a/docs/content/en/latest/administration/organization/_index.md
+++ b/docs/content/en/latest/administration/organization/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage an organization.
 
-See [Manage Organizations](https://www.gooddata.com/docs/cloud/manage-deployment/set-up-organizations/manage-organizations/) to learn how organizations work in GoodData.
+See [Manage Organizations](https://www.gooddata.ai/docs/cloud/manage-deployment/set-up-organizations/manage-organizations/) to learn how organizations work in GoodData.
 
 ## Methods
 
diff --git a/docs/content/en/latest/administration/permissions/_index.md b/docs/content/en/latest/administration/permissions/_index.md
index e0f5a58f8..0efe9cb20 100644
--- a/docs/content/en/latest/administration/permissions/_index.md
+++ b/docs/content/en/latest/administration/permissions/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage workspace permissions.
 
-See [Manage Permissions](https://www.gooddata.com/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
+See [Manage Permissions](https://www.gooddata.ai/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
 
 ### Declarative Methods
 
diff --git a/docs/content/en/latest/administration/user-groups/_index.md b/docs/content/en/latest/administration/user-groups/_index.md
index 99c6c24a4..ce55ed133 100644
--- a/docs/content/en/latest/administration/user-groups/_index.md
+++ b/docs/content/en/latest/administration/user-groups/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage user groups.
 
-See [Manage Permissions](https://www.gooddata.com/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
+See [Manage Permissions](https://www.gooddata.ai/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
 
 ### Entity Methods
 
diff --git a/docs/content/en/latest/administration/users-and-user-groups/_index.md b/docs/content/en/latest/administration/users-and-user-groups/_index.md
index a2f6f46b5..a0ee0227e 100644
--- a/docs/content/en/latest/administration/users-and-user-groups/_index.md
+++ b/docs/content/en/latest/administration/users-and-user-groups/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage users and user groups together.
 
-See [Manage Permissions](https://www.gooddata.com/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
+See [Manage Permissions](https://www.gooddata.ai/docs/cloud/manage-deployment/manage-permissions/) to learn how permissions work in GoodData.
 
 ### Declarative Methods
 
diff --git a/docs/content/en/latest/administration/users/_index.md b/docs/content/en/latest/administration/users/_index.md
index 84c4e79ed..5d6b2e537 100644
--- a/docs/content/en/latest/administration/users/_index.md
+++ b/docs/content/en/latest/administration/users/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage users.
 
-See [Manage Users and UserGroups](https://www.gooddata.com/docs/cloud/manage-deployment/manage-users/) to learn how user management works in GoodData.
+See [Manage Users and UserGroups](https://www.gooddata.ai/docs/cloud/manage-deployment/manage-users/) to learn how user management works in GoodData.
 
 ### Entity Methods
 
diff --git a/docs/content/en/latest/data/data-source/_index.md b/docs/content/en/latest/data/data-source/_index.md
index 4a493c7ec..3cf550546 100644
--- a/docs/content/en/latest/data/data-source/_index.md
+++ b/docs/content/en/latest/data/data-source/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage data sources.
 
-See [Connect Data](https://www.gooddata.com/docs/cloud/connect-data/) to learn how data sources work in GoodData.
+See [Connect Data](https://www.gooddata.ai/docs/cloud/connect-data/) to learn how data sources work in GoodData.
 
 ### Entity Methods
 
diff --git a/docs/content/en/latest/data/physical-data-model/_index.md b/docs/content/en/latest/data/physical-data-model/_index.md
index 09bbca2f9..b22e3b12e 100644
--- a/docs/content/en/latest/data/physical-data-model/_index.md
+++ b/docs/content/en/latest/data/physical-data-model/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage physical data models.
 
-See [Create a Physical Data Model](https://www.gooddata.com/docs/cloud/model-data/create-pdm/) to about the physical data model in GoodData.
+See [Create a Physical Data Model](https://www.gooddata.ai/docs/cloud/model-data/create-pdm/) to learn about the physical data model in GoodData.
 
 ## Methods
 
diff --git a/docs/content/en/latest/execution/ai/_index.md b/docs/content/en/latest/execution/ai/_index.md
index c70febfb4..7b29a8bc3 100644
--- a/docs/content/en/latest/execution/ai/_index.md
+++ b/docs/content/en/latest/execution/ai/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 GoodData AI is a feature that allows you to ask questions about your data in natural language.
 
-For more information on how to use and setup GoodData AI, see the [GoodData AI documentation](https://www.gooddata.com/docs/cloud/ai/).
+For more information on how to use and set up GoodData AI, see the [GoodData AI documentation](https://www.gooddata.ai/docs/cloud/ai/).
 
 ## Methods
 
diff --git a/docs/content/en/latest/getting-started.md b/docs/content/en/latest/getting-started.md
index d9cb0293f..43850a22f 100644
--- a/docs/content/en/latest/getting-started.md
+++ b/docs/content/en/latest/getting-started.md
@@ -10,9 +10,9 @@ Start integrating GoodData into your Python application right now.
 
 1. [Install Python SDK](../installation/)
 
-1. Ensure you have a running instance of GoodData. If you just want to try things out, we recommend you sign up for a [trial of GoodData Cloud](https://www.gooddata.com/trial/).
+1. Ensure you have a running instance of GoodData. If you just want to try things out, we recommend you sign up for a [trial of GoodData Cloud](https://www.gooddata.ai/trial/).
 
-1. [Create a personal access token for GoodData API](https://www.gooddata.com/docs/cloud/getting-started/create-api-token/)
+1. [Create a personal access token for GoodData API](https://www.gooddata.ai/docs/cloud/getting-started/create-api-token/)
 
 1. Import Python SDK into your script:
 
diff --git a/docs/content/en/latest/installation.md b/docs/content/en/latest/installation.md
index 9eb7e508c..88cba7d96 100644
--- a/docs/content/en/latest/installation.md
+++ b/docs/content/en/latest/installation.md
@@ -7,7 +7,7 @@ weight: 11
 
 Before installing, ensure you are using:
 
 * Python `3.10` or newer
-* [GoodData.CN](https://www.gooddata.com/docs/cloud-native/latest/install/) or [GoodData Cloud](https://www.gooddata.com/docs/cloud/getting-started/)
+* [GoodData.CN](https://www.gooddata.ai/docs/cloud-native/latest/install/) or [GoodData Cloud](https://www.gooddata.ai/docs/cloud/getting-started/)
 * The [pip](https://pypi.org/project/pip/) package management tool
 
@@ -28,7 +28,7 @@ it is likely caused by Python, and it occurs if you have installed Python directly
 To mitigate, please install your SSL certificates in __HD -> Applications -> Python -> Install Certificates.command__.
 {{% /alert %}}
 
-To make use of the package, you need a running instance of GoodData. If you do not have GoodData yet, sign up for a [trial of GoodData Cloud](https://www.gooddata.com/trial/).
+To make use of the package, you need a running instance of GoodData. If you do not have GoodData yet, sign up for a [trial of GoodData Cloud](https://www.gooddata.ai/trial/).
 
 ### Versioning
 
 The Python SDK follows the [official Python release cycle](https://devguide.python.org/versions/).
 
@@ -44,4 +44,4 @@
 ### Troubleshooting
 
-In case of any issues with Python SDK, feel free to reach out to us on our [community slack](https://www.gooddata.com/slack/) or create a [GitHub issue](https://github.com/gooddata/gooddata-python-sdk/issues).
+In case of any issues with Python SDK, feel free to reach out to us on our [community slack](https://www.gooddata.ai/slack/) or create a [GitHub issue](https://github.com/gooddata/gooddata-python-sdk/issues).
diff --git a/docs/content/en/latest/pipelines-overview.md b/docs/content/en/latest/pipelines-overview.md
index 48e84c470..9fdd27700 100644
--- a/docs/content/en/latest/pipelines-overview.md
+++ b/docs/content/en/latest/pipelines-overview.md
@@ -4,7 +4,7 @@ linkTitle: "Pipelines Overview"
 weight: 14
 ---
 
-GoodData Pipelines contains tools for automating GoodData lifecycle management. Built on top of [GoodData Python SDK](https://www.gooddata.com/docs/python-sdk/latest/), it enables you to programmatically provision and manage workspaces, users, user groups, and their permissions.
+GoodData Pipelines contains tools for automating GoodData lifecycle management. Built on top of [GoodData Python SDK](https://www.gooddata.ai/docs/python-sdk/latest/), it enables you to programmatically provision and manage workspaces, users, user groups, and their permissions.
 
 For further information, refer to the PIPELINES section in the left navigation menu.
 
diff --git a/docs/content/en/latest/pipelines/ldm_extension/_index.md b/docs/content/en/latest/pipelines/ldm_extension/_index.md
index 2251b01a8..2b5ff949f 100644
--- a/docs/content/en/latest/pipelines/ldm_extension/_index.md
+++ b/docs/content/en/latest/pipelines/ldm_extension/_index.md
@@ -5,9 +5,9 @@ weight: 3
 no_list: true
 ---
 
-Child workspaces inherit [Logical Data Model](https://www.gooddata.com/docs/cloud/model-data/concepts/logical-data-model/) (LDM) from their parent. You can use GoodData Pipelines to extend child workspace's LDM with extra datasets specific to the tenant requirements.
+Child workspaces inherit [Logical Data Model](https://www.gooddata.ai/docs/cloud/model-data/concepts/logical-data-model/) (LDM) from their parent. You can use GoodData Pipelines to extend child workspace's LDM with extra datasets specific to the tenant requirements.
 
-{{% alert color="info" %}} See [Set Up Multiple Tenants](https://www.gooddata.com/docs/cloud/workspaces/) to learn more about leveraging multitenancy in GoodData.{{% /alert %}}
+{{% alert color="info" %}} See [Set Up Multiple Tenants](https://www.gooddata.ai/docs/cloud/workspaces/) to learn more about leveraging multitenancy in GoodData.{{% /alert %}}
 
 This documentation operates with terms like *custom datasets* and *custom fields*. Within this context, *custom* refers to extension of the LDM beyond inherited datasets.
 
diff --git a/docs/content/en/latest/pipelines/provisioning/user_data_filters.md b/docs/content/en/latest/pipelines/provisioning/user_data_filters.md
index d127f3b5b..1abe3f37b 100644
--- a/docs/content/en/latest/pipelines/provisioning/user_data_filters.md
+++ b/docs/content/en/latest/pipelines/provisioning/user_data_filters.md
@@ -10,7 +10,7 @@ UDFs are currently managed only in full load mode, meaning your input overwrites
 
 This tool currently supports only the `{column} IN (udf_value)` MAQL pattern. UDFs using more complex MAQL expressions must be set up manually.
 
-{{% alert color="info" %}} Visit [Set Up Data Filters for Users](https://www.gooddata.com/docs/cloud/workspaces/user-data-filters/) to learn more about User Data Filters setup and use cases in GoodData. {{% /alert %}}
+{{% alert color="info" %}} Visit [Set Up Data Filters for Users](https://www.gooddata.ai/docs/cloud/workspaces/user-data-filters/) to learn more about User Data Filters setup and use cases in GoodData. {{% /alert %}}
 
 ## Usage
 
@@ -47,7 +47,7 @@ The model expects the following fields:
 | udf_value | Value for the UDF. |
 
 {{% alert color="info" title="Note on IDs"%}}
-Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.com/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
+Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.ai/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
 {{% /alert %}}
 
 Add the model to your imports and create validated instances:
 
diff --git a/docs/content/en/latest/pipelines/provisioning/user_groups.md b/docs/content/en/latest/pipelines/provisioning/user_groups.md
index 194b1198e..7d8ad1525 100644
--- a/docs/content/en/latest/pipelines/provisioning/user_groups.md
+++ b/docs/content/en/latest/pipelines/provisioning/user_groups.md
@@ -38,7 +38,7 @@ The models expect the following fields:
 - _**is_active**:_ Deletion flag. Present only in the IncrementalLoad models.
 
 {{% alert color="info" title="Note on IDs"%}}
-Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.com/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
+Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.ai/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
 {{% /alert %}}
 
 Use the appropriate model to validate your data:
 
diff --git a/docs/content/en/latest/pipelines/provisioning/users.md b/docs/content/en/latest/pipelines/provisioning/users.md
index d2ffa0fab..ed7146570 100644
--- a/docs/content/en/latest/pipelines/provisioning/users.md
+++ b/docs/content/en/latest/pipelines/provisioning/users.md
@@ -39,7 +39,7 @@ The models expect the following fields:
 - _**is_active**:_ Deletion flag. Present only in the IncrementalLoad models.
 
 {{% alert color="info" title="Note on IDs"%}}
-Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.com/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
+Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.ai/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
 {{% /alert %}}
 
 Use the appropriate model to validate your data:
 
diff --git a/docs/content/en/latest/pipelines/provisioning/workspace-permissions.md b/docs/content/en/latest/pipelines/provisioning/workspace-permissions.md
index b5caea785..660f778f6 100644
--- a/docs/content/en/latest/pipelines/provisioning/workspace-permissions.md
+++ b/docs/content/en/latest/pipelines/provisioning/workspace-permissions.md
@@ -4,7 +4,7 @@ linkTitle: "Workspace Permissions"
 weight: 5
 ---
 
-Workspace permission provisioning allows you to create, update, or delete user permissions. See [Manage Workspace Permissions](https://www.gooddata.com/docs/cloud/manage-organization/manage-permissions/set-permissions-for-workspace/) to learn more about workspace permissions in GoodData Cloud.
+Workspace permission provisioning allows you to create, update, or delete user permissions. See [Manage Workspace Permissions](https://www.gooddata.ai/docs/cloud/manage-organization/manage-permissions/set-permissions-for-workspace/) to learn more about workspace permissions in GoodData Cloud.
 
 You can provision workspace permissions using full or incremental load methods. Each of these methods requires a specific input type.
 
@@ -37,7 +37,7 @@ The models expect the following fields:
 - _**is_active**:_ Deletion flag. Present only in the IncrementalLoad models.
 
 {{% alert color="info" title="Note on IDs"%}}
-Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.com/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
+Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.ai/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
 {{% /alert %}}
 
 Use the appropriate model to validate your data:
 
diff --git a/docs/content/en/latest/pipelines/provisioning/workspaces.md b/docs/content/en/latest/pipelines/provisioning/workspaces.md
index b3308b5e8..04383c3fb 100644
--- a/docs/content/en/latest/pipelines/provisioning/workspaces.md
+++ b/docs/content/en/latest/pipelines/provisioning/workspaces.md
@@ -7,7 +7,7 @@ weight: 1
 
 Workspace provisioning allows you to create, update or delete child workspaces.
 
 {{% alert color="info" title="Multitenancy in GoodData"%}}
-See [Multitenancy: One Platform, Many Customers](https://www.gooddata.com/resources/multitenancy-product-tour/) to learn more about how to leverage child workspaces in GoodData.
+See [Multitenancy: One Platform, Many Customers](https://www.gooddata.ai/resources/multitenancy-product-tour/) to learn more about how to leverage child workspaces in GoodData.
 {{% /alert %}}
 
 You can provision child workspaces using full or incremental load methods. Each of these methods requires a specific input type.
 
@@ -42,7 +42,7 @@ The models expect the following fields:
 - _**is_active**:_ Deletion flag. Present only in the IncrementalLoad models.
 
 {{% alert color="info" title="Note on IDs"%}}
-Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.com/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
+Each ID can only contain allowed characters. See [Workspace Object Identification](https://www.gooddata.ai/docs/cloud/create-workspaces/objects-identification/) to learn more about object identifiers.
 {{% /alert %}}
 
 Use the appropriate model to validate your data:
 
@@ -98,7 +98,7 @@
 If you want to apply Workspace Data Filters to a child workspace, the filter must be set up on the parent workspace before you run the provisioning.
 
 {{% alert color="info" title="Workspace Data Filters"%}}
-See [Set Up Data Filters in Workspaces](https://www.gooddata.com/docs/cloud/workspaces/workspace-data-filters/) to learn how workspace data filters work in GoodData.
+See [Set Up Data Filters in Workspaces](https://www.gooddata.ai/docs/cloud/workspaces/workspace-data-filters/) to learn how workspace data filters work in GoodData.
 {{% /alert %}}
 
 ## Examples
 
diff --git a/docs/content/en/latest/workspace-content/logical-data-model/_index.md b/docs/content/en/latest/workspace-content/logical-data-model/_index.md
index fc008e527..46c856869 100644
--- a/docs/content/en/latest/workspace-content/logical-data-model/_index.md
+++ b/docs/content/en/latest/workspace-content/logical-data-model/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage logical data models.
 
-See [Create a Logical Data Model](https://www.gooddata.com/docs/cloud/model-data/create-ldm/) to learn abour logical data models in GoodData.
+See [Create a Logical Data Model](https://www.gooddata.ai/docs/cloud/model-data/create-ldm/) to learn about logical data models in GoodData.
 
 ## Methods
 
diff --git a/docs/content/en/latest/workspace/workspace-data-filters/_index.md b/docs/content/en/latest/workspace/workspace-data-filters/_index.md
index 166943118..b9d54008d 100644
--- a/docs/content/en/latest/workspace/workspace-data-filters/_index.md
+++ b/docs/content/en/latest/workspace/workspace-data-filters/_index.md
@@ -7,7 +7,7 @@ no_list: true
 
 Manage workspace data filters.
 
-See [Set Up Data Filters in Workspaces](https://www.gooddata.com/docs/cloud/manage-deployment/manage-workspaces/workspace-data-filters/) to learn how workspace data filters work in GoodData.
+See [Set Up Data Filters in Workspaces](https://www.gooddata.ai/docs/cloud/manage-deployment/manage-workspaces/workspace-data-filters/) to learn how workspace data filters work in GoodData.
 
 ## Methods
 
diff --git a/docs/layouts/404.html b/docs/layouts/404.html
index 1a4eac835..8c932e12f 100644
--- a/docs/layouts/404.html
+++ b/docs/layouts/404.html
@@ -20,7 +20,7 @@

 But you can try:
 
 {{ end }}
-Where did it go?
+Where did it go?
 
@@ -43,7 +43,7 @@
 
 But you can try:
 
-Where did it go?
+Where did it go?

diff --git a/docs/layouts/partials/favicons.html b/docs/layouts/partials/favicons.html
index bfb01a80c..ea04577c0 100644
--- a/docs/layouts/partials/favicons.html
+++ b/docs/layouts/partials/favicons.html
@@ -1,2 +1,2 @@
diff --git a/docs/layouts/partials/footer.html b/docs/layouts/partials/footer.html
index 1169a2ab5..d16320ffa 100644
--- a/docs/layouts/partials/footer.html
+++ b/docs/layouts/partials/footer.html
@@ -1,4 +1,4 @@
-{{ $footer := getJSON "https://www.gooddata.com/learn-assets/js/footer-definition.json" }}
+{{ $footer := getJSON "https://www.gooddata.ai/learn-assets/js/footer-definition.json" }}