From f015e7fc59a1f0e09d3d6e510e7717086d6a6735 Mon Sep 17 00:00:00 2001
From: Kenio de Souza
Date: Mon, 8 Dec 2025 16:07:23 -0300
Subject: [PATCH] fix(): Adjust the database structure information returned
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../services/log/log_show_database_service.py | 152 ++++++++++++++++--
 1 file changed, 138 insertions(+), 14 deletions(-)

diff --git a/packages/v1/administrativo/services/log/log_show_database_service.py b/packages/v1/administrativo/services/log/log_show_database_service.py
index c53f58d..1faabb9 100644
--- a/packages/v1/administrativo/services/log/log_show_database_service.py
+++ b/packages/v1/administrativo/services/log/log_show_database_service.py
@@ -6,6 +6,126 @@ from typing import Dict, Any, List
 from packages.v1.administrativo.schemas.log_schema import LogClientIdSchema
 from packages.v1.administrativo.actions.log.log_show_database_action import ShowDatabaseAction
 
+# --- FILTER CONSTANT (reintroduced) ---
+# Set an UPPERCASE substring to ignore tables or elements that must not be compared; leave empty to disable the filter.
+FILTER_SUBSTRING = ""
+
+def is_ignored(name: str) -> bool:
+    """Check whether the name contains the filter substring (case-insensitive)."""
+    if name is None or not FILTER_SUBSTRING:
+        return False
+    # Uppercase comparison keeps the check case-insensitive; the guard above stops an empty filter from matching every name
+    return FILTER_SUBSTRING in str(name).upper()
+
+# --- HELPER FUNCTION: KEY NORMALIZATION (reintroduced) ---
+
+def _normalize_keys_to_upper(data: Any) -> Any:
+    """
+    Recursively convert every dictionary key to UPPERCASE.
+    """
+    if isinstance(data, dict):
+        new_dict = {}
+        for k, v in data.items():
+            new_k = k.upper() if isinstance(k, str) else k
+            new_dict[new_k] = _normalize_keys_to_upper(v)
+        return new_dict
+    elif isinstance(data, list):
+        return [_normalize_keys_to_upper(item) for item in data]
+    else:
+        return data
+
+# --- HELPER FUNCTION: SOURCE_CODE REMOVAL (reintroduced) ---
+
+def _remove_source_code(item: Dict) -> Dict:
+    """Remove the 'SOURCE_CODE' field from a dictionary item (after normalization)."""
+    item_copy = item.copy()
+    if 'SOURCE_CODE' in item_copy:
+        del item_copy['SOURCE_CODE']
+    return item_copy
+
+# --- HELPER FUNCTION: ELEMENT IDENTIFIER (reintroduced) ---
+def get_element_identifier(item):
+    """Return the main identifier of a schema element (assumes UPPERCASE keys)."""
+    return (
+        item.get('TABLE_NAME') or
+        item.get('CONSTRAINT_NAME') or
+        item.get('INDEX_NAME') or
+        item.get('VIEW_NAME') or
+        item.get('PROCEDURE_NAME') or
+        item.get('TRIGGER_NAME')
+    )
+
+# --- COMPARISON FUNCTION: STANDARD - CLIENT (reintroduced) ---
+
+def find_standard_only_elements(standard_structure: Dict[str, Any], client_structure: Dict[str, Any]) -> Dict[str, Any]:
+    """
+    Identify elements present in the STANDARD structure but missing from the CLIENT structure.
+    The result represents the 'inconsistencies' (what the client is missing).
+    """
+    standard_only = {}
+
+    # 1. Tables
+    client_tables_names = {t.get('TABLE_NAME') for t in client_structure.get('TABLES', [])
+                           if t.get('TABLE_NAME') and not is_ignored(t.get('TABLE_NAME'))}
+
+    standard_fields_by_table: Dict[str, List[Dict]] = {}
+    for field_item in standard_structure.get('TABLES', []):
+        table_name = field_item.get('TABLE_NAME')
+        if table_name and not is_ignored(table_name):
+            if table_name not in standard_fields_by_table:
+                standard_fields_by_table[table_name] = []
+            standard_fields_by_table[table_name].append(field_item)
+
+    standard_tables_names = set(standard_fields_by_table.keys())
+    unique_table_names = standard_tables_names - client_tables_names
+
+    standard_only_tables = []
+    for table_name in unique_table_names:
+        standard_only_tables.append({
+            "TABLE_NAME": table_name,
+            "FIELDS": [_remove_source_code(f) for f in standard_fields_by_table[table_name]]
+        })
+
+    if standard_only_tables:
+        standard_only["TABELAS_UNICAS"] = standard_only_tables
+
+    # 2. Other elements (PKs, FKs, etc.)
+    elements_to_check = ['PRIMARY_KEYS', 'FOREIGN_KEYS', 'INDEXES', 'VIEWS', 'PROCEDURES', 'TRIGGERS']
+
+    for element_key in elements_to_check:
+        standard_elements = standard_structure.get(element_key, [])
+        client_elements = client_structure.get(element_key, [])
+
+        # Map of the standard items (to recover the cleaned object later)
+        standard_set_normalized = {}
+        for item in standard_elements:
+            identifier = get_element_identifier(item)
+            if identifier and not is_ignored(identifier):
+                cleaned_item = _remove_source_code(item)
+                json_str = json.dumps(cleaned_item, sort_keys=True)
+                standard_set_normalized[json_str] = cleaned_item
+
+        standard_set = set(standard_set_normalized.keys())
+
+        # Client set
+        client_elements_filtered = [item for item in client_elements
+                                    if not is_ignored(get_element_identifier(item))]
+        client_set = {json.dumps(_remove_source_code(item), sort_keys=True)
+                      for item in client_elements_filtered}
+
+        # Difference: standard - client
+        unique_standard_elements_str = standard_set - client_set
+
+        if unique_standard_elements_str:
+            standard_only[element_key + "_UNICOS"] = [
+                standard_set_normalized[json_str] for json_str in unique_standard_elements_str
+            ]
+
+    return standard_only
+
+
+# --- MAIN SERVICE CLASS (adjusted) ---
+
 class ShowDatabaseService:
 
     def execute(self, client_id_schema: LogClientIdSchema) -> Dict[str, Any]:
@@ -16,10 +136,9 @@ class ShowDatabaseService:
 
         if dados_json and dados_json.get("file"):
 
-            # Extract and load the JSON data from the 'file' field
             dados_json = json.loads(dados_json["file"])
 
-            # Extract the relevant data structures
+            # --- 1. Structure and metadata extraction ---
             standard_structure_json_string = dados_json.get("standard_structure_json")
             standard_structure_data: Dict[str, Any] = {}
             if standard_structure_json_string:
@@ -28,23 +147,27 @@ class ShowDatabaseService:
                 except json.JSONDecodeError:
                     pass
 
-            # Extract the client structure from the JSON
             database_data = dados_json.get("database", {})
             client_structure: Dict[str, Any] = database_data.get("structure", {})
 
-            # Keep the 'partition' field separate from the other keys
-            partition_info = database_data.get("partition", {})
-
-            # Keep the file_size_mb field separate from the other keys
+            partition_info = database_data.get("partition", {})
             file_size_mb = database_data.get("file_size_mb", None)
-
-            # Keep the db_accessible field separate from the other keys
             db_accessible = database_data.get("db_accessible", None)
+            last_modified = database_data.get("last_modified", None)
+
+            # --- 2. Normalization and comparison ---
+
+            # Normalize the keys to UPPERCASE in both structures
+            standard_structure_data = _normalize_keys_to_upper(standard_structure_data)
+            client_structure = _normalize_keys_to_upper(client_structure)
 
-            # Keep the db_accessible field separate from the other keys
-            last_modified = database_data.get("last_modified", None)
+            # Find elements exclusive to the standard (standard - client)
+            inconsistencies_data = find_standard_only_elements(
+                standard_structure_data,
+                client_structure
+            )
 
-            # Build the final JSON
+            # --- 3. Build the final JSON ---
             data = {
                 "cns": dados_json.get("cns"),
                 "cartorio": dados_json.get("cartorio"),
@@ -54,11 +177,12 @@
                     "partition": partition_info,
                     "file_size_mb": file_size_mb,
                     "db_accessible": db_accessible,
-                    "last_modified": last_modified
+                    "last_modified": last_modified,
+                    # NEW FIELD: returns what is missing on the client side
+                    "inconsistencies": inconsistencies_data
                 }
             }
 
-            # Return the JSON with the data
             return data
 
         else:
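
Reviewer note (not part of the patch): the extra guard added above in
is_ignored() fixes a real bug. In Python, "" is a substring of every
string, so with the default empty FILTER_SUBSTRING the original check
FILTER_SUBSTRING in str(name).upper() was True for every name; every
table and element was filtered out and the comparison silently reported
no inconsistencies. A quick illustrative check:

    >>> "" in "ANY_TABLE"
    True
    >>> is_ignored(None)          # None is never ignored
    False
    >>> is_ignored("ANY_TABLE")   # empty filter now disables filtering
    False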
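Reviewer note (not part of the patch): a minimal sketch of how the new
comparison behaves on toy input. The structures and the COLUMN_NAME
field key below are invented for illustration, and the import assumes
the repository root is on sys.path; only the helper names, the
TABLES/TABELAS_UNICAS keys, and the module path come from this patch.

    from packages.v1.administrativo.services.log.log_show_database_service import (
        _normalize_keys_to_upper,
        find_standard_only_elements,
    )

    # Payloads may arrive with lowercase keys; normalize them first,
    # exactly as ShowDatabaseService.execute now does.
    standard = _normalize_keys_to_upper({"tables": [
        {"table_name": "USERS", "column_name": "ID", "source_code": "..."},
        {"table_name": "AUDIT", "column_name": "TS"},
    ]})
    client = _normalize_keys_to_upper({"tables": [
        {"table_name": "USERS", "column_name": "ID"},
    ]})

    result = find_standard_only_elements(standard, client)
    # AUDIT exists only in the standard structure, so it is reported
    # under TABELAS_UNICAS with its fields (SOURCE_CODE stripped):
    # {'TABELAS_UNICAS': [{'TABLE_NAME': 'AUDIT',
    #                      'FIELDS': [{'TABLE_NAME': 'AUDIT',
    #                                  'COLUMN_NAME': 'TS'}]}]}
    print(result)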