From 40c83b3160502052001fc67f4340b533588d0cb5 Mon Sep 17 00:00:00 2001
From: Asim Bera
Date: Wed, 3 Sep 2025 10:48:47 +0530
Subject: [PATCH 01/14] feat: integrate DSA BAR tools for disk file system
 management

- Add new BAR tools module for DSA (Data Stream Architecture) operations
- Implement disk file system management: list, config, delete_all, remove
- Add DSA REST API client with synchronous requests
- Create bar_manageDsaDiskFileSystemOperations MCP tool
- Add BAR profile configuration in profiles.yml
- Update module loader to support BAR tools
- Add demo script and test cases for BAR functionality
- Update dependencies: add requests for synchronous HTTP calls
- Convert from async/await to a synchronous pattern, matching the DBA tools

New files:
- src/teradata_mcp_server/tools/bar/ (complete module)
- demo_bar_tools.py (demo script)
- tests/cases/bar_test_cases.json (test cases)

Key features:
- List DSA disk file systems with paths and max files
- Configure new file systems with custom settings
- Remove individual file systems by path
- Delete all file systems for cleanup
- Full error handling and logging
- MCP-compatible JSON response format
---
 README.md                                     |   2 +
 demo_bar_tools.py                             |  47 ++
 pyproject.toml                                |   2 +
 src/teradata_mcp_server/config/profiles.yml   |  12 +-
 src/teradata_mcp_server/tools/bar/README.md   |  48 ++
 src/teradata_mcp_server/tools/bar/__init__.py |   2 +
 .../tools/bar/bar_objects.yml                 |   9 +
 .../tools/bar/bar_resources.py                |  11 +
 .../tools/bar/bar_tools.py                    | 534 ++++++++++++++++++
 .../tools/bar/dsa_client.py                   | 208 +++++++
 .../tools/module_loader.py                    |   1 +
 tests/cases/bar_test_cases.json               |  33 ++
 12 files changed, 908 insertions(+), 1 deletion(-)
 create mode 100644 demo_bar_tools.py
 create mode 100644 src/teradata_mcp_server/tools/bar/README.md
 create mode 100644 src/teradata_mcp_server/tools/bar/__init__.py
 create mode 100644 src/teradata_mcp_server/tools/bar/bar_objects.yml
 create mode 100644 src/teradata_mcp_server/tools/bar/bar_resources.py
 create mode 100644 src/teradata_mcp_server/tools/bar/bar_tools.py
 create mode 100644 src/teradata_mcp_server/tools/bar/dsa_client.py
 create mode 100644 tests/cases/bar_test_cases.json

diff --git a/README.md b/README.md
index 8aebd28..e11200a 100644
--- a/README.md
+++ b/README.md
@@ -25,6 +25,8 @@ We are providing groupings of tools and associated helpful prompts to support al
 - **DBA** tools, prompts and resources to facilitate your platform administration tasks:
   - [DBA Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/dba/README.md)
   - [Security Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/sec/README.md)
+- **BAR** tools, prompts and resources for backup and recovery operations:
+  - [BAR Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/bar/README.md) to manage DSA disk file systems and backup operations.
## Getting Started diff --git a/demo_bar_tools.py b/demo_bar_tools.py new file mode 100644 index 0000000..dafc5e5 --- /dev/null +++ b/demo_bar_tools.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +""" +Demo script showing the BAR tools integration with Teradata MCP Server +""" + +import os +import asyncio +import json + +# Set up DSA environment variables +os.environ['DSA_HOST'] = 'pe06-dsc-0015.labs.teradata.com' +os.environ['DSA_PORT'] = '9090' +os.environ['DSA_PROTOCOL'] = 'https' +os.environ['DSA_VERIFY_SSL'] = 'false' + +from src.teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageDsaDiskFileSystemOperations + +def demo_bar_tools(): + """Demonstrate BAR tools functionality""" + print("šŸš€ BAR Tools Integration Demo") + print("=" * 60) + print(f"DSA Server: https://{os.environ['DSA_HOST']}:{os.environ['DSA_PORT']}") + print(f"SSL Verify: {os.environ['DSA_VERIFY_SSL']}") + print("=" * 60) + + # Test 1: List disk file systems + print("\nšŸ“‹ Test 1: List Disk File Systems") + print("-" * 40) + + try: + result = handle_bar_manageDsaDiskFileSystemOperations( + conn=None, + operation='list' + ) + + print(f"āœ… Response received successfully!") + print(f"šŸ“Š Result type: {type(result)}") + print("\nšŸ“„ Full Response:") + print(result) + + except Exception as e: + print(f"āŒ Error: {e}") + import traceback + traceback.print_exc() + +if __name__ == "__main__": + demo_bar_tools() diff --git a/pyproject.toml b/pyproject.toml index ae5be7c..b09be45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,8 @@ dependencies = [ "python-dotenv>=1.0.0", "PyYAML>=6.0.0", "sqlalchemy>=2.0.0,<3.0.0", + "httpx>=0.24.0", + "requests>=2.25.0", ] [project.optional-dependencies] diff --git a/src/teradata_mcp_server/config/profiles.yml b/src/teradata_mcp_server/config/profiles.yml index 2b6f407..bf3977a 100644 --- a/src/teradata_mcp_server/config/profiles.yml +++ b/src/teradata_mcp_server/config/profiles.yml @@ -53,4 +53,14 @@ sales: prompt: - sales_* resource: - - sales_* \ No newline at end of file + - sales_* + +bar: + tool: + - ^bar_* + - ^base_readQuery$ + - ^base_databaseList$ + prompt: + - ^bar_* + resource: + - ^bar_* \ No newline at end of file diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md new file mode 100644 index 0000000..485c79b --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -0,0 +1,48 @@ +# BAR (Backup and Recovery) Tools + +**Dependencies** + +- DSA REST API access +- httpx for async HTTP requests +- Environment variables for DSA connection configuration + +**BAR** tools: + +- bar_manageDsaDiskFileSystemOperations - Unified tool for managing DSA disk file system configurations + +## Configuration + +The BAR tools require the following environment variables for DSA connection: + +- `DSA_BASE_URL` - Base URL for DSA API (default: https://localhost:9090/) +- `DSA_USERNAME` - Username for DSA authentication (default: admin) +- `DSA_PASSWORD` - Password for DSA authentication (default: admin) +- `DSA_VERIFY_SSL` - Whether to verify SSL certificates (default: true) +- `DSA_CONNECTION_TIMEOUT` - Request timeout in seconds (default: 30) + +## Available Operations + +### bar_manageDsaDiskFileSystemOperations + +This unified tool handles all DSA disk file system operations: + +**Operations:** +- `list` - List all configured disk file systems +- `config` - Configure a new disk file system with specified path and max files +- `delete_all` - Remove all file system configurations (use with caution) +- `remove` - 
Remove a specific file system configuration by path + +**Examples:** +- List file systems: `{"operation": "list"}` +- Add new file system: `{"operation": "config", "file_system_path": "/backup/primary", "max_files": 1000}` +- Remove file system: `{"operation": "remove", "file_system_path": "/old/backup/path"}` +- Delete all: `{"operation": "delete_all"}` + +## Notes + +- File systems must exist and be accessible before configuration +- Removal operations will fail if file systems are in use by backup operations +- Always verify file system availability before configuration +- Check dependencies before removing file systems + +[Return to Main README](../../../../README.md) diff --git a/src/teradata_mcp_server/tools/bar/__init__.py b/src/teradata_mcp_server/tools/bar/__init__.py new file mode 100644 index 0000000..a8c597a --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/__init__.py @@ -0,0 +1,2 @@ +from .bar_resources import * +from .bar_tools import * diff --git a/src/teradata_mcp_server/tools/bar/bar_objects.yml b/src/teradata_mcp_server/tools/bar/bar_objects.yml new file mode 100644 index 0000000..c752b1d --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/bar_objects.yml @@ -0,0 +1,9 @@ +# BAR (Backup and Recovery) Objects Configuration +# This file contains object definitions for BAR tools, prompts, and resources + +# Note: The main BAR tools are implemented in bar_tools.py as they require +# complex HTTP API interactions with DSA systems that cannot be represented +# as simple SQL queries. + +# Future BAR-related prompts and simple tools can be defined here +# following the standard YAML object format. diff --git a/src/teradata_mcp_server/tools/bar/bar_resources.py b/src/teradata_mcp_server/tools/bar/bar_resources.py new file mode 100644 index 0000000..08bb5f5 --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/bar_resources.py @@ -0,0 +1,11 @@ +""" +BAR (Backup and Recovery) Resources for Teradata DSA MCP Server +Provides resources and guidance for DSA backup operations +""" + +import logging + +logger = logging.getLogger("teradata_mcp_server") + +# Resources will be added here in the future +# For now, this module provides the basic structure diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py new file mode 100644 index 0000000..4864f52 --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -0,0 +1,534 @@ +""" +BAR (Backup and Recovery) Tools for Teradata DSA MCP Server +Provides disk file system ma # First, get the existing file systems + try: + existing_response = dsa_client._make_request( + method="GET", + endpoint="dsa/components/backup-applications/disk-file-system" + )nt operations +""" + +import logging +from typing import Optional + +from teradata_mcp_server.tools.utils import create_response +from .dsa_client import dsa_client + +logger = logging.getLogger("teradata_mcp_server") + + +#------------------ Disk File System Operations ------------------# + +def list_disk_file_systems() -> str: + """List all configured disk file systems in DSA + + Lists all disk file systems configured for backup operations, showing: + - File system paths + - Maximum files allowed per file system + - Configuration status + + Returns: + Formatted summary of all disk file systems with their configurations + """ + try: + logger.info("Listing disk file systems via DSA API") + + # Make request to DSA API + response = dsa_client._make_request( + method="GET", + endpoint="dsa/components/backup-applications/disk-file-system" 
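+            # Same endpoint serves GET (list), POST (configure), and DELETE (remove all)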
+ ) + + logger.debug(f"DSA API response: {response}") + + results = [] + results.append("šŸ—‚ļø DSA Disk File Systems") + results.append("=" * 50) + + if response.get('status') == 'LIST_DISK_FILE_SYSTEMS_SUCCESSFUL': + file_systems = response.get('fileSystems', []) + + if file_systems: + results.append(f"šŸ“Š Total File Systems: {len(file_systems)}") + results.append("") + + for i, fs in enumerate(file_systems, 1): + results.append(f"šŸ—‚ļø File System #{i}") + results.append(f" šŸ“ Path: {fs.get('fileSystemPath', 'N/A')}") + results.append(f" šŸ“„ Max Files: {fs.get('maxFiles', 'N/A')}") + results.append("") + else: + results.append("šŸ“‹ No disk file systems configured") + + results.append("=" * 50) + results.append(f"āœ… Status: {response.get('status')}") + results.append(f"šŸ” Found Component: {response.get('foundComponent', False)}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + else: + results.append(f"āŒ Failed to list disk file systems") + results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}") + if response.get('validationlist'): + validation = response['validationlist'] + if validation.get('serverValidationList'): + for error in validation['serverValidationList']: + results.append(f"āŒ Error: {error.get('message', 'Unknown error')}") + + return "\n".join(results) + + except Exception as e: + logger.error(f"Failed to list disk file systems: {str(e)}") + return f"āŒ Error listing disk file systems: {str(e)}" + + +def config_disk_file_system(file_system_path: str, max_files: int) -> str: + """Configure a disk file system for DSA backup operations + + Adds a new disk file system to the existing list or updates an existing one. + This allows DSA to use the file system for backup storage operations. + + Args: + file_system_path: Full path to the file system directory (e.g., "/var/opt/teradata/backup") + max_files: Maximum number of files allowed in this file system (must be > 0) + + Returns: + Formatted result of the configuration operation with status and any validation messages + """ + try: + logger.info(f"Configuring disk file system: {file_system_path} with max files: {max_files}") + + # First, get the existing file systems + try: + existing_response = dsa_client._make_request( + method="GET", + endpoint="dsa/components/backup-applications/disk-file-system" + ) + + existing_file_systems = [] + if existing_response.get('status') == 'LIST_DISK_FILE_SYSTEMS_SUCCESSFUL': + existing_file_systems = existing_response.get('fileSystems', []) + logger.info(f"Found {len(existing_file_systems)} existing file systems") + else: + logger.info("No existing file systems found or unable to retrieve them") + + except Exception as e: + logger.warning(f"Could not retrieve existing file systems: {e}") + existing_file_systems = [] + + # Check if the new file system path already exists and update it, or add it + file_systems_to_configure = [] + path_exists = False + + for fs in existing_file_systems: + if fs.get('fileSystemPath') == file_system_path: + # Update existing file system + file_systems_to_configure.append({ + "fileSystemPath": file_system_path, + "maxFiles": max_files + }) + path_exists = True + logger.info(f"Updating existing file system: {file_system_path}") + else: + # Keep existing file system unchanged + file_systems_to_configure.append(fs) + + # If path doesn't exist, add the new file system + if not path_exists: + file_systems_to_configure.append({ + "fileSystemPath": file_system_path, + "maxFiles": max_files + }) + logger.info(f"Adding new file 
system: {file_system_path}") + + # Prepare request data with all file systems (existing + new/updated) + request_data = { + "fileSystems": file_systems_to_configure + } + + logger.info(f"Configuring {len(file_systems_to_configure)} file systems total") + + # Make request to DSA API + response = dsa_client._make_request( + method="POST", + endpoint="dsa/components/backup-applications/disk-file-system", + data=request_data + ) + + logger.debug(f"DSA API response: {response}") + + results = [] + results.append("šŸ—‚ļø DSA Disk File System Configuration") + results.append("=" * 50) + results.append(f"šŸ“ File System Path: {file_system_path}") + results.append(f"šŸ“„ Max Files: {max_files}") + results.append(f"šŸ“Š Total File Systems: {len(file_systems_to_configure)}") + results.append(f"šŸ”„ Operation: {'Update' if path_exists else 'Add'}") + results.append("") + + if response.get('status') == 'CONFIG_DISK_FILE_SYSTEM_SUCCESSFUL': + results.append("āœ… Disk file system configured successfully") + results.append(f"šŸ“Š Status: {response.get('status')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + else: + results.append("āŒ Failed to configure disk file system") + results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + # Show validation errors if any + if response.get('validationlist'): + validation = response['validationlist'] + results.append("") + results.append("šŸ” Validation Details:") + + if validation.get('serverValidationList'): + for error in validation['serverValidationList']: + results.append(f"āŒ Server Error: {error.get('message', 'Unknown error')}") + results.append(f" Code: {error.get('code', 'N/A')}") + results.append(f" Status: {error.get('valStatus', 'N/A')}") + + if validation.get('clientValidationList'): + for error in validation['clientValidationList']: + results.append(f"āŒ Client Error: {error.get('message', 'Unknown error')}") + + results.append("") + results.append("=" * 50) + results.append("āœ… Disk file system configuration operation completed") + + return "\n".join(results) + + except Exception as e: + logger.error(f"Failed to configure disk file system: {str(e)}") + return f"āŒ Error configuring disk file system '{file_system_path}': {str(e)}" + + +def delete_disk_file_systems() -> str: + """Delete all disk file system configurations from DSA + + Removes all disk file system configurations from DSA. This operation will fail + if any file systems are currently in use by backup operations or file target groups. + + Returns: + Formatted result of the deletion operation with status and any validation messages + + Warning: + This operation removes ALL disk file system configurations. Make sure no + backup operations or file target groups are using these file systems. 
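+
+    Example (illustrative; requires a reachable DSA server):
+        print(delete_disk_file_systems())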
+ """ + try: + logger.info("Deleting all disk file system configurations via DSA API") + + # Make request to DSA API + response = dsa_client._make_request( + method="DELETE", + endpoint="dsa/components/backup-applications/disk-file-system" + ) + + logger.debug(f"DSA API response: {response}") + + results = [] + results.append("šŸ—‚ļø DSA Disk File System Deletion") + results.append("=" * 50) + + if response.get('status') == 'DELETE_COMPONENT_SUCCESSFUL': + results.append("āœ… All disk file systems deleted successfully") + results.append(f"šŸ“Š Status: {response.get('status')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + else: + results.append("āŒ Failed to delete disk file systems") + results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + # Show validation errors if any + if response.get('validationlist'): + validation = response['validationlist'] + results.append("") + results.append("šŸ” Validation Details:") + + if validation.get('serverValidationList'): + for error in validation['serverValidationList']: + results.append(f"āŒ Server Error: {error.get('message', 'Unknown error')}") + results.append(f" Code: {error.get('code', 'N/A')}") + results.append(f" Status: {error.get('valStatus', 'N/A')}") + + if validation.get('clientValidationList'): + for error in validation['clientValidationList']: + results.append(f"āŒ Client Error: {error.get('message', 'Unknown error')}") + + # If deletion failed due to dependencies, provide guidance + if any('in use by' in error.get('message', '') for error in validation.get('serverValidationList', [])): + results.append("") + results.append("šŸ’” Helpful Notes:") + results.append(" • Remove all backup jobs using these file systems first") + results.append(" • Delete any file target groups that reference these file systems") + results.append(" • Use list_disk_file_systems() to see current configurations") + + results.append("") + results.append("=" * 50) + results.append("āœ… Disk file system deletion operation completed") + + return "\n".join(results) + + except Exception as e: + logger.error(f"Failed to delete disk file systems: {str(e)}") + return f"āŒ Error deleting disk file systems: {str(e)}" + + +def remove_disk_file_system(file_system_path: str) -> str: + """Remove a specific disk file system from DSA configuration + + Removes a specific disk file system from the existing list by reconfiguring + the remaining file systems. This operation will fail if the file system is + currently in use by backup operations or file target groups. + + Args: + file_system_path: Full path to the file system directory to remove (e.g., "/var/opt/teradata/backup") + + Returns: + Formatted result of the removal operation with status and any validation messages + + Warning: + This operation will fail if the file system is in use by any backup operations + or file target groups. Remove those dependencies first. 
+ """ + try: + logger.info(f"Removing disk file system: {file_system_path}") + + # First, get the existing file systems + try: + existing_response = dsa_client._make_request( + method="GET", + endpoint="dsa/components/backup-applications/disk-file-system" + ) + + existing_file_systems = [] + if existing_response.get('status') == 'LIST_DISK_FILE_SYSTEMS_SUCCESSFUL': + existing_file_systems = existing_response.get('fileSystems', []) + logger.info(f"Found {len(existing_file_systems)} existing file systems") + else: + logger.warning("No existing file systems found or unable to retrieve them") + return f"āŒ Could not retrieve existing file systems to remove '{file_system_path}'" + + except Exception as e: + logger.error(f"Could not retrieve existing file systems: {e}") + return f"āŒ Error retrieving existing file systems: {str(e)}" + + # Check if the file system to remove exists + path_exists = False + file_systems_to_keep = [] + + for fs in existing_file_systems: + if fs.get('fileSystemPath') == file_system_path: + path_exists = True + logger.info(f"Found file system to remove: {file_system_path}") + else: + # Keep this file system + file_systems_to_keep.append(fs) + + # If path doesn't exist, return error + if not path_exists: + available_paths = [fs.get('fileSystemPath', 'N/A') for fs in existing_file_systems] + results = [] + results.append("šŸ—‚ļø DSA Disk File System Removal") + results.append("=" * 50) + results.append(f"āŒ File system '{file_system_path}' not found") + results.append("") + results.append("šŸ“‹ Available file systems:") + if available_paths: + for path in available_paths: + results.append(f" • {path}") + else: + results.append(" (No file systems configured)") + results.append("") + results.append("=" * 50) + return "\n".join(results) + + # Prepare request data with remaining file systems + request_data = { + "fileSystems": file_systems_to_keep + } + + logger.info(f"Removing '{file_system_path}', keeping {len(file_systems_to_keep)} file systems") + + # Make request to DSA API to reconfigure with remaining file systems + response = dsa_client._make_request( + method="POST", + endpoint="dsa/components/backup-applications/disk-file-system", + data=request_data + ) + + logger.debug(f"DSA API response: {response}") + + results = [] + results.append("šŸ—‚ļø DSA Disk File System Removal") + results.append("=" * 50) + results.append(f"šŸ“ Removed File System: {file_system_path}") + results.append(f"šŸ“Š Remaining File Systems: {len(file_systems_to_keep)}") + results.append("") + + if response.get('status') == 'CONFIG_DISK_FILE_SYSTEM_SUCCESSFUL': + results.append("āœ… Disk file system removed successfully") + results.append(f"šŸ“Š Status: {response.get('status')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + if file_systems_to_keep: + results.append("") + results.append("šŸ“‹ Remaining file systems:") + for fs in file_systems_to_keep: + path = fs.get('fileSystemPath', 'N/A') + max_files = fs.get('maxFiles', 'N/A') + results.append(f" • {path} (Max Files: {max_files})") + else: + results.append("") + results.append("šŸ“‹ No file systems remaining (all removed)") + + else: + results.append("āŒ Failed to remove disk file system") + results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + # Show validation errors if any + if response.get('validationlist'): + validation = response['validationlist'] + results.append("") + results.append("šŸ” Validation Details:") + + if 
validation.get('serverValidationList'): + for error in validation['serverValidationList']: + results.append(f"āŒ Server Error: {error.get('message', 'Unknown error')}") + results.append(f" Code: {error.get('code', 'N/A')}") + results.append(f" Status: {error.get('valStatus', 'N/A')}") + + if validation.get('clientValidationList'): + for error in validation['clientValidationList']: + results.append(f"āŒ Client Error: {error.get('message', 'Unknown error')}") + + results.append("") + results.append("=" * 50) + results.append("āœ… Disk file system removal operation completed") + + return "\n".join(results) + + except Exception as e: + logger.error(f"Failed to remove disk file system: {str(e)}") + return f"āŒ Error removing disk file system '{file_system_path}': {str(e)}" + + +def manage_dsa_disk_file_systems( + operation: str, + file_system_path: Optional[str] = None, + max_files: Optional[int] = None +) -> str: + """Unified DSA Disk File System Management Tool + + This comprehensive tool handles all DSA disk file system operations including + listing, configuring, and removing file system configurations. + + Args: + operation: The operation to perform + file_system_path: Path to the file system (for config and remove operations) + max_files: Maximum number of files allowed (for config operation) + + Available Operations: + - "list" - List all configured disk file systems + - "config" - Configure a new disk file system + - "delete_all" - Remove all file system configurations + - "remove" - Remove a specific file system configuration + + Returns: + Result of the requested operation + """ + + logger.info(f"DSA Disk File System Management - Operation: {operation}") + + try: + # List operation + if operation == "list": + return list_disk_file_systems() + + # Config operation + elif operation == "config": + if not file_system_path: + return "āŒ Error: file_system_path is required for config operation" + if max_files is None: + return "āŒ Error: max_files is required for config operation" + return config_disk_file_system(file_system_path, max_files) + + # Delete all operation + elif operation == "delete_all": + return delete_disk_file_systems() + + # Remove specific operation + elif operation == "remove": + if not file_system_path: + return "āŒ Error: file_system_path is required for remove operation" + return remove_disk_file_system(file_system_path) + + else: + available_operations = [ + "list", "config", "delete_all", "remove" + ] + return f"āŒ Error: Unknown operation '{operation}'. Available operations: {', '.join(available_operations)}" + + except Exception as e: + logger.error(f"DSA Disk File System Management error - Operation: {operation}, Error: {str(e)}") + return f"āŒ Error during {operation}: {str(e)}" + + +#------------------ Tool Handler for MCP ------------------# + +def handle_bar_manageDsaDiskFileSystemOperations( + conn: any, # Not used for DSA operations, but required by MCP framework + operation: str, + file_system_path: str = None, + max_files: int = None, + *args, + **kwargs +): + """ + Handle DSA disk file system operations for the MCP server + + This tool provides unified management of DSA disk file system configurations + for backup and recovery operations. 
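+
+    Example (illustrative; path and limit are placeholders):
+        handle_bar_manageDsaDiskFileSystemOperations(conn=None, operation="config",
+                                                     file_system_path="/backup/primary",
+                                                     max_files=1000)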
+ + Args: + conn: Database connection (not used for DSA operations) + operation: The operation to perform (list, config, delete_all, remove) + file_system_path: Path to the file system (for config and remove operations) + max_files: Maximum number of files allowed (for config operation) + + Returns: + ResponseType: formatted response with operation results + metadata + """ + logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystemOperations: Args: operation: {operation}, file_system_path: {file_system_path}, max_files: {max_files}") + + try: + # Run the synchronous operation + result = manage_dsa_disk_file_systems( + operation=operation, + file_system_path=file_system_path, + max_files=max_files + ) + + metadata = { + "tool_name": "bar_manageDsaDiskFileSystemOperations", + "operation": operation, + "file_system_path": file_system_path, + "max_files": max_files, + "success": True + } + + logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystemOperations: metadata: {metadata}") + return create_response(result, metadata) + + except Exception as e: + logger.error(f"Error in handle_bar_manageDsaDiskFileSystemOperations: {e}") + error_result = f"āŒ Error in DSA disk file system operation: {str(e)}" + metadata = { + "tool_name": "bar_manageDsaDiskFileSystemOperations", + "operation": operation, + "error": str(e), + "success": False + } + return create_response(error_result, metadata) diff --git a/src/teradata_mcp_server/tools/bar/dsa_client.py b/src/teradata_mcp_server/tools/bar/dsa_client.py new file mode 100644 index 0000000..3c4722d --- /dev/null +++ b/src/teradata_mcp_server/tools/bar/dsa_client.py @@ -0,0 +1,208 @@ +"""DSA REST API client for BAR operations""" + +import json +import logging +import os +from typing import Any, Dict, List, Optional +import requests +from urllib.parse import urljoin + +logger = logging.getLogger("teradata_mcp_server") + + +class DSAClientError(Exception): + """Base exception for DSA client errors""" + pass + + +class DSAAuthenticationError(DSAClientError): + """Authentication error with DSA system""" + pass + + +class DSAConnectionError(DSAClientError): + """Connection error with DSA system""" + pass + + +class DSAAPIError(DSAClientError): + """API error from DSA system""" + pass + + +class DSAClient: + """Client for interacting with Teradata DSA REST API""" + + def __init__( + self, + base_url: Optional[str] = None, + username: Optional[str] = None, + password: Optional[str] = None, + verify_ssl: Optional[bool] = None, + timeout: Optional[float] = None + ): + """Initialize DSA client + + Args: + base_url: Base URL for DSA API (defaults to environment variable) + username: Username for authentication (defaults to environment variable) + password: Password for authentication (defaults to environment variable) + verify_ssl: Whether to verify SSL certificates (defaults to environment variable) + timeout: Request timeout in seconds (defaults to environment variable) + """ + # Handle both DSA_BASE_URL and individual DSA_HOST/DSA_PORT/DSA_PROTOCOL + if base_url: + self.base_url = base_url + else: + dsa_base_url = os.getenv("DSA_BASE_URL") + if dsa_base_url: + self.base_url = dsa_base_url + else: + # Use individual components + dsa_host = os.getenv("DSA_HOST", "localhost") + dsa_port = int(os.getenv("DSA_PORT", "9090")) + dsa_protocol = os.getenv("DSA_PROTOCOL", "https") + self.base_url = f"{dsa_protocol}://{dsa_host}:{dsa_port}/" + + self.username = username or os.getenv("DSA_USERNAME", "admin") + self.password = password or os.getenv("DSA_PASSWORD", "admin") + 
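+        # DSA_VERIFY_SSL accepts "true"/"1"/"yes" (case-insensitive); any other value disables verification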
self.verify_ssl = verify_ssl if verify_ssl is not None else os.getenv("DSA_VERIFY_SSL", "true").lower() in ["true", "1", "yes"]
+        self.timeout = timeout or float(os.getenv("DSA_CONNECTION_TIMEOUT", "30"))
+
+        # Ensure base URL ends with /
+        if not self.base_url.endswith('/'):
+            self.base_url += '/'
+
+        logger.info(f"Initialized DSA client for {self.base_url}")
+
+    def _get_auth(self) -> Optional[tuple]:
+        """Get authentication credentials if available"""
+        if self.username and self.password:
+            return (self.username, self.password)
+        return None
+
+    def _make_request(
+        self,
+        method: str,
+        endpoint: str,
+        params: Optional[Dict[str, Any]] = None,
+        data: Optional[Dict[str, Any]] = None,
+        headers: Optional[Dict[str, str]] = None
+    ) -> Dict[str, Any]:
+        """Make an HTTP request to the DSA API
+
+        Args:
+            method: HTTP method (GET, POST, etc.)
+            endpoint: API endpoint (relative to base URL)
+            params: Query parameters
+            data: Request body data
+            headers: Additional headers
+
+        Returns:
+            Response data as dictionary
+
+        Raises:
+            DSAConnectionError: If connection fails
+            DSAAuthenticationError: If authentication fails
+            DSAAPIError: If API returns an error
+        """
+        url = urljoin(self.base_url, endpoint)
+
+        # Prepare headers
+        request_headers = {
+            'Accept': 'application/json',
+            'Content-Type': 'application/json',
+            'User-Agent': 'Teradata-MCP-Server-BAR/1.0.0'
+        }
+        if headers:
+            request_headers.update(headers)
+
+        # Prepare authentication
+        auth = self._get_auth()
+
+        logger.debug(f"Making {method} request to {url} with params: {params}")
+
+        try:
+            response = requests.request(
+                method=method,
+                url=url,
+                params=params,
+                json=data,
+                headers=request_headers,
+                auth=auth,
+                verify=self.verify_ssl,
+                timeout=self.timeout
+            )
+
+            logger.debug(f"Response status: {response.status_code}")
+
+            # Handle authentication errors
+            if response.status_code == 401:
+                raise DSAAuthenticationError("Authentication failed - check username and password")
+
+            # Handle other client/server errors
+            if response.status_code >= 400:
+                error_msg = f"DSA API error: {response.status_code} - {response.text}"
+                logger.error(error_msg)
+                raise DSAAPIError(error_msg)
+
+            # Parse JSON response
+            try:
+                return response.json()
+            except json.JSONDecodeError as e:
+                logger.error(f"Failed to parse JSON response: {e}")
+                raise DSAAPIError(f"Invalid JSON response from DSA API: {e}")
+
+        except requests.exceptions.ConnectionError as e:
+            error_msg = f"Failed to connect to DSA server at {url}: {e}"
+            logger.error(error_msg)
+            raise DSAConnectionError(error_msg)
+        except requests.exceptions.Timeout as e:
+            error_msg = f"Request timeout connecting to DSA server: {e}"
+            logger.error(error_msg)
+            raise DSAConnectionError(error_msg)
+        except requests.exceptions.RequestException as e:
+            error_msg = f"HTTP error communicating with DSA server: {e}"
+            logger.error(error_msg)
+            raise DSAConnectionError(error_msg)
+
+    def health_check(self) -> Dict[str, Any]:
+        """Perform a health check on the DSA system
+
+        Returns:
+            Dictionary with health check results
+        """
+        try:
+            # Try to make a simple API call to test connectivity
+            response = self._make_request(
+                'GET',
+                'dsa/components/backup-applications/disk-file-system'
+            )
+
+            return {
+                "status": "healthy",
+                "dsa_status": response.get("status", "unknown"),
+                "message": "Successfully connected to DSA system"
+            }
+        except DSAAuthenticationError:
+            return {
+                "status": "unhealthy",
+                "error": "authentication_failed",
+                "message": "Authentication failed - check credentials"
+            }
+        except 
DSAConnectionError as e: + return { + "status": "unhealthy", + "error": "connection_failed", + "message": str(e) + } + except Exception as e: + return { + "status": "unhealthy", + "error": "unknown_error", + "message": str(e) + } + + +# Global DSA client instance +dsa_client = DSAClient() diff --git a/src/teradata_mcp_server/tools/module_loader.py b/src/teradata_mcp_server/tools/module_loader.py index 85df8c5..2ac89a9 100644 --- a/src/teradata_mcp_server/tools/module_loader.py +++ b/src/teradata_mcp_server/tools/module_loader.py @@ -19,6 +19,7 @@ class ModuleLoader: # Map tool prefixes to their corresponding module paths MODULE_MAP = { + 'bar': 'teradata_mcp_server.tools.bar', 'base': 'teradata_mcp_server.tools.base', 'dba': 'teradata_mcp_server.tools.dba', 'evs': 'teradata_mcp_server.tools.evs', diff --git a/tests/cases/bar_test_cases.json b/tests/cases/bar_test_cases.json new file mode 100644 index 0000000..2dfaf91 --- /dev/null +++ b/tests/cases/bar_test_cases.json @@ -0,0 +1,33 @@ +{ + "test_cases": { + "bar_manageDsaDiskFileSystemOperations": [ + { + "name": "list_disk_file_systems", + "parameters": { + "operation": "list" + }, + "description": "List all configured disk file systems in DSA" + }, + { + "name": "config_test_file_system", + "parameters": { + "operation": "config", + "file_system_path": "/test/backup/path", + "max_files": 100 + }, + "description": "Configure a test disk file system", + "skip_if_no_dsa": true + }, + { + "name": "remove_test_file_system", + "parameters": { + "operation": "remove", + "file_system_path": "/test/backup/path" + }, + "description": "Remove the test disk file system", + "skip_if_no_dsa": true, + "depends_on": "config_test_file_system" + } + ] + } +} From 9ff7afdd4b6c1ee6a9b477b9e88ad04414788b53 Mon Sep 17 00:00:00 2001 From: Asim Bera Date: Wed, 3 Sep 2025 18:47:28 +0530 Subject: [PATCH 02/14] feat: Enhanced BAR tools documentation and architecture - Added comprehensive DSA architecture diagram with Mermaid - Updated terminology from 'Backup and Recovery' to 'Backup and Restore' - Simplified tool names (removed 'Operations' suffix) - Added prerequisites section with DSA infrastructure requirements - Documented BAR profile configuration from profiles.yml - Added tools inventory (16 tools: 1 developed, 15 planned) - Updated function names in bar_tools.py - Enhanced test cases and cross-platform compatibility - Removed httpx dependency from pyproject.toml --- README.md | 2 +- demo_bar_tools.py | 47 ----- pyproject.toml | 1 - src/teradata_mcp_server/tools/bar/README.md | 176 +++++++++++++++--- .../tools/bar/bar_objects.yml | 2 +- .../tools/bar/bar_resources.py | 2 +- .../tools/bar/bar_tools.py | 23 +-- tests/cases/bar_test_cases.json | 2 +- tests/run_mcp_tests.py | 7 +- 9 files changed, 166 insertions(+), 96 deletions(-) delete mode 100644 demo_bar_tools.py diff --git a/README.md b/README.md index e11200a..7ab49d7 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ We are providing groupings of tools and associated helpful prompts to support al - **DBA** tools, prompts and resources to facilitate your platform administration tasks: - [DBA Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/dba/README.md) - [Security Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/sec/README.md) -- **BAR** tools, prompts and resources for backup and recovery operations: +- **BAR** tools, prompts and resources for backup and restore operations: - [BAR 
Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/bar/README.md) to manage DSA disk file systems and backup operations. diff --git a/demo_bar_tools.py b/demo_bar_tools.py deleted file mode 100644 index dafc5e5..0000000 --- a/demo_bar_tools.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -""" -Demo script showing the BAR tools integration with Teradata MCP Server -""" - -import os -import asyncio -import json - -# Set up DSA environment variables -os.environ['DSA_HOST'] = 'pe06-dsc-0015.labs.teradata.com' -os.environ['DSA_PORT'] = '9090' -os.environ['DSA_PROTOCOL'] = 'https' -os.environ['DSA_VERIFY_SSL'] = 'false' - -from src.teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageDsaDiskFileSystemOperations - -def demo_bar_tools(): - """Demonstrate BAR tools functionality""" - print("šŸš€ BAR Tools Integration Demo") - print("=" * 60) - print(f"DSA Server: https://{os.environ['DSA_HOST']}:{os.environ['DSA_PORT']}") - print(f"SSL Verify: {os.environ['DSA_VERIFY_SSL']}") - print("=" * 60) - - # Test 1: List disk file systems - print("\nšŸ“‹ Test 1: List Disk File Systems") - print("-" * 40) - - try: - result = handle_bar_manageDsaDiskFileSystemOperations( - conn=None, - operation='list' - ) - - print(f"āœ… Response received successfully!") - print(f"šŸ“Š Result type: {type(result)}") - print("\nšŸ“„ Full Response:") - print(result) - - except Exception as e: - print(f"āŒ Error: {e}") - import traceback - traceback.print_exc() - -if __name__ == "__main__": - demo_bar_tools() diff --git a/pyproject.toml b/pyproject.toml index b09be45..804d978 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,6 @@ dependencies = [ "python-dotenv>=1.0.0", "PyYAML>=6.0.0", "sqlalchemy>=2.0.0,<3.0.0", - "httpx>=0.24.0", "requests>=2.25.0", ] diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index 485c79b..b59d220 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -1,17 +1,63 @@ -# BAR (Backup and Recovery) Tools +# BAR (Backup and Restore) Tools -**Dependencies** +## DSA (Data Stream Architecture) Design -- DSA REST API access -- httpx for async HTTP requests -- Environment variables for DSA connection configuration +```mermaid +flowchart TB + %% AI Agent Layer (Top) + subgraph AI_Layer ["šŸ¤– AI Agent Layer"] + direction LR + User[šŸ‘¤ User Input] --> LLM[šŸ¤– AI Agent/LLM] --> Output[šŸ“„ Output] + end + + %% Integration Layer + subgraph Integration_Layer ["šŸ”Œ Integration Layer"] + direction LR + MCP[šŸ”Œ MCP Server
BAR Tools] --> API[🌐 DSA REST API] + end + + %% Infrastructure Layer + subgraph DSA_Infrastructure ["šŸ¢ DSA Infrastructure"] + direction LR + DSC[šŸŽ›ļø DSC] -.-> DSMain[šŸ“Š DSMain] + DSC -.-> BarNC[šŸ“¦ BarNC] + DB[(šŸ—„ļø Teradata
Database)] <-->|read/write| DSMain + DSMain <-->|data stream| BarNC + end + + %% Storage Layer + subgraph Storage_Solutions ["šŸ’¾ Storage Solutions"] + direction LR + Storage{Backup Storage} --> Disk[šŸ“ Disk] + Storage --> S3[ā˜ļø S3] + Storage --> Azure[šŸ”· Azure] + Storage --> GCS[🌐 GCS] + Storage --> NetBackup[šŸ”’ NetBackup] + Storage --> Spectrum[šŸŽÆ Spectrum] + end + + %% Vertical Flow Between Layers + LLM -.-> MCP + API --> DSC + BarNC <-->|Backup: write
Restore: read| Storage + + %% Styling + classDef userStyle fill:#e3f2fd,stroke:#1976d2,stroke-width:2px + classDef integrationStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px + classDef infraStyle fill:#fff3e0,stroke:#f57c00,stroke-width:2px + classDef storageStyle fill:#e8f5e8,stroke:#388e3c,stroke-width:2px + + class User,LLM,Output userStyle + class MCP,API integrationStyle + class DSC,DSMain,BarNC,DB infraStyle + class Storage,Disk,S3,Azure,GCS,NetBackup,Spectrum storageStyle +``` -**BAR** tools: +## Prerequisites -- bar_manageDsaDiskFileSystemOperations - Unified tool for managing DSA disk file system configurations - -## Configuration +**DSA Infrastructure** must be properly configured and running before using BAR tools +### Environment Variables The BAR tools require the following environment variables for DSA connection: - `DSA_BASE_URL` - Base URL for DSA API (default: https://localhost:9090/) @@ -20,29 +66,103 @@ The BAR tools require the following environment variables for DSA connection: - `DSA_VERIFY_SSL` - Whether to verify SSL certificates (default: true) - `DSA_CONNECTION_TIMEOUT` - Request timeout in seconds (default: 30) -## Available Operations +### BAR Profile Configuration +The BAR profile is defined in `config/profiles.yml` and controls access to BAR-related tools and resources. + +**Profile Configuration:** +```yaml +bar: + tool: + - ^bar_* # All BAR tools (bar_manageDsaDiskFileSystem, etc.) + - ^base_readQuery$ # Read-only database queries + - ^base_databaseList$ # Database listing + prompt: + - ^bar_* # BAR-specific prompts + resource: + - ^bar_* # BAR-specific resources +``` + +**What the BAR profile enables:** +- Access to all `bar_*` tools for backup and restore operations +- Basic database read operations for backup source identification +- Database listing capabilities for backup planning +- BAR-specific prompts and resources + +**Usage:** Specify `--profile bar` when running MCP server to enable BAR-specific functionality. + + +## Available Tools + +**Total Estimated Tools: 16** (1 āœ… Developed, 15 🚧 Planned) + +### Storage Configuration Tools + +#### bar_manageDsaDiskFileSystem āœ… +**Status**: Developed +Unified tool for managing DSA disk file system configurations for backup storage. + +#### bar_manageAwsS3 🚧 +**Status**: Planned +Tool for managing AWS S3 bucket configurations for backup storage. + +#### bar_manageAzureBlob 🚧 +**Status**: Planned +Tool for managing Azure Blob Storage configurations for backup storage. + +#### bar_manageGoogleCloud 🚧 +**Status**: Planned +Tool for managing Google Cloud Storage configurations for backup storage. + +#### bar_manageNetBackup 🚧 +**Status**: Planned +Tool for managing NetBackup configurations for enterprise backup storage. + +#### bar_manageIbmSpectrum 🚧 +**Status**: Planned +Tool for managing IBM Spectrum Protect configurations for backup storage. + +### Infrastructure Management Tools + +#### bar_manageMediaServer 🚧 +**Status**: Planned +Tool for managing BarNC configurations + +#### bar_manageTeradataSystem 🚧 +**Status**: Planned +Tool for managing DSMain configurations and Teradata system integration. + +### Target Group Management Tools + +#### bar_manageDiskFileTargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with disk file storage solutions. + +#### bar_manageAwsS3TargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with AWS S3 storage solutions. 
-### bar_manageDsaDiskFileSystemOperations +#### bar_manageAzureBlobTargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with Azure Blob storage solutions. -This unified tool handles all DSA disk file system operations: +#### bar_manageGoogleCloudTargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with Google Cloud storage solutions. -**Operations:** -- `list` - List all configured disk file systems -- `config` - Configure a new disk file system with specified path and max files -- `delete_all` - Remove all file system configurations (use with caution) -- `remove` - Remove a specific file system configuration by path +#### bar_manageNetBackupTargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with NetBackup storage solutions. -**Examples:** -- List file systems: `{"operation": "list"}` -- Add new file system: `{"operation": "config", "file_system_path": "/backup/primary", "max_files": 1000}` -- Remove file system: `{"operation": "remove", "file_system_path": "/old/backup/path"}` -- Delete all: `{"operation": "delete_all"}` +#### bar_manageIbmSpectrumTargetGroup 🚧 +**Status**: Planned +Tool for managing media server configurations with IBM Spectrum storage solutions. -## Notes +### Operations Management Tools -- File systems must exist and be accessible before configuration -- Removal operations will fail if file systems are in use by backup operations -- Always verify file system availability before configuration -- Check dependencies before removing file systems +#### bar_manageJob 🚧 +**Status**: Planned +Tool for managing backup and restore job lifecycle. -[Return to Main README](../../../../README.md) +#### bar_manageSaveSets 🚧 +**Status**: Planned +Tool for managing backup files/objects (save sets) created by backup operations. 
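+
+**Quick example (illustrative):** the developed disk file system tool can be exercised directly for smoke testing, assuming the package is installed and the `DSA_*` environment variables point at a reachable DSA server; the path and file limit below are placeholders:
+
+```python
+from teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageDsaDiskFileSystem
+
+# conn is unused by DSA-backed tools; it exists for MCP framework compatibility.
+print(handle_bar_manageDsaDiskFileSystem(conn=None, operation="list"))
+print(handle_bar_manageDsaDiskFileSystem(conn=None, operation="config",
+                                         file_system_path="/backup/primary",
+                                         max_files=1000))
+```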
diff --git a/src/teradata_mcp_server/tools/bar/bar_objects.yml b/src/teradata_mcp_server/tools/bar/bar_objects.yml index c752b1d..bc61057 100644 --- a/src/teradata_mcp_server/tools/bar/bar_objects.yml +++ b/src/teradata_mcp_server/tools/bar/bar_objects.yml @@ -1,4 +1,4 @@ -# BAR (Backup and Recovery) Objects Configuration +# BAR (Backup and Restore) Objects Configuration # This file contains object definitions for BAR tools, prompts, and resources # Note: The main BAR tools are implemented in bar_tools.py as they require diff --git a/src/teradata_mcp_server/tools/bar/bar_resources.py b/src/teradata_mcp_server/tools/bar/bar_resources.py index 08bb5f5..300e686 100644 --- a/src/teradata_mcp_server/tools/bar/bar_resources.py +++ b/src/teradata_mcp_server/tools/bar/bar_resources.py @@ -1,5 +1,5 @@ """ -BAR (Backup and Recovery) Resources for Teradata DSA MCP Server +BAR (Backup and Restore) Resources for Teradata DSA MCP Server Provides resources and guidance for DSA backup operations """ diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 4864f52..ab24e4f 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -1,11 +1,6 @@ """ -BAR (Backup and Recovery) Tools for Teradata DSA MCP Server -Provides disk file system ma # First, get the existing file systems - try: - existing_response = dsa_client._make_request( - method="GET", - endpoint="dsa/components/backup-applications/disk-file-system" - )nt operations +BAR (Backup and Restore) Tools for Teradata DSA MCP Server + """ import logging @@ -478,7 +473,7 @@ def manage_dsa_disk_file_systems( #------------------ Tool Handler for MCP ------------------# -def handle_bar_manageDsaDiskFileSystemOperations( +def handle_bar_manageDsaDiskFileSystem( conn: any, # Not used for DSA operations, but required by MCP framework operation: str, file_system_path: str = None, @@ -490,7 +485,7 @@ def handle_bar_manageDsaDiskFileSystemOperations( Handle DSA disk file system operations for the MCP server This tool provides unified management of DSA disk file system configurations - for backup and recovery operations. + for backup and restore operations. 
Args: conn: Database connection (not used for DSA operations) @@ -501,7 +496,7 @@ def handle_bar_manageDsaDiskFileSystemOperations( Returns: ResponseType: formatted response with operation results + metadata """ - logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystemOperations: Args: operation: {operation}, file_system_path: {file_system_path}, max_files: {max_files}") + logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystem: Args: operation: {operation}, file_system_path: {file_system_path}, max_files: {max_files}") try: # Run the synchronous operation @@ -512,21 +507,21 @@ def handle_bar_manageDsaDiskFileSystemOperations( ) metadata = { - "tool_name": "bar_manageDsaDiskFileSystemOperations", + "tool_name": "bar_manageDsaDiskFileSystem", "operation": operation, "file_system_path": file_system_path, "max_files": max_files, "success": True } - logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystemOperations: metadata: {metadata}") + logger.debug(f"Tool: handle_bar_manageDsaDiskFileSystem: metadata: {metadata}") return create_response(result, metadata) except Exception as e: - logger.error(f"Error in handle_bar_manageDsaDiskFileSystemOperations: {e}") + logger.error(f"Error in handle_bar_manageDsaDiskFileSystem: {e}") error_result = f"āŒ Error in DSA disk file system operation: {str(e)}" metadata = { - "tool_name": "bar_manageDsaDiskFileSystemOperations", + "tool_name": "bar_manageDsaDiskFileSystem", "operation": operation, "error": str(e), "success": False diff --git a/tests/cases/bar_test_cases.json b/tests/cases/bar_test_cases.json index 2dfaf91..9113abf 100644 --- a/tests/cases/bar_test_cases.json +++ b/tests/cases/bar_test_cases.json @@ -1,6 +1,6 @@ { "test_cases": { - "bar_manageDsaDiskFileSystemOperations": [ + "bar_manageDsaDiskFileSystem": [ { "name": "list_disk_file_systems", "parameters": { diff --git a/tests/run_mcp_tests.py b/tests/run_mcp_tests.py index d443ea3..8875c26 100644 --- a/tests/run_mcp_tests.py +++ b/tests/run_mcp_tests.py @@ -37,10 +37,13 @@ def __init__(self, test_cases_files: list[str] = ["tests/cases/core_test_cases.j def _find_project_root(self) -> str: """Find the project root directory (contains profiles.yml).""" current = os.path.abspath(os.getcwd()) - while current != '/': + while True: if os.path.exists(os.path.join(current, 'profiles.yml')): return current - current = os.path.dirname(current) + parent = os.path.dirname(current) + if parent == current: # Reached root directory (works on both Windows and Unix) + break + current = parent return os.getcwd() async def load_test_cases(self): From 6eb66af77fb744ac052e07157b6f8d2a757d9e8d Mon Sep 17 00:00:00 2001 From: Asim Bera Date: Thu, 4 Sep 2025 09:41:49 +0530 Subject: [PATCH 03/14] Enhance BAR tools documentation with DSA architecture diagram - Add comprehensive AI Agent integration architecture diagram using Mermaid - Redesign with compact square layout for better visual presentation - Document prerequisites including Teradata DSA and AI Agent setup - Add detailed BAR profile configuration examples for profiles.yml - Create comprehensive tool inventory with 16 available functions - Improve tool naming conventions (remove redundant 'Operations' suffix) - Enhance main README with detailed BAR tools description - Add navigation links between documentation sections - Include support for multiple storage backends: disk, AWS S3, Azure Blob, Google Cloud, NetBackup, IBM Spectrum --- README.md | 4 +- src/teradata_mcp_server/tools/bar/README.md | 71 +++++++++------------ 2 files changed, 31 insertions(+), 44 
deletions(-) diff --git a/README.md b/README.md index 7ab49d7..a8bd2fc 100644 --- a/README.md +++ b/README.md @@ -25,8 +25,8 @@ We are providing groupings of tools and associated helpful prompts to support al - **DBA** tools, prompts and resources to facilitate your platform administration tasks: - [DBA Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/dba/README.md) - [Security Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/sec/README.md) -- **BAR** tools, prompts and resources for backup and restore operations: - - [BAR Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/bar/README.md) to manage DSA disk file systems and backup operations. +- **BAR** tools, prompts and resources for database backup and restore operations: + - [BAR Tools](src/teradata_mcp_server/tools/bar/README.md) integrate AI agents with Teradata DSA (Data Stream Architecture) for comprehensive backup management across multiple storage solutions including disk files, cloud storage (AWS S3, Azure Blob, Google Cloud), and enterprise systems (NetBackup, IBM Spectrum). ## Getting Started diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index b59d220..483a155 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -1,56 +1,39 @@ # BAR (Backup and Restore) Tools -## DSA (Data Stream Architecture) Design +## AI Agent Integration with DSA Architecture ```mermaid -flowchart TB - %% AI Agent Layer (Top) - subgraph AI_Layer ["šŸ¤– AI Agent Layer"] - direction LR - User[šŸ‘¤ User Input] --> LLM[šŸ¤– AI Agent/LLM] --> Output[šŸ“„ Output] - end +flowchart TD + %% AI Agent Layer + User[šŸ‘¤ User] --> LLM[šŸ¤– AI Agent] --> Output[šŸ“„ Output] - %% Integration Layer - subgraph Integration_Layer ["šŸ”Œ Integration Layer"] - direction LR - MCP[šŸ”Œ MCP Server
BAR Tools] --> API[🌐 DSA REST API] - end + %% MCP Integration + LLM --> MCP[šŸ”Œ MCP Server] + MCP --> API[🌐 DSA API] + API --> DSC[šŸŽ›ļø DSC] - %% Infrastructure Layer - subgraph DSA_Infrastructure ["šŸ¢ DSA Infrastructure"] - direction LR - DSC[šŸŽ›ļø DSC] -.-> DSMain[šŸ“Š DSMain] - DSC -.-> BarNC[šŸ“¦ BarNC] - DB[(šŸ—„ļø Teradata
Database)] <-->|read/write| DSMain - DSMain <-->|data stream| BarNC - end + %% DSA Infrastructure + DSC --> DSMain[šŸ“Š DSMain] + DSC --> BarNC[šŸ“¦ BarNC] - %% Storage Layer - subgraph Storage_Solutions ["šŸ’¾ Storage Solutions"] - direction LR - Storage{Backup Storage} --> Disk[šŸ“ Disk] - Storage --> S3[ā˜ļø S3] - Storage --> Azure[šŸ”· Azure] - Storage --> GCS[🌐 GCS] - Storage --> NetBackup[šŸ”’ NetBackup] - Storage --> Spectrum[šŸŽÆ Spectrum] - end + %% Data Flow + DB[(šŸ—„ļø Database)] <--> DSMain + DSMain <--> BarNC + BarNC --> Storage{šŸ’¾ Storage} - %% Vertical Flow Between Layers - LLM -.-> MCP - API --> DSC - BarNC <-->|Backup: write
Restore: read| Storage + %% Storage Options + Storage --> Disk[šŸ“ Disk] + Storage --> Cloud[ā˜ļø Cloud] + Storage --> Enterprise[šŸ”’ Enterprise] %% Styling - classDef userStyle fill:#e3f2fd,stroke:#1976d2,stroke-width:2px - classDef integrationStyle fill:#f3e5f5,stroke:#7b1fa2,stroke-width:2px - classDef infraStyle fill:#fff3e0,stroke:#f57c00,stroke-width:2px - classDef storageStyle fill:#e8f5e8,stroke:#388e3c,stroke-width:2px + classDef primary fill:#e3f2fd,stroke:#1976d2 + classDef secondary fill:#f3e5f5,stroke:#7b1fa2 + classDef storage fill:#e8f5e8,stroke:#388e3c - class User,LLM,Output userStyle - class MCP,API integrationStyle - class DSC,DSMain,BarNC,DB infraStyle - class Storage,Disk,S3,Azure,GCS,NetBackup,Spectrum storageStyle + class User,LLM,Output primary + class MCP,API,DSC secondary + class DSMain,BarNC,DB,Storage,Disk,Cloud,Enterprise storage ``` ## Prerequisites @@ -166,3 +149,7 @@ Tool for managing backup and restore job lifecycle. #### bar_manageSaveSets 🚧 **Status**: Planned Tool for managing backup files/objects (save sets) created by backup operations. + +--- + +[← Return to Main README](../../../../README.md) From ac80cfee21049cb3f83800e443393444fd0aa552 Mon Sep 17 00:00:00 2001 From: Prasad Avadhanam Date: Thu, 18 Sep 2025 12:21:05 +0530 Subject: [PATCH 04/14] AWS S3 Backup Config Mgmt functions --- src/teradata_mcp_server/tools/bar/README.md | 4 +- .../tools/bar/bar_objects.yml | 31 ++ .../tools/bar/bar_tools.py | 268 ++++++++++++++++++ .../tools/bar/dsa_client.py | 10 +- 4 files changed, 304 insertions(+), 9 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index 483a155..d198bf9 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -84,8 +84,8 @@ bar: **Status**: Developed Unified tool for managing DSA disk file system configurations for backup storage. -#### bar_manageAwsS3 🚧 -**Status**: Planned +#### bar_manageAwsS3 āœ… +**Status**: Developed Tool for managing AWS S3 bucket configurations for backup storage. #### bar_manageAzureBlob 🚧 diff --git a/src/teradata_mcp_server/tools/bar/bar_objects.yml b/src/teradata_mcp_server/tools/bar/bar_objects.yml index bc61057..472d4fe 100644 --- a/src/teradata_mcp_server/tools/bar/bar_objects.yml +++ b/src/teradata_mcp_server/tools/bar/bar_objects.yml @@ -7,3 +7,34 @@ # Future BAR-related prompts and simple tools can be defined here # following the standard YAML object format. + +bar_manageAWSS3Operations: + type: tool + description: | + Manage AWS S3 backup configuration for Teradata DSA via MCP server. Supports list, config, delete_all, and remove operations. 
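+  # Illustrative call shapes (bucket/account names are placeholders):
+  #   {"operation": "list"}
+  #   {"operation": "config", "bucketName": "my-dsa-backups", "acctName": "prod-account"}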
+  parameters:
+    operation:
+      type: string
+      description: Operation to perform (list, config, delete_all, remove)
+      required: true
+    accessId:
+      type: string
+      description: AWS Access ID
+      required: false
+    accessKey:
+      type: string
+      description: AWS Access Key
+      required: false
+    bucketsByRegion:
+      type: object
+      description: Buckets by region configuration (object: dict or list)
+      required: false
+    bucketName:
+      type: string
+      description: AWS Bucket Name
+      required: false
+    acctName:
+      type: string
+      description: AWS Account Name
+      required: false
+  handler: handle_bar_manageAWSS3Operations
diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py
index ab24e4f..c0ce9e9 100644
--- a/src/teradata_mcp_server/tools/bar/bar_tools.py
+++ b/src/teradata_mcp_server/tools/bar/bar_tools.py
@@ -4,7 +4,23 @@
 """

 import logging
+import string
 from typing import Optional
+import os
+
+logger = logging.getLogger("teradata_mcp_server")
+
+# Setup logging to file (always add file handler)
+log_dir = os.path.join(os.path.dirname(__file__), '../../../logs')
+os.makedirs(log_dir, exist_ok=True)
+log_file = os.path.join(log_dir, 'bar_tools.log')
+file_handler = logging.FileHandler(log_file)
+formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
+file_handler.setFormatter(formatter)
+logger.addHandler(file_handler)
+logger.setLevel(logging.DEBUG)
+logger.info('Logging initialized to %s', log_file)
+logger.info('TEST LOG ENTRY: bar_tools.py imported and logging is active.')

 from teradata_mcp_server.tools.utils import create_response
 from .dsa_client import dsa_client
@@ -471,6 +487,190 @@ def manage_dsa_disk_file_systems(
         return f"āŒ Error during {operation}: {str(e)}"


+"""
+#PA255044 -> START -- AWS S3 Configuration Tool
+"""
+#------------------ AWS S3 Backup Solution Configuration and Operations ------------------#
+
+
+def list_aws_s3_backup_configurations() -> str:
+    """List the configured AWS S3 object store systems in DSA
+
+    Lists all AWS S3 storage target systems that are currently configured for backup operations, showing:
+    - Bucket names
+    - Prefix numbers, names and devices configured
+    - Configuration status
+
+    Returns:
+        Formatted summary of all S3 file systems with their configurations
+    """
+
+    try:
+        logger.info("Listing AWS S3 target systems via DSA API")
+
+        # Make request to DSA API
+        response = dsa_client._make_request(
+            method="GET",
+            endpoint="dsa/components/backup-applications/aws-s3"
+        )
+
+        # Add debug log for full API response
+        logger.debug("[DEBUG] Full DSA API response from aws-s3 endpoint: %r", response)
+
+        results = []
+        results.append("šŸ—‚ļø DSA AWS S3 Backup Solution Systems Available")
+        results.append("=" * 50)
+
+        if response.get('status') == 'LIST_AWS_APP_SUCCESSFUL':
+            # Extract bucketsByRegion from nested aws[0]['configAwsRest']['bucketsByRegion']
+            bucketsByRegion = []
+            aws_list = response.get('aws', [])
+            if aws_list and isinstance(aws_list, list):
+                configAwsRest = aws_list[0].get('configAwsRest', {})
+                bucketsByRegion = configAwsRest.get('bucketsByRegion', [])
+
+            # Handle if bucketsByRegion is a dict (single region) or list
+            if isinstance(bucketsByRegion, dict):
+                bucketsByRegion = [bucketsByRegion]
+
+            bucket_count = 0
+            if bucketsByRegion:
+                for i, region in enumerate(bucketsByRegion, 1):
+                    region_name = region.get('region', 'N/A')
+                    results.append(f"šŸ—‚ļø Region #{i}: {region_name}")
+                    buckets = region.get('buckets', [])
+                    if isinstance(buckets, dict):
+                        buckets = [buckets]
+                    if buckets:
+                        for j, bucket in enumerate(buckets, 1):
+                            bucket_count += 1
+                            bucket_name = bucket.get('bucketName', 'N/A')
+                            results.append(f"  šŸ“ Bucket #{j}: {bucket_name}")
+                            prefix_list = bucket.get('prefixList', [])
+                            if isinstance(prefix_list, dict):
+                                prefix_list = [prefix_list]
+                            if prefix_list:
+                                for k, prefix in enumerate(prefix_list, 1):
+                                    prefix_name = prefix.get('prefixName', 'N/A')
+                                    storage_devices = prefix.get('storageDevices', 'N/A')
+                                    results.append(f"    šŸ”– Prefix #{k}: {prefix_name}")
+                                    results.append(f"       Storage Devices: {storage_devices}")
+                            else:
+                                results.append(f"    šŸ”– No prefixes configured")
+                    else:
+                        results.append(f"  šŸ“ No buckets configured in this region")
+                    results.append("")
+                results.insert(1, f"šŸ“Š Total Buckets Configured: {bucket_count}")
+            else:
+                results.append("šŸ“‹ No AWS backup Solutions Configured")
+
+            results.append("=" * 50)
+            results.append(f"āœ… Status: {response.get('status')}")
+            results.append(f"šŸ” Found Component: {response.get('foundComponent', False)}")
+            results.append(f"āœ”ļø Valid: {response.get('valid', False)}")
+
+        else:
+            results.append(f"āŒ Failed to list AWS S3 Backup Solutions Configured")
+            results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}")
+            if response.get('validationlist'):
+                validation = response['validationlist']
+                if validation.get('serverValidationList'):
+                    for error in validation['serverValidationList']:
+                        results.append(f"āŒ Error: {error.get('message', 'Unknown error')}")
+
+        return "\n".join(results)
+
+    except Exception as e:
+        logger.error(f"Failed to list AWS S3 Backup Solutions Configured: {str(e)}")
+        return f"āŒ Error listing AWS S3 Backup Solutions Configured: {str(e)}"
+
+
+def manage_AWS_S3_backup_configurations(
+    operation: str,
+    accessId: Optional[str] = None,
+    accessKey: Optional[str] = None,
+    bucketsByRegion: Optional[object] = None,
+    bucketName: Optional[str] = None,
+    acctName: Optional[str] = None
+) -> str:
+    """Unified DSA AWS S3 Backup Configuration Management Tool
+
+    This comprehensive tool handles all DSA AWS S3 backup configuration operations including
+    listing, configuring, and removing backup configurations.
+ + Args: + operation: The operation to perform + accessId: AWS Access ID + accessKey: AWS Access Key + bucketsByRegion: Buckets by region configuration (object: dict or list) + bucketName: AWS Bucket Name + acctName: AWS Account Name + + Available Operations: + - "list" - List all configured AWS S3 backup solutions + - "config" - Configure a new AWS S3 backup solution + - "delete_all" - Remove all AWS S3 backup solution configurations + - "remove" - Remove a specific AWS S3 backup solution configuration + + Returns: + Result of the requested operation + """ + + logger.info(f"DSA AWS S3 Backup Solution Management - Operation: {operation}") + + try: + # List operation + if operation == "list": + return list_aws_s3_backup_configurations() + # Config operation + elif operation == "config": + if not accessId: + return "āŒ Error: accessId is required for config operation" + if not accessKey: + return "āŒ Error: accessKey is required for config operation" + if not bucketsByRegion: + return "āŒ Error: bucketsByRegion is required for config operation" + if not acctName: + return "āŒ Error: acctName is required for config operation" + if not bucketName: + return "āŒ Error: bucketName is required for config operation" + # bucketsByRegion is now expected as an object (dict or list) + request_data = { + "configAwsRest": { + "accessId": accessId, + "accessKey": accessKey, + "bucketsByRegion": bucketsByRegion, + "bucketName": bucketName, + "acctName": acctName, + "viewpoint": True, + "viewpointBucketRegion": True + } + } + try: + response = dsa_client._make_request( + method="POST", + endpoint="dsa/components/backup-applications/aws-s3", + data=request_data + ) + return f"āœ… AWS backup solution configuration operation completed\nResponse: {response}" + except Exception as e: + return f"āŒ Error configuring AWS backup solution: {str(e)}" + # Delete all operation + elif operation == "delete_all": + return "āŒ Error: 'delete_all' operation is not implemented yet for AWS S3 Configuration" + # Remove specific operation + elif operation == "remove": + return "āŒ Error: 'remove' operation is not implemented yet for AWS S3 Configuration" + else: + available_operations = [ + "list", "config", "delete_all", "remove" + ] + return f"āŒ Error: Unknown operation '{operation}'. Available operations: {', '.join(available_operations)}" + except Exception as e: + logger.error(f"DSA AWS S3 Configuration Management error - Operation: {operation}, Error: {str(e)}") + return f"āŒ Error during {operation}: {str(e)}" + + #------------------ Tool Handler for MCP ------------------# def handle_bar_manageDsaDiskFileSystem( @@ -527,3 +727,71 @@ def handle_bar_manageDsaDiskFileSystem( "success": False } return create_response(error_result, metadata) + + + +def handle_bar_manageAWSS3Operations( + conn: any, # Not used for DSA operations, but required by MCP framework + operation: str, + accessId: str = None, + accessKey: str = None, + bucketsByRegion: object = None, + bucketName: str = None, + acctName: str = None, + *args, + **kwargs +): + logger.info("handle_bar_manageAWSS3Operations called with operation=%s, accessId=%s, acctName=%s", operation, accessId, acctName) + """ + Handle DSA AWS S3 backup solution configuraiton operations for the MCP server + + This tool provides unified management of DSA AWS S3 backup solution configuration + that is required for backup and restore operations. + + Args: + conn: Database connection (not used for DSA operations) + operation: The operation to perform (list, config). 
The delete_all and remove operations will be implemented later.
+        accessId: AWS access ID (for config operation)
+        accessKey: AWS access key (for config operation)
+        bucketsByRegion: List of S3 buckets by region (for config operation)
+        acctName: AWS account name (for config operation)
+
+    Returns:
+        ResponseType: formatted response with operation results + metadata
+    """
+
+    logger.debug(f"Tool: handle_bar_manageAWSS3Operations: Args: operation: {operation}, accessId: {accessId}, accessKey: {accessKey}, bucketsByRegion: {bucketsByRegion}, acctName: {acctName}")
+    logger.debug(f"[DEBUG] bucketsByRegion type: {type(bucketsByRegion)} value: {bucketsByRegion}")
+    try:
+        # Run the synchronous operation
+        result = manage_AWS_S3_backup_configurations(
+            operation=operation,
+            accessId=accessId,
+            accessKey=accessKey,
+            bucketsByRegion=bucketsByRegion,
+            bucketName="tdedsabucket01",  # Hardcoded for now, will be dynamic later
+            acctName=acctName
+        )
+        metadata = {
+            "tool_name": "bar_manageAWSS3Operations",
+            "operation": operation,
+            "accessId": accessId,
+            "accessKey": accessKey,
+            "bucketsByRegion": bucketsByRegion,
+            "bucketName": bucketName,
+            "acctName": acctName,
+            "success": True
+        }
+        logger.debug(f"Tool: handle_bar_manageAWSS3Operations: metadata: {metadata}")
+        return create_response(result, metadata)
+    except Exception as e:
+        logger.error(f"Error in handle_bar_manageAWSS3Operations: {e}")
+        error_result = f"āŒ Error in DSA AWS S3 operation: {str(e)}"
+        metadata = {
+            "tool_name": "bar_manageAWSS3Operations",
+            "operation": operation,
+            "error": str(e),
+            "success": False
+        }
+        return create_response(error_result, metadata)
+
diff --git a/src/teradata_mcp_server/tools/bar/dsa_client.py b/src/teradata_mcp_server/tools/bar/dsa_client.py
index 3c4722d..01aa776 100644
--- a/src/teradata_mcp_server/tools/bar/dsa_client.py
+++ b/src/teradata_mcp_server/tools/bar/dsa_client.py
@@ -4,7 +4,7 @@
 import logging
 import os
 from typing import Any, Dict, List, Optional
-import requests
+import requests  # pyright: ignore[reportMissingModuleSource]
 from urllib.parse import urljoin

 logger = logging.getLogger("teradata_mcp_server")
@@ -73,6 +73,7 @@ def __init__(
         if not self.base_url.endswith('/'):
             self.base_url += '/'

+        print(f"[DEBUG] DSAClient initialized with base_url: {self.base_url}")
         logger.info(f"Initialized DSA client for {self.base_url}")

     def _get_auth(self) -> Optional[tuple]:
@@ -133,27 +134,22 @@ def _make_request(
             verify=self.verify_ssl,
             timeout=self.timeout
         )
-
         logger.debug(f"Response status: {response.status_code}")
-        # Handle authentication errors
         if response.status_code == 401:
             raise DSAAuthenticationError("Authentication failed - check username and password")
-        # Handle other client/server errors
         if response.status_code >= 400:
             error_msg = f"DSA API error: {response.status_code} - {response.text}"
             logger.error(error_msg)
             raise DSAAPIError(error_msg)
-        # Parse JSON response
         try:
             return response.json()
         except json.JSONDecodeError as e:
             logger.error(f"Failed to parse JSON response: {e}")
             raise DSAAPIError(f"Invalid JSON response from DSA API: {e}")
-
-        except requests.exceptions.ConnectError as e:
+        except requests.exceptions.ConnectionError as e:
             error_msg = f"Failed to connect to DSA server at {url}: {e}"
             logger.error(error_msg)
             raise DSAConnectionError(error_msg)

From 527c2de746c9a1ae64268639b6b7f05397fc5db9 Mon Sep 17 00:00:00 2001
From: Prasad Avadhanam
Date: Thu, 18 Sep 2025 14:22:20 +0530
Subject: [PATCH 05/14] minor cleanup for AWSS3 config mgmt functions

---
src/teradata_mcp_server/tools/bar/README.md | 4 ++-- src/teradata_mcp_server/tools/bar/bar_objects.yml | 5 ++--- src/teradata_mcp_server/tools/bar/bar_tools.py | 4 ++-- src/teradata_mcp_server/tools/bar/dsa_client.py | 1 - 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index d198bf9..23f746e 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -84,8 +84,8 @@ bar: **Status**: Developed Unified tool for managing DSA disk file system configurations for backup storage. -#### bar_manageAwsS3 āœ… -**Status**: Developed +#### bar_manageAwsS3 🚧 +**Status**: Work-In-Progress Tool for managing AWS S3 bucket configurations for backup storage. #### bar_manageAzureBlob 🚧 diff --git a/src/teradata_mcp_server/tools/bar/bar_objects.yml b/src/teradata_mcp_server/tools/bar/bar_objects.yml index 472d4fe..36a9245 100644 --- a/src/teradata_mcp_server/tools/bar/bar_objects.yml +++ b/src/teradata_mcp_server/tools/bar/bar_objects.yml @@ -10,8 +10,7 @@ bar_manageAWSS3Operations: type: tool - description: | - Manage AWS S3 backup configuration for Teradata DSA via MCP server. Supports list, config, delete_all, and remove operations. + description: Manage AWS S3 backup configuration for Teradata DSA via MCP server (list, config, delete_all, remove) parameters: operation: type: string @@ -27,7 +26,7 @@ bar_manageAWSS3Operations: required: false bucketsByRegion: type: object - description: Buckets by region configuration (object: dict or list) + description: "Buckets by region configuration (object: dict or list)" required: false bucketName: type: string diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index c0ce9e9..803c060 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -743,8 +743,8 @@ def handle_bar_manageAWSS3Operations( ): logger.info("handle_bar_manageAWSS3Operations called with operation=%s, accessId=%s, acctName=%s", operation, accessId, acctName) """ - Handle DSA AWS S3 backup solution configuraiton operations for the MCP server - + Handle DSA AWS S3 backup solution configuration operations for the MCP server + This tool provides unified management of DSA AWS S3 backup solution configuration that is required for backup and restore operations. 
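
For reference, the `config` operation above expects `bucketsByRegion` as a nested object rather than a flat string. The following is a minimal sketch of a call, with placeholder credentials, region, bucket, and prefix values; the nesting mirrors the region -> buckets -> prefixList structure that `list_aws_s3_backup_configurations` walks when listing configurations:

```python
from teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageAWSS3Operations

# Illustrative values only; the real shape is defined by the DSA REST API.
buckets_by_region = [
    {
        "region": "us-west-2",
        "buckets": [
            {
                "bucketName": "tdedsabucket01",
                # storageDevices is echoed verbatim by the list operation;
                # its exact type is not pinned down by this patch.
                "prefixList": [{"prefixName": "dsa-backups", "storageDevices": 4}],
            }
        ],
    }
]

result = handle_bar_manageAWSS3Operations(
    conn=None,                # unused for DSA operations, required by the MCP framework
    operation="config",
    accessId="AKIAXXXXXXXX",  # placeholder credentials
    accessKey="<secret>",
    bucketsByRegion=buckets_by_region,
    bucketName="tdedsabucket01",
    acctName="dsa-account",
)
```

Note that at this point in the series the handler still hardcodes `bucketName="tdedsabucket01"` when delegating to `manage_AWS_S3_backup_configurations`, so the `bucketName` argument is recorded in the response metadata but not passed through.
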
diff --git a/src/teradata_mcp_server/tools/bar/dsa_client.py b/src/teradata_mcp_server/tools/bar/dsa_client.py index 01aa776..aac8bc0 100644 --- a/src/teradata_mcp_server/tools/bar/dsa_client.py +++ b/src/teradata_mcp_server/tools/bar/dsa_client.py @@ -73,7 +73,6 @@ def __init__( if not self.base_url.endswith('/'): self.base_url += '/' - print(f"[DEBUG] DSAClient initialized with base_url: {self.base_url}") logger.info(f"Initialized DSA client for {self.base_url}") def _get_auth(self) -> Optional[tuple]: From 5b7201f74ddb3fad35318f3af7c7aa2a30a0bd60 Mon Sep 17 00:00:00 2001 From: Prasad Avadhanam Date: Fri, 19 Sep 2025 09:03:29 +0530 Subject: [PATCH 06/14] minor fixes in ManageAWSS3ConfigOps --- .../tools/bar/bar_objects.yml | 30 ------------------- .../tools/bar/dsa_client.py | 2 +- 2 files changed, 1 insertion(+), 31 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/bar_objects.yml b/src/teradata_mcp_server/tools/bar/bar_objects.yml index 36a9245..bc61057 100644 --- a/src/teradata_mcp_server/tools/bar/bar_objects.yml +++ b/src/teradata_mcp_server/tools/bar/bar_objects.yml @@ -7,33 +7,3 @@ # Future BAR-related prompts and simple tools can be defined here # following the standard YAML object format. - -bar_manageAWSS3Operations: - type: tool - description: Manage AWS S3 backup configuration for Teradata DSA via MCP server (list, config, delete_all, remove) - parameters: - operation: - type: string - description: Operation to perform (list, config, delete_all, remove) - required: true - accessId: - type: string - description: AWS Access ID - required: false - accessKey: - type: string - description: AWS Access Key - required: false - bucketsByRegion: - type: object - description: "Buckets by region configuration (object: dict or list)" - required: false - bucketName: - type: string - description: AWS Bucket Name - required: false - acctName: - type: string - description: AWS Account Name - required: false - handler: handle_bar_manageAWSS3Operations diff --git a/src/teradata_mcp_server/tools/bar/dsa_client.py b/src/teradata_mcp_server/tools/bar/dsa_client.py index aac8bc0..92737d3 100644 --- a/src/teradata_mcp_server/tools/bar/dsa_client.py +++ b/src/teradata_mcp_server/tools/bar/dsa_client.py @@ -4,7 +4,7 @@ import logging import os from typing import Any, Dict, List, Optional -import requests # pyright: ignore[reportMissingModuleSource] +import requests # from urllib.parse import urljoin logger = logging.getLogger("teradata_mcp_server") From cdb08c4a405f1fc3444fc51c8c1577ee1ee747e8 Mon Sep 17 00:00:00 2001 From: Prasad Avadhanam Date: Fri, 19 Sep 2025 09:12:05 +0530 Subject: [PATCH 07/14] Fix the description forAWSS3Configmgmt --- src/teradata_mcp_server/tools/bar/bar_tools.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 803c060..4e135dc 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -741,7 +741,6 @@ def handle_bar_manageAWSS3Operations( *args, **kwargs ): - logger.info("handle_bar_manageAWSS3Operations called with operation=%s, accessId=%s, acctName=%s", operation, accessId, acctName) """ Handle DSA AWS S3 backup solution configuration operations for the MCP server @@ -759,7 +758,7 @@ def handle_bar_manageAWSS3Operations( Returns: ResponseType: formatted response with operation results + metadata """ - + logger.info("handle_bar_manageAWSS3Operations called with operation=%s, accessId=%s, 
acctName=%s", operation, accessId, acctName) logger.debug(f"Tool: handle_bar_manageAWSS3Operations: Args: operation: {operation}, accessId: {accessId}, accessKey: {accessKey}, bucketsByRegion: {bucketsByRegion}, acctName: {acctName}") logger.debug(f"[DEBUG] bucketsByRegion type: {type(bucketsByRegion)} value: {bucketsByRegion}") try: From d42f8fdbe861729fd6dff4c954e9f282ce718a08 Mon Sep 17 00:00:00 2001 From: Bera Date: Sat, 20 Sep 2025 09:04:43 +0530 Subject: [PATCH 08/14] Fixed tool description warnings by copying docstrings in make_tool_wrapper, Added complete media server management functionality (list, get, add, delete, list_consumers, list_consumers_by_server) --- src/teradata_mcp_server/app.py | 1 + .../tools/bar/bar_tools.py | 367 +++++++++++++++++- 2 files changed, 367 insertions(+), 1 deletion(-) diff --git a/src/teradata_mcp_server/app.py b/src/teradata_mcp_server/app.py index 3470b3c..489c70c 100644 --- a/src/teradata_mcp_server/app.py +++ b/src/teradata_mcp_server/app.py @@ -243,6 +243,7 @@ def _exec(*args, **kwargs): _exec.__name__ = getattr(func, "__name__", "wrapped_tool") _exec.__signature__ = new_sig + _exec.__doc__ = func.__doc__ # Copy docstring from original function if annotations: _exec.__annotations__ = annotations return _exec diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 4e135dc..1f17c04 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -5,7 +5,8 @@ import logging import string -from typing import Optional +import json +from typing import Optional, List, Dict import os logger = logging.getLogger("teradata_mcp_server") @@ -671,6 +672,287 @@ def manage_AWS_S3_backup_configurations( return f"āŒ Error during {operation}: {str(e)}" +#------------------ Media Server Operations ------------------# + +def manage_dsa_media_servers( + operation: str, + server_name: Optional[str] = None, + port: Optional[int] = None, + ip_addresses: Optional[str] = None, + pool_shared_pipes: Optional[int] = 50, + virtual: Optional[bool] = False +) -> str: + """Unified media server management for all media server operations + + This comprehensive function handles all media server operations in the DSA system, + including listing, getting details, adding, deleting, and managing consumers. + """ + # Validate operation + valid_operations = [ + "list", "get", "add", "delete", + "list_consumers", "list_consumers_by_server" + ] + + if operation not in valid_operations: + return f"āŒ Invalid operation '{operation}'. 
Valid operations: {', '.join(valid_operations)}" + + try: + # Route to the appropriate operation + if operation == "list": + return _list_media_servers() + + elif operation == "get": + if not server_name: + return "āŒ server_name is required for 'get' operation" + return _get_media_server(server_name) + + elif operation == "add": + if not server_name: + return "āŒ server_name is required for 'add' operation" + if not port: + return "āŒ port is required for 'add' operation" + if not ip_addresses: + return "āŒ ip_addresses is required for 'add' operation" + + try: + import json + ip_list = json.loads(ip_addresses) + return _add_media_server(server_name, port, ip_list, pool_shared_pipes or 50) + except json.JSONDecodeError as e: + return f"āŒ Invalid IP addresses format: {str(e)}\nExpected JSON format: '[{{\"ipAddress\": \"IP\", \"netmask\": \"MASK\"}}]'" + + elif operation == "delete": + if not server_name: + return "āŒ server_name is required for 'delete' operation" + return _delete_media_server(server_name, virtual or False) + + elif operation == "list_consumers": + return _list_media_server_consumers() + + elif operation == "list_consumers_by_server": + if not server_name: + return "āŒ server_name is required for 'list_consumers_by_server' operation" + return _list_media_server_consumers_by_name(server_name) + + except Exception as e: + logger.error(f"Failed to execute media server operation '{operation}': {str(e)}") + return f"āŒ Error executing media server operation '{operation}': {str(e)}" + + +def _list_media_servers() -> str: + """List all media servers from the DSA system""" + try: + # Make request to list media servers + response = dsa_client._make_request("GET", "dsa/components/mediaservers") + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to list media servers:\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to list media servers: {response.get('status', 'Unknown error')}" + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list media servers: {str(e)}") + return f"āŒ Error listing media servers: {str(e)}" + + +def _get_media_server(server_name: str) -> str: + """Get details of a specific media server by name""" + try: + # Make request to get specific media server + endpoint = f"dsa/components/mediaservers/{server_name}" + response = dsa_client._make_request("GET", endpoint) + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to get media server '{server_name}':\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to get media server '{server_name}': {response.get('status', 'Unknown error')}" + + # Return the full response for complete 
transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to get media server '{server_name}': {str(e)}") + return f"āŒ Error getting media server '{server_name}': {str(e)}" + + +def _add_media_server( + server_name: str, + port: int, + ip_list: List[Dict[str, str]], + pool_shared_pipes: int = 50 +) -> str: + """Add a new media server to the DSA system""" + try: + # Validate inputs + if not server_name or not server_name.strip(): + return "āŒ Server name is required and cannot be empty" + + if not (1 <= port <= 65535): + return f"āŒ Port must be between 1 and 65535" + + if not ip_list or not isinstance(ip_list, list): + return "āŒ At least one IP address is required" + + # Validate IP addresses format + for ip_info in ip_list: + if not isinstance(ip_info, dict) or 'ipAddress' not in ip_info or 'netmask' not in ip_info: + return "āŒ Each IP address must be a dictionary with 'ipAddress' and 'netmask' keys" + + # Prepare request payload + payload = { + "serverName": server_name.strip(), + "port": port, + "ipInfo": ip_list + } + + # Make request to add media server + response = dsa_client._make_request( + "POST", + "dsa/components/mediaservers", + data=payload, + headers={"Content-Type": "application/json", "Accept": "*/*"} + ) + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to add media server '{server_name}':\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to add media server '{server_name}': {response.get('status', 'Unknown error')}" + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to add media server '{server_name}': {str(e)}") + return f"āŒ Error adding media server '{server_name}': {str(e)}" + + +def _delete_media_server(server_name: str, virtual: bool = False) -> str: + """Delete a media server from the DSA system""" + try: + # Prepare request parameters + params = {} + if virtual: + params["virtual"] = "true" + + # Make request to delete media server + endpoint = f"dsa/components/mediaservers/{server_name}" + response = dsa_client._make_request("DELETE", endpoint, params=params) + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to delete media server '{server_name}':\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to delete media server '{server_name}': {response.get('status', 'Unknown error')}" + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to delete media server '{server_name}': {str(e)}") + return f"āŒ Error deleting media server '{server_name}': {str(e)}" + + +def 
_list_media_server_consumers() -> str: + """List all media server consumers from the DSA system""" + try: + # Make request to list media server consumers + response = dsa_client._make_request("GET", "dsa/components/mediaservers/listconsumers") + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to list media server consumers:\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to list media server consumers: {response.get('status', 'Unknown error')}" + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list media server consumers: {str(e)}") + return f"āŒ Error listing media server consumers: {str(e)}" + + +def _list_media_server_consumers_by_name(server_name: str) -> str: + """List consumers for a specific media server by name""" + try: + # Make request to list consumers for specific media server + endpoint = f"dsa/components/mediaservers/listconsumers/{server_name.strip()}" + response = dsa_client._make_request("GET", endpoint) + + if not response.get("valid", False): + error_messages = [] + validation_list = response.get("validationlist", {}) + if validation_list: + client_errors = validation_list.get("clientValidationList", []) + server_errors = validation_list.get("serverValidationList", []) + + for error in client_errors + server_errors: + error_messages.append(f"Code {error.get('code', 'N/A')}: {error.get('message', 'Unknown error')}") + + if error_messages: + return f"āŒ Failed to list consumers for media server '{server_name}':\n" + "\n".join(error_messages) + else: + return f"āŒ Failed to list consumers for media server '{server_name}': {response.get('status', 'Unknown error')}" + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list consumers for media server '{server_name}': {str(e)}") + return f"āŒ Error listing consumers for media server '{server_name}': {str(e)}" + + #------------------ Tool Handler for MCP ------------------# def handle_bar_manageDsaDiskFileSystem( @@ -794,3 +1076,86 @@ def handle_bar_manageAWSS3Operations( } return create_response(error_result, metadata) + +def handle_bar_manageMediaServer( + conn: any, # Not used for DSA operations, but required by MCP framework + operation: str, + server_name: str = None, + port: int = None, + ip_addresses: str = None, + pool_shared_pipes: int = 50, + virtual: bool = False, + *args, + **kwargs +): + """ + Unified media server management tool for all DSA media server operations. + + This comprehensive tool handles all media server operations in the DSA system, + including listing, getting details, adding, deleting, and managing consumers. + + Arguments: + operation - The operation to perform. 
Valid values: + "list" - List all media servers + "get" - Get details of a specific media server + "add" - Add a new media server + "delete" - Delete a media server + "list_consumers" - List all media server consumers + "list_consumers_by_server" - List consumers for a specific server + server_name - Name of the media server (required for get, add, delete, list_consumers_by_server) + port - Port number for the media server (required for add operation, 1-65535) + ip_addresses - JSON string containing IP address configuration for add operation, e.g.: + '[{"ipAddress": "192.168.1.100", "netmask": "255.255.255.0"}]' + pool_shared_pipes - Number of shared pipes in the pool (for add operation, 1-99, default: 50) + virtual - Whether to perform a virtual deletion (for delete operation, default: False) + + Returns: + ResponseType: formatted response with media server operation results + metadata + """ + logger.debug(f"Tool: handle_bar_manageMediaServer: Args: operation: {operation}, server_name: {server_name}, port: {port}") + + try: + # Validate operation + valid_operations = [ + "list", "get", "add", "delete", + "list_consumers", "list_consumers_by_server" + ] + + if operation not in valid_operations: + error_result = f"āŒ Invalid operation '{operation}'. Valid operations: {', '.join(valid_operations)}" + metadata = { + "tool_name": "bar_manageMediaServer", + "operation": operation, + "error": "Invalid operation", + "success": False + } + return create_response(error_result, metadata) + + # Execute the media server operation + result = manage_dsa_media_servers( + operation=operation, + server_name=server_name, + port=port, + ip_addresses=ip_addresses, + pool_shared_pipes=pool_shared_pipes, + virtual=virtual + ) + + metadata = { + "tool_name": "bar_manageMediaServer", + "operation": operation, + "server_name": server_name, + "success": True + } + logger.debug(f"Tool: handle_bar_manageMediaServer: metadata: {metadata}") + return create_response(result, metadata) + except Exception as e: + logger.error(f"Error in handle_bar_manageMediaServer: {e}") + error_result = f"āŒ Error in DSA media server operation: {str(e)}" + metadata = { + "tool_name": "bar_manageMediaServer", + "operation": operation, + "error": str(e), + "success": False + } + return create_response(error_result, metadata) \ No newline at end of file From fb449a3e1b9db1a8f9ddb6e6b25f06af142bb03a Mon Sep 17 00:00:00 2001 From: Bera Date: Sat, 20 Sep 2025 09:15:08 +0530 Subject: [PATCH 09/14] Update BAR README to mark bar_manageMediaServer as implemented - Changed status from Planned to Implemented - Updated tool count: 2 Developed, 14 Planned - Added comprehensive description of media server operations --- src/teradata_mcp_server/tools/bar/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index 23f746e..11568c0 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -76,7 +76,7 @@ bar: ## Available Tools -**Total Estimated Tools: 16** (1 āœ… Developed, 15 🚧 Planned) +**Total Estimated Tools: 16** (2 āœ… Developed, 14 🚧 Planned) ### Storage Configuration Tools @@ -106,9 +106,9 @@ Tool for managing IBM Spectrum Protect configurations for backup storage. 
### Infrastructure Management Tools -#### bar_manageMediaServer 🚧 -**Status**: Planned -Tool for managing BarNC configurations +#### bar_manageMediaServer āœ… +**Status**: Implemented +Tool for managing media server configurations including list, get, add, delete, and consumer management operations. #### bar_manageTeradataSystem 🚧 **Status**: Planned From 2436acc4f96db5199f26b3a3feef72cd1892b63e Mon Sep 17 00:00:00 2001 From: Bera Date: Sat, 20 Sep 2025 18:11:26 +0530 Subject: [PATCH 10/14] Implement bar_manageTeradataSystem - Added complete Teradata system management functionality with 7 operations: * list_systems - List all configured Teradata systems * get_system - Get details for a specific Teradata system * config_system - Configure a new Teradata system * enable_system - Enable a Teradata system * delete_system - Delete a Teradata system * list_consumers - List all system consumers * get_consumer - Get details for a specific system consumer - Created manage_dsa_systems function following media server pattern - Updated handle_bar_manageTeradataSystem to use unified architecture - Fixed MCP serialization issues by returning JSON strings consistently - Updated BAR tools README to reflect implementation status - All functions return complete DSA API responses for transparency --- src/teradata_mcp_server/tools/bar/README.md | 8 +- .../tools/bar/bar_tools.py | 323 +++++++++++++++++- 2 files changed, 323 insertions(+), 8 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index 11568c0..faf219c 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -76,7 +76,7 @@ bar: ## Available Tools -**Total Estimated Tools: 16** (2 āœ… Developed, 14 🚧 Planned) +**Total Estimated Tools: 16** (3 āœ… Developed, 13 🚧 Planned) ### Storage Configuration Tools @@ -110,9 +110,9 @@ Tool for managing IBM Spectrum Protect configurations for backup storage. **Status**: Implemented Tool for managing media server configurations including list, get, add, delete, and consumer management operations. -#### bar_manageTeradataSystem 🚧 -**Status**: Planned -Tool for managing DSMain configurations and Teradata system integration. +#### bar_manageTeradataSystem āœ… +**Status**: Implemented +Tool for managing Teradata system configurations and consumers in DSA. Supports listing systems, getting system details, configuring new systems, enabling/deleting systems, and managing system consumers. 
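
To make the call pattern concrete, here is a minimal sketch of configuring and then enabling a system through the handler; the system name, TDP id, and credentials are placeholders. `ir_support` is left at its default because the helper `manage_dsa_systems` types it as a boolean while the handler docstring describes string levels ("SOURCE", "TARGET", "BOTH"), so its exact format is not pinned down by this patch:

```python
from teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageTeradataSystem

# Step 1: register a hypothetical system with placeholder credentials.
result = handle_bar_manageTeradataSystem(
    conn=None,                  # unused for DSA operations, required by the MCP framework
    operation="config_system",
    system_name="tdsys01",      # hypothetical system name
    tdp_id="tdsys01",           # hypothetical TDP id
    username="dsa_admin",
    password="<secret>",
)

# Step 2: enabling is a separate call once configuration succeeds.
result = handle_bar_manageTeradataSystem(
    conn=None,
    operation="enable_system",
    system_name="tdsys01",
)
```
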
### Target Group Management Tools diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 1f17c04..0a874f5 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -952,6 +952,234 @@ def _list_media_server_consumers_by_name(server_name: str) -> str: logger.error(f"Failed to list consumers for media server '{server_name}': {str(e)}") return f"āŒ Error listing consumers for media server '{server_name}': {str(e)}" +#------------------ Teradata System Management Operations ------------------# + +def manage_dsa_systems( + operation: str, + system_name: Optional[str] = None, + tdp_id: Optional[str] = None, + username: Optional[str] = None, + password: Optional[str] = None, + ir_support: Optional[bool] = True, + component_name: Optional[str] = None +) -> str: + """Unified Teradata system management for all system operations + + This comprehensive function handles all Teradata system operations in the DSA system, + including listing, getting details, configuring, enabling, deleting, and managing consumers. + """ + # Validate operation + valid_operations = [ + "list_systems", "get_system", "config_system", + "enable_system", "delete_system", "list_consumers", "get_consumer" + ] + + if operation not in valid_operations: + return f"āŒ Invalid operation '{operation}'. Valid operations: {', '.join(valid_operations)}" + + try: + # Route to the appropriate operation + if operation == "list_systems": + return _list_teradata_systems() + + elif operation == "get_system": + if not system_name: + return "āŒ system_name is required for 'get_system' operation" + return _get_teradata_system(system_name) + + elif operation == "config_system": + if not all([system_name, tdp_id, username, password]): + return "āŒ system_name, tdp_id, username, and password are required for 'config_system' operation" + return _config_teradata_system(system_name, tdp_id, username, password, ir_support) + + elif operation == "enable_system": + if not system_name: + return "āŒ system_name is required for 'enable_system' operation" + return _enable_teradata_system(system_name) + + elif operation == "delete_system": + if not system_name: + return "āŒ system_name is required for 'delete_system' operation" + return _delete_teradata_system(system_name) + + elif operation == "list_consumers": + return _list_system_consumers() + + elif operation == "get_consumer": + if not component_name: + return "āŒ component_name is required for 'get_consumer' operation" + return _get_system_consumer(component_name) + + except Exception as e: + logger.error(f"Failed to execute Teradata system operation '{operation}': {str(e)}") + return f"āŒ Error executing Teradata system operation '{operation}': {str(e)}" + + +def _list_teradata_systems() -> str: + """List all configured Teradata database systems in DSA""" + try: + # Make API call to list Teradata systems + response = dsa_client._make_request( + method='GET', + endpoint='dsa/components/systems/teradata' + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list Teradata systems: {str(e)}") + return f"āŒ Error listing Teradata systems: {str(e)}" + + +def _get_teradata_system(system_name: str) -> str: + """Get detailed information about a specific Teradata database system""" + try: + if not system_name or not system_name.strip(): + return "āŒ System name is required and cannot be 
empty" + + system_name = system_name.strip() + + # Make API call to get specific Teradata system + response = dsa_client._make_request( + method='GET', + endpoint=f'dsa/components/systems/teradata/{system_name}' + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to get Teradata system '{system_name}': {str(e)}") + return f"āŒ Error getting Teradata system '{system_name}': {str(e)}" + + +def _config_teradata_system( + system_name: str, + tdp_id: str, + username: str, + password: str, + ir_support: Optional[str] = None +) -> str: + """Configure a new Teradata database system in DSA""" + try: + if not all([system_name, tdp_id, username, password]): + return "āŒ system_name, tdp_id, username, and password are required" + + # Prepare the configuration payload - matching the working model exactly + config_data = { + "systemName": system_name.strip(), + "tdpId": tdp_id.strip(), + "user": username.strip(), + "password": password, + "databaseQueryMethodType": "BASE_VIEW", + "skipForceFull": True, + "irSupport": ir_support or "true", + "irSupportTarget": "true", + "dslJsonLogging": True, + "ajseSupport": "true", + "softLimit": 10, + "hardLimit": 10 + } + + # Make API call to configure Teradata system + response = dsa_client._make_request( + method='POST', + endpoint='dsa/components/systems/teradata', + data=config_data + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to configure Teradata system '{system_name}': {str(e)}") + return f"āŒ Error configuring Teradata system '{system_name}': {str(e)}" + + +def _enable_teradata_system(system_name: str) -> str: + """Enable a configured Teradata database system in DSA""" + try: + if not system_name or not system_name.strip(): + return "āŒ System name is required" + + system_name = system_name.strip() + + # Make API call to enable Teradata system + response = dsa_client._make_request( + method='PATCH', + endpoint=f'dsa/components/systems/enabling/{system_name}/', + data={"enabled": True} + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to enable Teradata system '{system_name}': {str(e)}") + return f"āŒ Error enabling Teradata system '{system_name}': {str(e)}" + + +def _delete_teradata_system(system_name: str) -> str: + """Delete a Teradata database system from DSA""" + try: + if not system_name or not system_name.strip(): + return "āŒ System name is required" + + system_name = system_name.strip() + + # Make API call to delete Teradata system + response = dsa_client._make_request( + method='DELETE', + endpoint=f'dsa/components/systems/teradata/{system_name}' + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to delete Teradata system '{system_name}': {str(e)}") + return f"āŒ Error deleting Teradata system '{system_name}': {str(e)}" + + +def _list_system_consumers() -> str: + """List all system consumers in DSA""" + try: + # Make API call to list system consumers + response = dsa_client._make_request( + method='GET', + endpoint='dsa/components/systems/listconsumers' + ) + + # Return the full response for complete transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list system consumers: {str(e)}") + 
return f"āŒ Error listing system consumers: {str(e)}" + + +def _get_system_consumer(component_name: str) -> str: + """Get detailed information about a specific system consumer""" + try: + if not component_name or not component_name.strip(): + return "āŒ Component name is required and cannot be empty" + + component_name = component_name.strip() + + # Make API call to get specific system consumer + response = dsa_client._make_request( + method='GET', + endpoint=f'dsa/components/systems/listconsumers/{component_name}' + ) + + # Return complete DSA response for transparency + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to get system consumer '{component_name}': {str(e)}") + return f"āŒ Error getting system consumer '{component_name}': {str(e)}" + #------------------ Tool Handler for MCP ------------------# @@ -1010,8 +1238,6 @@ def handle_bar_manageDsaDiskFileSystem( } return create_response(error_result, metadata) - - def handle_bar_manageAWSS3Operations( conn: any, # Not used for DSA operations, but required by MCP framework operation: str, @@ -1076,7 +1302,6 @@ def handle_bar_manageAWSS3Operations( } return create_response(error_result, metadata) - def handle_bar_manageMediaServer( conn: any, # Not used for DSA operations, but required by MCP framework operation: str, @@ -1158,4 +1383,94 @@ def handle_bar_manageMediaServer( "error": str(e), "success": False } - return create_response(error_result, metadata) \ No newline at end of file + return create_response(error_result, metadata) + +def handle_bar_manageTeradataSystem( + conn: any, # Not used for DSA operations, but required by MCP framework + operation: str, + system_name: Optional[str] = None, + tdp_id: Optional[str] = None, + username: Optional[str] = None, + password: Optional[str] = None, + ir_support: Optional[str] = None, + component_name: Optional[str] = None, + *args, + **kwargs +): + """Unified DSA System Management Tool + + This comprehensive tool handles all DSA system operations including Teradata systems + and system consumers management in a single interface. + + Args: + operation: The operation to perform. Valid operations: + - "list_systems" - List all configured Teradata systems + - "get_system" - Get details for a specific Teradata system + - "config_system" - Configure a new Teradata system + - "enable_system" - Enable a Teradata system + - "delete_system" - Delete a Teradata system + - "list_consumers" - List all system consumers + - "get_consumer" - Get details for a specific system consumer + system_name: Name of the Teradata system (required for system operations) + tdp_id: TDP ID for Teradata system (required for config operation) + username: Username for Teradata system (required for config operation) + password: Password for Teradata system (required for config operation) + ir_support: IR support level (for config operation) - "SOURCE", "TARGET", or "BOTH" + component_name: Name of the system component (required for consumer operations) + + Returns: + Dict containing the result and metadata + """ + try: + logger.debug(f"Tool: handle_bar_manageTeradataSystem: Args: operation: {operation}, system_name: {system_name}") + + # Validate operation + valid_operations = [ + "list_systems", "get_system", "config_system", + "enable_system", "delete_system", "list_consumers", "get_consumer" + ] + + if operation not in valid_operations: + error_result = f"āŒ Invalid operation '{operation}'. 
Valid operations: {', '.join(valid_operations)}" + metadata = { + "tool_name": "bar_manageTeradataSystem", + "operation": operation, + "error": "Invalid operation", + "success": False + } + return create_response(error_result, metadata) + + # Execute the Teradata system operation + result = manage_dsa_systems( + operation=operation, + system_name=system_name, + tdp_id=tdp_id, + username=username, + password=password, + ir_support=ir_support, + component_name=component_name + ) + + metadata = { + "tool_name": "bar_manageTeradataSystem", + "operation": operation, + "system_name": system_name, + "success": True + } + + if component_name: + metadata["component_name"] = component_name + + logger.debug(f"Tool: handle_bar_manageTeradataSystem: metadata: {metadata}") + return create_response(result, metadata) + + except Exception as e: + logger.error(f"Error in handle_bar_manageTeradataSystem: {e}") + error_result = f"āŒ Error in DSA Teradata system operation: {str(e)}" + metadata = { + "tool_name": "bar_manageTeradataSystem", + "operation": operation, + "error": str(e), + "success": False + } + return create_response(error_result, metadata) From 2b404b4780fb4b9462dc311c4c9652908a74cd7c Mon Sep 17 00:00:00 2001 From: Bera Date: Sun, 21 Sep 2025 09:36:14 +0530 Subject: [PATCH 11/14] Complete bar_manageDiskFileTargetGroup implementation with update notes - Implemented complete bar_manageDiskFileTargetGroup with all 6 operations (list, get, create, enable, disable, delete) - Added replication parameter support for all operations following dsa-mcp-server pattern - Fixed DSA REST API endpoints to match working model - Added target_group_config parameter for create operation with JSON payload support - Added update notes to all handle_bar_* functions to guide users on configuration updates - Updated README.md to reflect bar_manageDiskFileTargetGroup as implemented (4 Developed, 12 Planned) - Enhanced documentation with examples and usage patterns --- src/teradata_mcp_server/tools/bar/README.md | 8 +- .../tools/bar/bar_tools.py | 295 ++++++++++++++++++ 2 files changed, 299 insertions(+), 4 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index faf219c..e937650 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -76,7 +76,7 @@ bar: ## Available Tools -**Total Estimated Tools: 16** (3 āœ… Developed, 13 🚧 Planned) +**Total Estimated Tools: 16** (4 āœ… Developed, 12 🚧 Planned) ### Storage Configuration Tools @@ -116,9 +116,9 @@ Tool for managing Teradata system configurations and consumers in DSA. Supports ### Target Group Management Tools -#### bar_manageDiskFileTargetGroup 🚧 -**Status**: Planned -Tool for managing media server configurations with disk file storage solutions. +#### bar_manageDiskFileTargetGroup āœ… +**Status**: Implemented +Tool for managing disk file target group configurations with comprehensive management of backup target groups including create, list, get, enable, disable, and delete operations. 
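
To illustrate the flow end to end, here is a minimal sketch that reuses the JSON shape documented in the handler's docstring; the media server name and backup path are placeholders:

```python
import json

from teradata_mcp_server.tools.bar.bar_tools import manage_dsa_disk_file_target_groups

# Target group configuration following the documented example payload.
config = json.dumps({
    "targetGroupName": "test_tg",
    "isEnabled": True,
    "remoteFileSystems": [
        {
            "mediaServerName": "test-ms",  # placeholder media server
            "fileSystems": [{"path": "/backup/test", "files": 100, "filesystemId": 1}],
        }
    ],
})

print(manage_dsa_disk_file_target_groups(operation="create", target_group_config=config))
print(manage_dsa_disk_file_target_groups(operation="enable", target_group_name="test_tg"))
print(manage_dsa_disk_file_target_groups(operation="list"))
```

Because `create` doubles as an update when the same `targetGroupName` is reused, the same call can be repeated with an adjusted configuration instead of deleting and recreating the group.
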
#### bar_manageAwsS3TargetGroup 🚧 **Status**: Planned diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 0a874f5..14a65a0 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -1180,6 +1180,170 @@ def _get_system_consumer(component_name: str) -> str: logger.error(f"Failed to get system consumer '{component_name}': {str(e)}") return f"āŒ Error getting system consumer '{component_name}': {str(e)}" +#------------------ Disk File Target Group Operations ------------------# + +def _list_disk_file_target_groups(replication: bool = False) -> str: + """List all disk file target groups""" + try: + response = dsa_client._make_request( + method="GET", + endpoint=f"dsa/components/target-groups/disk-file-system?replication={str(replication).lower()}" + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to list disk file target groups: {str(e)}") + return f"āŒ Error listing disk file target groups: {str(e)}" + + +def _get_disk_file_target_group(target_group_name: str, replication: bool = False) -> str: + """Get details of a specific disk file target group""" + try: + response = dsa_client._make_request( + method="GET", + endpoint=f"dsa/components/target-groups/disk-file-system/{target_group_name}/?replication={str(replication).lower()}" + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to get disk file target group '{target_group_name}': {str(e)}") + return f"āŒ Error getting disk file target group '{target_group_name}': {str(e)}" + + +def _create_disk_file_target_group(target_group_config: str, replication: bool = False) -> str: + """Create a new disk file target group using JSON configuration""" + try: + import json + try: + config_data = json.loads(target_group_config) + target_group_name = config_data.get('targetGroupName', 'Unknown') + except json.JSONDecodeError as e: + return f"āŒ Error: Invalid JSON in target_group_config: {str(e)}" + + logger.info(f"Creating target disk file system '{target_group_name}' via DSA API") + + response = dsa_client._make_request( + method="POST", + endpoint=f"dsa/components/target-groups/disk-file-system?replication={str(replication).lower()}", + data=config_data + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to create disk file target group: {str(e)}") + return f"āŒ Error creating disk file target group: {str(e)}" + + +def _enable_disk_file_target_group(target_group_name: str) -> str: + """Enable a disk file target group""" + try: + response = dsa_client._make_request( + method="PATCH", + endpoint=f"dsa/components/target-groups/disk-file-system/enabling/{target_group_name}/" + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to enable disk file target group '{target_group_name}': {str(e)}") + return f"āŒ Error enabling disk file target group '{target_group_name}': {str(e)}" + + +def _disable_disk_file_target_group(target_group_name: str) -> str: + """Disable a disk file target group""" + try: + response = dsa_client._make_request( + method="PATCH", + endpoint=f"dsa/components/target-groups/disk-file-system/disabling/{target_group_name}/" + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to disable disk file target group '{target_group_name}': {str(e)}") + return f"āŒ Error disabling disk file target group 
'{target_group_name}': {str(e)}" + + +def _delete_disk_file_target_group( + target_group_name: str, + replication: bool = False, + delete_all_data: bool = False +) -> str: + """Delete a disk file target group""" + try: + response = dsa_client._make_request( + method="DELETE", + endpoint=f"dsa/components/target-groups/disk-file-system/{target_group_name}/?replication={str(replication).lower()}&deleteAllData={str(delete_all_data).lower()}" + ) + return json.dumps(response, indent=2) + except Exception as e: + logger.error(f"Failed to delete disk file target group '{target_group_name}': {str(e)}") + return f"āŒ Error deleting disk file target group '{target_group_name}': {str(e)}" + + +def manage_dsa_disk_file_target_groups( + operation: str, + target_group_name: Optional[str] = None, + target_group_config: Optional[str] = None, + replication: bool = False, + delete_all_data: bool = False +) -> str: + """Manage DSA disk file target groups + + Provides comprehensive management of disk file target groups including: + - List all target groups + - Get specific target group details + - Create new target groups + - Enable/disable target groups + - Delete target groups + + Args: + operation: Operation to perform ('list', 'get', 'create', 'enable', 'disable', 'delete') + target_group_name: Name of the target group (required for get, enable, disable, delete) + target_group_config: JSON configuration string for create operation + replication: Enable replication (for delete operation) + delete_all_data: Whether to delete all backup data (for delete operation) + + Returns: + JSON string with operation results + """ + try: + logger.info(f"Managing DSA disk file target groups - operation: {operation}") + + if operation == "list": + return _list_disk_file_target_groups(replication) + + elif operation == "get": + if not target_group_name: + return json.dumps({"status": "error", "data": "āŒ target_group_name is required for get operation"}, indent=2) + return _get_disk_file_target_group(target_group_name, replication) + + elif operation == "create": + if not target_group_config: + return json.dumps({"status": "error", "data": "āŒ target_group_config is required for create operation"}, indent=2) + return _create_disk_file_target_group(target_group_config, replication) + + elif operation == "enable": + if not target_group_name: + return json.dumps({"status": "error", "data": "āŒ target_group_name is required for enable operation"}, indent=2) + return _enable_disk_file_target_group(target_group_name) + + elif operation == "disable": + if not target_group_name: + return json.dumps({"status": "error", "data": "āŒ target_group_name is required for disable operation"}, indent=2) + return _disable_disk_file_target_group(target_group_name) + + elif operation == "delete": + if not target_group_name: + return json.dumps({"status": "error", "data": "āŒ target_group_name is required for delete operation"}, indent=2) + return _delete_disk_file_target_group(target_group_name, replication, delete_all_data) + + else: + return json.dumps({ + "status": "error", + "data": f"āŒ Unknown operation: {operation}. 
Supported operations: list, get, create, enable, disable, delete" + }, indent=2) + + except Exception as e: + logger.error(f"Error in manage_dsa_disk_file_target_groups: {e}") + return json.dumps({ + "status": "error", + "data": f"āŒ Error in disk file target group operation: {str(e)}" + }, indent=2) + #------------------ Tool Handler for MCP ------------------# @@ -1203,6 +1367,10 @@ def handle_bar_manageDsaDiskFileSystem( file_system_path: Path to the file system (for config and remove operations) max_files: Maximum number of files allowed (for config operation) + **Note: To UPDATE an existing disk file system configuration, simply use the 'config' + operation with the same file_system_path. The DSA API will automatically override the + existing configuration - no need to remove and reconfigure the file system.** + Returns: ResponseType: formatted response with operation results + metadata """ @@ -1263,6 +1431,10 @@ def handle_bar_manageAWSS3Operations( bucketsByRegion: List of S3 buckets by region (for config operation) acctName: AWS account name (for config operation) + **Note: To UPDATE an existing AWS S3 configuration, simply use the 'config' operation + with new parameters. The DSA API will automatically override the existing + configuration - no need to remove and reconfigure the S3 settings.** + Returns: ResponseType: formatted response with operation results + metadata """ @@ -1334,6 +1506,10 @@ def handle_bar_manageMediaServer( pool_shared_pipes - Number of shared pipes in the pool (for add operation, 1-99, default: 50) virtual - Whether to perform a virtual deletion (for delete operation, default: False) + **Note: To UPDATE an existing media server configuration, simply use the 'add' operation + with the same server_name. The DSA API will automatically override the existing + configuration - no need to delete and recreate the media server.** + Returns: ResponseType: formatted response with media server operation results + metadata """ @@ -1418,6 +1594,10 @@ def handle_bar_manageTeradataSystem( ir_support: IR support level (for config operation) - "SOURCE", "TARGET", or "BOTH" component_name: Name of the system component (required for consumer operations) + **Note: To UPDATE an existing Teradata system configuration, simply use the 'config_system' + operation with the same system_name. 
The DSA API will automatically override the existing + configuration - no need to delete and recreate the system.** + Returns: Dict containing the result and metadata """ @@ -1474,3 +1654,118 @@ def handle_bar_manageTeradataSystem( "success": False } return create_response(error_result, metadata) + +def handle_bar_manageDiskFileTargetGroup( + conn: any, # Not used for DSA operations, but required by MCP framework + operation: str, + target_group_name: Optional[str] = None, + target_group_config: Optional[str] = None, + replication: bool = False, + delete_all_data: bool = False +): + """Handle DSA disk file target group management operations + + Manage disk file target groups for backup and restore operations including: + + **Available Operations:** + - `list`: List all disk file target groups + - `get`: Get detailed information about a specific target group + - `create`: Create a new target group with specified configuration or update existing one + - `enable`: Enable a target group for backup operations + - `disable`: Disable a target group + - `delete`: Delete a target group (optionally with all backup data) + + **Parameters:** + - operation: The operation to perform + - target_group_name: Name of the target group (required for get, enable, disable, delete operations) + - target_group_config: JSON configuration string for create operation (required for create) + - replication: Enable replication settings for list, get, create, and delete operations (default: False) + - delete_all_data: Delete all associated backup data when deleting target group (default: False) + + **Examples:** + ``` + # List all target groups without replication + bar_manageDiskFileTargetGroup(operation="list") + # List all target groups with replication + bar_manageDiskFileTargetGroup(operation="list", replication=True) + + # Get specific target group details without replication + bar_manageDiskFileTargetGroup(operation="get", target_group_name="my_target_group") + # Get specific target group details with replication + bar_manageDiskFileTargetGroup(operation="get", target_group_name="my_target_group", replication=True) + + # Create new target group + - Create basic target group: + config = '{"targetGroupName":"test_tg","isEnabled":true,"remoteFileSystems":[{"mediaServerName":"test-ms","fileSystems":[{"path":"/backup/test","files":100,"filesystemId":1}]}]}' + bar_manageDiskFileTargetGroup(operation="create", target_group_config=config) + + - Create multi-server group: + config = '{"targetGroupName":"backup_tg","isEnabled":true,"remoteFileSystems":[{"mediaServerName":"ms1","fileSystems":[{"path":"/backup1","files":500}]},{"mediaServerName":"ms2","fileSystems":[{"path":"/backup2","files":500}]}]}' + bar_manageDiskFileTargetGroup(operation="create", target_group_config=config) + + **Note: To UPDATE an existing target group configuration, simply use the 'create' operation + with the same targetGroupName. 
The DSA API will automatically override the existing + configuration - no need to delete and recreate the target group.** + + # Enable a target group + bar_manageDiskFileTargetGroup(operation="enable", target_group_name="my_target_group") + # Disable a target group + bar_manageDiskFileTargetGroup(operation="disable", target_group_name="my_target_group") + + # Delete a target group and all its data with replication + - Delete configuration only: + bar_manageDiskFileTargetGroup("delete", target_group_name="test_tg") + - Delete with all data (PERMANENT): + bar_manageDiskFileTargetGroup("delete", + target_group_name="old_tg", + delete_all_data=True) + - Delete replicated group: + bar_manageDiskFileTargetGroup("delete", + target_group_name="repl_tg", + replication=True, + delete_all_data=True) + + ``` + + Returns: + JSON string containing the operation results and status + """ + try: + logger.info(f"BAR Disk File Target Group Management - Operation: {operation}") + + result = manage_dsa_disk_file_target_groups( + operation=operation, + target_group_name=target_group_name, + target_group_config=target_group_config, + replication=replication, + delete_all_data=delete_all_data + ) + + metadata = { + "tool_name": "bar_manageDiskFileTargetGroup", + "operation": operation, + "target_group_name": target_group_name, + "success": True + } + + if target_group_config: + metadata["target_group_config"] = target_group_config + if replication: + metadata["replication"] = replication + if delete_all_data: + metadata["delete_all_data"] = delete_all_data + + logger.debug(f"Tool: handle_bar_manageDiskFileTargetGroup: metadata: {metadata}") + return create_response(result, metadata) + + except Exception as e: + logger.error(f"Error in handle_bar_manageDiskFileTargetGroup: {e}") + error_result = f"āŒ Error in DSA disk file target group operation: {str(e)}" + metadata = { + "tool_name": "bar_manageDiskFileTargetGroup", + "operation": operation, + "error": str(e), + "success": False + } + return create_response(error_result, metadata) + From 6abba9fc30258b5609005792e5e4c791e27fa81c Mon Sep 17 00:00:00 2001 From: Bera Date: Thu, 25 Sep 2025 12:24:35 +0530 Subject: [PATCH 12/14] Implement comprehensive DSA job management operations - Add individual job operation functions (_list_jobs, _get_job, _create_job, _update_job, _run_job, _get_job_status, _retire_job, _delete_job) - Update manage_job_operations() to use modular approach with individual functions - Implement proper DSA REST API endpoints matching reference implementation - Add support for all 9 job operations: list, get, create, update, run, status, retire, unretire, delete - Include comprehensive error handling and JSON response formatting - Update README.md to reflect bar_manageJob implementation status --- src/teradata_mcp_server/tools/bar/README.md | 8 +- .../tools/bar/bar_tools.py | 708 ++++++++++++++++++ 2 files changed, 712 insertions(+), 4 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/README.md b/src/teradata_mcp_server/tools/bar/README.md index e937650..1bac421 100644 --- a/src/teradata_mcp_server/tools/bar/README.md +++ b/src/teradata_mcp_server/tools/bar/README.md @@ -76,7 +76,7 @@ bar: ## Available Tools -**Total Estimated Tools: 16** (4 āœ… Developed, 12 🚧 Planned) +**Total Estimated Tools: 16** (5 āœ… Developed, 11 🚧 Planned) ### Storage Configuration Tools @@ -142,9 +142,9 @@ Tool for managing media server configurations with IBM Spectrum storage solution ### Operations Management Tools -#### bar_manageJob 🚧 -**Status**: Planned 
-Tool for managing backup and restore job lifecycle. +#### bar_manageJob āœ… +**Status**: Implemented +Comprehensive tool for managing backup and restore job lifecycle including creation, updates, retrieval, retirement, deletion, execution, and status monitoring. Supports all DSA job operations through REST API endpoints. #### bar_manageSaveSets 🚧 **Status**: Planned diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 14a65a0..ddc2d9a 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -1344,6 +1344,214 @@ def manage_dsa_disk_file_target_groups( "data": f"āŒ Error in disk file target group operation: {str(e)}" }, indent=2) +#------------------ DSA Job Management Operation------------------# + +def _list_jobs(bucket_size: int = 100, bucket: int = 1, job_type: str = "*%", + is_retired: bool = False, status: str = "*%") -> str: + """List all DSA jobs with filtering options""" + try: + params = { + 'bucketSize': bucket_size, + 'bucket': bucket, + 'jobType': job_type, + 'isRetired': str(is_retired).lower(), + 'status': status + } + + response = dsa_client._make_request('GET', 'dsa/jobs', params=params) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to list jobs: {str(e)}") + return f"āŒ Error listing jobs: {str(e)}" + + +def _get_job(job_name: str) -> str: + """Get job definition by name""" + try: + response = dsa_client._make_request( + method="GET", + endpoint=f"dsa/jobs/{job_name}" + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to get job '{job_name}': {str(e)}") + return f"āŒ Error getting job '{job_name}': {str(e)}" + + +def _create_job(job_config: dict) -> str: + """Create a new job""" + try: + response = dsa_client._make_request( + method="POST", + endpoint="dsa/jobs", + data=job_config + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to create job: {str(e)}") + return f"āŒ Error creating job: {str(e)}" + + +def _update_job(job_config: dict) -> str: + """Update an existing job""" + try: + response = dsa_client._make_request( + method="PUT", + endpoint="dsa/jobs", + data=job_config + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to update job: {str(e)}") + return f"āŒ Error updating job: {str(e)}" + + +def _run_job(job_config: dict) -> str: + """Run/execute a job""" + try: + response = dsa_client._make_request( + method="POST", + endpoint="dsa/jobs/running", + data=job_config + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to run job: {str(e)}") + return f"āŒ Error running job: {str(e)}" + + +def _get_job_status(job_name: str) -> str: + """Get job status""" + try: + response = dsa_client._make_request( + method="GET", + endpoint=f"dsa/jobs/{job_name}/status" + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to get job status for '{job_name}': {str(e)}") + return f"āŒ Error getting job status for '{job_name}': {str(e)}" + + +def _retire_job(job_name: str, retired: bool = True) -> str: + """Retire or unretire a job""" + try: + response = dsa_client._make_request( + method="PATCH", + endpoint=f"dsa/jobs/{job_name}?retired={str(retired).lower()}" + ) + return json.dumps(response, indent=2) + + except Exception as e: + action = "retire" if retired else "unretire" + logger.error(f"Failed to 
{action} job '{job_name}': {str(e)}") + return f"āŒ Error {action}ing job '{job_name}': {str(e)}" + + +def _delete_job(job_name: str) -> str: + """Delete a job""" + try: + response = dsa_client._make_request( + method="DELETE", + endpoint=f"dsa/jobs/{job_name}" + ) + return json.dumps(response, indent=2) + + except Exception as e: + logger.error(f"Failed to delete job '{job_name}': {str(e)}") + return f"āŒ Error deleting job '{job_name}': {str(e)}" + + +def manage_job_operations(operation: str, job_name: str = None, job_config: str = None) -> str: + """DSA Job Management Operations + + Handles all job operations including list, get, create, update, run, status, retire, unretire, delete + + Args: + operation: The operation to perform + job_name: Name of the job (required for specific operations) + job_config: JSON configuration for creating/updating/running jobs + + Returns: + Formatted result of the requested operation + """ + import json + + try: + logger.info(f"DSA Job Operation: {operation}") + + if operation == "list": + # List all jobs with default parameters + return _list_jobs() + + elif operation == "get": + if not job_name: + return "āŒ Error: job_name is required for get operation" + return _get_job(job_name) + + elif operation == "create": + if not job_config: + return "āŒ Error: job_config is required for create operation" + try: + config_data = json.loads(job_config) + return _create_job(config_data) + except json.JSONDecodeError: + return "āŒ Error: Invalid JSON in job_config parameter" + + elif operation == "update": + if not job_config: + return "āŒ Error: job_config is required for update operation" + try: + config_data = json.loads(job_config) + return _update_job(config_data) + except json.JSONDecodeError: + return "āŒ Error: Invalid JSON in job_config parameter" + + elif operation == "run": + if not job_config: + return "āŒ Error: job_config is required for run operation" + try: + config_data = json.loads(job_config) + return _run_job(config_data) + except json.JSONDecodeError: + return "āŒ Error: Invalid JSON in job_config parameter" + + elif operation == "status": + if not job_name: + return "āŒ Error: job_name is required for status operation" + return _get_job_status(job_name) + + elif operation == "retire": + if not job_name: + return "āŒ Error: job_name is required for retire operation" + return _retire_job(job_name, retired=True) + + elif operation == "unretire": + if not job_name: + return "āŒ Error: job_name is required for unretire operation" + return _retire_job(job_name, retired=False) + + elif operation == "delete": + if not job_name: + return "āŒ Error: job_name is required for delete operation" + return _delete_job(job_name) + + else: + available_operations = ["list", "get", "create", "update", "run", "status", "retire", "unretire", "delete"] + return f"āŒ Error: Unknown operation '{operation}'. 
Available operations: {', '.join(available_operations)}"
+
+    except Exception as e:
+        logger.error(f"Failed to execute job operation '{operation}': {str(e)}")
+        return f"āŒ Error executing job operation '{operation}': {str(e)}"
+
+
+
 #------------------ Tool Handler for MCP ------------------#
 
@@ -1769,3 +1977,503 @@ def handle_bar_manageDiskFileTargetGroup(
         }
         return create_response(error_result, metadata)
 
+def handle_bar_manageJob(
+    conn: any,  # Not used for DSA operations, but required by MCP framework
+    operation: str,
+    job_name: str = None,
+    job_config: str = None
+):
+    """Comprehensive DSA Job Management Tool
+
+    This tool manages backup and restore jobs including creation, updates,
+    retrieval, retirement, deletion, execution, and status monitoring. It provides
+    complete CRUD operations and job execution management through the DSA REST API.
+
+    Args:
+        operation: The operation to perform
+        job_name: Name of the job (required for specific operations)
+        job_config: JSON configuration for creating/updating/running jobs
+
+    Available Operations:
+    - "list" - List all jobs (uses existing list_jobs functionality)
+    - "get" - Get complete job definition and configuration
+    - "create" - Create a new job with specified configuration
+    - "update" - Update an existing job configuration
+    - "run" - Execute/start a job (REQUIRES USER CONFIRMATION)
+    - "status" - Get detailed status of a running or completed job
+    - "retire" - Retire a job (mark as archived)
+    - "unretire" - Unretire a job (restore from archive)
+    - "delete" - Delete a job permanently from repository
+
+    === MINIMAL JOB CONFIGURATION TEMPLATE ===
+
+    For create/update operations, here's the minimal required configuration:
+
+    {
+        "restJobDefinitionModel": {
+            "sourceSystem": "YOUR_SOURCE_SYSTEM_NAME",
+            "srcUserName": "TERADATA_USERNAME",
+            "srcUserPassword": "TERADATA_PASSWORD",
+            "jobType": "BACKUP",
+            "targetGroupName": "YOUR_TARGET_GROUP_NAME",
+            "jobName": "YOUR_JOB_NAME",
+            "jobDescription": "Your job description",
+            "dataDictionaryType": "DATA"
+        },
+        "restJobSettingsModel": {},
+        "restJobObjectsModels": [
+            {
+                "objectName": "YOUR_DATABASE_NAME",
+                "objectType": "DATABASE",
+                "parentName": "YOUR_DATABASE_NAME",
+                "parentType": "DATABASE"
+            }
+        ]
+    }
+
+    === COMPREHENSIVE CREATE/UPDATE JOB CONFIGURATION ===
+
+    For advanced create/update operations with all available options:
+
+    {
+        "restJobDefinitionModel": {
+            "sourceSystem": "YOUR_SOURCE_SYSTEM_NAME",
+            "srcUserName": "TERADATA_USERNAME",
+            "srcUserPassword": "TERADATA_PASSWORD",
+            "jobType": "BACKUP",
+            "targetSystem": "YOUR_TARGET_SYSTEM",
+            "targetUserName": "TARGET_USERNAME",
+            "targetUserPassword": "TARGET_PASSWORD",
+            "targetGroupName": "YOUR_TARGET_GROUP_NAME",
+            "jobName": "YOUR_JOB_NAME",
+            "jobDescription": "Your comprehensive job description",
+            "targetUserAccountId": "YOUR_TARGET_ACCOUNT_ID",
+            "srcUserAccountId": "YOUR_SOURCE_ACCOUNT_ID",
+            "dataDictionaryType": "DATA",
+            "backupName": "YOUR_BACKUP_NAME",
+            "backupVersion": 0,
+            "savesetUser": "SAVESET_USERNAME",
+            "savesetPassword": "SAVESET_PASSWORD",
+            "savesetAccountId": "YOUR_SAVESET_ACCOUNT_ID",
+            "allBackupObjects": true,
+            "autoRetire": true,
+            "retireValue": 30,
+            "retireUnits": "DAYS",
+            "nextIncrementalRestore": true
+        },
+        "restJobSettingsModel": {
+            "reblock": true,
+            "trackEmptyTables": true,
+            "enableTemperatureOverride": true,
+            "singleObjectLocking": true,
+            "skipArchive": false,
+            "skipStats": false,
"loggingLevel": "Info", + "blockLevelCompression": "DEFAULT", + "runAsCopy": false, + "queryband": "ApplicationName=DSA_MCP;Version=1.0;", + "numberParallelBuilds": 2, + "online": false, + "nosync": false, + "temperatureOverride": "DEFAULT", + "disableFallback": false, + "nowait": true, + "configMapName": "YOUR_CONFIG_MAP", + "streamsSoftlimit": 100, + "skipJoinhashIndex": false, + "skipSystemJoinIndex": false, + "mapTo": "YOUR_MAP_TO", + "enableIncrementalRestore": true, + "enableBackupForIr": true, + "skipBuildSecondaryIndexes": false, + "wholeDbc": false, + "dsmainJsonLogging": true, + "includeDbcData": true, + "enableIr": true, + "allowWrite": false, + "cbbEnhancement": true, + "advJobProgressStats": true, + "restJob": "YOUR_REST_JOB", + "previousBackupJob": "YOUR_PREVIOUS_BACKUP_JOB" + }, + "restJobObjectsModels": [ + { + "objectName": "YOUR_DATABASE_NAME", + "objectType": "DATABASE", + "parentType": "DATABASE", + "parentName": "YOUR_DATABASE_NAME", + "renameTo": "YOUR_RENAME_TO", + "mapTo": "YOUR_MAP_TO", + "includeAll": true, + "configMapName": "YOUR_CONFIG_MAP", + "excludeObjects": [ + { + "objectName": "TEMP_TABLE_1", + "objectType": "TABLE" + }, + { + "objectName": "TEMP_TABLE_2", + "objectType": "TABLE" + } + ] + } + ] + } + + === MINIMAL RUN JOB CONFIGURATION TEMPLATE === + + For run operations, here's the minimal required configuration: + + { + "executionType": "FULL", + "jobName": "YOUR_JOB_NAME", + "jobType": "BACKUP" + } + + === COMPREHENSIVE RUN JOB CONFIGURATION === + + For advanced run operations with all available options: + + { + "jobName": "YOUR_JOB_NAME", + "executionType": "FULL", + "backupJobPhase": "DATA", + "allowWrite": true, + "jobType": "BACKUP", + "isRestart": false, + "repositoryJobType": "BACKUP", + "targetName": "YOUR_TARGET_NAME", + "backupVersion": 0, + "promptResponse": true, + "sourceSystem": "YOUR_SOURCE_SYSTEM_NAME", + "srcUserName": "TERADATA_USERNAME", + "srcUserPassword": "TERADATA_PASSWORD", + "jobDescription": "Your job description", + "dataDictionaryType": "DATA", + "targetGroupName": "YOUR_TARGET_GROUP_NAME", + "targetSystem": "YOUR_TARGET_SYSTEM", + "targetUserName": "TARGET_USERNAME", + "targetUserPassword": "TARGET_PASSWORD", + "objectList": [ + { + "objectName": "YOUR_DATABASE_NAME", + "objectType": "DATABASE", + "parentType": "DATABASE", + "parentName": "YOUR_DATABASE_NAME", + "includeAll": true, + "excludeObjects": [] + } + ], + "jobSettings": { + "online": false, + "nowait": true, + "loggingLevel": "Info", + "blockLevelCompression": "DEFAULT", + "skipArchive": false, + "skipStats": false, + "runAsCopy": false, + "queryband": "ApplicationName=DSA_MCP;Version=1.0;" + } + } + + === ALL AVAILABLE CONFIGURATION PARAMETERS === + + **restJobDefinitionModel** (Required for CREATE/UPDATE): + - sourceSystem: Source Teradata system name (REQUIRED) - e.g., "pe06-tdvm-mpp-0002-01" + - srcUserName: Source username - use "TERADATA_USERNAME" (REQUIRED) + - srcUserPassword: Source password - use "TERADATA_PASSWORD" (REQUIRED) + - jobType: "BACKUP", "RESTORE", "COPY" (REQUIRED) + - targetGroupName: Target group name (REQUIRED) - e.g., "dfs_tg" + - jobName: Unique job name (REQUIRED) + - jobDescription: Job description (REQUIRED) + - dataDictionaryType: "DATA" or "STRUCTURE" (REQUIRED) + - targetSystem: Target system name (optional) + - targetUserName: Target username - use "TARGET_USERNAME" (optional) + - targetUserPassword: Target password - use "TARGET_PASSWORD" (optional) + - targetUserAccountId: Target user account ID (optional) + - srcUserAccountId: 
Source user account ID (optional) + - backupName: Backup name (optional) + - backupVersion: Backup version number (optional) + - savesetUser: Saveset username - use "SAVESET_USERNAME" (optional) + - savesetPassword: Saveset password - use "SAVESET_PASSWORD" (optional) + - savesetAccountId: Saveset account ID (optional) + - allBackupObjects: Include all backup objects (true/false) + - autoRetire: Auto-retirement setting (true/false) + - retireValue: Retirement value (number) + - retireUnits: Retirement units - "DAYS", "WEEKS", "MONTHS", "YEARS" + - nextIncrementalRestore: Enable next incremental restore (true/false) + + **restJobSettingsModel** (Optional for CREATE/UPDATE): + - reblock: Reblock setting (true/false) + - trackEmptyTables: Track empty tables (true/false) + - enableTemperatureOverride: Enable temperature override (true/false) + - singleObjectLocking: Single object locking (true/false) + - skipArchive: Skip archive phase (true/false) + - skipStats: Skip statistics collection (true/false) + - loggingLevel: "Error", "Warning", "Info", "Debug" + - blockLevelCompression: "DEFAULT", "ENABLED", "DISABLED" + - runAsCopy: Run as copy operation (true/false) + - queryband: Query band settings (string) + - numberParallelBuilds: Number of parallel builds (number) + - online: Online backup mode (true/false) + - nosync: No sync mode (true/false) + - temperatureOverride: "DEFAULT", "HOT", "WARM", "COLD" + - disableFallback: Disable fallback (true/false) + - nowait: No-wait mode (true/false) + - configMapName: Configuration map name (string) + - streamsSoftlimit: Streams soft limit (number) + - skipJoinhashIndex: Skip join hash index (true/false) + - skipSystemJoinIndex: Skip system join index (true/false) + - mapTo: Map to setting (string) + - enableIncrementalRestore: Enable incremental restore (true/false) + - enableBackupForIr: Enable backup for incremental restore (true/false) + - skipBuildSecondaryIndexes: Skip build secondary indexes (true/false) + - wholeDbc: Backup whole DBC (true/false) + - dsmainJsonLogging: Enable DSMAIN JSON logging (true/false) + - includeDbcData: Include DBC data (true/false) + - enableIr: Enable incremental restore (true/false) + - allowWrite: Allow write operations (true/false) + - cbbEnhancement: CBB enhancement (true/false) + - advJobProgressStats: Advanced job progress statistics (true/false) + - restJob: REST job reference (string) + - previousBackupJob: Previous backup job reference (string) + + **restJobObjectsModels** (Required - Array for CREATE/UPDATE): + For each object to backup: + - objectName: Database/table name (REQUIRED) - e.g., "DBC", "YourDatabase" + - objectType: "DATABASE", "TABLE", "VIEW", "AGGREGATE_FUNCTION", etc. (REQUIRED) + - parentType: Parent object type (optional) + - parentName: Parent object name (optional) + - renameTo: Rename object to (optional) + - mapTo: Map object to (optional) + - includeAll: Include all child objects (true/false) + - configMapName: Configuration map name (optional) + - excludeObjects: Array of objects to exclude (optional) + Each exclude object has: + - objectName: Name of object to exclude + - objectType: Type of object to exclude + - objectType: "DATABASE", "TABLE", "VIEW", etc. 
(REQUIRED)
+
+    === RUN JOB PARAMETERS ===
+
+    **Basic Run Parameters** (Required for run operation):
+    - executionType: "FULL", "INCREMENTAL", "DIFFERENTIAL" (REQUIRED)
+    - jobName: Name of the job to run (REQUIRED)
+    - jobType: "BACKUP", "RESTORE", "COPY" (REQUIRED)
+
+    **Advanced Run Parameters** (Optional):
+    - backupJobPhase: "DICTIONARY", "DATA", "ALL"
+    - allowWrite: Allow write operations during backup (true/false)
+    - isRestart: Whether this is a restart operation (true/false)
+    - repositoryJobType: Repository job type
+    - targetName: Target system name
+    - backupVersion: Backup version number
+    - promptResponse: Auto-respond to prompts (true/false)
+    - sourceSystem: Source Teradata system name
+    - srcUserName: Source username - use "TERADATA_USERNAME"
+    - srcUserPassword: Source password - use "TERADATA_PASSWORD"
+    - jobDescription: Description for the job execution
+    - dataDictionaryType: "DATA" or "STRUCTURE"
+    - targetGroupName: Target group name
+    - targetSystem: Target system name
+    - targetUserName: Target username - use "TARGET_USERNAME"
+    - targetUserPassword: Target password - use "TARGET_PASSWORD"
+
+    **Job Settings for Run** (Optional):
+    - online: Online mode (true/false)
+    - nowait: No-wait mode (true/false)
+    - skipArchive: Skip archive phase (true/false)
+    - skipStats: Skip statistics collection (true/false)
+    - runAsCopy: Run as copy operation (true/false)
+    - loggingLevel: "Error", "Warning", "Info", "Debug"
+    - blockLevelCompression: "DEFAULT", "ENABLED", "DISABLED"
+    - queryband: Query band settings
+    - numberParallelBuilds: Number of parallel builds
+    - nosync: No sync mode (true/false)
+    - temperatureOverride: Temperature override setting
+    - disableFallback: Disable fallback (true/false)
+    - streamsSoftlimit: Streams soft limit
+    - skipJoinhashIndex: Skip join hash index (true/false)
+    - skipSystemJoinIndex: Skip system join index (true/false)
+    - enableIr: Enable incremental restore (true/false)
+    - enableIncrementalRestore: Enable incremental restore (true/false)
+    - enableBackupForIr: Enable backup for incremental restore (true/false)
+    - skipBuildSI: Skip build secondary index (true/false)
+    - includeDbcData: Include DBC data (true/false)
+
+    === USER INTERACTION GUIDELINES ===
+    When helping users with job operations:
+    1. ALWAYS show the user the payload that will be used
+    2. ASK if they want to modify any settings before proceeding
+    3. CONFIRM the configuration with the user before executing
+    4. Offer to explain any parameters they want to customize
+    5. 
Do NOT show the actual password values for security + + **SPECIAL REQUIREMENTS FOR CREATE/UPDATE OPERATIONS:** + - ALWAYS require user confirmation before creating or updating jobs + - Show the complete configuration payload to the user (minimal or comprehensive) + - Ask if they want to add any additional settings from the comprehensive template + - Explain that this will create/modify job definitions in the DSA repository + - Wait for explicit confirmation before executing create_job or update_job + - Offer to show comprehensive template if user wants advanced options + + **SPECIAL REQUIREMENTS FOR RUN OPERATION:** + - ALWAYS require explicit user confirmation before running jobs + - Show the complete run configuration payload to the user + - Ask if they want to add any additional settings (compression, logging, etc.) + - Explain that running a job will start actual backup/restore operations + - Wait for explicit "yes" or confirmation before executing run_job + - Provide guidance on monitoring job progress with status operation + + Example interaction flow: + - Show the configuration payload (minimal or comprehensive based on user needs) + - Ask: "Would you like to add any additional settings from the comprehensive template?" + - Wait for user confirmation before executing the operation + - Explain available options if user wants to customize + + Returns: + Formatted result of the requested operation with detailed status and validation information + + Examples: + # List all jobs + - View all jobs: + manage_job_operations("list") + + # Get specific job definition + - Get job details: + manage_job_operations("get", job_name="dfs_bk") + + # Create new job with minimal configuration + - Create backup job: + config = '{"restJobDefinitionModel":{"sourceSystem":"YOUR_SOURCE_SYSTEM_NAME","srcUserName":"TERADATA_USERNAME","srcUserPassword":"TERADATA_PASSWORD","jobType":"BACKUP","targetGroupName":"YOUR_TARGET_GROUP_NAME","jobName":"YOUR_JOB_NAME","jobDescription":"Your job description","dataDictionaryType":"DATA"},"restJobSettingsModel":{},"restJobObjectsModels":[{"objectName":"YOUR_DATABASE_NAME","objectType":"DATABASE","parentName":"YOUR_DATABASE_NAME","parentType":"DATABASE"}]}' + manage_job_operations("create", job_config=config) + + # Create job with advanced settings (COMPREHENSIVE TEMPLATE) + - Create comprehensive backup job with all options: + config = '{"restJobDefinitionModel":{"sourceSystem":"YOUR_SOURCE_SYSTEM_NAME","srcUserName":"TERADATA_USERNAME","srcUserPassword":"TERADATA_PASSWORD","jobType":"BACKUP","targetSystem":"YOUR_TARGET_SYSTEM","targetUserName":"TARGET_USERNAME","targetUserPassword":"TARGET_PASSWORD","targetGroupName":"YOUR_TARGET_GROUP_NAME","jobName":"comprehensive_backup","jobDescription":"Comprehensive backup with all 
settings","targetUserAccountId":"YOUR_TARGET_ACCOUNT_ID","srcUserAccountId":"YOUR_SOURCE_ACCOUNT_ID","dataDictionaryType":"DATA","backupName":"YOUR_BACKUP_NAME","backupVersion":0,"savesetUser":"SAVESET_USERNAME","savesetPassword":"SAVESET_PASSWORD","savesetAccountId":"YOUR_SAVESET_ACCOUNT_ID","allBackupObjects":true,"autoRetire":true,"retireValue":30,"retireUnits":"DAYS","nextIncrementalRestore":true},"restJobSettingsModel":{"reblock":true,"trackEmptyTables":true,"enableTemperatureOverride":true,"singleObjectLocking":true,"skipArchive":false,"skipStats":false,"loggingLevel":"Info","blockLevelCompression":"DEFAULT","runAsCopy":false,"queryband":"ApplicationName=DSA_MCP;Version=1.0;","numberParallelBuilds":2,"online":false,"nosync":false,"temperatureOverride":"DEFAULT","disableFallback":false,"nowait":true,"configMapName":"YOUR_CONFIG_MAP","streamsSoftlimit":100,"skipJoinhashIndex":false,"skipSystemJoinIndex":false,"mapTo":"YOUR_MAP_TO","enableIncrementalRestore":true,"enableBackupForIr":true,"skipBuildSecondaryIndexes":false,"wholeDbc":false,"dsmainJsonLogging":true,"includeDbcData":true,"enableIr":true,"allowWrite":false,"cbbEnhancement":true,"advJobProgressStats":true,"restJob":"YOUR_REST_JOB","previousBackupJob":"YOUR_PREVIOUS_BACKUP_JOB"},"restJobObjectsModels":[{"objectName":"YOUR_DATABASE_NAME","objectType":"DATABASE","parentType":"DATABASE","parentName":"YOUR_DATABASE_NAME","renameTo":"YOUR_RENAME_TO","mapTo":"YOUR_MAP_TO","includeAll":true,"configMapName":"YOUR_CONFIG_MAP","excludeObjects":[{"objectName":"TEMP_TABLE_1","objectType":"TABLE"},{"objectName":"TEMP_TABLE_2","objectType":"TABLE"}]}]}'
+        manage_job_operations("create", job_config=config)
+
+    # Update existing job
+    - Update job configuration:
+        config = '{"restJobDefinitionModel":{"sourceSystem":"YOUR_SOURCE_SYSTEM_NAME","srcUserName":"TERADATA_USERNAME","srcUserPassword":"TERADATA_PASSWORD","jobType":"BACKUP","targetGroupName":"YOUR_TARGET_GROUP_NAME","jobName":"test_job","jobDescription":"Updated test backup"},"restJobSettingsModel":{"online":false,"nowait":true},"restJobObjectsModels":[{"objectName":"DBC","objectType":"DATABASE"}]}'
+        manage_job_operations("update", job_config=config)
+
+    # Run job operations (REQUIRES USER CONFIRMATION)
+    - Run job with minimal configuration:
+        config = '{"executionType":"FULL","jobName":"YOUR_JOB_NAME","jobType":"BACKUP"}'
+        manage_job_operations("run", job_config=config)
+
+    - Run job with advanced settings:
+        config = '{"jobName":"backup_job","executionType":"FULL","backupJobPhase":"DATA","allowWrite":true,"jobType":"BACKUP","isRestart":false,"sourceSystem":"YOUR_SOURCE_SYSTEM_NAME","srcUserName":"TERADATA_USERNAME","srcUserPassword":"TERADATA_PASSWORD","targetGroupName":"YOUR_TARGET_GROUP_NAME","jobSettings":{"online":false,"nowait":true,"loggingLevel":"Info","blockLevelCompression":"DEFAULT"}}'
+        manage_job_operations("run", job_config=config)
+
+    # Get job status
+    - Check job status:
+        manage_job_operations("status", job_name="backup_job")
+    - Monitor running job:
+        manage_job_operations("status", job_name="dfs_bk")
+
+    # Retire/Unretire jobs
+    - Retire job:
+        manage_job_operations("retire", job_name="old_job")
+    - Unretire job:
+        manage_job_operations("unretire", job_name="old_job")
+
+    # Delete job
+    - Delete job permanently:
+        manage_job_operations("delete", job_name="old_job")
+
+    Notes:
+    - Job creation/update requires comprehensive JSON configuration
+    - Two configuration templates available: minimal (basic) and comprehensive (all options)
+    - Source and 
target systems must be properly configured
+    - Target groups must exist before creating jobs
+    - Retirement marks jobs as archived but keeps them in repository
+    - Deletion permanently removes jobs from repository
+    - JSON configuration must include restJobDefinitionModel, restJobSettingsModel, and restJobObjectsModels
+    - Use get operation to see proper configuration format for existing jobs
+    - Always use "TERADATA_USERNAME" pattern for all credential fields
+
+    COMPREHENSIVE TEMPLATE: Use when users need advanced features like:
+    - Auto-retirement settings, backup versioning, temperature overrides
+    - Advanced job settings, parallel builds, compression options
+    - Complex object mappings, exclusions, secondary indexes control
+    - Target system credentials, saveset configurations
+
+    IMPORTANT: When assisting users with job creation/updates/runs:
+    1. Always show the user the exact payload that will be used (minimal or comprehensive)
+    2. Ask if they want to modify any settings or upgrade to the comprehensive template
+    3. FOR CREATE/UPDATE: Require explicit user confirmation before creating or updating job definitions in the DSA repository
+    4. FOR RUN: Require explicit user confirmation before executing, since this starts actual backup/restore operations
+    5. Offer to explain available parameters for customization
+    6. Never show actual password values - only show "TERADATA_PASSWORD"
+    7. Suggest using the status operation to monitor job progress after running
+    """
+    try:
+        logger.debug(f"Tool: bar_manageJob: Args: operation: {operation}, job_name: {job_name}")
+
+        # Validate operation
+        valid_operations = [
+            "list", "get", "create", "update", "run",
+            "status", "retire", "unretire", "delete"
+        ]
+
+        if operation not in valid_operations:
+            error_result = f"āŒ Invalid operation '{operation}'. Valid operations: {', '.join(valid_operations)}"
+            metadata = {
+                "tool_name": "bar_manageJob",
+                "operation": operation,
+                "error": "Invalid operation",
+                "success": False
+            }
+            return create_response(error_result, metadata)
+
+        # Execute the job management operation
+        result = manage_job_operations(
+            operation=operation,
+            job_name=job_name,
+            job_config=job_config
+        )
+
+        metadata = {
+            "tool_name": "bar_manageJob",
+            "operation": operation,
+            "job_name": job_name,
+            "success": True
+        }
+
+        if job_config:
+            metadata["job_config"] = job_config
+
+        logger.debug(f"Tool: bar_manageJob: metadata: {metadata}")
+        return create_response(result, metadata)
+
+    except Exception as e:
+        logger.error(f"Error in bar_manageJob: {e}")
+        error_result = f"āŒ Error in DSA job management operation: {str(e)}"
+        metadata = {
+            "tool_name": "bar_manageJob",
+            "operation": operation,
+            "error": str(e),
+            "success": False
+        }
+        return create_response(error_result, metadata)
\ No newline at end of file
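
For reference, here is a minimal end-to-end sketch of driving the new `bar_manageJob` handler through a create → run → status cycle. It is illustrative only: the system, target group, and database names are placeholders, and DSA connectivity is assumed to be configured as described in the BAR README.

```python
# Illustrative lifecycle sketch for the job handler (placeholder names throughout).
import json

from src.teradata_mcp_server.tools.bar.bar_tools import handle_bar_manageJob

create_config = json.dumps({
    "restJobDefinitionModel": {
        "sourceSystem": "my_source_system",   # placeholder Teradata system name
        "srcUserName": "TERADATA_USERNAME",
        "srcUserPassword": "TERADATA_PASSWORD",
        "jobType": "BACKUP",
        "targetGroupName": "dfs_tg",          # placeholder target group
        "jobName": "demo_backup",
        "jobDescription": "Demo backup job",
        "dataDictionaryType": "DATA",
    },
    "restJobSettingsModel": {},
    "restJobObjectsModels": [{
        "objectName": "demo_db", "objectType": "DATABASE",
        "parentName": "demo_db", "parentType": "DATABASE",
    }],
})

# Create the job definition in the DSA repository.
print(handle_bar_manageJob(conn=None, operation="create", job_config=create_config))

# Run it (this starts a real backup), then poll its status.
run_config = json.dumps({"executionType": "FULL", "jobName": "demo_backup", "jobType": "BACKUP"})
print(handle_bar_manageJob(conn=None, operation="run", job_config=run_config))
print(handle_bar_manageJob(conn=None, operation="status", job_name="demo_backup"))
```
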
{bucket_name}")
-                            prefix_list = bucket.get('prefixList', [])
-                            if isinstance(prefix_list, dict):
-                                prefix_list = [prefix_list]
-                            if prefix_list:
-                                for k, prefix in enumerate(prefix_list, 1):
-                                    prefix_name = prefix.get('prefixName', 'N/A')
-                                    storage_devices = prefix.get('storageDevices', 'N/A')
-                                    results.append(f"    šŸ”– Prefix #{k}: {prefix_name}")
-                                    results.append(f"       Storage Devices: {storage_devices}")
+                total_configurations = len(aws_list)
+                results.append(f"šŸ“Š Total AWS S3 Configurations: {total_configurations}")
+                results.append("")
+
+                # Process each AWS configuration
+                for config_idx, aws_config in enumerate(aws_list, 1):
+                    configAwsRest = aws_config.get('configAwsRest', {})
+                    account_name = configAwsRest.get('acctName', 'N/A')
+                    access_id = configAwsRest.get('accessId', 'N/A')
+
+                    results.append(f"šŸ”§ AWS Configuration #{config_idx}")
+                    results.append(f"   šŸ“‹ Account Name: {account_name}")
+                    results.append(f"   šŸ”‘ Access ID: {access_id}")
+
+                    bucketsByRegion = configAwsRest.get('bucketsByRegion', [])
+
+                    # Handle if bucketsByRegion is a dict (single region) or list
+                    if isinstance(bucketsByRegion, dict):
+                        bucketsByRegion = [bucketsByRegion]
+
+                    config_bucket_count = 0
+                    if bucketsByRegion:
+                        for i, region in enumerate(bucketsByRegion, 1):
+                            region_name = region.get('region', 'N/A')
+                            results.append(f"   šŸ—‚ļø Region #{i}: {region_name}")
+                            buckets = region.get('buckets', [])
+                            if isinstance(buckets, dict):
+                                buckets = [buckets]
+                            if buckets:
+                                for j, bucket in enumerate(buckets, 1):
+                                    config_bucket_count += 1
+                                    total_buckets += 1
+                                    bucket_name = bucket.get('bucketName', 'N/A')
+                                    results.append(f"      šŸ“ Bucket #{j}: {bucket_name}")
+                                    prefix_list = bucket.get('prefixList', [])
+                                    if isinstance(prefix_list, dict):
+                                        prefix_list = [prefix_list]
+                                    if prefix_list:
+                                        for k, prefix in enumerate(prefix_list, 1):
+                                            prefix_name = prefix.get('prefixName', 'N/A')
+                                            storage_devices = prefix.get('storageDevices', 'N/A')
+                                            results.append(f"         šŸ”– Prefix #{k}: {prefix_name}")
+                                            results.append(f"            Storage Devices: {storage_devices}")
+                                    else:
+                                        results.append(f"         šŸ”– No prefixes configured")
                             else:
-                                results.append(f"    šŸ”– No prefixes configured")
+                                results.append(f"      šŸ“ No buckets configured in this region")
                     else:
-                        results.append(f"    šŸ“ No buckets configured in this region")
+                        results.append("   šŸ“‹ No regions configured for this account")
+                    results.append("")
 
-            results.insert(1, f"šŸ“Š Total Buckets Configured: {bucket_count}")
+
+            # Insert the total bucket count right after the report title
+            results.insert(1, f"šŸ“Š Total Buckets Configured: {total_buckets}")
 
         else:
             results.append("šŸ“‹ No AWS backup Solutions Configured")
 
@@ -586,12 +615,293 @@ def list_aws_s3_backup_configurations () -> str:
         return f"āŒ Error listing AWS S3 Backup Solutions Configured: {str(e)}"
 
 
+def delete_aws_s3_backup_configurations() -> str:
+    """Delete all AWS S3 backup configurations from DSA
+
+    Removes all AWS S3 backup solution configurations from DSA. This operation will fail
+    if any S3 configurations are currently in use by backup operations or target groups.
+
+    Returns:
+        Formatted result of the deletion operation with status and any validation messages
+
+    Warning:
+        This operation removes ALL AWS S3 backup configurations. Make sure no
+        backup operations or target groups are using these configurations.
+    """
+    try:
+        logger.info("Deleting all AWS S3 backup configurations via DSA API")
+
+        # Make request to DSA API
+        response = dsa_client._make_request(
+            method="DELETE",
+            endpoint="dsa/components/backup-applications/aws-s3"
+        )
+
+        logger.debug(f"DSA API response: {response}")
+
+        results = []
+        results.append("šŸ—‚ļø DSA AWS S3 Backup Configuration Deletion")
+        results.append("=" * 50)
+
+        if response.get('status') == 'DELETE_COMPONENT_SUCCESSFUL':
+            results.append("āœ… All AWS S3 backup configurations deleted successfully")
+            results.append(f"šŸ“Š Status: {response.get('status')}")
+            results.append(f"āœ”ļø Valid: {response.get('valid', False)}")
+
+        else:
+            results.append("āŒ Failed to delete AWS S3 backup configurations")
+            results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}")
+            results.append(f"āœ”ļø Valid: {response.get('valid', False)}")
+
+            # Show validation errors if any
+            if response.get('validationlist'):
+                validation = response['validationlist']
+                results.append("")
+                results.append("šŸ” Validation Details:")
+
+                if validation.get('serverValidationList'):
+                    for error in validation['serverValidationList']:
+                        results.append(f"āŒ Server Error: {error.get('message', 'Unknown error')}")
+                        results.append(f"   Code: {error.get('code', 'N/A')}")
+                        results.append(f"   Status: {error.get('valStatus', 'N/A')}")
+
+                if validation.get('clientValidationList'):
+                    for error in validation['clientValidationList']:
+                        results.append(f"āŒ Client Error: {error.get('message', 'Unknown error')}")
+
+                # If deletion failed due to dependencies, provide guidance
+                if any('in use by' in error.get('message', '') for error in validation.get('serverValidationList', [])):
+                    results.append("")
+                    results.append("šŸ’” Helpful Notes:")
+                    results.append("   • Remove all backup jobs using these AWS S3 configurations first")
+                    results.append("   • Delete any target groups that reference these S3 configurations")
+                    results.append("   • Use list_aws_s3_backup_configurations() to see current configurations")
+
+        results.append("")
+        results.append("=" * 50)
+        results.append("āœ… AWS S3 backup configuration deletion operation completed")
+
+        return "\n".join(results)
+
+    except Exception as e:
+        logger.error(f"Failed to delete AWS S3 backup configurations: {str(e)}")
+        return f"āŒ Error deleting AWS S3 backup configurations: {str(e)}"
+
+
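+# Usage sketch (illustrative only; assumes dsa_client is configured for a reachable DSA host):
+#   print(delete_aws_s3_backup_configurations())                # remove every S3 configuration
+#   print(remove_AWS_S3_backup_configuration("my_s3_account"))  # remove one account (defined below)
+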
+def remove_AWS_S3_backup_configuration(aws_acct_name: str) -> str:
+    """Remove a specific AWS S3 configuration from DSA
+
+    Removes a single AWS S3 configuration, identified by its account name, via the
+    DSA DELETE endpoint. This operation will fail if the S3 configuration is
+    currently in use by backup operations or S3 target groups.
+
+    Args:
+        aws_acct_name : Name of the AWS S3 account for which the configuration needs to be removed from DSA (e.g., "my_s3_account")
+
+    Returns:
+        Formatted result of the removal operation with status and any validation messages
+
+    Warning:
+        This operation will fail if the AWS S3 config is in use by any backup operations
+        or target groups. Remove those dependencies first.
+    """
+    try:
+        logger.info(f"Removing AWS S3 configuration: {aws_acct_name}")
+
+        # No request body is needed: the account name is passed in the endpoint
+        # path of the DELETE call below.
+
+        # First, get the existing S3 configurations
+        try:
+            existing_response = dsa_client._make_request(
+                method="GET",
+                endpoint="dsa/components/backup-applications/aws-s3"
+            )
+
+            existing_s3_configurations = []
+            if existing_response.get('status') == 'LIST_AWS_APP_SUCCESSFUL':
+                # Use the exact same logic as the list function
+                aws_list = existing_response.get('aws', [])
+                logger.debug(f"AWS list from API: {aws_list}")
+                logger.debug(f"AWS list type: {type(aws_list)}, length: {len(aws_list) if aws_list else 0}")
+                if aws_list and isinstance(aws_list, list):
+                    # For consistency with list function, treat each aws entry as a configuration
+                    existing_s3_configurations = aws_list
+                    logger.info(f"Successfully parsed {len(existing_s3_configurations)} S3 configurations")
+                else:
+                    logger.warning(f"No aws list found or wrong type. aws_list: {aws_list}")
+            else:
+                logger.warning("No existing S3 configurations found or unable to retrieve them")
+                logger.debug(f"API response status: {existing_response.get('status')}")
+                return f"āŒ Could not retrieve existing S3 configurations to remove '{aws_acct_name}'"
+
+        except Exception as e:
+            logger.error(f"Could not retrieve existing S3 configurations: {e}")
+            return f"āŒ Error retrieving existing S3 configurations: {str(e)}"
+
+        # Check whether the S3 configuration to remove actually exists
+        s3config_exists = False
+        s3_configurations_to_keep = []
+
+        for s3 in existing_s3_configurations:
+            # Extract account name from the nested structure
+            config_aws_rest = s3.get('configAwsRest', {})
+            current_acct_name = config_aws_rest.get('acctName', '')
+
+            logger.debug(f"Checking S3 config - current_acct_name: '{current_acct_name}', target: '{aws_acct_name}'")
+            if current_acct_name == aws_acct_name:
+                s3config_exists = True
+                logger.info(f"Found S3 configuration to remove: {aws_acct_name}")
+            else:
+                # Keep this S3 configuration
+                s3_configurations_to_keep.append(s3)
+
+        # If S3 config doesn't exist, return error
+        if not s3config_exists:
+            available_s3_configs = []
+            debug_info = []
+            for i, s3 in enumerate(existing_s3_configurations):
+                config_aws_rest = s3.get('configAwsRest', {})
+                acct_name = config_aws_rest.get('acctName', 'N/A')
+
+                # Also collect bucket names as potential identifiers
+                bucket_names = []
+                buckets_by_region = config_aws_rest.get('bucketsByRegion', [])
+                if isinstance(buckets_by_region, dict):
+                    buckets_by_region = [buckets_by_region]
+                for region in buckets_by_region:
+                    buckets = region.get('buckets', [])
+                    if isinstance(buckets, dict):
+                        buckets = [buckets]
+                    for bucket in buckets:
+                        bucket_name = bucket.get('bucketName', '')
+                        if bucket_name:
+                            bucket_names.append(bucket_name)
+
+                available_s3_configs.append(acct_name)
+                # Add debug info about the structure - show all possible account fields
+                debug_info.append(f"Config #{i+1}: Top level keys: {list(s3.keys())}")
+                debug_info.append(f"  configAwsRest keys: {list(config_aws_rest.keys())}")
+                debug_info.append(f"  Bucket names: {bucket_names}")
+                # Look for any field that might contain account info
+                for key, value in config_aws_rest.items():
+                    if 'acc' in key.lower() or 'name' in key.lower() or 'id' in key.lower():
+                        debug_info.append(f"  {key}: {value}")
+            results = []
+            results.append("šŸ—‚ļø DSA S3 Configuration Removal")
+            results.append("=" * 50)
+            results.append(f"āŒ S3 configuration 
'{aws_acct_name}' not found") + results.append("") + results.append("šŸ“‹ Available S3 configurations:") + if available_s3_configs: + for path in available_s3_configs: + results.append(f" • {path}") + else: + results.append(" (No S3 configurations configured)") + results.append("") + results.append("šŸ” Debug Info:") + for debug in debug_info: + results.append(f" {debug}") + results.append("") + results.append("=" * 50) + return "\n".join(results) + + logger.info(f"Removing '{aws_acct_name}', keeping {len(s3_configurations_to_keep)} S3 configurations") + + # this code logic is not required. If the account is found, we can just delete it, do not complicate with reconfiguring the rest + # reconfiguring the rest is not going to work in the single call to the API + # Make request to DSA API to reconfigure with remaining S3 configurations + # If no configurations remain, we need to delete all instead of posting empty config + #if not s3_configurations_to_keep: + # logger.info("No S3 configurations remaining, deleting all S3 configurations") + # response = dsa_client._make_request( + # method="DELETE", + # endpoint="dsa/components/backup-applications/aws-s3" + # ) + #else: + # logger.info(f"Reconfiguring with {len(s3_configurations_to_keep)} remaining S3 configurations") + # response = dsa_client._make_request( + # method="POST", + # endpoint="dsa/components/backup-applications/aws-s3", + # data=request_data + # ) + + + + # Build the request data and delete the specific configuration that is already found to be existing + # Use the correct endpoint with account name and trailing slash (matching successful Swagger call) + response = dsa_client._make_request( + method="DELETE", + endpoint=f"dsa/components/backup-applications/aws-s3/{aws_acct_name}/" + ) + + logger.debug(f"DSA API response: {response}") + + results = [] + results.append("šŸ—‚ļø DSA S3 Configuration Removal") + results.append("=" * 50) + results.append(f"šŸ“ Removed S3 Configuration: {aws_acct_name}") + results.append(f"šŸ“Š Remaining S3 Configurations: {len(s3_configurations_to_keep)}") + results.append("") + + success_statuses = ['CONFIG_AWS_APP_SUCCESSFUL', 'LIST_AWS_APP_SUCCESSFUL', 'DELETE_COMPONENT_SUCCESSFUL'] + if response.get('status') in success_statuses: + results.append("āœ… AWS S3 configuration removed successfully") + results.append(f"šŸ“Š Status: {response.get('status')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + if s3_configurations_to_keep: + results.append("") + results.append("šŸ“‹ Remaining S3 configurations:") + for s3 in s3_configurations_to_keep: + config_aws_rest = s3.get('configAwsRest', {}) + acct_name = config_aws_rest.get('acctName', 'N/A') + results.append(f" • {acct_name}") + else: + results.append("") + results.append("šŸ“‹ No S3 configurations remaining (all removed)") + + else: + results.append("āŒ Failed to remove AWS S3 configuration") + results.append(f"šŸ“Š Status: {response.get('status', 'Unknown')}") + results.append(f"āœ”ļø Valid: {response.get('valid', False)}") + + # Show validation errors if any + if response.get('validationlist'): + validation = response['validationlist'] + results.append("") + results.append("šŸ” Validation Details:") + + if validation.get('serverValidationList'): + for error in validation['serverValidationList']: + results.append(f"āŒ Server Error: {error.get('message', 'Unknown error')}") + results.append(f" Code: {error.get('code', 'N/A')}") + results.append(f" Status: {error.get('valStatus', 'N/A')}") + + if 
validation.get('clientValidationList'): + for error in validation['clientValidationList']: + results.append(f"āŒ Client Error: {error.get('message', 'Unknown error')}") + + results.append("") + results.append("=" * 50) + results.append("āœ… AWS S3 backup configuration removal operation completed") + + return "\n".join(results) + + except Exception as e: + logger.error(f"Failed to remove AWS S3 configuration: {str(e)}") + return f"āŒ Error removing AWS S3 configuration '{aws_acct_name}': {str(e)}" + + def manage_AWS_S3_backup_configurations( operation: str, accessId: Optional[str] = None, accessKey: Optional[str] = None, bucketsByRegion: Optional[object] = None, bucketName: Optional[str] = None, + prefixName: Optional[str] = "dsa-backup", + storageDevices: Optional[int] = 4, acctName: Optional[str] = None ) -> str: """Unified DSA AWS S3 Backup Configuration Management Tool @@ -605,6 +915,8 @@ def manage_AWS_S3_backup_configurations( accessKey: AWS Access Key bucketsByRegion: Buckets by region configuration (object: dict or list) bucketName: AWS Bucket Name + prefixName: AWS S3 Prefix Name + storageDevices: Storage devices to use (default 4) acctName: AWS Account Name Available Operations: @@ -624,6 +936,7 @@ def manage_AWS_S3_backup_configurations( if operation == "list": return list_aws_s3_backup_configurations() # Config operation + # Config operation elif operation == "config": if not accessId: return "āŒ Error: accessId is required for config operation" @@ -631,37 +944,84 @@ def manage_AWS_S3_backup_configurations( return "āŒ Error: accessKey is required for config operation" if not bucketsByRegion: return "āŒ Error: bucketsByRegion is required for config operation" - if not acctName: - return "āŒ Error: acctName is required for config operation" if not bucketName: return "āŒ Error: bucketName is required for config operation" + if not prefixName: + return "āŒ Error: prefixName is required for config operation" + # Validate storageDevices as integer + if not storageDevices or not isinstance(storageDevices, int) or storageDevices <= 0: + return "āŒ Error: storageDevices must be a positive integer for config operation" + if not acctName: + return "āŒ Error: acctName is required for config operation" + + # Transform bucketsByRegion to match API expectations + formatted_buckets_by_region = [] + + + # Debug information + debug_msg = f"Original bucketsByRegion: type={type(bucketsByRegion)}, value={bucketsByRegion}" + + if isinstance(bucketsByRegion, list): + # Handle if it's a simple list of regions like ["us-west-2"] + if bucketsByRegion and isinstance(bucketsByRegion[0], str): + # Convert simple region string to proper structure + region_name = bucketsByRegion[0] + formatted_buckets_by_region = [{ + "region": region_name, + "buckets": [{ + "bucketName": bucketName, + "prefixList": [{ + "prefixName": prefixName, + "storageDevices": storageDevices, + "prefixId": 0 + }] + }] + }] + debug_msg += f" | Converted to: {formatted_buckets_by_region}" + else: + # Assume it's already properly formatted + formatted_buckets_by_region = bucketsByRegion + debug_msg += " | Used as-is (already formatted)" + elif isinstance(bucketsByRegion, dict): + # Handle if it's a single region object + formatted_buckets_by_region = [bucketsByRegion] + debug_msg += f" | Wrapped dict in list: {formatted_buckets_by_region}" + else: + return f"āŒ Error: bucketsByRegion must be a list or dict, got {type(bucketsByRegion)} | {debug_msg}" + # bucketsByRegion is now expected as an object (dict or list) request_data = { 
"configAwsRest": { "accessId": accessId, "accessKey": accessKey, - "bucketsByRegion": bucketsByRegion, - "bucketName": bucketName, + "bucketsByRegion": formatted_buckets_by_region, "acctName": acctName, "viewpoint": True, "viewpointBucketRegion": True } } + + # Debug: return debug info for testing + debug_info = f"DEBUG INFO:\n{debug_msg}\nFormatted structure: {formatted_buckets_by_region}\nFull request data: {request_data}" + try: response = dsa_client._make_request( method="POST", endpoint="dsa/components/backup-applications/aws-s3", data=request_data ) - return f"āœ… AWS backup solution configuration operation completed\nResponse: {response}" + return f"āœ… AWS backup solution configuration operation completed\nResponse: {response}\n\n{debug_info}" except Exception as e: - return f"āŒ Error configuring AWS backup solution: {str(e)}" + return f"āŒ Error configuring AWS backup solution: {str(e)}\n\n{debug_info}" + # Delete all operation elif operation == "delete_all": - return "āŒ Error: 'delete_all' operation is not implemented yet for AWS S3 Configuration" + return delete_aws_s3_backup_configurations() # Remove specific operation elif operation == "remove": - return "āŒ Error: 'remove' operation is not implemented yet for AWS S3 Configuration" + if not acctName: + return "āŒ Error: acctName is required for remove operation" + return remove_AWS_S3_backup_configuration(acctName) else: available_operations = [ "list", "config", "delete_all", "remove" @@ -1413,6 +1773,8 @@ def handle_bar_manageAWSS3Operations( accessKey: str = None, bucketsByRegion: object = None, bucketName: str = None, + prefixName: str = None, + storageDevices: int = None, acctName: str = None, *args, **kwargs @@ -1429,6 +1791,9 @@ def handle_bar_manageAWSS3Operations( accessId: AWS access ID (for config operation) accessKey: AWS access key (for config operation) bucketsByRegion: List of S3 buckets by region (for config operation) + bucketName: S3 bucket name (for config operation) + prefixName: S3 prefix name (for config operation) + storageDevices: Number of Storage devices (for config operation) acctName: AWS account name (for config operation) **Note: To UPDATE an existing AWS S3 configuration, simply use the 'config' operation @@ -1448,7 +1813,9 @@ def handle_bar_manageAWSS3Operations( accessId=accessId, accessKey=accessKey, bucketsByRegion=bucketsByRegion, - bucketName="tdedsabucket01", # Hardcoded for now, will be dynamic later + bucketName=bucketName, + prefixName=prefixName, + storageDevices=storageDevices, acctName=acctName ) metadata = { @@ -1458,6 +1825,8 @@ def handle_bar_manageAWSS3Operations( "accessKey": accessKey, "bucketsByRegion": bucketsByRegion, "bucketName": bucketName, + "prefixName": prefixName, + "storageDevices": storageDevices, "acctName": acctName, "success": True } From b5c96b0b22cd5b17bec64fa6d811567126575635 Mon Sep 17 00:00:00 2001 From: Prasad Avadhanam Date: Thu, 25 Sep 2025 13:48:35 +0530 Subject: [PATCH 14/14] remove minor debug printf --- src/teradata_mcp_server/tools/bar/bar_tools.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py index 7fd541a..9cd3d0b 100644 --- a/src/teradata_mcp_server/tools/bar/bar_tools.py +++ b/src/teradata_mcp_server/tools/bar/bar_tools.py @@ -517,14 +517,7 @@ def list_aws_s3_backup_configurations () -> str: # Add debug log for full API response logger.debug("[DEBUG] Full DSA API response from aws-s3 endpoint: %r", response) - - # 
diff --git a/src/teradata_mcp_server/tools/bar/bar_tools.py b/src/teradata_mcp_server/tools/bar/bar_tools.py
index 7fd541a..9cd3d0b 100644
--- a/src/teradata_mcp_server/tools/bar/bar_tools.py
+++ b/src/teradata_mcp_server/tools/bar/bar_tools.py
@@ -517,14 +517,7 @@ def list_aws_s3_backup_configurations () -> str:
 
         # Add debug log for full API response
         logger.debug("[DEBUG] Full DSA API response from aws-s3 endpoint: %r", response)
-
-        # Temporary debug output to console for investigation
-        print(f"[DEBUG] AWS LIST API RESPONSE:")
-        print(f"Status: {response.get('status')}")
-        print(f"AWS list length: {len(response.get('aws', []))}")
-        print(f"Full aws list: {response.get('aws', [])}")
-        print("=" * 80)
-
+
         results = []
         results.append("šŸ—‚ļø DSA AWS S3 Backup Solution Systems Available")
         results.append("=" * 50)
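For reference, the debug lines removed above inspected just two top-level keys
of the aws-s3 endpoint response ('status' and 'aws'), so a payload shaped
roughly like this sketch (field values are illustrative, not the authoritative
DSA schema) is what the list path consumes:

    response = {
        "status": "LIST_AWS_S3_SUCCESSFUL",   # illustrative status string
        "aws": [                              # one entry per configured account
            {"acctName": "my-aws-account"},   # illustrative account record
        ],
    }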