Wednesday, September 10, 2025

Import — Oracle Data Pump (DBMS_DATAPUMP) notes and scripts



SET SERVEROUTPUT ON SIZE UNLIMITED;
-- Generate a DDL preview file from an existing Data Pump dump
-- (PL/SQL equivalent of "impdp ... SQLFILE=...").  Nothing is imported;
-- the extracted DDL is written to v_sqlfile in the Oracle directory.
DECLARE
  -- Configurable parameters
  v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Directory name
  v_schema_name VARCHAR2(128) := 'HEMANT'; -- Schema to process
  v_dumpfile VARCHAR2(256) := 'YOUR_DUMPFILE.dmp'; -- Dump file name
  v_sqlfile VARCHAR2(256) := v_schema_name || '_ddl_preview.sql'; -- SQL file with schema name
  v_logfile VARCHAR2(256) := v_schema_name || '_sqlfile_gen.log'; -- Log file with schema name
  v_job_name VARCHAR2(128) := 'SQLFILE_EXP_' || v_schema_name; -- Job name with schema
  v_file_exists BOOLEAN;
  v_file_length NUMBER;
  v_block_size NUMBER;
  v_handle NUMBER := NULL; -- Data Pump job handle; stays NULL until OPEN succeeds
  v_job_state VARCHAR2(30);
  v_status ku$_Status;
  v_log_entry ku$_LogEntry;
  v_ind NUMBER;
BEGIN
  -- Remove a stale log file so ADD_FILE does not fail on an existing file
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_logfile,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );
  IF v_file_exists THEN
    UTL_FILE.FREMOVE(v_directory, v_logfile);
    DBMS_OUTPUT.PUT_LINE('Existing log file ' || v_logfile || ' deleted');
  END IF;

  -- Remove a stale SQL file for the same reason
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_sqlfile,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );
  IF v_file_exists THEN
    UTL_FILE.FREMOVE(v_directory, v_sqlfile);
    DBMS_OUTPUT.PUT_LINE('Existing SQL file ' || v_sqlfile || ' deleted');
  END IF;

  -- The dump file is the input: fail fast if it is missing
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_dumpfile,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );
  IF NOT v_file_exists THEN
    RAISE_APPLICATION_ERROR(-20001, 'Dump file ' || v_dumpfile || ' does not exist in ' || v_directory);
  END IF;

  -- SQL_FILE operation: reads the dump and writes DDL only, imports nothing
  v_handle := DBMS_DATAPUMP.OPEN(
    operation => 'SQL_FILE',
    job_mode => 'SCHEMA',
    job_name => UPPER(v_job_name)
  );
  DBMS_OUTPUT.PUT_LINE('Opened Data Pump job: ' || v_job_name);

  -- Set parallel degree
  DBMS_DATAPUMP.SET_PARALLEL(
    handle => v_handle,
    degree => 2
  );

  -- Input dump file
  DBMS_DATAPUMP.ADD_FILE(
    handle => v_handle,
    filename => v_dumpfile,
    directory => v_directory,
    filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
  );

  -- Output SQL (DDL) file
  DBMS_DATAPUMP.ADD_FILE(
    handle => v_handle,
    filename => v_sqlfile,
    directory => v_directory,
    filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE
  );

  -- Diagnostics log file
  DBMS_DATAPUMP.ADD_FILE(
    handle => v_handle,
    filename => v_logfile,
    directory => v_directory,
    filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
  );

  -- Restrict processing to the one schema; SCHEMA_LIST expects quoted names
  DBMS_DATAPUMP.METADATA_FILTER(
    handle => v_handle,
    name => 'SCHEMA_LIST',
    value => '''' || UPPER(v_schema_name) || ''''
  );

  -- Start job
  DBMS_DATAPUMP.START_JOB(
    handle => v_handle,
    skip_current => 0,
    abort_step => 0
  );
  DBMS_OUTPUT.PUT_LINE('Started Data Pump job to generate ' || v_sqlfile);

  -- Poll until the job finishes, echoing any errors it reports
  v_job_state := 'UNDEFINED';
  WHILE v_job_state NOT IN ('COMPLETED', 'STOPPED') LOOP
    DBMS_DATAPUMP.GET_STATUS(
      handle => v_handle,
      mask => DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR + DBMS_DATAPUMP.KU$_STATUS_JOB_STATUS,
      timeout => -1,
      job_state => v_job_state,
      status => v_status
    );
    IF BITAND(v_status.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
      v_log_entry := v_status.error;
      IF v_log_entry IS NOT NULL THEN
        v_ind := v_log_entry.FIRST;
        WHILE v_ind IS NOT NULL LOOP
          DBMS_OUTPUT.PUT_LINE('Error: ' || v_log_entry(v_ind).LogText);
          v_ind := v_log_entry.NEXT(v_ind);
        END LOOP;
      END IF;
    END IF;
    DBMS_LOCK.SLEEP(1); -- Avoid tight loop
  END LOOP;

  DBMS_OUTPUT.PUT_LINE('Job completed with state: ' || v_job_state);
  DBMS_DATAPUMP.DETACH(v_handle);
EXCEPTION
  WHEN OTHERS THEN
    -- BUG FIX: v_handle was uninitialised (NULL), so the original test
    -- "v_handle != 0" evaluated to NULL (never TRUE) and the DETACH was
    -- skipped, leaving a failed job attached.  Test IS NOT NULL instead,
    -- and guard the DETACH so a secondary failure cannot mask the error.
    IF v_handle IS NOT NULL THEN
      BEGIN
        DBMS_DATAPUMP.DETACH(v_handle);
      EXCEPTION
        WHEN OTHERS THEN NULL; -- best-effort cleanup only
      END;
    END IF;
    DBMS_OUTPUT.PUT_LINE('Error in Data Pump job: ' || SQLERRM);
    RAISE;
END;
/


=================================================================

SET SERVEROUTPUT ON SIZE UNLIMITED;

-- Export only the indexes and constraints of one schema to a dump file.
DECLARE
  -- Configurable parameters
  v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Directory name
  v_schema_name VARCHAR2(128) := 'HEMANT'; -- Schema to process
  v_dumpfile VARCHAR2(256) := v_schema_name || '_indexes_constraints.dmp'; -- Dump file name
  v_logfile VARCHAR2(256) := v_schema_name || '_indexes_constraints_exp.log'; -- Log file
  v_job_name VARCHAR2(128) := 'EXP_INDEXES_CONSTRAINTS_' || v_schema_name; -- Job name
  v_file_exists BOOLEAN;
  v_file_length NUMBER;
  v_block_size NUMBER;
  v_handle NUMBER := NULL; -- Data Pump job handle; stays NULL until OPEN succeeds
  v_job_state VARCHAR2(30);
  v_status ku$_Status;
  v_log_entry ku$_LogEntry;
  v_ind NUMBER;

BEGIN
  -- Remove a stale log file so ADD_FILE does not fail on an existing file
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_logfile,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );
  IF v_file_exists THEN
    UTL_FILE.FREMOVE(v_directory, v_logfile);
    DBMS_OUTPUT.PUT_LINE('Existing log file ' || v_logfile || ' deleted');
  END IF;

  -- Remove a stale dump file for the same reason
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_dumpfile,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );
  IF v_file_exists THEN
    UTL_FILE.FREMOVE(v_directory, v_dumpfile);
    DBMS_OUTPUT.PUT_LINE('Existing dump file ' || v_dumpfile || ' deleted');
  END IF;

  -- Open Data Pump job for export
  v_handle := DBMS_DATAPUMP.OPEN(
    operation => 'EXPORT',
    job_mode => 'SCHEMA',
    job_name => UPPER(v_job_name)
  );
  DBMS_OUTPUT.PUT_LINE('Opened Data Pump export job: ' || v_job_name);

  -- Set parallel degree
  DBMS_DATAPUMP.SET_PARALLEL(
    handle => v_handle,
    degree => 2
  );

  -- Output dump file
  DBMS_DATAPUMP.ADD_FILE(
    handle => v_handle,
    filename => v_dumpfile,
    directory => v_directory,
    filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
  );

  -- Diagnostics log file
  DBMS_DATAPUMP.ADD_FILE(
    handle => v_handle,
    filename => v_logfile,
    directory => v_directory,
    filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
  );

  -- Restrict to the one schema; SCHEMA_LIST expects quoted names
  DBMS_DATAPUMP.METADATA_FILTER(
    handle => v_handle,
    name => 'SCHEMA_LIST',
    value => '''' || UPPER(v_schema_name) || ''''
  );

  -- Include only index and constraint object paths.
  -- BUG FIX: 'INCLUDE_METADATA' is not a valid METADATA_FILTER name; the
  -- documented way to include only certain object types is INCLUDE_PATH_EXPR
  -- with a SQL IN-list over the object paths.
  DBMS_DATAPUMP.METADATA_FILTER(
    handle => v_handle,
    name => 'INCLUDE_PATH_EXPR',
    value => 'IN (''INDEX'', ''CONSTRAINT'')'
  );

  -- Start job
  DBMS_DATAPUMP.START_JOB(
    handle => v_handle,
    skip_current => 0,
    abort_step => 0
  );
  DBMS_OUTPUT.PUT_LINE('Started Data Pump export job to generate ' || v_dumpfile);

  -- Poll until the job finishes, echoing any errors it reports
  v_job_state := 'UNDEFINED';
  WHILE v_job_state NOT IN ('COMPLETED', 'STOPPED') LOOP
    DBMS_DATAPUMP.GET_STATUS(
      handle => v_handle,
      mask => DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR + DBMS_DATAPUMP.KU$_STATUS_JOB_STATUS,
      timeout => -1,
      job_state => v_job_state,
      status => v_status
    );
    IF BITAND(v_status.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
      v_log_entry := v_status.error;
      IF v_log_entry IS NOT NULL THEN
        v_ind := v_log_entry.FIRST;
        WHILE v_ind IS NOT NULL LOOP
          DBMS_OUTPUT.PUT_LINE('Error: ' || v_log_entry(v_ind).LogText);
          v_ind := v_log_entry.NEXT(v_ind);
        END LOOP;
      END IF;
    END IF;
    DBMS_LOCK.SLEEP(1); -- Avoid tight loop
  END LOOP;

  DBMS_OUTPUT.PUT_LINE('Export job completed with state: ' || v_job_state);
  DBMS_DATAPUMP.DETACH(v_handle);

EXCEPTION
  WHEN OTHERS THEN
    -- BUG FIX: v_handle was uninitialised (NULL), so the original test
    -- "v_handle != 0" evaluated to NULL (never TRUE) and the DETACH was
    -- skipped.  Test IS NOT NULL instead, and guard the DETACH so a
    -- secondary failure cannot mask the original error.
    IF v_handle IS NOT NULL THEN
      BEGIN
        DBMS_DATAPUMP.DETACH(v_handle);
      EXCEPTION
        WHEN OTHERS THEN NULL; -- best-effort cleanup only
      END;
    END IF;
    DBMS_OUTPUT.PUT_LINE('Error in Data Pump export job: ' || SQLERRM);
    RAISE;
END;
/

================================================================
SET SERVEROUTPUT ON SIZE UNLIMITED;
-- Scan a generated DDL file for TABLESPACE clauses and print one
-- DBMS_DATAPUMP.METADATA_REMAP call per unique tablespace found.
DECLARE
  v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Your directory
  v_file_name VARCHAR2(256) := 'ddl_preview.sql'; -- Your SQL file
  v_new_tablespace VARCHAR2(128) := 'NEW_TABLESPACE_01'; -- Target tablespace
  v_file UTL_FILE.FILE_TYPE;
  v_line VARCHAR2(32767);
  v_tablespace VARCHAR2(128);
  TYPE t_ts_tab IS TABLE OF VARCHAR2(128) INDEX BY VARCHAR2(128); -- Keyed by tablespace name: deduplicates for free
  v_ts_tab t_ts_tab;
  v_file_exists BOOLEAN;
  v_file_length NUMBER;
  v_block_size NUMBER;
BEGIN
  -- Check if file exists
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_file_name,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );

  IF NOT v_file_exists THEN
    DBMS_OUTPUT.PUT_LINE('File ' || v_file_name || ' does not exist in ' || v_directory);
    RETURN;
  END IF;

  -- Read SQL file and extract unique tablespaces
  v_file := UTL_FILE.FOPEN(v_directory, v_file_name, 'R');
  LOOP
    BEGIN
      UTL_FILE.GET_LINE(v_file, v_line);
      v_line := SUBSTR(v_line, 1, 32767); -- Prevent buffer overflow
      IF REGEXP_LIKE(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?') THEN
        v_tablespace := REGEXP_SUBSTR(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?', 1, 1, NULL, 1);
        IF v_tablespace IS NOT NULL AND LENGTH(v_tablespace) <= 128 THEN
          -- Store in associative array to deduplicate
          IF NOT v_ts_tab.EXISTS(v_tablespace) THEN
            v_ts_tab(v_tablespace) := v_tablespace;
            DBMS_OUTPUT.PUT_LINE('Found tablespace: ' || v_tablespace);
          END IF;
        ELSE
          DBMS_OUTPUT.PUT_LINE('Skipping invalid tablespace in line: ' || SUBSTR(v_line, 1, 100));
        END IF;
      END IF;
    EXCEPTION
      WHEN NO_DATA_FOUND THEN
        EXIT; -- GET_LINE raises NO_DATA_FOUND at end of file
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error processing line: ' || SUBSTR(v_line, 1, 100) || ' - ' || SQLERRM);
        -- Continue to next line
    END;
  END LOOP;
  UTL_FILE.FCLOSE(v_file);

  -- Generate one remap statement per unique tablespace
  IF v_ts_tab.COUNT = 0 THEN
    DBMS_OUTPUT.PUT_LINE('No tablespaces found in ' || v_file_name);
  ELSE
    DBMS_OUTPUT.PUT_LINE('Total unique tablespaces found: ' || v_ts_tab.COUNT);
    v_tablespace := v_ts_tab.FIRST;
    WHILE v_tablespace IS NOT NULL LOOP
      BEGIN
        -- BUG FIX: the valid remap name is REMAP_TABLESPACE (singular);
        -- 'REMAP_TABLESPACES' is rejected by DBMS_DATAPUMP.METADATA_REMAP.
        DBMS_OUTPUT.PUT_LINE(
          'DBMS_DATAPUMP.METADATA_REMAP(' ||
          'handle => h1, ' ||
          'name => ''REMAP_TABLESPACE'', ' ||
          'old_value => ''' || v_ts_tab(v_tablespace) || ''', ' ||
          'value => ''' || v_new_tablespace || ''');'
        );
      EXCEPTION
        WHEN OTHERS THEN
          DBMS_OUTPUT.PUT_LINE('Error generating remap for ' || v_tablespace || ': ' || SQLERRM);
      END;
      -- BUG FIX: advance OUTSIDE the inner BEGIN/EXCEPTION.  In the original,
      -- an exception skipped the NEXT() call, leaving v_tablespace unchanged
      -- and looping forever on the same key.
      v_tablespace := v_ts_tab.NEXT(v_tablespace);
    END LOOP;
  END IF;
EXCEPTION
  WHEN OTHERS THEN
    IF UTL_FILE.IS_OPEN(v_file) THEN
      UTL_FILE.FCLOSE(v_file);
    END IF;
    DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
    RAISE;
END;
/

====================================================================

SET SERVEROUTPUT ON SIZE UNLIMITED;
-- Variant of the tablespace scanner that collects names in a nested table
-- and prints one DBMS_DATAPUMP.METADATA_REMAP call per unique tablespace.
DECLARE
  v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Your directory
  v_file_name VARCHAR2(256) := 'ddl_preview.sql'; -- Your SQL file
  v_new_tablespace VARCHAR2(128) := 'NEW_TABLESPACE_01'; -- Target tablespace
  v_file UTL_FILE.FILE_TYPE;
  v_line VARCHAR2(32767);
  v_tablespace VARCHAR2(128);
  TYPE t_ts_tab IS TABLE OF VARCHAR2(128);
  v_ts_tab t_ts_tab := t_ts_tab();
  v_is_dup BOOLEAN; -- TRUE when the tablespace is already collected
  v_file_exists BOOLEAN;
  v_file_length NUMBER;
  v_block_size NUMBER;
BEGIN
  -- Check if file exists
  UTL_FILE.FGETATTR(
    location => v_directory,
    filename => v_file_name,
    fexists => v_file_exists,
    file_length => v_file_length,
    block_size => v_block_size
  );

  IF NOT v_file_exists THEN
    DBMS_OUTPUT.PUT_LINE('File ' || v_file_name || ' does not exist in ' || v_directory);
    RETURN;
  END IF;

  -- Read SQL file and extract tablespaces
  v_file := UTL_FILE.FOPEN(v_directory, v_file_name, 'R');
  LOOP
    BEGIN
      UTL_FILE.GET_LINE(v_file, v_line);
      v_line := SUBSTR(v_line, 1, 32767); -- Prevent buffer overflow
      IF REGEXP_LIKE(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?') THEN
        v_tablespace := REGEXP_SUBSTR(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?', 1, 1, NULL, 1);
        IF v_tablespace IS NOT NULL AND LENGTH(v_tablespace) <= 128 THEN
          -- BUG FIX: the original tested EXISTS(COUNT + 1), which is always
          -- FALSE for the next unused index, so EVERY occurrence was appended
          -- and nothing was deduplicated despite the comment.  Scan the
          -- collection for a real value match instead.
          v_is_dup := FALSE;
          FOR j IN 1 .. v_ts_tab.COUNT LOOP
            IF v_ts_tab(j) = v_tablespace THEN
              v_is_dup := TRUE;
              EXIT;
            END IF;
          END LOOP;
          IF NOT v_is_dup THEN
            v_ts_tab.EXTEND;
            v_ts_tab(v_ts_tab.COUNT) := v_tablespace;
            DBMS_OUTPUT.PUT_LINE('Found tablespace: ' || v_tablespace);
          END IF;
        ELSE
          DBMS_OUTPUT.PUT_LINE('Skipping invalid tablespace in line: ' || SUBSTR(v_line, 1, 100));
        END IF;
      END IF;
    EXCEPTION
      WHEN NO_DATA_FOUND THEN
        EXIT; -- GET_LINE raises NO_DATA_FOUND at end of file
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error processing line: ' || SUBSTR(v_line, 1, 100) || ' - ' || SQLERRM);
        -- Continue to next line
    END;
  END LOOP;
  UTL_FILE.FCLOSE(v_file);

  -- Generate one remap statement per collected tablespace
  IF v_ts_tab.COUNT = 0 THEN
    DBMS_OUTPUT.PUT_LINE('No tablespaces found in ' || v_file_name);
  ELSE
    FOR i IN 1 .. v_ts_tab.COUNT LOOP
      BEGIN
        -- BUG FIX: valid remap name is REMAP_TABLESPACE (singular), not
        -- 'REMAP_TABLESPACES'.
        DBMS_OUTPUT.PUT_LINE(
          'DBMS_DATAPUMP.METADATA_REMAP(' ||
          'handle => h1, ' ||
          'name => ''REMAP_TABLESPACE'', ' ||
          'old_value => ''' || v_ts_tab(i) || ''', ' ||
          'value => ''' || v_new_tablespace || ''');'
        );
      EXCEPTION
        WHEN OTHERS THEN
          DBMS_OUTPUT.PUT_LINE('Error generating remap for ' || v_ts_tab(i) || ': ' || SQLERRM);
      END;
    END LOOP;
  END IF;
EXCEPTION
  WHEN OTHERS THEN
    IF UTL_FILE.IS_OPEN(v_file) THEN
      UTL_FILE.FCLOSE(v_file);
    END IF;
    DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
    RAISE;
END;
/


==================================================================
SET SERVEROUTPUT ON SIZE UNLIMITED;
-- Delete a known list of files from an Oracle directory object.
-- Missing files are reported rather than treated as errors, and a failure
-- on one file does not stop the remaining deletions.
DECLARE
  l_dir VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Replace with your directory
  TYPE t_name_list IS TABLE OF VARCHAR2(256) INDEX BY PLS_INTEGER;
  l_names t_name_list;
  l_exists BOOLEAN;
  l_bytes NUMBER;
  l_blk NUMBER;
BEGIN
  -- Files to delete (extend the list as needed)
  l_names(1) := 'ddl_preview.sql';
  l_names(2) := 'ddl_preview.log';
  -- l_names(3) := 'YOUR_DUMPFILE.dmp'; -- Uncomment and add other files if needed

  FOR idx IN l_names.FIRST .. l_names.LAST LOOP
    BEGIN
      -- Probe first so "absent" can be distinguished from a removal error
      UTL_FILE.FGETATTR(l_dir, l_names(idx), l_exists, l_bytes, l_blk);

      IF l_exists THEN
        UTL_FILE.FREMOVE(l_dir, l_names(idx));
        DBMS_OUTPUT.PUT_LINE('File ' || l_names(idx) || ' deleted successfully from ' || l_dir);
      ELSE
        DBMS_OUTPUT.PUT_LINE('File ' || l_names(idx) || ' does not exist in ' || l_dir);
      END IF;
    EXCEPTION
      WHEN OTHERS THEN
        -- Report and move on to the next file
        DBMS_OUTPUT.PUT_LINE('Error deleting file ' || l_names(idx) || ': ' || SQLERRM);
    END;
  END LOOP;
EXCEPTION
  WHEN OTHERS THEN
    DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
    RAISE;
END;
/

Step-by-Step Solution for Oracle RDS

  1. Understand RDS Constraints:

    • No OS-level access means no direct impdp command-line execution.
    • You must use DBMS_DATAPUMP within a SQL*Plus or SQL Developer session (or similar tool) connected to your RDS instance.
    • The dump file must be in an Oracle directory accessible to RDS (e.g., DATA_PUMP_DIR or a custom directory created via CREATE DIRECTORY).
    • RDS requires specific privileges: ensure your user has DATAPUMP_IMP_FULL_DATABASE or explicit grants for DBMS_DATAPUMP, CREATE DIRECTORY, and read/write on the directory.
  2. Generate SQLFILE Using DBMS_DATAPUMP:

    • Instead of impdp ... SQLFILE, run a PL/SQL block to create a SQL file containing the DDL from the dump. This file will be written to the Oracle directory (e.g., DATA_PUMP_DIR).
    • Below is a PL/SQL script to generate the SQLFILE equivalent:
    sql
    SET SERVEROUTPUT ON SIZE UNLIMITED;
    DECLARE
      h1 NUMBER;
      v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Replace with your directory
      v_dumpfile VARCHAR2(256) := 'YOUR_DUMPFILE.dmp'; -- Your dump file name
      v_sqlfile VARCHAR2(256) := 'ddl_preview.sql'; -- Output SQL file
      v_logfile VARCHAR2(256) := 'ddl_preview.log'; -- Log file for errors
      job_state VARCHAR2(30);
      sts ku$_Status;
      le ku$_LogEntry;
      ind NUMBER;
    BEGIN
      -- Open a Data Pump job in SQLFILE mode
      -- (operation must be 'SQL_FILE', not 'IMPORT': SQL_FILE writes the DDL
      -- to the SQL file without creating any objects)
      h1 := DBMS_DATAPUMP.OPEN(
        operation => 'SQL_FILE',
        job_mode => 'SCHEMA',
        job_name => NULL
      );
    
      -- Add the dump file
      DBMS_DATAPUMP.ADD_FILE(
        handle => h1,
        filename => v_dumpfile,
        directory => v_directory,
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
      );
    
      -- Add the SQL file (this is where DDL will be written)
      DBMS_DATAPUMP.ADD_FILE(
        handle => h1,
        filename => v_sqlfile,
        directory => v_directory,
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE
      );
    
      -- Add a log file for diagnostics
      DBMS_DATAPUMP.ADD_FILE(
        handle => h1,
        filename => v_logfile,
        directory => v_directory,
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
      );
    
      -- Restrict to specific schema
      DBMS_DATAPUMP.METADATA_FILTER(
        handle => h1,
        name => 'SCHEMA_LIST',
        value => '''YOUR_SCHEMA_NAME'''
      );
    
      -- Start the job (this writes DDL to sqlfile, no actual import)
      DBMS_DATAPUMP.START_JOB(h1);
    
      -- Monitor job completion
      job_state := 'UNDEFINED';
      WHILE job_state NOT IN ('COMPLETED', 'STOPPED') LOOP
        DBMS_DATAPUMP.GET_STATUS(
          h1,
          DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR + DBMS_DATAPUMP.KU$_STATUS_JOB_STATUS,
          -1,
          job_state,
          sts
        );
        -- Log any errors
        IF BITAND(sts.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
          le := sts.error;
          IF le IS NOT NULL THEN
            ind := le.FIRST;
            WHILE ind IS NOT NULL LOOP
              DBMS_OUTPUT.PUT_LINE('Error: ' || le(ind).LogText);
              ind := le.NEXT(ind);
            END LOOP;
          END IF;
        END IF;
        DBMS_LOCK.SLEEP(1); -- Avoid tight loop
      END LOOP;
    
      DBMS_OUTPUT.PUT_LINE('SQLFILE generation completed. State: ' || job_state);
      DBMS_DATAPUMP.DETACH(h1);
    EXCEPTION
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error in SQLFILE job: ' || SQLERRM);
        IF h1 IS NOT NULL THEN
          DBMS_DATAPUMP.DETACH(h1);
        END IF;
        RAISE;
    END;
    /
    • Replace: YOUR_SCHEMA_NAME, YOUR_DUMPFILE.dmp, and DATA_PUMP_DIR (or your custom directory).
    • Run this in SQL*Plus or SQL Developer connected to your RDS instance as the schema user or a privileged user.
    • This creates ddl_preview.sql in the specified directory, containing all CREATE TABLE/INDEX statements with their tablespace clauses (including REVANTH14).
  3. Retrieve the SQL File from RDS:

    • Since RDS doesn't provide direct filesystem access, use the aws_s3 package or RDS Data Pump directory access to download ddl_preview.sql:
      • Option 1: AWS S3 (if dump is in S3):
        • If your dump file is in an S3 bucket (common in RDS), you can list and download the generated ddl_preview.sql using:
          sql
          SELECT * FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'));
          • Copy the file to S3:
            sql
             -- Note: RDS_FILE_UTIL has no COPY_TO_S3 procedure; RDS S3
             -- integration uses the rdsadmin_s3_tasks package (returns a task id):
             SELECT rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
                      p_bucket_name    => 'YOUR_S3_BUCKET',
                      p_s3_prefix      => 'exports/',
                      p_prefix         => 'ddl_preview.sql',
                      p_directory_name => 'DATA_PUMP_DIR'
                    ) AS task_id
             FROM DUAL;
          • Download from S3 using AWS CLI or console:
            text
            aws s3 cp s3://YOUR_S3_BUCKET/exports/ddl_preview.sql .
      • Option 2: UTL_FILE (if no S3):
        • Read the file contents directly in PL/SQL (see parsing script below).
        • Requires UTL_FILE permissions on the directory.
  4. Parse the SQL File for Tablespaces:

    • If you can download ddl_preview.sql, parse it locally (as described previously) using grep/PowerShell to extract unique tablespaces:

      text
      grep -i "TABLESPACE" ddl_preview.sql | sed -n "s/.*TABLESPACE[[:space:]]*\(['\"]\)\([^'\"]*\)\1.*/\2/p" | sort -u > tablespaces.txt
      • Or, in PowerShell:
        text
        Select-String -Path "ddl_preview.sql" -Pattern "TABLESPACE\s+['`"]([^'`"]+)['`"]" | ForEach-Object { $_.Matches.Groups[1].Value } | Sort-Object -Unique | Out-File -Encoding utf8 tablespaces.txt
    • If you can't download the file easily, parse it within RDS using UTL_FILE (requires directory read privileges):

      sql
      SET SERVEROUTPUT ON SIZE UNLIMITED;
      DECLARE
        v_file UTL_FILE.FILE_TYPE;
        v_line VARCHAR2(32767);
        v_tablespace VARCHAR2(128);
        TYPE t_ts_tab IS TABLE OF VARCHAR2(128) INDEX BY PLS_INTEGER;
        v_ts_tab t_ts_tab;
        v_idx PLS_INTEGER := 1;
      BEGIN
        v_file := UTL_FILE.FOPEN('DATA_PUMP_DIR', 'ddl_preview.sql', 'R');
        LOOP
          BEGIN
            UTL_FILE.GET_LINE(v_file, v_line);
            IF REGEXP_LIKE(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?') THEN
              v_tablespace := REGEXP_SUBSTR(UPPER(v_line), 'TABLESPACE\s+[''"]?([^''"[:space:]]+)[''"]?', 1, 1, NULL, 1);
              IF v_tablespace IS NOT NULL AND NOT v_ts_tab.EXISTS(v_tablespace) THEN
                v_ts_tab(v_tablespace) := v_tablespace; -- Dedup using index
                DBMS_OUTPUT.PUT_LINE('Found tablespace: ' || v_tablespace);
              END IF;
            END IF;
          EXCEPTION
            WHEN NO_DATA_FOUND THEN EXIT;
          END;
        END LOOP;
        UTL_FILE.FCLOSE(v_file);
      
        -- Output remap statements
        v_idx := v_ts_tab.FIRST;
        WHILE v_idx IS NOT NULL LOOP
          DBMS_OUTPUT.PUT_LINE(
            'DBMS_DATAPUMP.METADATA_REMAP(handle => h1, name => ''REMAP_TABLESPACE'', old_value => ''' ||
            v_ts_tab(v_idx) || ''', value => ''NEW_TABLESPACE_01'');'
          );
          v_idx := v_ts_tab.NEXT(v_idx);
        END LOOP;
      EXCEPTION
        WHEN OTHERS THEN
          IF UTL_FILE.IS_OPEN(v_file) THEN UTL_FILE.FCLOSE(v_file); END IF;
          DBMS_OUTPUT.PUT_LINE('Error parsing SQL file: ' || SQLERRM);
          RAISE;
      END;
      /
      • This prints all unique tablespaces (e.g., REVANTH14, etc.) and generates the exact DBMS_DATAPUMP.METADATA_REMAP statements.
      • Copy the outputted remap statements for use in your import script.
  5. Update Your Import Script:

    • Take the generated remap statements and insert them into your original script (or the enhanced one I provided earlier) between SET_PARAMETER and START_JOB.

    • Example integration into your script:

      sql
      -- ... existing code ...
      DBMS_DATAPUMP.SET_PARAMETER(h1, 'TABLE_EXISTS_ACTION', 'REPLACE');
      
      -- Pasted remap statements from SQLFILE parsing
       DBMS_DATAPUMP.METADATA_REMAP(handle => h1, name => 'REMAP_TABLESPACE', old_value => 'REVANTH14', value => 'NEW_TABLESPACE_01');
       DBMS_DATAPUMP.METADATA_REMAP(handle => h1, name => 'REMAP_TABLESPACE', old_value => 'OTHER_TS1', value => 'NEW_TABLESPACE_01');
      -- ... other remaps ...
      
      DBMS_DATAPUMP.START_JOB(h1);
      -- ... rest of script ...
  6. Run the Import:

    • Execute the updated script in SQL*Plus or SQL Developer.
    • Since all tablespaces (including REVANTH14) are now remapped, the ORA-00959: tablespace 'REVANTH14' does not exist error should be resolved.






========================================================================
-- Per-tablespace space usage for one schema, broken down by segment type.
-- Useful for sizing target tablespaces before a remapped import.
SELECT 
    TABLESPACE_NAME,
    SEGMENT_TYPE,  -- e.g., TABLE, INDEX, LOBSEGMENT, LOBINDEX, TABLE PARTITION
    COUNT(*) AS OBJECT_COUNT,
    ROUND(SUM(BYTES)/1024/1024, 2) AS TOTAL_USED_MB
FROM DBA_SEGMENTS  -- Use USER_SEGMENTS if no DBA access
WHERE OWNER = 'YOUR_SOURCE_SCHEMA'  -- Replace with your schema
GROUP BY TABLESPACE_NAME, SEGMENT_TYPE
ORDER BY TABLESPACE_NAME, SEGMENT_TYPE;

SET SERVEROUTPUT ON;
-- Metadata-only schema import from a dump file, remapping the source schema
-- to TARGET1 and each listed tablespace (OLD:NEW pairs) to its target.
DECLARE
    PRAGMA AUTONOMOUS_TRANSACTION;
    v_handle NUMBER;
    v_job_name VARCHAR2(30);
    v_dumpfile VARCHAR2(100);
    v_logfile VARCHAR2(100);
    v_timestamp VARCHAR2(12) := TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISS');
    v_remap_pairs CLOB := 'OLD_TS1:GENERIC_01,OLD_TS2:GENERIC_01,OLD_TS3:GENERIC_01'; -- Your full remap string
    v_pair VARCHAR2(4000);
    v_old_value VARCHAR2(1000);
    v_new_value VARCHAR2(1000);
    i NUMBER := 1; -- scan position within v_remap_pairs
    v_pair_count NUMBER := 0;
    v_count NUMBER;
    v_existing_jobs NUMBER;
BEGIN
    -- Hardcoded validations: fail fast if target schema/tablespace are absent
    SELECT COUNT(*) INTO v_count FROM dba_users WHERE username = 'TARGET1';
    IF v_count = 0 THEN RAISE_APPLICATION_ERROR(-20003, 'Target schema TARGET1 does not exist.'); END IF;
    SELECT COUNT(*) INTO v_count FROM dba_tablespaces WHERE tablespace_name = 'GENERIC_01';
    IF v_count = 0 THEN RAISE_APPLICATION_ERROR(-20005, 'Target tablespace GENERIC_01 does not exist.'); END IF;
    DBMS_OUTPUT.PUT_LINE('Validations passed.');

    -- Check for existing jobs (warning only)
    SELECT COUNT(*) INTO v_existing_jobs FROM dba_datapump_jobs WHERE job_name LIKE 'IMP_T1_%' AND state != 'NOT RUNNING';
    IF v_existing_jobs > 0 THEN
        DBMS_OUTPUT.PUT_LINE('Warning: ' || v_existing_jobs || ' existing IMP_T1_% jobs found. Consider dropping stale jobs.');
    END IF;

    -- Job setup
    v_job_name := 'IMP_T1_' || v_timestamp; -- Dynamic, unique name
    v_logfile := 'TARGET1_meta_import_' || v_timestamp || '.log';
    v_dumpfile := 'REVANTH_LONGNAME_meta_250909220159.dmp'; -- Hardcoded 15-char schema + timestamp

    -- Open import
    v_handle := DBMS_DATAPUMP.OPEN(operation => 'IMPORT', job_mode => 'SCHEMA', remote_link => NULL, job_name => v_job_name);
    DBMS_DATAPUMP.ADD_FILE(v_handle, v_logfile, 'DATA_PUMP_DIR', DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    DBMS_DATAPUMP.ADD_FILE(v_handle, v_dumpfile, 'DATA_PUMP_DIR', DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE, reusefile => 1);

    -- Remaps: Schema
    DBMS_DATAPUMP.METADATA_REMAP(handle => v_handle, name => 'REMAP_SCHEMA', old_value => 'REVANTH_LONGNAME', value => 'TARGET1');

    -- Apply all tablespace remaps from the OLD:NEW,OLD:NEW,... list.
    -- BUG FIX: INSTR's signature is INSTR(string, substring, position).
    -- The original called INSTR(v_remap_pairs || ',', i, ',') -- passing the
    -- numeric scan position as the search substring -- so the delimiter was
    -- never found and no pair was ever parsed.
    WHILE i <= NVL(LENGTH(v_remap_pairs), 0) LOOP
        v_pair := TRIM(SUBSTR(v_remap_pairs, i, INSTR(v_remap_pairs || ',', ',', i) - i));
        IF LENGTH(v_pair) > 0 AND LENGTH(v_pair) <= 4000 AND INSTR(v_pair, ':') > 0 THEN
            v_old_value := TRIM(SUBSTR(v_pair, 1, INSTR(v_pair, ':') - 1));
            v_new_value := TRIM(SUBSTR(v_pair, INSTR(v_pair, ':') + 1));
            IF LENGTH(v_old_value) > 0 AND LENGTH(v_new_value) > 0 AND LENGTH(v_old_value) <= 1000 AND LENGTH(v_new_value) <= 1000 THEN
                DBMS_DATAPUMP.METADATA_REMAP(handle => v_handle, name => 'REMAP_TABLESPACE', old_value => v_old_value, value => v_new_value);
                v_pair_count := v_pair_count + 1;
            END IF;
        END IF;
        i := INSTR(v_remap_pairs || ',', ',', i) + 1; -- jump past the delimiter just consumed
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Remaps complete: ' || v_pair_count || ' pairs.');

    -- Metadata filters (match export)
    DBMS_DATAPUMP.METADATA_FILTER(v_handle, 'EXCLUDE_PATH_EXPR', 'IN (''INDEX'', ''STATISTICS'')');

    -- Parameters (match export)
    DBMS_DATAPUMP.SET_PARAMETER(v_handle, 'CONTENT', 'METADATA_ONLY');
    DBMS_DATAPUMP.SET_PARAMETER(v_handle, 'TABLE_EXISTS_ACTION', 'SKIP');
    DBMS_DATAPUMP.SET_PARALLEL(v_handle, 2); -- Customize
    DBMS_DATAPUMP.SET_PARAMETER(v_handle, 'TRACE', '1FF0'); -- NOTE(review): TRACE is documented as a NUMBER value; confirm this string form is accepted on your version

    -- Execute asynchronously: submit and detach; monitor via dba_datapump_jobs
    DBMS_DATAPUMP.START_JOB(v_handle);
    DBMS_DATAPUMP.DETACH(v_handle);
    DBMS_OUTPUT.PUT_LINE('Import job submitted: ' || v_job_name || '. Log: ' || v_logfile);

EXCEPTION
    WHEN OTHERS THEN
        -- Detach (best effort) so a failed job does not hold the session;
        -- guard it so a secondary failure cannot mask the original error.
        IF v_handle IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(v_handle);
            EXCEPTION
                WHEN OTHERS THEN NULL;
            END;
        END IF;
        RAISE_APPLICATION_ERROR(-20001, SUBSTR(SQLERRM, 1, 4000));
END;
/

-- Monitoring.
-- BUG FIX: DBA_DATAPUMP_JOBS exposes no ERROR_COUNT or FILESIZE columns;
-- select only columns the view actually has.
SELECT job_name, state, operation, job_mode, degree, attached_sessions
FROM dba_datapump_jobs
WHERE job_name LIKE 'IMP_T1_%'
ORDER BY job_name DESC;

-- Log read.
-- NOTE: rds_file_util.read_text_file takes an exact file name (no wildcards).
-- List the directory first to find the real name, then substitute it below:
-- SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'));
SELECT * FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', 'TARGET1_meta_import_YYMMDDHH24MISS.log'));

-- Drop any stale IMP_T1_% Data Pump jobs.
-- BUG FIX: the original attached twice -- once discarding the handle, then
-- again inside the STOP_JOB call -- so the handle leaked and the second
-- ATTACH could fail.  Attach once, keep the handle, and stop/drop with it.
BEGIN
    FOR job IN (SELECT job_name FROM dba_datapump_jobs WHERE job_name LIKE 'IMP_T1_%' AND state != 'NOT RUNNING') LOOP
        DECLARE
            v_handle NUMBER;
        BEGIN
            v_handle := DBMS_DATAPUMP.ATTACH(job_name => job.job_name);
            -- immediate => 1 aborts running workers; keep_master => 0 drops the master table
            DBMS_DATAPUMP.STOP_JOB(handle => v_handle, immediate => 1, keep_master => 0);
            DBMS_OUTPUT.PUT_LINE('Dropped job: ' || job.job_name);
        EXCEPTION
            WHEN OTHERS THEN
                -- Report and continue with the remaining jobs
                DBMS_OUTPUT.PUT_LINE('Failed to drop job ' || job.job_name || ': ' || SQLERRM);
        END;
    END LOOP;
END;
/

==================================================================
SET SERVEROUTPUT ON;

SET SERVEROUTPUT ON;
-- NOTE(review): SERVEROUTPUT is enabled twice above; one directive is redundant.
-- Purpose: for each of the 8 target tablespaces, split its OLD:NEW remap
-- pairs into 5 comma-separated batch strings, printing each batch with its
-- pair count and character length (so it can be checked against string limits).
-- Assumes a project table ts_mappings(old_ts, new_ts) -- not shown in this file.
DECLARE
  -- Define the list of 8 tablespaces you want to generate batches for
  TYPE tablespace_list IS TABLE OF VARCHAR2(30);
  target_tablespaces tablespace_list := tablespace_list(
      'GENERIC_01', 'GENERIC_02', 'GENERIC_03', 'GENERIC_04',
      'GENERIC_05', 'GENERIC_06', 'GENERIC_07', 'GENERIC_08'
  );
  -- Define a cursor to retrieve the batched data
  -- (ranks pairs alphabetically, assigns each to a batch of p_batch_size,
  -- then aggregates each batch into a single comma-separated string)
  CURSOR c_batches (p_new_ts VARCHAR2, p_batch_size NUMBER) IS
    WITH ranked_mappings AS (
        SELECT
            old_ts || ':' || p_new_ts AS pair,
            ROW_NUMBER() OVER (ORDER BY old_ts) AS rn,
            CEIL(ROW_NUMBER() OVER (ORDER BY old_ts) / p_batch_size) AS batch_num
        FROM ts_mappings
        WHERE new_ts = p_new_ts
          AND old_ts NOT IN ('SYSTEM', 'SYSAUX', 'TEMP', 'RDSADMIN')
    )
    SELECT
        'Batch ' || batch_num AS batch_label,
        COUNT(*) AS pair_count,
        LISTAGG(pair, ',') WITHIN GROUP (ORDER BY rn) AS remap_batch_str,
        LENGTH(LISTAGG(pair, ',')) AS char_length  -- Added: For size check
    FROM ranked_mappings
    GROUP BY batch_num
    ORDER BY batch_num;
  v_total_pairs NUMBER;
  v_batch_size NUMBER;
BEGIN
  -- Loop through each target tablespace
  FOR i IN 1..target_tablespaces.COUNT LOOP
    DBMS_OUTPUT.PUT_LINE(CHR(10));
    DBMS_OUTPUT.PUT_LINE('--- Generating Batches for ' || target_tablespaces(i) || ' ---');

    -- Step 1: Get the total number of pairs for the current tablespace
    SELECT COUNT(*) INTO v_total_pairs
    FROM ts_mappings
    WHERE new_ts = target_tablespaces(i)
      AND old_ts NOT IN ('SYSTEM', 'SYSAUX', 'TEMP', 'RDSADMIN');
    IF v_total_pairs = 0 THEN
      DBMS_OUTPUT.PUT_LINE('No mappings found for ' || target_tablespaces(i) || '.');
      CONTINUE;
    END IF;
    -- Step 2: Calculate the dynamic batch size for 5 batches
    v_batch_size := CEIL(v_total_pairs / 5); -- Always 5 batches, as requested
    DBMS_OUTPUT.PUT_LINE('Total Pairs: ' || v_total_pairs || ', Calculated Batch Size: ' || v_batch_size);

    -- Step 3: Run the cursor and print the results
    FOR rec IN c_batches(target_tablespaces(i), v_batch_size) LOOP
      DBMS_OUTPUT.PUT_LINE(rec.batch_label || ' (' || rec.pair_count || ' pairs, ' || rec.char_length || ' chars):');
      DBMS_OUTPUT.PUT_LINE(rec.remap_batch_str);
    END LOOP;
  END LOOP;

  DBMS_OUTPUT.PUT_LINE(CHR(10) || '--- All Batches Generated ---');
EXCEPTION
  WHEN OTHERS THEN
    -- NOTE(review): the error is printed but not re-raised, so callers see success.
    DBMS_OUTPUT.PUT_LINE('An error occurred: ' || SQLERRM);
END;
/



-- Verification (run after all batches): spot-check that a sample of the schema's
-- tables landed in the expected tablespaces after the remapped import.
SELECT 'Final check' AS step, table_name, tablespace_name FROM dba_tables WHERE owner = UPPER('&target_schema') AND ROWNUM <= 5;
-- Full log: SELECT * FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', '&target_schema' || '_meta_import_batch_' || v_timestamp || '.log'));
-- Note: Replace v_timestamp in the log query with the actual timestamp printed in the import script's output.


-- Generate 5 batches of old_ts:new_ts remap pairs from ts_mappings.
-- Adjust the 103.0 divisor to CEIL(total_pairs / 5), e.g. 103 for 516 pairs.
WITH ranked_mappings AS (
    SELECT
        old_ts || ':' || new_ts AS pair,
        ROW_NUMBER() OVER (ORDER BY old_ts) AS rn,  -- Alphabetical order
        CEIL(ROW_NUMBER() OVER (ORDER BY old_ts) / 103.0) AS batch_num  -- ~103 pairs/batch
    FROM ts_mappings
    WHERE new_ts = 'GENERIC_01'
      AND old_ts NOT IN ('SYSTEM', 'SYSAUX', 'TEMP', 'RDSADMIN')
)
SELECT
    -- Fixed: Oracle's LISTAGG requires a WITHIN GROUP (ORDER BY ...) clause;
    -- the LENGTH() call previously used LISTAGG without one (invalid syntax).
    'Batch ' || batch_num || ' (' || COUNT(*) || ' pairs, '
        || LENGTH(LISTAGG(pair, ',') WITHIN GROUP (ORDER BY rn)) || ' chars):' AS label,
    LISTAGG(pair, ',') WITHIN GROUP (ORDER BY rn) AS remap_batch_str
FROM ranked_mappings
GROUP BY batch_num
ORDER BY batch_num;

-- cross_schema_ts_check.sql
-- Comprehensive Cross-Schema Tablespace Usage Analysis (Fixed for ORA-06502 NULL Index Key)
-- Run as DBA user. Edit l_schemas below with your 7 schemas.

SET SERVEROUTPUT ON SIZE UNLIMITED
SET PAGESIZE 0
SET LINESIZE 200
SET FEEDBACK OFF
SET HEADING OFF

SPOOL cross_schema_report.txt

-- Anonymous block: per-schema tablespace inventory plus a report of tablespaces
-- shared by more than one of the listed schemas.
DECLARE
    -- Edit these: Your 7 schemas (add/remove as needed)
    TYPE t_schema_array IS TABLE OF VARCHAR2(128);
    l_schemas t_schema_array := t_schema_array('SCHEMA1', 'SCHEMA2', 'SCHEMA3', 'SCHEMA4', 'SCHEMA5', 'SCHEMA6', 'SCHEMA7');

    -- For cross-usage tracking
    TYPE t_ts_schema_map IS TABLE OF t_schema_array INDEX BY VARCHAR2(128); -- Key: tablespace, Value: array of schemas using it
    l_ts_to_schemas t_ts_schema_map;

    l_schema VARCHAR2(128);
    l_tablespace VARCHAR2(4000);  -- Handles long LISTAGG
    l_count NUMBER;
    l_shared_count NUMBER := 0;

    -- Helper for schema list
    l_schema_list VARCHAR2(4000) := l_schemas(1);

    -- Helper for output line
    l_output_line VARCHAR2(4000);
BEGIN
    -- Build schema list manually
    FOR j IN 2..l_schemas.COUNT LOOP
        l_schema_list := l_schema_list || ', ' || l_schemas(j);
    END LOOP;

    DBMS_OUTPUT.PUT_LINE('=== CROSS-SCHEMA TABLESPACE USAGE REPORT ===');
    DBMS_OUTPUT.PUT_LINE('Generated on: ' || TO_CHAR(SYSDATE, 'YYYY-MM-DD HH24:MI:SS'));
    DBMS_OUTPUT.PUT_LINE('Schemas Analyzed: ' || l_schema_list);
    DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== PER-SCHEMA TABLESPACE USAGE ===');
    DBMS_OUTPUT.PUT_LINE('SCHEMA_NAME                  | UNIQUE_TABLESPACES (Count)');
    DBMS_OUTPUT.PUT_LINE('-----------------------------|----------------------------');

    -- Loop per schema: Collect tablespaces used by its objects
    FOR i IN 1..l_schemas.COUNT LOOP
        l_schema := UPPER(l_schemas(i));

        -- Query to get unique tablespaces for this schema (all object types) - FIXED: Add WHERE tablespace_name IS NOT NULL
        BEGIN
            -- NOTE(review): SUBSTR is applied AFTER LISTAGG, so a raw concatenated
            -- list longer than the LISTAGG limit would still raise ORA-01489;
            -- consider ON OVERFLOW TRUNCATE (12.2+) if that ever occurs.
            SELECT SUBSTR(LISTAGG(tablespace_name, ', ') WITHIN GROUP (ORDER BY tablespace_name), 1, 4000),
                   COUNT(DISTINCT tablespace_name)
            INTO l_tablespace, l_count
            FROM (
                -- Tables
                SELECT tablespace_name FROM dba_tables WHERE owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- Indexes
                SELECT tablespace_name FROM dba_indexes WHERE owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- LOBs
                SELECT tablespace_name FROM dba_lobs WHERE owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- Table Partitions (use TABLE_OWNER)
                SELECT tablespace_name FROM dba_tab_partitions WHERE table_owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- Table Subpartitions (use TABLE_OWNER)
                SELECT tablespace_name FROM dba_tab_subpartitions WHERE table_owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- Index Partitions (use INDEX_OWNER)
                SELECT tablespace_name FROM dba_ind_partitions WHERE index_owner = l_schema AND tablespace_name IS NOT NULL
                UNION
                -- Index Subpartitions (use INDEX_OWNER)
                SELECT tablespace_name FROM dba_ind_subpartitions WHERE index_owner = l_schema AND tablespace_name IS NOT NULL
            );
        EXCEPTION
            -- Defensive only: an aggregate SELECT INTO without GROUP BY always
            -- returns exactly one row, so NO_DATA_FOUND is not expected here.
            WHEN NO_DATA_FOUND THEN
                l_count := 0;
                l_tablespace := 'NO OBJECTS FOUND';
        END;

        IF l_count = 0 THEN
            l_tablespace := 'NO OBJECTS FOUND';
        END IF;

        -- Safe output construction
        l_output_line := RPAD(l_schema, 30) || ' | ' || 
                         SUBSTR(RPAD(NVL(l_tablespace, 'NONE'), 30), 1, 30) ||
                         ' (' || TO_CHAR(l_count) || ')';
        DBMS_OUTPUT.PUT_LINE(SUBSTR(l_output_line, 1, 200));

        -- Track cross-usage: For each TS in this schema, add schema to map - FIXED: Add WHERE tablespace_name IS NOT NULL
        IF l_count > 0 THEN
            FOR rec IN (
                SELECT DISTINCT tablespace_name FROM (
                    -- Tables
                    SELECT tablespace_name FROM dba_tables WHERE owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_indexes WHERE owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_lobs WHERE owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_tab_partitions WHERE table_owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_tab_subpartitions WHERE table_owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_ind_partitions WHERE index_owner = l_schema AND tablespace_name IS NOT NULL
                    UNION SELECT tablespace_name FROM dba_ind_subpartitions WHERE index_owner = l_schema AND tablespace_name IS NOT NULL
                )
            ) LOOP
                l_tablespace := rec.tablespace_name;
                -- Extra safety: Skip if somehow NULL (though WHERE prevents it)
                IF l_tablespace IS NOT NULL THEN
                    IF NOT l_ts_to_schemas.EXISTS(l_tablespace) THEN
                        l_ts_to_schemas(l_tablespace) := t_schema_array();
                    END IF;
                    -- DISTINCT above guarantees each schema is appended at most once per TS.
                    l_ts_to_schemas(l_tablespace).EXTEND;
                    l_ts_to_schemas(l_tablespace)(l_ts_to_schemas(l_tablespace).COUNT) := l_schema;
                END IF;
            END LOOP;
        END IF;
    END LOOP;

    -- Report shared tablespaces
    DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== CROSS-SCHEMA SHARED TABLESPACES (Vice Versa Overlaps) ===');
    DBMS_OUTPUT.PUT_LINE('TABLESPACE_NAME              | SCHEMAS_USING_IT (Count)                  | NOTES');
    DBMS_OUTPUT.PUT_LINE('-----------------------------|-------------------------------------------|------');

    l_shared_count := 0;
    IF l_ts_to_schemas.COUNT > 0 THEN
        -- Walk the associative array key by key (keys are tablespace names).
        l_tablespace := l_ts_to_schemas.FIRST;
        WHILE l_tablespace IS NOT NULL LOOP
            IF l_ts_to_schemas(l_tablespace).COUNT > 1 THEN
                l_shared_count := l_shared_count + 1;

                -- Manual concat for schemas per TS
                DECLARE
                    l_schemas_str VARCHAR2(4000) := l_ts_to_schemas(l_tablespace)(1);
                BEGIN
                    FOR k IN 2..l_ts_to_schemas(l_tablespace).COUNT LOOP
                        l_schemas_str := l_schemas_str || ', ' || l_ts_to_schemas(l_tablespace)(k);
                    END LOOP;

                    -- Safe output for shared line
                    l_output_line := RPAD(l_tablespace, 30) || ' | ' || 
                                     SUBSTR(l_schemas_str, 1, 50) ||
                                     ' (' || TO_CHAR(l_ts_to_schemas(l_tablespace).COUNT) || ') | *** SHARED - CHECK DEPENDENCIES ***';
                    DBMS_OUTPUT.PUT_LINE(SUBSTR(l_output_line, 1, 200));
                END;
            END IF;
            l_tablespace := l_ts_to_schemas.NEXT(l_tablespace);
        END LOOP;
    END IF;

    IF l_shared_count = 0 THEN
        DBMS_OUTPUT.PUT_LINE('No shared tablespaces found across the 7 schemas.');
    ELSE
        DBMS_OUTPUT.PUT_LINE(CHR(10) || 'Total shared tablespaces: ' || l_shared_count || '. Review for vice-versa usage (e.g., Schema A objects in Schema B''s default TS).');
    END IF;

    -- Optional: Detailed objects per schema/tablespace (uncomment to enable)
    /*
    DBMS_OUTPUT.PUT_LINE(CHR(10) || '=== DETAILED OBJECTS PER SCHEMA/TS (SAMPLE) ===');
    FOR i IN 1..l_schemas.COUNT LOOP
        l_schema := UPPER(l_schemas(i));
        FOR rec IN (
            SELECT object_type, object_name, tablespace_name
            FROM (
                SELECT 'TABLE' object_type, table_name object_name, tablespace_name FROM dba_tables WHERE owner = l_schema AND tablespace_name IS NOT NULL
                UNION ALL SELECT 'INDEX', index_name, tablespace_name FROM dba_indexes WHERE owner = l_schema AND tablespace_name IS NOT NULL
                UNION ALL SELECT 'LOB', table_name || '.' || column_name, tablespace_name FROM dba_lobs WHERE owner = l_schema AND tablespace_name IS NOT NULL
                -- Add partitions here if needed
            )
            ORDER BY object_type, object_name
        ) LOOP
            DBMS_OUTPUT.PUT_LINE(l_schema || ' | ' || rec.object_type || ' | ' || rec.object_name || ' | ' || rec.tablespace_name);
        END LOOP;
    END LOOP;
    */

    DBMS_OUTPUT.PUT_LINE(CHR(10) || 'Report complete. Check for overlaps and adjust tablespace assignments if needed.');
END;
/

SPOOL OFF
SET PAGESIZE 14
SET LINESIZE 80
SET FEEDBACK ON
SET HEADING ON

-- 1. List every entry in DATA_PUMP_DIR, newest first.
-- RDS-managed Oracle has no OS shell access, so RDSADMIN.RDS_FILE_UTIL.LISTDIR
-- is the supported way to browse a directory object's contents.
SELECT filename, type, filesize, TO_CHAR(mtime, 'YYYY-MM-DD HH24:MI:SS') AS modified
FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
ORDER BY mtime DESC;

2. List Specific Files (e.g., Matching a Pattern)

Filter for files (exclude directories) matching a pattern like %.dmp (all dump files) or old_export% (prefix match).

sql
-- Regular files only (type = 'file' excludes directory entries) whose name
-- ends in .dmp, newest first.
SELECT filename, filesize, TO_CHAR(mtime, 'YYYY-MM-DD HH24:MI:SS') AS modified
FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
WHERE type = 'file' AND UPPER(filename) LIKE '%.DMP'
ORDER BY mtime DESC;
-- List files matching %meta% with a human-readable size (MB below 1 GB, else GB).
-- Fixed: SIZE is an Oracle reserved word and cannot be used as an unquoted
-- column alias (raises ORA-00923); renamed the alias to file_size.
SELECT filename,
       CASE
         WHEN filesize < POWER(1024, 3) THEN ROUND(filesize / POWER(1024, 2), 2) || ' MB'
         ELSE ROUND(filesize / POWER(1024, 3), 2) || ' GB'
       END AS file_size,
       TO_CHAR(mtime, 'YYYY-MM-DD HH24:MI:SS') AS modified
FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
WHERE type = 'file'
  AND LOWER(filename) LIKE '%meta%'
ORDER BY mtime DESC;


-- Delete every regular file in DATA_PUMP_DIR whose name contains "meta",
-- printing each deletion with a human-readable size. A failure on one file is
-- logged and the loop continues with the next file.
DECLARE
  l_size_label VARCHAR2(64);  -- formatted "<n> MB" / "<n> GB" for the success message
BEGIN
  FOR f IN (
    SELECT filename, filesize
    FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
    WHERE type = 'file'
      AND LOWER(filename) LIKE '%meta%'
  ) LOOP
    -- Pre-format the size so the success PUT_LINE below stays short.
    IF f.filesize < POWER(1024, 3) THEN
      l_size_label := ROUND(f.filesize / POWER(1024, 2), 2) || ' MB';
    ELSE
      l_size_label := ROUND(f.filesize / POWER(1024, 3), 2) || ' GB';
    END IF;
    BEGIN
      UTL_FILE.FREMOVE('DATA_PUMP_DIR', f.filename);
      DBMS_OUTPUT.PUT_LINE('Deleted: ' || f.filename || ' (' || l_size_label || ')');
    EXCEPTION
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error deleting ' || f.filename || ': ' || SQLERRM);
    END;
  END LOOP;
  DBMS_OUTPUT.PUT_LINE('Deletion process complete.');
END;
/

3. Delete a Specific File

Use this EXEC statement to remove an exact file (replace 'export_2025.dmp' with the filename from the list). UTL_FILE.FREMOVE requires write access on the directory object (e.g., GRANT WRITE ON DIRECTORY DATA_PUMP_DIR).

sql
EXEC UTL_FILE.FREMOVE('DATA_PUMP_DIR', 'export_2025.dmp');

4. Delete Multiple Files Matching a Pattern

Oracle doesn't support direct DELETE on table functions, so use an anonymous PL/SQL block (run as one statement). This loops over matching files (e.g., all .dmp files) and deletes them. Adjust the WHERE clause for your pattern.

SET SERVEROUTPUT ON SIZE UNLIMITED;

-- Bulk-delete files in DATA_PUMP_DIR matching a name pattern.
-- Each deletion is attempted independently: an error on one file is printed
-- via DBMS_OUTPUT and the loop moves on to the next file.
BEGIN
  FOR rec IN (
    SELECT filename
    FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
    WHERE type = 'file' AND UPPER(filename) LIKE '%.DMP'  -- Change pattern here, e.g., 'OLD_EXPORT%'
  ) LOOP
    BEGIN
      UTL_FILE.FREMOVE('DATA_PUMP_DIR', rec.filename);
      DBMS_OUTPUT.PUT_LINE('Deleted: ' || rec.filename);  -- Visible because SERVEROUTPUT is enabled above
    EXCEPTION
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error deleting ' || rec.filename || ': ' || SQLERRM);
    END;
  END LOOP;
END;
/


-- Interactive file lister / pattern deleter for an RDS directory object.
-- Fixed: the pasted one-line version broke a trailing "-- To prompt:" comment
-- across two physical lines, so the "(Y/N): ' DEFAULT 'N'" fragment was spliced
-- into the code before the IF, making the script invalid. The hint now lives
-- entirely inside comments and the script is formatted one statement per line.
SET SERVEROUTPUT ON SIZE UNLIMITED
SET VERIFY OFF
ACCEPT dir_name CHAR PROMPT 'Enter directory name (default: DATA_PUMP_DIR): ' DEFAULT 'DATA_PUMP_DIR'
ACCEPT file_pattern CHAR PROMPT 'Enter file pattern for deletion (e.g., "old_export%.dmp"; press Enter to skip deletion): '
DECLARE
  -- Cursor for listing files using RDSADMIN.RDS_FILE_UTIL.LISTDIR
  CURSOR file_cursor IS
    SELECT filename, type, filesize, mtime
    FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('&dir_name'))
    ORDER BY mtime DESC;
  lv_pattern   VARCHAR2(1024) := '&file_pattern'; -- User input for deletion pattern
  lv_del_count NUMBER := 0;
  lv_confirm   CHAR(1);
BEGIN
  DBMS_OUTPUT.PUT_LINE('Directory: ' || UPPER('&dir_name'));
  DBMS_OUTPUT.PUT_LINE('=== Listing all files ===');
  DBMS_OUTPUT.PUT_LINE(RPAD('File Name', 40) || ' | Type | Size (bytes) | Modified');
  DBMS_OUTPUT.PUT_LINE(RPAD('=', 40, '-') || ' | ---- | ------------ | --------');
  FOR rec IN file_cursor LOOP
    DBMS_OUTPUT.PUT_LINE(RPAD(NVL(rec.filename, ''), 40) || ' | ' || rec.type || ' | ' ||
                         TO_CHAR(rec.filesize) || ' | ' ||
                         TO_CHAR(rec.mtime, 'YYYY-MM-DD HH24:MI:SS'));
  END LOOP;

  -- If no pattern provided, skip deletion
  IF lv_pattern IS NULL OR TRIM(lv_pattern) = '' THEN
    DBMS_OUTPUT.PUT_LINE('No deletion pattern provided. Exiting.');
    RETURN;
  END IF;

  -- Deletion is gated behind lv_confirm. It defaults to 'N' (dry run) because
  -- PL/SQL cannot prompt mid-block. To make it interactive, add before DECLARE:
  --   ACCEPT confirm CHAR PROMPT 'Delete matching files? (Y/N): ' DEFAULT 'N'
  -- and assign lv_confirm := '&confirm';
  lv_confirm := 'N';

  IF UPPER(lv_confirm) = 'Y' THEN
    DBMS_OUTPUT.PUT_LINE('=== Deleting files matching: ' || lv_pattern || ' ===');
    FOR rec IN file_cursor LOOP
      IF rec.type = 'file' AND UPPER(rec.filename) LIKE UPPER(lv_pattern) THEN -- Match pattern, files only
        lv_del_count := lv_del_count + 1;
        BEGIN
          UTL_FILE.FREMOVE(UPPER('&dir_name'), rec.filename);
          DBMS_OUTPUT.PUT_LINE('Deleted: ' || rec.filename || ' (' || rec.filesize || ' bytes)');
        EXCEPTION
          WHEN OTHERS THEN
            DBMS_OUTPUT.PUT_LINE('Error deleting ' || rec.filename || ': ' || SQLERRM);
        END;
      END IF;
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Deletion complete. ' || lv_del_count || ' files processed.');
  ELSE
    DBMS_OUTPUT.PUT_LINE('=== Dry run: Files that would be deleted ===');
    FOR rec IN file_cursor LOOP
      IF rec.type = 'file' AND UPPER(rec.filename) LIKE UPPER(lv_pattern) THEN
        DBMS_OUTPUT.PUT_LINE('Would delete: ' || rec.filename || ' (' || rec.filesize || ' bytes)');
        lv_del_count := lv_del_count + 1;
      END IF;
    END LOOP;
    IF lv_del_count = 0 THEN
      DBMS_OUTPUT.PUT_LINE('No matching files found.');
    END IF;
  END IF;
EXCEPTION
  WHEN OTHERS THEN
    DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/


SET SERVEROUTPUT ON SIZE UNLIMITED;
-- Print every Oracle-maintained account and its status, headed by the db_name.
DECLARE
    l_db_name  VARCHAR2(30);
    l_parm_len NUMBER;  -- receives the string length from GET_PARAMETER_VALUE
    CURSOR c_sys_users IS
        SELECT username, account_status
        FROM dba_users
        WHERE oracle_maintained = 'Y'
        ORDER BY username;
BEGIN
    -- DBMS_UTILITY.GET_PARAMETER_VALUE fetches the db_name init parameter into l_db_name.
    l_parm_len := DBMS_UTILITY.GET_PARAMETER_VALUE('db_name', l_parm_len, l_db_name);

    DBMS_OUTPUT.PUT_LINE('System Users in Oracle RDS: ' || l_db_name);
    DBMS_OUTPUT.PUT_LINE('Username                     Account Status');
    DBMS_OUTPUT.PUT_LINE('---------------------------- ---------------');
    FOR u IN c_sys_users LOOP
        DBMS_OUTPUT.PUT_LINE(RPAD(u.username, 28) || ' ' || u.account_status);
    END LOOP;
END;
/

// Power Query (M): compare the East1_Params and East2_Params named ranges by
// parameter Name and return only the rows where any compared attribute differs.
let
    East1 = Excel.CurrentWorkbook(){[Name="East1_Params"]}[Content],
    East2 = Excel.CurrentWorkbook(){[Name="East2_Params"]}[Content],
    // Promote headers if not already
    East1_Clean = Table.PromoteHeaders(East1, [PromoteAllScalars=true]),
    East2_Clean = Table.PromoteHeaders(East2, [PromoteAllScalars=true]),
    // Merge on Name (inner join for matching params)
    Merged = Table.NestedJoin(East1_Clean, {"Name"}, East2_Clean, {"Name"}, "East2_Data", JoinKind.Inner),
    // Expand East2 columns
    Expanded = Table.ExpandTableColumn(Merged, "East2_Data", {"Value", "Apply Type", "Data Type", "Value Type", "Source"}, {"East2_Value", "East2_Apply Type", "East2_Data Type", "East2_Value Type", "East2_Source"}),
    // Add discrepancy columns (1 = mismatch, 0 = match)
    AddDiscrepancy = Table.AddColumn(Expanded, "Value_Discrepancy", each if [Value] <> [East2_Value] then 1 else 0),
    AddApplyDiscrepancy = Table.AddColumn(AddDiscrepancy, "Apply_Type_Discrepancy", each if [Apply Type] <> [East2_Apply Type] then 1 else 0),
    AddDataTypeDiscrepancy = Table.AddColumn(AddApplyDiscrepancy, "Data_Type_Discrepancy", each if [Data Type] <> [East2_Data Type] then 1 else 0),
    AddValueTypeDiscrepancy = Table.AddColumn(AddDataTypeDiscrepancy, "Value_Type_Discrepancy", each if [Value Type] <> [East2_Value Type] then 1 else 0),
    AddSourceDiscrepancy = Table.AddColumn(AddValueTypeDiscrepancy, "Source_Discrepancy", each if [Source] <> [East2_Source] then 1 else 0),
    // Total discrepancies per row
    AddTotalDiscrepancy = Table.AddColumn(AddSourceDiscrepancy, "Total_Discrepancies", each [Value_Discrepancy] + [Apply_Type_Discrepancy] + [Data_Type_Discrepancy] + [Value_Type_Discrepancy] + [Source_Discrepancy]),
    // Filter to only discrepancies (Total > 0)
    FilteredDiscrepancies = Table.SelectRows(AddTotalDiscrepancy, each [Total_Discrepancies] > 0),
    // Summary stats
    // NOTE(review): Summary is never returned (the `in` expression below is
    // FilteredDiscrepancies), so M's lazy evaluation never materializes it.
    // Its records also carry inconsistent fields (the first has East1_Count/
    // East2_Count/Discrepancies, the rest have Count), which would likely fail
    // if it were ever evaluated — confirm intent or expose it as its own query.
    Summary = Table.FromRecords({
        [Metric = "Total Parameters", East1_Count = Table.RowCount(East1_Clean), East2_Count = Table.RowCount(East2_Clean), Discrepancies = Table.RowCount(FilteredDiscrepancies)],
        [Metric = "Value Mismatches", Count = List.Sum(Table.Column(AddTotalDiscrepancy, "Value_Discrepancy"))],
        [Metric = "Apply Type Mismatches", Count = List.Sum(Table.Column(AddTotalDiscrepancy, "Apply_Type_Discrepancy"))],
        [Metric = "Data Type Mismatches", Count = List.Sum(Table.Column(AddTotalDiscrepancy, "Data_Type_Discrepancy"))],
        [Metric = "Value Type Mismatches", Count = List.Sum(Table.Column(AddTotalDiscrepancy, "Value_Type_Discrepancy"))],
        [Metric = "Source Mismatches", Count = List.Sum(Table.Column(AddTotalDiscrepancy, "Source_Discrepancy"))]
    })
in
    FilteredDiscrepancies

For Non-Partitioned Indexes

This script now includes the table_name in the generated comment, making it clear which table the index belongs to.

SQL
-- Generate ALTER INDEX statements that restore each non-partitioned index's
-- original parallel DEGREE (run after a PARALLEL rebuild to revert settings).
-- NOTE(review): dba_indexes.degree is a VARCHAR2 and on some versions carries
-- padding; if '1'/'DEFAULT' comparisons miss rows, try TRIM(degree) — verify.
SELECT
    CASE
        WHEN degree IS NULL OR degree = '1' THEN -- Degree '1' is the effective NOPARALLEL
            'ALTER INDEX "' || owner || '"."' || index_name || '" NOPARALLEL; -- On Table: ' || table_name
        WHEN degree = 'DEFAULT' THEN
            'ALTER INDEX "' || owner || '"."' || index_name || '" PARALLEL (DEGREE DEFAULT); -- On Table: ' || table_name
        ELSE
            'ALTER INDEX "' || owner || '"."' || index_name || '" PARALLEL ' || degree || '; -- On Table: ' || table_name
    END AS revert_statement
FROM
    dba_indexes
WHERE
    -- Filter for non-partitioned indexes
    partitioned = 'NO'
    -- You can add the same filters you used for the rebuild script
    -- AND status IN ('VALID', 'UNUSABLE', 'FAILED')
    -- AND table_name = 'YOUR_TABLE_NAME_HERE'
ORDER BY
    owner, index_name;

-- Set server output on to view the reports and messages
SET SERVEROUTPUT ON;

-- Prompt the user for an option to either report or execute
ACCEPT execute_option CHAR PROMPT 'Do you want to rebuild and revert unusable indexes? (Y/N): '

-- Report all UNUSABLE/FAILED indexes (partitioned and non-partitioned), then —
-- if the user answered Y — rebuild each with PARALLEL 8 and restore its
-- original DEGREE setting afterwards.
-- Fixed: 'NO LOGGING' (with a space) is not valid Oracle syntax; the
-- logging_clause keyword is NOLOGGING (corrected in all four rebuild strings).
DECLARE
    -- Variable to hold the user's input
    v_execute_option VARCHAR2(1) := UPPER('&execute_option');
    
    -- Cursor to find all unusable indexes (both partitioned and non-partitioned)
    CURSOR c_unusable_indexes IS
        SELECT owner, index_name, table_name, partitioned, degree
        FROM dba_indexes
        WHERE status IN ('UNUSABLE', 'FAILED')
          -- Exclude system-owned schemas
          AND owner NOT IN ('SYS', 'SYSTEM', 'SYSMAN', 'MDSYS', 'CTXSYS', 'WMSYS', 'XDB', 'EXFSYS', 'DBSNMP')
        ORDER BY owner, table_name, index_name;

    -- Variables to hold the dynamically generated SQL statements
    v_rebuild_sql VARCHAR2(1000);
    v_revert_sql  VARCHAR2(1000);

BEGIN
    DBMS_OUTPUT.PUT_LINE('--- Step 1: Unusable Index Report ---');
    DBMS_OUTPUT.PUT_LINE('The following indexes are currently unusable:');
    DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');

    -- Report which indexes need to be rebuilt and what their revert commands will be
    FOR r_index IN c_unusable_indexes LOOP
        -- Build the REBUILD statement
        IF r_index.partitioned = 'YES' THEN
            -- For partitioned indexes, we need to find and list partitions
            FOR p_part IN (SELECT partition_name FROM dba_ind_partitions WHERE index_owner = r_index.owner AND index_name = r_index.index_name) LOOP
                v_rebuild_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" REBUILD PARTITION ' || p_part.partition_name || ' ONLINE NOLOGGING PARALLEL 8';
                DBMS_OUTPUT.PUT_LINE('Partitioned Index: ' || r_index.owner || '.' || r_index.index_name || ' (Partition: ' || p_part.partition_name || ')');
                DBMS_OUTPUT.PUT_LINE('  Rebuild Command: ' || v_rebuild_sql || ';');
            END LOOP;
        ELSE
            v_rebuild_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" REBUILD ONLINE NOLOGGING PARALLEL 8';
            DBMS_OUTPUT.PUT_LINE('Non-Partitioned Index: ' || r_index.owner || '.' || r_index.index_name);
            DBMS_OUTPUT.PUT_LINE('  Rebuild Command: ' || v_rebuild_sql || ';');
        END IF;

        -- Build the REVERT statement based on the original degree
        CASE
            WHEN r_index.degree IS NULL OR r_index.degree = '1' THEN
                v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" NOPARALLEL';
            WHEN r_index.degree = 'DEFAULT' THEN
                v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" PARALLEL (DEGREE DEFAULT)';
            ELSE
                v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" PARALLEL ' || r_index.degree;
        END CASE;
        
        DBMS_OUTPUT.PUT_LINE('  Revert Command: ' || v_revert_sql || ';');
        DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');

    END LOOP;

    DBMS_OUTPUT.PUT_LINE(CHR(10));
    
    -- Step 2: Conditional Execution (cursor is re-opened; the report above made no changes)
    IF v_execute_option = 'Y' THEN
        DBMS_OUTPUT.PUT_LINE('--- Rebuild and Revert Execution ---');
        DBMS_OUTPUT.PUT_LINE('Executing rebuilds. See log for progress...');

        FOR r_index IN c_unusable_indexes LOOP
            BEGIN
                -- Build the REBUILD statement and execute
                IF r_index.partitioned = 'YES' THEN
                    FOR p_part IN (SELECT partition_name FROM dba_ind_partitions WHERE index_owner = r_index.owner AND index_name = r_index.index_name) LOOP
                        v_rebuild_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" REBUILD PARTITION ' || p_part.partition_name || ' ONLINE NOLOGGING PARALLEL 8';
                        DBMS_OUTPUT.PUT_LINE('  -> Rebuilding Partition: ' || r_index.owner || '.' || r_index.index_name || '(' || p_part.partition_name || ')');
                        EXECUTE IMMEDIATE v_rebuild_sql;
                    END LOOP;
                ELSE
                    v_rebuild_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" REBUILD ONLINE NOLOGGING PARALLEL 8';
                    DBMS_OUTPUT.PUT_LINE('  -> Rebuilding Index: ' || r_index.owner || '.' || r_index.index_name);
                    EXECUTE IMMEDIATE v_rebuild_sql;
                END IF;

                -- Build the REVERT statement and execute
                CASE
                    WHEN r_index.degree IS NULL OR r_index.degree = '1' THEN
                        v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" NOPARALLEL';
                    WHEN r_index.degree = 'DEFAULT' THEN
                        v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" PARALLEL (DEGREE DEFAULT)';
                    ELSE
                        v_revert_sql := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" PARALLEL ' || r_index.degree;
                END CASE;

                DBMS_OUTPUT.PUT_LINE('  -> Reverting Parallelism: ' || r_index.owner || '.' || r_index.index_name);
                EXECUTE IMMEDIATE v_revert_sql;
                
                DBMS_OUTPUT.PUT_LINE('  -> SUCCESS: ' || r_index.owner || '.' || r_index.index_name || ' rebuilt and reverted.');

            EXCEPTION
                -- Per-index isolation: one failed rebuild does not stop the batch.
                WHEN OTHERS THEN
                    DBMS_OUTPUT.PUT_LINE('  -> FAILED: ' || r_index.owner || '.' || r_index.index_name || ' -> ' || SQLERRM);
            END;
        END LOOP;
        
    ELSE
        DBMS_OUTPUT.PUT_LINE('Script finished in report-only mode. No changes were made.');
    END IF;

    DBMS_OUTPUT.PUT_LINE('--- Script Complete ---');
END;
/

For Partitioned Indexes

The dba_ind_partitions view does not contain the table_name column directly. You must join it with the dba_indexes view to retrieve this information.

SQL
-- Generate REBUILD statements for unusable index partitions, joining to
-- dba_indexes to annotate each statement with the owning table.
-- Fixed: 'NO LOGGING' (with a space) is not valid Oracle syntax; the
-- logging_clause keyword is NOLOGGING.
SELECT
    'ALTER INDEX ' || dba_ind_partitions.index_owner || '.' || dba_ind_partitions.index_name || ' REBUILD PARTITION ' || dba_ind_partitions.partition_name || ' ONLINE NOLOGGING; -- On Table: ' || dba_indexes.table_name AS rebuild_statement
FROM
    dba_ind_partitions
JOIN
    dba_indexes ON dba_ind_partitions.index_owner = dba_indexes.owner
                 AND dba_ind_partitions.index_name = dba_indexes.index_name
WHERE
    dba_ind_partitions.status IN ('UNUSABLE', 'FAILED')
    AND dba_ind_partitions.index_owner NOT IN ('SYS', 'SYSTEM', 'SYSMAN', 'MDSYS', 'CTXSYS', 'WMSYS', 'XDB', 'EXFSYS', 'DBSNMP')
ORDER BY
    dba_ind_partitions.index_owner, dba_ind_partitions.index_name;

-- Enable server output for results
SET SERVEROUTPUT ON SIZE UNLIMITED
SET LINESIZE 200
SET FEEDBACK OFF
SET HEADING OFF

-- Prompt for rebuild option and schema
ACCEPT rebuild_option CHAR PROMPT 'Do you want to rebuild unusable indexes? (Y/N): '
ACCEPT schema_list CHAR PROMPT 'Enter schema name(s) to process (comma-separated, or ALL for all non-system schemas): '

-- Schema-filtered rebuild of unusable indexes and index partitions, with a
-- report-only (N) mode, a derived PARALLEL degree, and a NOPARALLEL reset
-- after each successful rebuild.
DECLARE
  v_rebuild_option VARCHAR2(1) := UPPER('&rebuild_option');
  v_schema_list VARCHAR2(4000) := UPPER('&schema_list');
  v_sql_statement VARCHAR2(500);
  v_parallel_degree NUMBER;
  v_rebuilt_count NUMBER := 0;
  v_failed_count NUMBER := 0;
  v_skipped_count NUMBER := 0;
  v_start_time TIMESTAMP := SYSTIMESTAMP;
  
  -- Cursor for non-partitioned indexes
  CURSOR c_unusable_indexes IS
    SELECT di.owner, di.index_name, di.tablespace_name
    FROM dba_indexes di
    JOIN dba_tablespaces dt ON di.tablespace_name = dt.tablespace_name
    WHERE di.partitioned = 'NO'
      AND di.status IN ('UNUSABLE', 'FAILED')
      AND di.owner NOT IN ('SYS', 'SYSTEM', 'SYSMAN', 'MDSYS', 'CTXSYS', 'WMSYS', 'XDB', 'EXFSYS', 'DBSNMP')
      -- NOTE(review): the comma-list tokens are not TRIMmed, so 'A, B' (with a
      -- space) will not match owner 'B' — enter schema names without spaces.
      AND (v_schema_list = 'ALL' OR di.owner IN (
        SELECT REGEXP_SUBSTR(v_schema_list, '[^,]+', 1, LEVEL) FROM dual
        CONNECT BY REGEXP_SUBSTR(v_schema_list, '[^,]+', 1, LEVEL) IS NOT NULL
      ))
      AND dt.status IN ('ONLINE', 'READ WRITE')  -- Skip offline/read-only tablespaces
    ORDER BY di.owner, di.index_name;

  -- Cursor for index partitions
  CURSOR c_unusable_partitions IS
    SELECT dip.index_owner, dip.index_name, dip.partition_name, dip.tablespace_name
    FROM dba_ind_partitions dip
    JOIN dba_tablespaces dt ON dip.tablespace_name = dt.tablespace_name
    WHERE dip.status IN ('UNUSABLE', 'FAILED')
      AND dip.index_owner NOT IN ('SYS', 'SYSTEM', 'SYSMAN', 'MDSYS', 'CTXSYS', 'WMSYS', 'XDB', 'EXFSYS', 'DBSNMP')
      AND (v_schema_list = 'ALL' OR dip.index_owner IN (
        SELECT REGEXP_SUBSTR(v_schema_list, '[^,]+', 1, LEVEL) FROM dual
        CONNECT BY REGEXP_SUBSTR(v_schema_list, '[^,]+', 1, LEVEL) IS NOT NULL
      ))
      AND dt.status IN ('ONLINE', 'READ WRITE')
    ORDER BY dip.index_owner, dip.index_name, dip.partition_name;

BEGIN
  -- Validate rebuild option
  IF v_rebuild_option NOT IN ('Y', 'N') THEN
    RAISE_APPLICATION_ERROR(-20001, 'Invalid rebuild option. Enter Y or N.');
  END IF;

  -- Derive the parallel degree: parallel_max_servers/2, floored at 4, hard-capped at 8
  SELECT LEAST(GREATEST(4, FLOOR(value/2)), 8)
  INTO v_parallel_degree
  FROM v$parameter
  WHERE name = 'parallel_max_servers';

  -- Warn about NOLOGGING
  DBMS_OUTPUT.PUT_LINE('WARNING: NOLOGGING is used. Ensure backups are current, as redo logging is minimized.');
  DBMS_OUTPUT.PUT_LINE('Script started at: ' || TO_CHAR(v_start_time, 'YYYY-MM-DD HH24:MI:SS'));
  DBMS_OUTPUT.PUT_LINE('Processing schemas: ' || NVL(v_schema_list, 'ALL (non-system)'));
  DBMS_OUTPUT.PUT_LINE('Rebuild option: ' || CASE v_rebuild_option WHEN 'Y' THEN 'Execute rebuild' ELSE 'Report only' END);
  DBMS_OUTPUT.PUT_LINE('Parallel degree: ' || v_parallel_degree);
  DBMS_OUTPUT.PUT_LINE('----------------------------------------');

  -- Process non-partitioned indexes
  FOR r_index IN c_unusable_indexes LOOP
    BEGIN
      v_sql_statement := 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || 
                        '" REBUILD ONLINE NOLOGGING PARALLEL ' || v_parallel_degree;
      
      IF v_rebuild_option = 'Y' THEN
        DBMS_OUTPUT.PUT_LINE('Executing: ' || v_sql_statement);
        EXECUTE IMMEDIATE v_sql_statement;
        -- Reset parallelism to avoid query impact
        EXECUTE IMMEDIATE 'ALTER INDEX "' || r_index.owner || '"."' || r_index.index_name || '" NOPARALLEL';
        v_rebuilt_count := v_rebuilt_count + 1;
        DBMS_OUTPUT.PUT_LINE('Reset to NOPARALLEL for ' || r_index.owner || '.' || r_index.index_name);
      ELSE
        DBMS_OUTPUT.PUT_LINE('Would rebuild: ' || v_sql_statement);
        v_skipped_count := v_skipped_count + 1;
      END IF;
    EXCEPTION
      WHEN OTHERS THEN
        v_failed_count := v_failed_count + 1;
        DBMS_OUTPUT.PUT_LINE('Error rebuilding index ' || r_index.owner || '.' || r_index.index_name || 
                             ': ' || SQLERRM);
        IF SQLCODE = -1502 THEN  -- ORA-01502: Index or partition is unusable
          DBMS_OUTPUT.PUT_LINE('Index may already be in progress or have structural issues.');
        END IF;
    END;
    -- NOTE(review): ALTER INDEX is DDL and commits implicitly, so these periodic
    -- COMMITs are effectively no-ops — harmless, but not doing real batching.
    IF v_rebuild_option = 'Y' AND MOD(v_rebuilt_count, 10) = 0 THEN
      COMMIT;
    END IF;
  END LOOP;

  -- Process partitioned indexes
  FOR r_partition IN c_unusable_partitions LOOP
    BEGIN
      v_sql_statement := 'ALTER INDEX "' || r_partition.index_owner || '"."' || r_partition.index_name || 
                        '" REBUILD PARTITION "' || r_partition.partition_name || '" ONLINE NOLOGGING PARALLEL ' || 
                        v_parallel_degree;
      
      IF v_rebuild_option = 'Y' THEN
        DBMS_OUTPUT.PUT_LINE('Executing: ' || v_sql_statement);
        EXECUTE IMMEDIATE v_sql_statement;
        -- Reset parallelism for the partition
        EXECUTE IMMEDIATE 'ALTER INDEX "' || r_partition.index_owner || '"."' || r_partition.index_name || 
                          '" MODIFY PARTITION "' || r_partition.partition_name || '" NOPARALLEL';
        v_rebuilt_count := v_rebuilt_count + 1;
        DBMS_OUTPUT.PUT_LINE('Reset to NOPARALLEL for ' || r_partition.index_owner || '.' || 
                             r_partition.index_name || ' PARTITION ' || r_partition.partition_name);
      ELSE
        DBMS_OUTPUT.PUT_LINE('Would rebuild: ' || v_sql_statement);
        v_skipped_count := v_skipped_count + 1;
      END IF;
    EXCEPTION
      WHEN OTHERS THEN
        v_failed_count := v_failed_count + 1;
        DBMS_OUTPUT.PUT_LINE('Error rebuilding partition ' || r_partition.index_owner || '.' || 
                             r_partition.index_name || ' ' || r_partition.partition_name || 
                             ': ' || SQLERRM);
        IF SQLCODE = -1502 THEN
          DBMS_OUTPUT.PUT_LINE('Partition may already be in progress or have structural issues.');
        END IF;
    END;
    IF v_rebuild_option = 'Y' AND MOD(v_rebuilt_count, 10) = 0 THEN
      COMMIT;
    END IF;
  END LOOP;

  -- Final commit
  IF v_rebuild_option = 'Y' THEN
    COMMIT;
  END IF;

  -- Summary
  DBMS_OUTPUT.PUT_LINE('----------------------------------------');
  DBMS_OUTPUT.PUT_LINE('Script completed at: ' || TO_CHAR(SYSTIMESTAMP, 'YYYY-MM-DD HH24:MI:SS'));
  DBMS_OUTPUT.PUT_LINE('Total indexes/partitions processed: ' || (v_rebuilt_count + v_skipped_count + v_failed_count));
  DBMS_OUTPUT.PUT_LINE('Successfully rebuilt: ' || v_rebuilt_count);
  DBMS_OUTPUT.PUT_LINE('Skipped (report-only): ' || v_skipped_count);
  DBMS_OUTPUT.PUT_LINE('Failed: ' || v_failed_count);
  IF v_failed_count > 0 THEN
    DBMS_OUTPUT.PUT_LINE('Check RDS logs or V$SESSION_LONGOPS for details on failures.');
  END IF;
END;
/



Usage for One-Stop Maintenance/Ad-Hoc Runs

  • Ad-Hoc (On-Demand): Connect via SQL*Plus/SQL Developer as master user, run the script, enter 'Y'/'N' and schemas. It's self-contained—no setup required. Example for 100+ across schemas:

    text
    Do you want to rebuild unusable indexes? (Y/N): Y
    Enter schema name(s) to process (comma-separated, or ALL for all non-system schemas): HR,SCOTT  -- or enter ALL by itself to process every non-system schema (do not mix ALL with specific names)
    • Output includes progress (e.g., "Executing: ALTER INDEX...") and final summary (e.g., "Successfully rebuilt: 127, Failed: 3").
  • Scheduled Maintenance (One-Stop Automation): Wrap it in Oracle Scheduler for weekly/monthly runs during off-peak (e.g., 2 AM Sundays). This makes it truly "one-stop"—create once, forget it runs.

    Step 1: Create a Stored Procedure (save the script's PL/SQL as a procedure, hardcoding or parameterizing schemas/rebuild='Y'):

    sql
    CREATE OR REPLACE PROCEDURE rebuild_unusable_indexes_proc(
      p_schema_list IN VARCHAR2 DEFAULT 'ALL',
      p_rebuild_option IN VARCHAR2 DEFAULT 'Y'
    ) AS
      -- Paste the entire DECLARE/BEGIN...END; block from the script here,
      -- but replace v_schema_list and v_rebuild_option with parameters
      -- (e.g., v_schema_list := UPPER(p_schema_list); v_rebuild_option := UPPER(p_rebuild_option);)
    BEGIN
      -- Your script body
    END;
    /

    Step 2: Schedule the Job (run daily/weekly for all schemas):

    sql
    BEGIN
      DBMS_SCHEDULER.CREATE_JOB(
        job_name        => 'REBUILD_UNUSABLE_INDEXES_JOB',
        job_type        => 'PLSQL_BLOCK',
        job_action      => 'BEGIN rebuild_unusable_indexes_proc(p_schema_list => ''ALL'', p_rebuild_option => ''Y''); END;',
        start_date      => SYSTIMESTAMP + INTERVAL '1' HOUR,  -- Start soon, or set future
        repeat_interval => 'FREQ=WEEKLY; BYDAY=SUN; BYHOUR=2; BYMINUTE=0;',  -- Sundays at 2 AM
        enabled         => TRUE,
        comments        => 'Weekly unusable index maintenance'
      );
    END;
    /
    • Monitor: SELECT * FROM dba_scheduler_jobs WHERE job_name = 'REBUILD_UNUSABLE_INDEXES_JOB';
    • Logs: Check DBA_SCHEDULER_JOB_RUN_DETAILS for output/errors, or CloudWatch for RDS.
    • For ad-hoc trigger: EXEC DBMS_SCHEDULER.RUN_JOB('REBUILD_UNUSABLE_INDEXES_JOB');

Potential Enhancements for Extreme Scale

If you're dealing with 500+ indexes regularly:

  • Add Undo/Temp Space Check: Before starting, query V$UNDOSTAT and V$TEMP_SPACE_HEADER—abort if >80% full.
  • Progress Monitoring: Integrate V$SESSION_LONGOPS polling in the loop for ETA on long rebuilds.
  • Email Alerts: Use UTL_MAIL to send summary (if licensed) or log to a table for RDS integration.
  • Filter by Size/Age: Add to cursors: AND last_analyzed > SYSDATE - 30 (recently analyzed) or join DBA_SEGMENTS for blocks > 1000 (large only).
=============================================================
SET SERVEROUTPUT ON

DECLARE
  -- Running total of sessions successfully terminated.
  v_kill_total PLS_INTEGER := 0;

  -- Active user sessions currently executing one of the target SQL_IDs.
  CURSOR c_target_sessions IS
    SELECT sid, serial#, sql_id, username
    FROM v$session
    WHERE sql_id IN ('sql_id1', 'sql_id2', 'sql_id3')  -- Replace with your 3 SQL_IDs
      AND status = 'ACTIVE'
      AND type = 'USER';  -- Exclude background processes
BEGIN
  FOR rec IN c_target_sessions LOOP
    BEGIN
      -- On RDS, ALTER SYSTEM KILL SESSION is not permitted to the master
      -- user; the rdsadmin wrapper performs the kill instead.
      rdsadmin.rdsadmin_util.kill(
        sid    => rec.sid,
        serial => rec.serial#,
        method => 'IMMEDIATE'
      );
      v_kill_total := v_kill_total + 1;
      DBMS_OUTPUT.PUT_LINE('Killed session: SID=' || rec.sid || 
                           ', SERIAL#=' || rec.serial# || 
                           ', SQL_ID=' || rec.sql_id || 
                           ', USERNAME=' || rec.username);
    EXCEPTION
      -- A session can disappear between the SELECT and the kill call;
      -- report the failure and keep processing the remaining sessions.
      WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error killing SID=' || rec.sid || 
                             ', SERIAL#=' || rec.serial# || 
                             ': ' || SQLERRM);
    END;
  END LOOP;

  IF v_kill_total = 0 THEN
    DBMS_OUTPUT.PUT_LINE('No active sessions found for the specified SQL_IDs.');
  ELSE
    DBMS_OUTPUT.PUT_LINE('Total sessions killed: ' || v_kill_total);
  END IF;
END;
/

-- Report when optimizer statistics were last gathered for each application table.
-- NOTE: DBA_TABLES.LAST_ANALYZED reflects table-level (global) statistics only.
-- For partitioned tables it does NOT prove every partition was gathered --
-- individual partitions can still be stale. Join DBA_TAB_PARTITIONS
-- (LAST_ANALYZED per partition) to verify partition-level freshness.
SELECT 
    owner,
    table_name,
    CASE 
        WHEN partitioned = 'YES' THEN 'Table-level (global) stats gathered on ' || TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS')
        ELSE TO_CHAR(last_analyzed, 'YYYY-MM-DD HH24:MI:SS')
    END AS last_analyzed_date
FROM 
    dba_tables
WHERE 
    owner NOT IN ('SYS', 'SYSTEM', 'SYSMAN', 'MDSYS', 'CTXSYS', 'WMSYS', 'XDB', 'EXFSYS', 'DBSNMP')
ORDER BY 
    last_analyzed DESC NULLS LAST;  -- never-analyzed tables sort last


```sql
DECLARE
    -- Downloads Data Pump dump files from an S3 bucket into DATA_PUMP_DIR on
    -- RDS Oracle, waits briefly, then verifies the files actually arrived.
    v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Replace with your S3 bucket name
    v_s3_prefix VARCHAR2(200) := 'exports/'; -- S3 folder path where dump files are stored
    v_file_prefix VARCHAR2(200) := 'DEMO1_meta_2509092151234.dmp'; -- Specific file or pattern (e.g., 'DEMO1_meta_%.dmp')
    v_task_id VARCHAR2(100); -- Task ID returned by rdsadmin; check dbtask-<id>.log in BDUMP for status
    v_error_msg VARCHAR2(4000);
    v_file_count NUMBER;
    TYPE file_list_t IS TABLE OF VARCHAR2(200);
    v_files file_list_t;
BEGIN
    -- Validate file prefix before starting any S3 work.
    IF v_file_prefix IS NULL OR LENGTH(v_file_prefix) = 0 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: File prefix cannot be null or empty.');
        RAISE_APPLICATION_ERROR(-20001, 'File prefix cannot be null or empty.');
    END IF;
    -- Debug: Output download details
    DBMS_OUTPUT.PUT_LINE('Starting S3 download for files matching: ' || v_file_prefix || ' from S3 path: ' || v_s3_prefix);
    -- Kick off the asynchronous S3 download task.
    v_task_id := rdsadmin.rdsadmin_s3_tasks.download_from_s3(
        p_bucket_name => v_bucket_name,
        p_prefix => v_file_prefix,
        p_s3_prefix => v_s3_prefix,
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Output task ID
    DBMS_OUTPUT.PUT_LINE('S3 download task started for ' || v_file_prefix || ', Task ID: ' || v_task_id);
    -- Fixed wait is best-effort only: large files may need longer. For a robust
    -- check, poll the task log (rds_file_util.read_text_file on 'BDUMP',
    -- 'dbtask-' || v_task_id || '.log') until it reports completion.
    DBMS_LOCK.SLEEP(30);
    -- Verify downloaded files. NOTE: '_' in the prefix is a LIKE single-char
    -- wildcard, so 'DEMO1_meta_...' also matches e.g. 'DEMO1Xmeta...'.
    SELECT filename
    BULK COLLECT INTO v_files
    FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
    WHERE UPPER(filename) LIKE UPPER(v_file_prefix);
    
    v_file_count := v_files.COUNT;
    IF v_file_count = 0 THEN
        DBMS_OUTPUT.PUT_LINE('No files found in DATA_PUMP_DIR after download matching: ' || v_file_prefix);
        DBMS_OUTPUT.PUT_LINE('Available files in DATA_PUMP_DIR:');
        -- Dump the full directory listing to help diagnose a name mismatch.
        FOR rec IN (SELECT filename FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))) LOOP
            DBMS_OUTPUT.PUT_LINE(' - ' || rec.filename);
        END LOOP;
        RAISE_APPLICATION_ERROR(-20003, 'No files downloaded matching prefix: ' || v_file_prefix);
    END IF;
    -- Debug: Output downloaded files
    DBMS_OUTPUT.PUT_LINE('Found ' || v_file_count || ' file(s) matching prefix: ' || v_file_prefix);
    FOR i IN 1..v_files.COUNT LOOP
        DBMS_OUTPUT.PUT_LINE(' - File ' || i || ': ' || v_files(i));
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        -- Re-raise application errors raised above unchanged, so callers can
        -- distinguish -20003 (no files) from -20001 (validation/other failure)
        -- instead of everything collapsing into a re-wrapped -20001.
        IF SQLCODE BETWEEN -20999 AND -20000 THEN
            RAISE;
        END IF;
        v_error_msg := 'Error in download_from_s3: ' || SQLERRM;
        DBMS_OUTPUT.PUT_LINE(v_error_msg);
        RAISE_APPLICATION_ERROR(-20001, v_error_msg);
END;
/
```
### 2. Import Script with Tablespace Remapping
This anonymous block imports the metadata-only `.dmp` files into the target database, remapping 24 source tablespaces to 12 target tablespaces using `REMAP_TABLESPACE`. It aligns with the baseline’s naming convention (`<schema_name>_meta_<timestamp>.dmp`) and parameters.
**import_metadata_schemas.sql** — Data Pump metadata import with 24-to-12 tablespace remapping
```sql
DECLARE
    -- Metadata-only Data Pump import for one schema, remapping 24 source
    -- tablespaces onto 12 target tablespaces. Runs detached in the background;
    -- progress goes to the generated log file in DATA_PUMP_DIR.
    v_handle NUMBER;
    v_job_name VARCHAR2(128); -- Sized generously to avoid ORA-06502
    v_dumpfile VARCHAR2(200); -- Sized generously for safety
    v_logfile VARCHAR2(200);  -- Sized generously for safety
    v_timestamp VARCHAR2(19) := '2509092151234'; -- Replace with the timestamp from your dump file
    v_error_msg VARCHAR2(4000);
    p_schema VARCHAR2(128) := 'DEMO1'; -- Example schema name
    p_parallel NUMBER := 2;        -- Example parallelism (e.g., 1, 2, 3)
    p_multiple_files VARCHAR2(1) := 'N'; -- 'Y' for multiple files, 'N' for single file
    TYPE tablespace_mapping_t IS TABLE OF VARCHAR2(256);
    -- Each entry is 'SOURCE_TBS:TARGET_TBS'; two source tablespaces map to
    -- each target tablespace (24 -> 12).
    v_tablespace_mappings tablespace_mapping_t := tablespace_mapping_t(
        'TBS1:NEW_TBS1', 'TBS2:NEW_TBS1', -- Customize with your mappings
        'TBS3:NEW_TBS2', 'TBS4:NEW_TBS2',
        'TBS5:NEW_TBS3', 'TBS6:NEW_TBS3',
        'TBS7:NEW_TBS4', 'TBS8:NEW_TBS4',
        'TBS9:NEW_TBS5', 'TBS10:NEW_TBS5',
        'TBS11:NEW_TBS6', 'TBS12:NEW_TBS6',
        'TBS13:NEW_TBS7', 'TBS14:NEW_TBS7',
        'TBS15:NEW_TBS8', 'TBS16:NEW_TBS8',
        'TBS17:NEW_TBS9', 'TBS18:NEW_TBS9',
        'TBS19:NEW_TBS10', 'TBS20:NEW_TBS10',
        'TBS21:NEW_TBS11', 'TBS22:NEW_TBS11',
        'TBS23:NEW_TBS12', 'TBS24:NEW_TBS12'
    );
BEGIN
    -- Validate schema name length
    IF LENGTH(p_schema) > 100 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: Schema name too long.');
        RAISE_APPLICATION_ERROR(-20002, 'Schema name too long. Maximum length is 100 characters.');
    END IF;
    -- Validate p_multiple_files
    IF UPPER(p_multiple_files) NOT IN ('Y', 'N') THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: p_multiple_files must be Y or N.');
        RAISE_APPLICATION_ERROR(-20003, 'p_multiple_files must be ''Y'' or ''N''.');
    END IF;
    -- Create a unique job name
    v_job_name := SUBSTR('SCHEMA_IMP_' || UPPER(p_schema) || '_' || TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISSFF3'), 1, 128);
    -- Create unique log and dump file names; '%U' expands to the file-set
    -- ordinal when a multi-file dump set was exported.
    v_dumpfile := UPPER(p_schema) || '_meta_' || v_timestamp || CASE WHEN UPPER(p_multiple_files) = 'Y' THEN '%U.dmp' ELSE '.dmp' END;
    v_logfile := UPPER(p_schema) || '_meta_import_' || TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISSFF3') || '.log';
    -- Validate file name lengths
    IF LENGTH(v_dumpfile) > 200 OR LENGTH(v_logfile) > 200 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: File name too long.');
        RAISE_APPLICATION_ERROR(-20004, 'Generated file name too long. Maximum length is 200 characters.');
    END IF;
    -- Debug: Output job details
    DBMS_OUTPUT.PUT_LINE('Starting import job: ' || v_job_name);
    -- Open the import job
    v_handle := DBMS_DATAPUMP.OPEN(
        operation => 'IMPORT',
        job_mode => 'SCHEMA',
        job_name => v_job_name,
        version => 'LATEST'
    );
    DBMS_OUTPUT.PUT_LINE('Job opened with handle: ' || v_handle);
    -- Add log file
    DBMS_DATAPUMP.ADD_FILE(
        handle => v_handle,
        filename => v_logfile,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
    );
    DBMS_OUTPUT.PUT_LINE('Log file added: ' || v_logfile);
    -- Add dump file
    DBMS_DATAPUMP.ADD_FILE(
        handle => v_handle,
        filename => v_dumpfile,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
    );
    DBMS_OUTPUT.PUT_LINE('Dump file added: ' || v_dumpfile);
    -- Restrict the import to the requested schema.
    -- SCHEMA_EXPR must evaluate to a SQL expression like:  = 'DEMO1'
    -- (schema name wrapped in single quotes). The previous concatenation
    -- produced = ''DEMO1'' (doubled quotes), which is not a valid expression.
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => v_handle,
        name => 'SCHEMA_EXPR',
        value => '= ''' || UPPER(p_schema) || ''''
    );
    DBMS_OUTPUT.PUT_LINE('Schema filter applied for: ' || p_schema);
    -- Remap tablespaces (24 to 12)
    FOR i IN 1..v_tablespace_mappings.COUNT LOOP
        DBMS_DATAPUMP.METADATA_REMAP(
            handle => v_handle,
            name => 'REMAP_TABLESPACE',
            value => v_tablespace_mappings(i)
        );
        DBMS_OUTPUT.PUT_LINE('Remapped tablespace: ' || v_tablespace_mappings(i));
    END LOOP;
    -- Clamp parallelism to a sane range before applying it.
    DBMS_DATAPUMP.SET_PARALLEL(
        handle => v_handle,
        degree => GREATEST(1, LEAST(p_parallel, 100))
    );
    DBMS_OUTPUT.PUT_LINE('Parallelism set to: ' || GREATEST(1, LEAST(p_parallel, 100)));
    -- Start the job
    DBMS_DATAPUMP.START_JOB(v_handle);
    DBMS_OUTPUT.PUT_LINE('Job started');
    -- Detach to run in background
    DBMS_DATAPUMP.DETACH(v_handle);
    DBMS_OUTPUT.PUT_LINE('Job detached');
    -- Output confirmation
    DBMS_OUTPUT.PUT_LINE('Job ' || v_job_name || ' started for schema ' || p_schema || '. Log: ' || v_logfile || ' in DATA_PUMP_DIR.');
EXCEPTION
    WHEN OTHERS THEN
        v_error_msg := 'Error in import_metadata_schemas: ' || SQLERRM;
        DBMS_OUTPUT.PUT_LINE(v_error_msg);
        RAISE_APPLICATION_ERROR(-20001, v_error_msg);
END;
/
```
### How to Run
1. **Download Script**:
   - **Customize Parameters**:
     ```sql
     v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Your S3 bucket name
     v_s3_prefix VARCHAR2(200) := 'exports/'; -- S3 folder path
     v_file_prefix VARCHAR2(200) := 'DEMO1_meta_2509092151234.dmp'; -- Match your dump file
     ```
     - For multiple files, use a pattern (e.g., `'DEMO1_meta_%.dmp'`).
   - **Enable Server Output**:
     ```sql
     SET SERVEROUTPUT ON;
     ```
   - **Run**:
     ```sql
     DECLARE
     ...
     END;
     /
     ```
   - **Verify**:
     ```sql
     SELECT filename FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')) WHERE filename LIKE 'DEMO1_meta_%';
     SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log')); -- Replace task_12345 with the returned task ID; rdsadmin_s3_tasks is a package, not a queryable table
     ```
2. **Import Script**:
   - **Customize Parameters**:
     ```sql
     p_schema VARCHAR2(128) := 'DEMO1'; -- Match source schema
     p_parallel NUMBER := 2;
     p_multiple_files VARCHAR2(1) := 'N';
     v_timestamp VARCHAR2(19) := '2509092151234'; -- Match dump file timestamp
     v_tablespace_mappings tablespace_mapping_t := tablespace_mapping_t(
         'TBS1:NEW_TBS1', 'TBS2:NEW_TBS1', -- Customize with your mappings
         'TBS3:NEW_TBS2', 'TBS4:NEW_TBS2',
         ...
         'TBS23:NEW_TBS12', 'TBS24:NEW_TBS12'
     );
     ```
   - **Enable Server Output**:
     ```sql
     SET SERVEROUTPUT ON;
     ```
   - **Run**:
     ```sql
     DECLARE
     ...
     END;
     /
     ```
   - **Verify**:
     ```sql
     SELECT owner_name, job_name, operation, state FROM dba_datapump_jobs WHERE job_name LIKE 'SCHEMA_IMP_%'; -- DBA_DATAPUMP_JOBS has no ERROR_MSG column; check the import log file for error details
     ```
### Troubleshooting
1. **Download Issues**:
   - Check task status:
     ```sql
     SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log')); -- Replace task_12345 with the returned task ID
     ```
   - Ensure `v_file_prefix` matches the S3 file (case-sensitive).
   - Verify IAM role permissions and bucket region.
2. **Import Issues**:
   - Check log file in `DATA_PUMP_DIR` (e.g., `DEMO1_meta_import_2509092151234.log`).
   - Ensure target tablespaces exist and the schema is valid.
   - If `ORA-31626` occurs, add `PRAGMA AUTONOMOUS_TRANSACTION` to the import script:
     ```sql
     DECLARE
         PRAGMA AUTONOMOUS_TRANSACTION;
         ...
     BEGIN
         ...
         COMMIT;
     EXCEPTION
         ...
         COMMIT;
     END;
     /
     ```
3. **Provide Details**:
   - Share `DBMS_OUTPUT`, task status, log file contents, and Oracle RDS version.
### Notes
- **Tablespace Mapping**: Customize `v_tablespace_mappings` with your actual 24-to-12 mappings.
- **File Naming**: Ensure `v_timestamp` matches the source dump file’s timestamp.
- **Future Enhancements**: If needed, add `DBMS_SCHEDULER` for multiple schemas or `.sql` file generation for review.
Please run these scripts and share any errors or output for further assistance!

No comments: