Monday, September 8, 2025

Individual expdp per schema for small schemas




-- Metadata-only schema export that writes DDL to a SQL file (no dump data).
-- NOTE(review): DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE is documented for the
-- SQL_FILE (import-side) operation; confirm that an EXPORT job accepts it on
-- your Oracle version -- TODO verify.
DECLARE
    v_handle NUMBER;                        -- Data Pump job handle from OPEN
    p_schema VARCHAR2(128) := '1';          -- schema to export (must exist in dba_users)
BEGIN
    v_handle := DBMS_DATAPUMP.OPEN(operation => 'EXPORT', job_mode => 'SCHEMA');
    DBMS_DATAPUMP.ADD_FILE(handle => v_handle, filename => 'schema.sql', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE);
    -- Fix: build the filter expression as = '1' (single quotes). The original
    -- concatenation (q'[= ']' || '''' || ... || '''' || q'[']') produced
    -- = ''1'' -- doubled quotes, which is not a valid SQL expression.
    DBMS_DATAPUMP.METADATA_FILTER(handle => v_handle, name => 'SCHEMA_EXPR', value => q'[= ']' || UPPER(p_schema) || q'[']');
    -- Skip index and statistics DDL to keep the output small.
    DBMS_DATAPUMP.METADATA_FILTER(handle => v_handle, name => 'EXCLUDE_PATH_EXPR', value => q'[IN ('INDEX', 'STATISTICS')]');
    -- Metadata only: exclude all table rows.
    DBMS_DATAPUMP.DATA_FILTER(handle => v_handle, name => 'INCLUDE_ROWS', value => 0);
    DBMS_DATAPUMP.START_JOB(v_handle);
    -- Detach so the job continues in the background; the message below only
    -- confirms submission, not completion.
    DBMS_DATAPUMP.DETACH(v_handle);
    DBMS_OUTPUT.PUT_LINE('SQL file generated: schema.sql');
END;
/

-- Schema-mode Data Pump export with a SQL-file output target and a
-- hard-coded schema filter ('1').
-- NOTE(review): KU$_FILE_TYPE_SQL_FILE is documented for the SQL_FILE
-- (import-side) operation; whether an EXPORT job accepts it should be
-- confirmed on the target Oracle version -- TODO verify.
DECLARE
    v_handle NUMBER;   -- Data Pump job handle returned by OPEN
    -- Job name is unique to the second; rapid re-runs within one second
    -- would collide (see the FF3 variant used elsewhere in this file).
    v_job_name VARCHAR2(128) := 'SCHEMA_EXP_' || TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISS');
BEGIN
    v_handle := DBMS_DATAPUMP.OPEN(operation => 'EXPORT', job_mode => 'SCHEMA', job_name => v_job_name);
    -- Output file: DDL script written into DATA_PUMP_DIR.
    DBMS_DATAPUMP.ADD_FILE(
        handle => v_handle,
        filename => 'schema.sql',
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE
    );
    -- Restrict the job to the schema literally named '1'.
    DBMS_DATAPUMP.METADATA_FILTER(handle => v_handle, name => 'SCHEMA_EXPR', value => q'[= '1']');
    DBMS_DATAPUMP.START_JOB(v_handle);
    -- Detach immediately; the job runs on in the background and no log file
    -- was attached, so progress must be checked via dba_datapump_jobs.
    DBMS_DATAPUMP.DETACH(v_handle);
END;
/

```sql
-- Upload Data Pump dump file(s) from DATA_PUMP_DIR to S3, after first
-- verifying that at least one matching file exists on disk.
DECLARE
    v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Replace with your S3 bucket name
    v_s3_prefix VARCHAR2(200) := 'exports/' || TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISS') || '/'; -- Unique folder
    -- NOTE(review): the LIKE pattern below only applies to the listdir check;
    -- rdsadmin_s3_tasks.upload_to_s3 treats p_prefix as a literal filename
    -- prefix, not a LIKE pattern -- '%' there will not wildcard. Confirm
    -- against the AWS RDS S3-integration docs before passing patterns.
    v_file_prefix VARCHAR2(200) := '1_schema_exp_2509092151234.dmp'; -- Exact file name (or common prefix)
    v_task_id VARCHAR2(100);    -- S3 upload task ID returned by upload_to_s3
    v_error_msg VARCHAR2(4000); -- buffer for the wrapped error text
    v_file_count NUMBER;        -- number of files matching v_file_prefix
BEGIN
    -- Validate file prefix before touching the filesystem.
    IF v_file_prefix IS NULL OR LENGTH(v_file_prefix) = 0 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: File prefix cannot be null or empty.');
        RAISE_APPLICATION_ERROR(-20001, 'File prefix cannot be null or empty.');
    END IF;

    -- Check that at least one matching file exists in DATA_PUMP_DIR, so a
    -- typo'd prefix fails loudly instead of producing an empty upload task.
    SELECT COUNT(*)
    INTO v_file_count
    FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
    WHERE filename LIKE v_file_prefix;

    IF v_file_count = 0 THEN
        DBMS_OUTPUT.PUT_LINE('No files found in DATA_PUMP_DIR matching: ' || v_file_prefix);
        RAISE_APPLICATION_ERROR(-20003, 'No files found matching prefix: ' || v_file_prefix);
    END IF;

    -- Debug: Output upload details
    DBMS_OUTPUT.PUT_LINE('Starting S3 upload for files matching: ' || v_file_prefix || ' to S3 path: ' || v_s3_prefix);
    DBMS_OUTPUT.PUT_LINE('Found ' || v_file_count || ' file(s) matching prefix.');

    -- Upload to S3 (no p_compression_level => uncompressed upload).
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => v_bucket_name,
        p_prefix => v_file_prefix,
        p_s3_prefix => v_s3_prefix,
        p_directory_name => 'DATA_PUMP_DIR'
    );

    -- The task runs asynchronously; this only confirms submission.
    DBMS_OUTPUT.PUT_LINE('S3 upload task started for ' || v_file_prefix || ', Task ID: ' || v_task_id);
EXCEPTION
    WHEN OTHERS THEN
        -- Fix: re-raise our own validation errors unchanged. The original
        -- handler swallowed -20001/-20003 and re-raised them as -20002 with
        -- the misleading text 'Error in upload_to_s3'.
        IF SQLCODE IN (-20001, -20003) THEN
            RAISE;
        END IF;
        v_error_msg := 'Error in upload_to_s3: ' || SQLERRM;
        DBMS_OUTPUT.PUT_LINE(v_error_msg);
        RAISE_APPLICATION_ERROR(-20002, v_error_msg);
END;
/
```

### Key Changes
1. **File Existence Check**:
   - Added a check using `rdsadmin.rds_file_util.listdir` to count files matching `v_file_prefix` before uploading.
   - Raises an error if no files match, preventing silent task completion.
2. **Dynamic `v_s3_prefix`**:
   - Uses `exports/YYMMDDHH24MISS/` to ensure unique S3 paths, avoiding overwrites.
3. **No Compression**:
   - Omitted `p_compression_level` to upload uncompressed `.dmp` files, simplifying imports.
4. **Debugging**:
   - Outputs the number of matching files and confirms the upload task ID.
5. **Correct Parameters**:
   - Uses `p_prefix` correctly, as confirmed by AWS documentation.

### How to Run
1. **Customize Parameters**:
   - Edit the `DECLARE` section:
     ```sql
     v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Your S3 bucket name
     v_s3_prefix VARCHAR2(200) := 'exports/' || TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISS') || '/'; -- Unique folder
     v_file_prefix VARCHAR2(200) := '1_schema_exp_2509092151234.dmp'; -- Specific file or pattern
     ```
     - For a single file: Use the exact filename (e.g., `'1_schema_exp_2509092151234.dmp'`).
     - For multiple files: Use a pattern (e.g., `'1_schema_exp_%.dmp'`).
2. **Enable Server Output**:
   ```sql
   SET SERVEROUTPUT ON;
   ```
3. **Run the Script**:
   ```sql
   DECLARE
   ...
   END;
   /
   ```
   - Expected output (if files exist):
     ```
     Starting S3 upload for files matching: 1_schema_exp_2509092151234.dmp to S3 path: exports/250910125123/
     Found 1 file(s) matching prefix.
     S3 upload task started for 1_schema_exp_2509092151234.dmp, Task ID: task_12345
     ```
   - If no files match:
     ```
     No files found in DATA_PUMP_DIR matching: 1_schema_exp_2509092151234.dmp
     Error in upload_to_s3: ORA-20003: No files found matching prefix: 1_schema_exp_2509092151234.dmp
     ```

### Monitoring and Verification
1. **Check Upload Status**:
   ```sql
   -- rdsadmin_s3_tasks is a PL/SQL package, not a queryable table; check the
   -- task's log file in BDUMP instead:
   SELECT text
   FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log')); -- Replace task_12345 with actual task_id
   ```
2. **Verify Files in S3**:
   ```bash
   aws s3 ls s3://my-rds-dumps-bucket/exports/250910125123/
   ```
3. **Check Dump Files**:
   ```sql
   SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')) WHERE filename LIKE '%.dmp';
   ```

### Troubleshooting
1. **No Files in S3**:
   - If `v_file_count = 0`, the `p_prefix` didn’t match any files. Adjust `v_file_prefix` to match the exact filename or pattern from `listdir`.
   - Ensure the Data Pump job completed successfully (check `dba_datapump_jobs` and the log file).
2. **Task Status Errors**:
   - If `status = 'FAILURE'`, check `error_msg` for issues like `AccessDenied` or region mismatch.
3. **S3 Integration**:
   - Verify `S3_INTEGRATION` option, IAM role, and bucket region.
4. **Provide Details**:
   - Share the `DBMS_OUTPUT` output, task status (`rdsadmin.rdsadmin_s3_tasks`), and `listdir` results.

### Integration with Data Pump Script
If this upload script works, you can integrate it with your `export_metadata_schemas` block (artifact ID `f7a6e9ef-432a-430d-b40d-754cbc3730f4`) by replacing its S3 upload section:
```sql
v_s3_prefix VARCHAR2(200) := 'exports/' || v_timestamp || '/';
IF UPPER(p_multiple_files) = 'Y' THEN
    SELECT COUNT(*)
    INTO v_file_count
    FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
    WHERE filename LIKE UPPER(p_schema) || '_schema_exp_' || v_timestamp || '%.dmp';
    IF v_file_count = 0 THEN
        RAISE_APPLICATION_ERROR(-20003, 'No files found matching prefix: ' || UPPER(p_schema) || '_schema_exp_' || v_timestamp || '%.dmp');
    END IF;
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => v_bucket_name,
        p_prefix => UPPER(p_schema) || '_schema_exp_' || v_timestamp || '%.dmp',
        p_s3_prefix => v_s3_prefix,
        p_directory_name => 'DATA_PUMP_DIR'
    );
ELSE
    SELECT COUNT(*)
    INTO v_file_count
    FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
    WHERE filename = v_dumpfile;
    IF v_file_count = 0 THEN
        RAISE_APPLICATION_ERROR(-20003, 'No files found matching: ' || v_dumpfile);
    END IF;
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => v_bucket_name,
        p_prefix => v_dumpfile,
        p_s3_prefix => v_s3_prefix,
        p_directory_name => 'DATA_PUMP_DIR'
    );
END IF;
DBMS_OUTPUT.PUT_LINE('S3 upload task started for ' || v_dumpfile || ', Task ID: ' || v_task_id);
```

Please execute the updated upload script and check the S3 bucket. If no files appear, share the `DBMS_OUTPUT`, task status, and `listdir` results, and I’ll help diagnose further!

```sql
-- Metadata-only schema export (no rows, no indexes/statistics) followed by an
-- S3 upload of the resulting dump file(s).
DECLARE
    v_handle NUMBER;              -- Data Pump job handle
    v_job_name VARCHAR2(128);     -- 128 to match 12c+ job-name limit (avoids ORA-06502)
    v_dumpfile VARCHAR2(200);     -- generated dump file name
    v_logfile VARCHAR2(200);      -- generated log file name
    v_timestamp VARCHAR2(19) := TO_CHAR(SYSTIMESTAMP, 'YYMMDDHH24MISSFF3'); -- millisecond precision for uniqueness
    v_job_state VARCHAR2(30);     -- final state reported by WAIT_FOR_JOB
    v_error_msg VARCHAR2(4000);
    p_schema VARCHAR2(128) := '1';        -- Example schema name (e.g., '1', '2', '3')
    p_parallel NUMBER := 2;               -- Example parallelism (clamped to 1..100 below)
    p_multiple_files VARCHAR2(1) := 'N';  -- 'Y' for multiple (%U) files, 'N' for single file
    v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Replace with your S3 bucket name
    v_s3_prefix VARCHAR2(200) := 'exports/'; -- Optional S3 folder path
    v_task_id VARCHAR2(100);      -- S3 upload task ID
BEGIN
    -- Validate schema name length (leaves headroom for the generated names).
    IF LENGTH(p_schema) > 100 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: Schema name too long.');
        RAISE_APPLICATION_ERROR(-20002, 'Schema name too long. Maximum length is 100 characters.');
    END IF;

    -- Validate p_multiple_files flag.
    IF UPPER(p_multiple_files) NOT IN ('Y', 'N') THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: p_multiple_files must be Y or N.');
        RAISE_APPLICATION_ERROR(-20003, 'p_multiple_files must be ''Y'' or ''N''.');
    END IF;

    -- Build a unique job name and matching dump/log file names.
    v_job_name := SUBSTR('SCHEMA_EXP_' || UPPER(p_schema) || '_' || v_timestamp, 1, 128);
    v_dumpfile := UPPER(p_schema) || '_schema_exp_' || v_timestamp || CASE WHEN UPPER(p_multiple_files) = 'Y' THEN '%U.dmp' ELSE '.dmp' END;
    v_logfile := UPPER(p_schema) || '_schema_exp_' || v_timestamp || '.log';

    -- Validate generated file name lengths.
    IF LENGTH(v_dumpfile) > 200 OR LENGTH(v_logfile) > 200 THEN
        DBMS_OUTPUT.PUT_LINE('Validation failed: File name too long.');
        RAISE_APPLICATION_ERROR(-20004, 'Generated file name too long. Maximum length is 200 characters.');
    END IF;

    DBMS_OUTPUT.PUT_LINE('Starting job: ' || v_job_name);

    -- Open the export job.
    v_handle := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => v_job_name,
        version => 'LATEST'
    );
    DBMS_OUTPUT.PUT_LINE('Job opened with handle: ' || v_handle);

    -- Attach the log file.
    DBMS_DATAPUMP.ADD_FILE(
        handle => v_handle,
        filename => v_logfile,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
    );
    DBMS_OUTPUT.PUT_LINE('Log file added: ' || v_logfile);

    -- Attach the dump file (with %U substitution when multiple files).
    DBMS_DATAPUMP.ADD_FILE(
        handle => v_handle,
        filename => v_dumpfile,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
    );
    DBMS_OUTPUT.PUT_LINE('Dump file added: ' || v_dumpfile);

    -- Fix: build the filter expression as = 'SCHEMA' (single quotes). The
    -- original concatenation produced = ''1'' (doubled quotes), which is not
    -- a valid SQL expression.
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => v_handle,
        name => 'SCHEMA_EXPR',
        value => q'[= ']' || UPPER(p_schema) || q'[']'
    );
    DBMS_OUTPUT.PUT_LINE('Schema filter applied for: ' || p_schema);

    -- Exclude index and statistics DDL.
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => v_handle,
        name => 'EXCLUDE_PATH_EXPR',
        value => q'[IN ('INDEX', 'STATISTICS')]'
    );
    DBMS_OUTPUT.PUT_LINE('Excluded indexes and statistics');

    -- Metadata only: exclude all table rows.
    DBMS_DATAPUMP.DATA_FILTER(
        handle => v_handle,
        name => 'INCLUDE_ROWS',
        value => 0
    );
    DBMS_OUTPUT.PUT_LINE('Set metadata only export');

    -- Clamp parallelism to a sane range.
    DBMS_DATAPUMP.SET_PARALLEL(
        handle => v_handle,
        degree => GREATEST(1, LEAST(p_parallel, 100))
    );
    DBMS_OUTPUT.PUT_LINE('Parallelism set to: ' || GREATEST(1, LEAST(p_parallel, 100)));

    -- Start the job.
    DBMS_DATAPUMP.START_JOB(v_handle);
    DBMS_OUTPUT.PUT_LINE('Job started');

    -- Fix: wait for the export to complete before uploading. The original
    -- DETACHed and immediately started the upload, racing the still-running
    -- job -- the dump file may not exist (or be partial) at upload time.
    DBMS_DATAPUMP.WAIT_FOR_JOB(handle => v_handle, job_state => v_job_state);
    DBMS_OUTPUT.PUT_LINE('Job finished with state: ' || v_job_state);
    DBMS_DATAPUMP.DETACH(v_handle);
    DBMS_OUTPUT.PUT_LINE('Job detached');

    -- Upload dump file(s) to S3. p_prefix is a literal filename prefix, not
    -- a LIKE pattern, so '%' must not appear in it.
    IF UPPER(p_multiple_files) = 'Y' THEN
        -- Fix: pass the common stem so all %U-generated files (01.dmp,
        -- 02.dmp, ...) are uploaded; the original passed '...%.dmp', which
        -- matches no file because '%' is not a wildcard here.
        v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
            p_bucket_name => v_bucket_name,
            p_prefix => UPPER(p_schema) || '_schema_exp_' || v_timestamp,
            p_s3_prefix => v_s3_prefix,
            p_directory_name => 'DATA_PUMP_DIR',
            p_compression_level => 6 -- Optional: GZIP level 6
        );
        DBMS_OUTPUT.PUT_LINE('S3 upload task started for files with prefix ' || UPPER(p_schema) || '_schema_exp_' || v_timestamp || ', Task ID: ' || v_task_id);
    ELSE
        -- Single file: the exact dump file name is a valid literal prefix.
        v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
            p_bucket_name => v_bucket_name,
            p_prefix => v_dumpfile,
            p_s3_prefix => v_s3_prefix,
            p_directory_name => 'DATA_PUMP_DIR',
            p_compression_level => 6 -- Optional: GZIP level 6
        );
        DBMS_OUTPUT.PUT_LINE('S3 upload task started for file ' || v_dumpfile || ', Task ID: ' || v_task_id);
    END IF;

    -- Final confirmation (upload task itself is still asynchronous).
    DBMS_OUTPUT.PUT_LINE('Job ' || v_job_name || ' completed for schema ' || p_schema || '. Log: ' || v_logfile || ' in DATA_PUMP_DIR. S3 upload initiated.');
EXCEPTION
    WHEN OTHERS THEN
        v_error_msg := 'Error in export_metadata_schemas: ' || SQLERRM;
        DBMS_OUTPUT.PUT_LINE(v_error_msg);
        RAISE_APPLICATION_ERROR(-20001, v_error_msg);
END;
/
```

### Fixes and Enhancements
1. **Corrected S3 Parameters**:
   - Replaced `p_file_name` with `p_prefix` in `rdsadmin.rdsadmin_s3_tasks.upload_to_s3`.
   - For single files (`p_multiple_files = 'N'`), `p_prefix` is set to `v_dumpfile` (e.g., `1_schema_exp_2509092151234.dmp`).
   - For multiple files (`p_multiple_files = 'Y'`), `p_prefix` uses a pattern (e.g., `1_schema_exp_2509092151234%.dmp`) to match `%U` files (e.g., `01.dmp`, `02.dmp`).
2. **Baseline Enhancements**:
   - Increased variable sizes: `v_job_name` to `VARCHAR2(128)`, `v_dumpfile` and `v_logfile` to `VARCHAR2(200)` to prevent `ORA-06502`.
   - Updated naming: `SCHEMA_EXP_` for job, `_schema_exp_` for files.
   - Higher-precision timestamp (`YYMMDDHH24MISSFF3`) for uniqueness.
   - Simplified exception block to avoid `ORA-31626` by removing `DBMS_DATAPUMP.SET_PARAMETER` and `LOG_MESSAGE`.
3. **Parameters**:
   - `p_schema := '1'`, `p_parallel := 2`, `p_multiple_files := 'N'` as defaults.
   - Validated `p_multiple_files` for `'Y'` or `'N'`.
4. **Debugging**:
   - Added `DBMS_OUTPUT.PUT_LINE` for each step, including S3 upload task ID.

### Prerequisites
1. **S3 Integration**:
   - Enable `S3_INTEGRATION` in your RDS instance’s Option Group (RDS Console > Option Groups > Add `S3_INTEGRATION`).
   - Attach an IAM role with permissions:
     ```json
     {
         "Version": "2012-10-17",
         "Statement": [
             {
                 "Effect": "Allow",
                 "Action": ["s3:PutObject", "s3:ListBucket", "s3:GetObject"],
                 "Resource": ["arn:aws:s3:::my-rds-dumps-bucket", "arn:aws:s3:::my-rds-dumps-bucket/*"]
             }
         ]
     }
     ```
   - Attach role to RDS instance (RDS > Modify > IAM roles).
2. **Privileges**:
   ```sql
   GRANT DATAPUMP_EXP_FULL_DATABASE TO your_user;
   GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
   GRANT EXECUTE ON rdsadmin.rdsadmin_s3_tasks TO your_user;
   ```
3. **Directory**:
   ```sql
   SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
   ```
   - If missing, create (RDS `master` user):
     ```sql
     EXEC rdsadmin.rdsadmin_util.create_directory('DATA_PUMP_DIR', '/rdsdbdata/datapump');
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     ```
4. **Schema**:
   ```sql
   SELECT username FROM dba_users WHERE username = '1';
   ```
5. **S3 Bucket**:
   - Create a bucket in the same region as your RDS instance (e.g., `us-east-1`).
   - Update `v_bucket_name := 'my-rds-dumps-bucket'` in the script.

### How to Run
1. **Customize Parameters**:
   - Edit the `DECLARE` section:
     ```sql
     p_schema VARCHAR2(128) := '1';        -- Change to '2', '3', etc.
     p_parallel NUMBER := 2;               -- Change to 1, 2, 3, etc.
     p_multiple_files VARCHAR2(1) := 'N';   -- Change to 'Y' for multiple files
     v_bucket_name VARCHAR2(128) := 'my-rds-dumps-bucket'; -- Your S3 bucket
     v_s3_prefix VARCHAR2(200) := 'exports/'; -- Optional S3 folder
     ```
2. **Enable Server Output**:
   ```sql
   SET SERVEROUTPUT ON;
   ```
3. **Run the Anonymous Block**:
   - Copy and paste the entire block into SQL*Plus, SQL Developer, or another Oracle SQL client and execute:
     ```sql
     DECLARE
     ...
     END;
     /
     ```
   - Expected output (if successful):
     ```
     Starting job: SCHEMA_EXP_1_2509092151234
     Job opened with handle: 123
     Log file added: 1_schema_exp_2509092151234.log
     Dump file added: 1_schema_exp_2509092151234.dmp
     Schema filter applied for: 1
     Excluded indexes and statistics
     Set metadata only export
     Parallelism set to: 2
     Job started
     Job detached
     S3 upload task started for file 1_schema_exp_2509092151234.dmp, Task ID: task_12345
     Job SCHEMA_EXP_1_2509092151234 started for schema 1. Log: 1_schema_exp_2509092151234.log in DATA_PUMP_DIR. S3 upload initiated.
     ```

### Monitoring and Verification
1. **Data Pump Job**:
   ```sql
   SELECT job_name, state FROM dba_datapump_jobs WHERE job_name LIKE 'SCHEMA_EXP_%';
   ```
2. **S3 Upload Status**:
   ```sql
   -- rdsadmin_s3_tasks is a PL/SQL package, not a queryable table; read the
   -- task's log file in BDUMP to check status:
   SELECT text
   FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log')); -- Replace task_12345 with actual task_id
   ```
   - Check S3 bucket:
     ```bash
     aws s3 ls s3://my-rds-dumps-bucket/exports/
     ```
3. **Log File**:
   - Check `DATA_PUMP_DIR` for `1_schema_exp_2509092151234.log`.

### Troubleshooting
If errors occur (`PLS-00306`, `ORA-31626`, or others):
1. **PLS-00306**:
   - Verify the correct parameters (`p_prefix`, not `p_file_name`) are used.
   - Check `S3_INTEGRATION` option is enabled.
2. **ORA-31626**:
   - Try adding `PRAGMA AUTONOMOUS_TRANSACTION`:
     ```sql
     DECLARE
         PRAGMA AUTONOMOUS_TRANSACTION;
         ...
     BEGIN
         ...
         COMMIT;
     EXCEPTION
         ...
         COMMIT;
     END;
     /
     ```
3. **S3 Issues**:
   - Ensure IAM role has correct permissions and bucket is in the same region.
   - Verify file exists:
     ```sql
     SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')) WHERE filename LIKE '1_schema_exp_%';
     ```
4. **Provide Details**:
   - Share error stack, log file contents, task status, and Oracle RDS version.

### Notes
- **S3 Upload**:
  - Uses `p_prefix` to target specific files (e.g., `1_schema_exp_2509092151234.dmp` or pattern `1_schema_exp_2509092151234%.dmp` for multiple files).
  - Log file is not uploaded; add a separate `upload_to_s3` call if needed.
- **Oracle Version**: For Oracle 11g, change `SUBSTR(..., 1, 128)` to `SUBSTR(..., 1, 30)` for `v_job_name` if needed.
- **Future Enhancements**:
  - Add `DBMS_SCHEDULER` for multiple schemas.
  - Include logging table.
  - Add random suffix for job uniqueness.

If issues persist, please share the full error stack or log details, and I’ll refine further!

### Enhancements from Baseline
1. **S3 Upload Integration**:
   - Added `rdsadmin.rdsadmin_s3_tasks.upload_to_s3` to upload the dump file(s) to S3.
   - Supports both single (`p_multiple_files = 'N'`) and multiple files (`p_multiple_files = 'Y'`), handling `%U` patterns (e.g., `01.dmp`, `02.dmp`) based on parallelism.
   - Uses variables `v_bucket_name` (replace with your S3 bucket name) and `v_s3_prefix` (optional folder path in S3, e.g., `'exports/'`).
   - Stores the task ID from `upload_to_s3` in `v_task_id` and outputs it for tracking.
2. **Parameters**:
   - Declared `p_schema VARCHAR2(128) := '1'`, `p_parallel NUMBER := 2`, and `p_multiple_files VARCHAR2(1) := 'N'` for flexibility.
   - Validated `p_multiple_files` to ensure it’s `'Y'` or `'N'`.
3. **Variable Sizes**:
   - Increased `v_job_name` to `VARCHAR2(128)`, `v_dumpfile` and `v_logfile` to `VARCHAR2(200)` to prevent `ORA-06502`.
   - Added file name length validation.
4. **Naming**:
   - Changed `EXP_META_` to `SCHEMA_EXP_` for job name (e.g., `SCHEMA_EXP_1_2509092151234`).
   - Changed `_meta_` to `_schema_exp_` for dump/log files (e.g., `1_schema_exp_2509092151234.dmp`).
   - Used higher-precision timestamp (`YYMMDDHH24MISSFF3`) for uniqueness.
5. **Exception Handling**:
   - Removed `DBMS_DATAPUMP.SET_PARAMETER` and `LOG_MESSAGE` from the exception block to avoid `ORA-31626`.
   - Added `DBMS_OUTPUT.PUT_LINE` for error reporting.
6. **Debugging**:
   - Added `DBMS_OUTPUT.PUT_LINE` for each major step, including S3 upload, to trace execution.

### How to Run
1. **Set Up Environment**:
   - **S3 Configuration**:
     - Ensure S3 Integration is enabled in your RDS instance’s Option Group (see Prerequisites).
     - Attach an IAM role with `s3:PutObject`, `s3:ListBucket`, and `s3:GetObject` permissions to your RDS instance.
     - Create an S3 bucket in the same region as your RDS instance.
   - **Privileges**:
     ```sql
     GRANT DATAPUMP_EXP_FULL_DATABASE TO your_user;
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     GRANT EXECUTE ON rdsadmin.rdsadmin_s3_tasks TO your_user;
     ```
   - **Directory**:
     ```sql
     SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```
     - If missing, create (RDS `master` user):
       ```sql
       EXEC rdsadmin.rdsadmin_util.create_directory('DATA_PUMP_DIR', '/rdsdbdata/datapump');
       GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
       ```
   - **Schema**:
     ```sql
     SELECT username FROM dba_users WHERE username = '1';
     ```
2. **Customize Parameters**:
   - Edit the `DECLARE` section:
     ```sql
     p_schema VARCHAR2(128) := '1';        -- Change to '2', '3', etc.
     p_parallel NUMBER := 2;               -- Change to 1, 2, 3, etc.
     p_multiple_files VARCHAR2(1) := 'N';   -- Change to 'Y' for multiple files
     v_bucket_name VARCHAR2(128) := 'your-bucket-name'; -- Replace with your S3 bucket
     v_s3_prefix VARCHAR2(200) := 'exports/'; -- Optional S3 folder path
     ```
3. **Enable Server Output**:
   ```sql
   SET SERVEROUTPUT ON;
   ```
4. **Run the Anonymous Block**:
   - Copy and paste the entire block into SQL*Plus, SQL Developer, or another Oracle SQL client and execute:
     ```sql
     DECLARE
     ...
     END;
     /
     ```
   - Expected output (if successful):
     ```
     Starting job: SCHEMA_EXP_1_2509092151234
     Job opened with handle: 123
     Log file added: 1_schema_exp_2509092151234.log
     Dump file added: 1_schema_exp_2509092151234.dmp
     Schema filter applied for: 1
     Excluded indexes and statistics
     Set metadata only export
     Parallelism set to: 2
     Job started
     Job detached
     S3 upload task started for file 1_schema_exp_2509092151234.dmp, Task ID: task_12345
     Job SCHEMA_EXP_1_2509092151234 started for schema 1. Log: 1_schema_exp_2509092151234.log in DATA_PUMP_DIR. S3 upload initiated.
     ```

### Monitoring and Verification
1. **Check Data Pump Job**:
   ```sql
   SELECT job_name, state FROM dba_datapump_jobs WHERE job_name LIKE 'SCHEMA_EXP_%';
   ```
2. **Check S3 Upload Status**:
   - Query the task status:
     ```sql
     -- rdsadmin_s3_tasks is a PL/SQL package, not a queryable table; read the
     -- task's log file in BDUMP to check status:
     SELECT text
     FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log')); -- Replace task_12345 with actual task_id from output
     ```
   - Check the S3 bucket (AWS Console or CLI) for the uploaded file (e.g., `exports/1_schema_exp_2509092151234.dmp`).
   - CLI command to verify:
     ```bash
     aws s3 ls s3://your-bucket-name/exports/
     ```
3. **Check Log File**:
   - Review the log file in `DATA_PUMP_DIR` (e.g., `1_schema_exp_2509092151234.log`) for Data Pump errors.
   - Find the directory path:
     ```sql
     SELECT directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```

### Troubleshooting `ORA-31626` or S3 Issues
If `ORA-31626` or other errors occur:
1. **Debug Output**:
   - Check `DBMS_OUTPUT` for the last successful step and error message.
2. **Log File**:
   - Review the Data Pump log file in `DATA_PUMP_DIR` for details.
3. **S3 Upload Errors**:
   - Common issues:
     - **Missing S3 Integration**: Verify the `S3_INTEGRATION` option is enabled.
     - **IAM Role**: Ensure the RDS instance’s IAM role has `s3:PutObject` permissions.
     - **Bucket Region**: Confirm the bucket is in the same region as the RDS instance.
     - **File Existence**: Ensure the dump file exists in `DATA_PUMP_DIR` before upload.
   - Check task status for errors:
      ```sql
      -- rdsadmin_s3_tasks is a package, not a table; read the task log instead:
      SELECT text
      FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-task_12345.log'));
      ```
4. **Test with `PRAGMA AUTONOMOUS_TRANSACTION`**:
   - If `ORA-31626` persists, add `PRAGMA AUTONOMOUS_TRANSACTION`:
     ```sql
     DECLARE
         PRAGMA AUTONOMOUS_TRANSACTION;
         ...
     BEGIN
         ...
         COMMIT;
     EXCEPTION
         WHEN OTHERS THEN
             v_error_msg := 'Error in export_metadata_schemas: ' || SQLERRM;
             DBMS_OUTPUT.PUT_LINE(v_error_msg);
             COMMIT;
             RAISE_APPLICATION_ERROR(-20001, v_error_msg);
     END;
     /
     ```
5. **RDS Restrictions**:
   - Oracle RDS may limit some `DBMS_DATAPUMP` operations. Test in a non-RDS environment if possible.
6. **Provide Details**:
   - Share the error stack, log file contents, S3 task status, and Oracle version (e.g., 11g, 12c, 19c).

### Notes
- **S3 Upload**:
   - The block uploads only the dump file(s), not the log file. To upload the log file, add another `upload_to_s3` call for `v_logfile`.
   - For `p_multiple_files = 'Y'`, it assumes files follow the `%U` pattern (e.g., `01.dmp`, `02.dmp`) up to the parallelism degree.
- **Customization**:
   - Change `p_schema`, `p_parallel`, `p_multiple_files`, `v_bucket_name`, and `v_s3_prefix` in the `DECLARE` section.
   - Example: `p_schema := '2'; p_parallel := 3; p_multiple_files := 'Y'; v_bucket_name := 'my-bucket'; v_s3_prefix := 'backups/';`
- **Oracle Version**:
   - For Oracle 11g, change `SUBSTR(..., 1, 128)` to `SUBSTR(..., 1, 30)` for `v_job_name` if needed.
- **Future Enhancements**:
   - Add `DBMS_SCHEDULER` for multiple schemas.
   - Include a logging table for persistent debugging.
   - Add `PRAGMA AUTONOMOUS_TRANSACTION` if `ORA-31626` persists.
   - Add job name uniqueness with a random suffix.

If `ORA-31626` or S3 upload issues occur, please share the error details, log file, or S3 task status, and I’ll refine further!

### How to Run
1. **Set Up Environment**:
   - Ensure the user has the necessary privileges:
     ```sql
     GRANT DATAPUMP_EXP_FULL_DATABASE TO your_user;
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     ```
   - Verify `DATA_PUMP_DIR` exists:
     ```sql
     SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```
     - If missing, create as a DBA:
       ```sql
       CREATE DIRECTORY DATA_PUMP_DIR AS '/path/to/directory';
       GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
       ```
   - Confirm the schema (e.g., `'1'`) exists:
     ```sql
     SELECT username FROM dba_users WHERE username = '1';
     ```

2. **Enable Server Output**:
   - Run this to see confirmation or error messages:
     ```sql
     SET SERVEROUTPUT ON;
     ```

3. **Run the Anonymous Block**:
   - Copy and paste the entire block above into SQL*Plus, SQL Developer, or another Oracle SQL client.
   - Modify the parameter values in the `DECLARE` section if needed:
     ```sql
     p_schema VARCHAR2(128) := '1';        -- Change to your schema, e.g., '2' or '3'
     p_parallel NUMBER := 2;               -- Change to 1, 2, 3, etc.
     p_multiple_files VARCHAR2(1) := 'N';   -- Change to 'Y' for multiple files
     ```
   - Execute the block as-is:
     ```sql
     DECLARE
     ...
     END;
     /
     ```
   - Expected output (if successful):
     ```
     Job SCHEMA_EXP_1_2509092151 started for schema 1. Log: 1_schema_exp_2509092151.log in DATA_PUMP_DIR.
     ```

4. **Check Output Files**:
   - **Log File**: Check `DATA_PUMP_DIR` for the log file (e.g., `1_schema_exp_2509092151.log`) for job details.
   - **Dump File**: Look for the dump file (e.g., `1_schema_exp_2509092151.dmp` or `1_schema_exp_2509092151%U.dmp` if `p_multiple_files := 'Y'`).
   - Find the directory path:
     ```sql
     SELECT directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```

5. **Monitor Job Status**:
   - Check if the Data Pump job is running:
     ```sql
     SELECT job_name, state FROM dba_datapump_jobs WHERE job_name LIKE 'SCHEMA_EXP_%';
     ```

### Key Changes from Baseline
1. **Converted to Anonymous Block**:
   - Reverted from a stored procedure to an anonymous block, as requested.
   - Declared `p_schema`, `p_parallel`, and `p_multiple_files` as variables with example values (`'1'`, `2`, `'N'`).
2. **Updated Naming**:
   - Job name: Changed `EXP_META_` to `SCHEMA_EXP_` (e.g., `SCHEMA_EXP_1_2509092151`).
   - Dump file: Changed `_meta_` to `_schema_exp_` (e.g., `1_schema_exp_2509092151.dmp`).
   - Log file: Changed `_meta_export_` to `_schema_exp_` (e.g., `1_schema_exp_2509092151.log`).
3. **Increased Variable Sizes**:
   - `v_job_name`: `VARCHAR2(128)` to avoid `ORA-06502` and support Oracle 12c+ job name limits (truncate

### Key Changes from Baseline
1. **Added `PRAGMA AUTONOMOUS_TRANSACTION`**:
   - Included at the start of the procedure to ensure `DBMS_DATAPUMP` operations (e.g., `OPEN`, `START_JOB`, `DETACH`) run in an isolated transaction, addressing `ORA-31626` when called from another procedure or SQL context.
   - Added `COMMIT` after successful execution and in the exception block to ensure the autonomous transaction is finalized.
2. **Updated Naming Conventions**:
   - Job name: Changed `EXP_META_` to `SCHEMA_EXP_` (e.g., `SCHEMA_EXP_DEMO1_2509092151`).
   - Dump file: Changed `_meta_` to `_schema_exp_` (e.g., `DEMO1_schema_exp_2509092151.dmp`).
   - Log file: Changed `_meta_export_` to `_schema_exp_` (e.g., `DEMO1_schema_exp_2509092151.log`).
3. **Increased Variable Sizes**:
   - `v_job_name`: Increased to `VARCHAR2(128)` to avoid `ORA-06502` and support Oracle 12c+ job name limits (128 characters). Use `SUBSTR(..., 1, 30)` for Oracle 11g if needed.
   - `v_dumpfile` and `v_logfile`: Increased to `VARCHAR2(200)` for safety.
4. **Simplified Exception Handling**:
   - Removed `DBMS_DATAPUMP.SET_PARAMETER` and `LOG_MESSAGE` from the exception block to avoid `ORA-31626`.
   - Kept `DETACH` in the main block, as `PRAGMA AUTONOMOUS_TRANSACTION` should prevent handle issues.
   - Added `COMMIT` in the exception block to ensure the transaction is closed.
5. **Fixed Schema Filter**:
   - Corrected the `SCHEMA_EXPR` value to use single quoting: `q'[= ']' || UPPER(p_schema) || q'[']'` (the doubled-quote variant `q'[= ']' || '''' || ... || '''' || q'[']'` produces `= ''X''`, which is not a valid filter expression).
6. **Added Calling Procedure**:
   - Created `call_export_single_schema` to execute the procedure for a single schema (`DEMO1` by default).
   - Includes basic error handling and output for confirmation.

### How to Execute
1. **Run the Script**:
   - Execute the entire script to create both procedures (`export_metadata_schemas` and `call_export_single_schema`).
2. **Call the Single Schema Procedure**:
   ```sql
   EXEC call_export_single_schema;
   ```
   - This calls `export_metadata_schemas` for `DEMO1` with default parameters (`p_parallel => 2`, `p_multiple_files => FALSE`).
3. **Alternative Direct Call**:
   ```sql
   EXEC export_metadata_schemas(p_schema => 'DEMO1', p_parallel => 2, p_multiple_files => FALSE);
   ```
4. **Enable Output**:
   - Ensure server output is enabled to see confirmation or errors:
     ```sql
     SET SERVEROUTPUT ON;
     ```

### Prerequisites
1. **Privileges**:
   - Ensure the user has:
     ```sql
     GRANT DATAPUMP_EXP_FULL_DATABASE TO your_user;
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     ```
2. **Directory**:
   - Verify `DATA_PUMP_DIR` exists:
     ```sql
     SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```
   - If missing, create as a DBA:
     ```sql
     CREATE DIRECTORY DATA_PUMP_DIR AS '/path/to/directory';
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     ```
3. **Schema**:
   - Ensure the schema (e.g., `DEMO1`) exists:
     ```sql
     SELECT username FROM dba_users WHERE username = 'DEMO1';
     ```

### Key Changes
1. **Improved Timestamp Precision**:
   - Changed `v_timestamp` format from `'YYMMDDHH24MISS'` (12 characters) to `'YYMMDDHH24MISSFF3'` (19 characters, including milliseconds) to reduce the chance of duplicate job names when called rapidly from another procedure.
   - This ensures `v_job_name` is unique even for calls within the same second.
2. **Directory Validation**:
   - Added a check for the existence of `DATA_PUMP_DIR` using `dba_directories` to catch directory issues early.
   - Raises a custom error (`-20004`) if the directory is missing.
3. **Debugging Output**:
   - Added `DBMS_OUTPUT.PUT_LINE` calls after each major step (`OPEN`, `ADD_FILE`, `METADATA_FILTER`, `DATA_FILTER`, `SET_PARALLEL`, `START_JOB`, `DETACH`) to trace where the failure occurs.
   - Outputs the error message in the exception block for better visibility.
4. **Retained Simplicity**:
   - Kept the simplified exception block from the previous version, avoiding any `v_handle` operations (no `SET_PARAMETER` or `DETACH`) to prevent `ORA-31626`.
   - Maintained all functionality: parameterized `p_schema`, `p_parallel`, `p_multiple_files`, and naming conventions (`SCHEMA_EXPORT_`, `_schema_exp_`).
5. **Job Name Truncation**:
   - Kept `SUBSTR(..., 1, 30)` for `v_job_name` to ensure compatibility with Oracle 11g’s 30-character limit. For Oracle 12c+, you can test adjusting to `SUBSTR(..., 1, 128)` or removing it if the limit is 128 characters.

### How to Execute
From another procedure or SQL*Plus, call the procedure as:
```sql
EXEC export_metadata_schemas(p_schema => 'DEMO1');
```
With custom parameters:
```sql
EXEC export_metadata_schemas(p_schema => 'DEMO1', p_parallel => 4, p_multiple_files => TRUE);
```

### Example Calling Procedure
If calling from another procedure, ensure proper exception handling:
```sql
CREATE OR REPLACE PROCEDURE call_export_metadata AS
BEGIN
    export_metadata_schemas(p_schema => 'DEMO1', p_parallel => 4, p_multiple_files => TRUE);
    DBMS_OUTPUT.PUT_LINE('Export completed successfully.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error in call_export_metadata: ' || SQLERRM);
        RAISE;
END;
/
EXEC call_export_metadata;
```

### Troubleshooting Steps
1. **Check Debug Output**:
   - Enable server output in SQL*Plus or your client:
     ```sql
     SET SERVEROUTPUT ON;
     ```
   - Run the procedure and review the `DBMS_OUTPUT` messages to see which step fails (e.g., “Job opened with handle: ...” or “Job started”).
   - The error message in the exception block will indicate the root cause (e.g., `ORA-39002: invalid operation` for `OPEN` failures).
2. **Verify Privileges**:
   - Ensure the user has the necessary roles:
     ```sql
     SELECT granted_role FROM dba_role_privs WHERE grantee = USER;
     ```
   - Required roles: `EXP_FULL_DATABASE` or `DATAPUMP_EXP_FULL_DATABASE`.
   - Verify write access to `DATA_PUMP_DIR`:
     ```sql
     SELECT privilege FROM dba_tab_privs WHERE table_name = 'DATA_PUMP_DIR';
     ```
3. **Check Directory**:
   - Confirm `DATA_PUMP_DIR` exists and is accessible:
     ```sql
     SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
     ```
   - If missing, create it as a DBA:
     ```sql
     CREATE DIRECTORY DATA_PUMP_DIR AS '/path/to/directory';
     GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO your_user;
     ```
4. **Check for Job Conflicts**:
   - List running or stalled Data Pump jobs:
     ```sql
     SELECT job_name, state FROM dba_datapump_jobs;
     ```
   - If duplicate jobs exist, clean them up manually or ensure the timestamp precision (`FF3`) avoids conflicts.
5. **Log File**:
   - Check the log file (e.g., `DEMO1_schema_exp_2509092110123.log`) in `DATA_PUMP_DIR` for detailed error messages.

### Notes
- **Timestamp Precision**: The millisecond precision (`FF3`) in `v_timestamp` reduces job name conflicts but may still truncate significantly due to the 30-character limit on `v_job_name`. If using Oracle 12c+, test removing `SUBSTR(..., 1, 30)` or adjusting to `SUBSTR(..., 1, 128)`.
- **ORA-31626 Prevention**: By avoiding `v_handle` operations in the exception block, we minimize the risk of this error. The debug output will help pinpoint if the issue occurs at `START_JOB` or `DETACH`.
- **Calling Context**: If the calling procedure runs in a loop or triggers multiple calls, ensure it handles exceptions and doesn’t overwhelm the Data Pump job queue.
- **Further Enhancements**: If you need additional logging (e.g., to a custom table) or want to handle stalled jobs automatically, let me know.

If the error persists, please share:
- The exact call from the parent procedure.
- The debug output from `DBMS_OUTPUT`.
- The Oracle version (e.g., 11g, 12c, 19c).
- Any additional error details from the log file in `DATA_PUMP_DIR`.

This will help me pinpoint the exact issue and provide a more targeted fix!

### Key Changes
1. **Removed Handle-Related Operations in Exception Block**:
   - Eliminated the nested exception block containing `DBMS_DATAPUMP.SET_PARAMETER(v_handle, 'TRACE', 1)` and `DBMS_DATAPUMP.DETACH(v_handle)`.
   - Removed the `v_handle_initialized` flag, as it’s no longer needed.
   - The exception block now only constructs the error message and raises it with `RAISE_APPLICATION_ERROR`, avoiding any `v_handle` usage that could trigger `ORA-31626`.
2. **Retained Core Functionality**:
   - Kept the parameterized `p_schema`, `p_parallel`, and `p_multiple_files`.
   - Maintained the naming convention (`SCHEMA_EXPORT_`, `_schema_exp_`).
   - Preserved validations for schema name length (`<= 100`) and file name lengths (`<= 200`).
   - Left the main Data Pump operations (`OPEN`, `ADD_FILE`, `METADATA_FILTER`, `DATA_FILTER`, `SET_PARALLEL`, `START_JOB`, `DETACH`) unchanged.

### How to Execute
To execute the procedure:
```sql
EXEC export_metadata_schemas(p_schema => 'DEMO1');
```
With custom parallelism and multiple files:
```sql
EXEC export_metadata_schemas(p_schema => 'DEMO1', p_parallel => 4, p_multiple_files => TRUE);
```

### Notes
- **Error Handling**: If an error occurs (e.g., during `DBMS_DATAPUMP.OPEN` due to insufficient privileges or an invalid `DATA_PUMP_DIR`), the procedure will raise the error with `SQLERRM` without attempting to use `v_handle` in the exception block, preventing `ORA-31626`.
- **Implications of Removing Handle Operations**:
  - If the job fails before `DETACH` in the main block, it may remain in the `dba_datapump_jobs` view in a `NOT RUNNING` state. You can clean up such jobs manually:
    ```sql
    SELECT job_name, state FROM dba_datapump_jobs;
    -- To clean up: DBMS_DATAPUMP.DETACH(job_handle); or drop the job
    ```
  - Error details will still be written to the log file specified by `v_logfile` (e.g., `DEMO1_schema_exp_2509092110.log`) if the job reaches the `ADD_FILE` step.
- **Troubleshooting**:
  - **Privileges**: Ensure the user has `EXP_FULL_DATABASE` or `DATAPUMP_EXP_FULL_DATABASE` roles and write access to `DATA_PUMP_DIR`. Check:
    ```sql
    SELECT directory_name, directory_path FROM dba_directories WHERE directory_name = 'DATA_PUMP_DIR';
    ```
  - **Job Name**: The `SUBSTR(..., 1, 30)` ensures job name compatibility with Oracle 11g. For Oracle 12c+, you can test removing it or using `SUBSTR(..., 1, 128)` if the limit is 128 characters.
  - **Log File**: Check the log file in `DATA_PUMP_DIR` for detailed errors if the procedure fails.
- **Further Enhancements**: If you need additional logging (e.g., to a custom table) or want to reintroduce safe handle management, let me know.

If the error persists or you have specific requirements (e.g., logging errors differently or handling stalled jobs), please provide details, and I’ll refine the solution further!
### Key Changes
1. **Increased Variable Sizes**:
   - `v_job_name` increased to `VARCHAR2(128)` to support Oracle 12c+ job name limits (128 characters). Note: If using Oracle 11g or earlier, the limit is 30 characters, so we truncate with `SUBSTR`.
   - `v_dumpfile` and `v_logfile` increased to `VARCHAR2(200)` to accommodate longer schema names and file extensions.
2. **Job Name Truncation**:
   - Used `SUBSTR(..., 1, 30)` for `v_job_name` to ensure compatibility with Oracle versions that enforce a 30-character limit. If you’re using Oracle 12c or later and confirm the limit is 128, you can remove the `SUBSTR` or adjust to 128.
3. **Input Validation**:
   - Added a check for `p_schema` length to prevent excessively long schema names.
   - Added checks for `v_dumpfile` and `v_logfile` lengths to catch potential overflows before calling `DBMS_DATAPUMP`.
4. **Improved Error Handling**:
   - Wrapped the error logging in a nested block to handle cases where `v_handle` might be invalid during the exception, preventing secondary errors.
5. **Minor Output Tweak**:
   - Adjusted the `DBMS_OUTPUT.PUT_LINE` message for clarity.

### Notes
- **Oracle Version Consideration**: If you’re using Oracle 11g or earlier, the `DBMS_DATAPUMP` job name limit is 30 characters, so the `SUBSTR` truncation is necessary. For Oracle 12c or later, you can test without the `SUBSTR` to allow up to 128 characters.
- **Testing**: Test the script with a long `p_schema` value (e.g., a 50-character string) to ensure the error is resolved.
- **File Name Lengths**: Ensure the `DATA_PUMP_DIR` directory and Oracle’s file system support the generated file names. Some file systems have shorter limits than 200 characters.
- **Error Logging**: The `DBMS_DATAPUMP.SET_PARAMETER(v_handle, 'TRACE', 1)` call in the exception block might not work if `v_handle` is invalid. The nested block mitigates this.

If you encounter further issues or want to test specific scenarios (e.g., longer schema names or Oracle version specifics), let me know, and we can refine further!

==================

-- Create DBMS_SCHEDULER jobs for each schema
-- Create one DBMS_SCHEDULER job per schema so exports run in parallel.
-- Substitution variables: &1 = comma-separated schema list, &2 = S3 bucket,
-- &3 = Data Pump parallel degree, &4 = 'Y'/'N' for multiple dump files.
DECLARE
v_schema_names VARCHAR2(4000) := '&1';
v_bucket_name VARCHAR2(100) := '&2';
v_parallel_degree NUMBER := &3;
v_use_multiple_files VARCHAR2(1) := '&4';
v_schema_list DBMS_UTILITY.UNCL_ARRAY;
v_count NUMBER;
-- 128 chars: Oracle 12c+ identifiers can exceed the old 30-char limit;
-- VARCHAR2(30) here would raise ORA-06502 on long schema names.
v_schema_name VARCHAR2(128);
v_job_name VARCHAR2(128);
BEGIN
DBMS_UTILITY.COMMA_TO_TABLE(v_schema_names, v_count, v_schema_list);
FOR i IN 1..v_count LOOP
v_schema_name := TRIM(v_schema_list(i));
-- FF3 (millisecond) precision avoids duplicate job names when several
-- jobs are created within the same second; SUBSTR keeps the name inside
-- the scheduler's identifier limit.
v_job_name := SUBSTR('EXP_' || v_schema_name || '_' || TO_CHAR(SYSTIMESTAMP, 'HH24MISSFF3'), 1, 128);
DBMS_SCHEDULER.CREATE_JOB (
job_name => v_job_name,
job_type => 'PLSQL_BLOCK',
-- NOTE(review): job_action is built by string concatenation; schema and
-- bucket values containing quotes would break or alter the generated
-- block — confirm inputs come from a trusted source.
job_action => 'BEGIN export_schemas_to_dir(p_schema_names => ''' || v_schema_name || ''', p_bucket_name => ''' || v_bucket_name || ''', p_parallel_degree => ' || v_parallel_degree || ', p_use_multiple_files => ''' || v_use_multiple_files || '''); END;',
start_date => SYSTIMESTAMP,
repeat_interval => NULL, -- one-shot job
enabled => TRUE,
auto_drop => FALSE, -- keep job metadata for troubleshooting
comments => 'Scheduled job to export schema ' || v_schema_name || ' metadata to DATA_PUMP_DIR'
);
DBMS_OUTPUT.PUT_LINE('Scheduled job created for schema: ' || v_schema_name);
END LOOP;
END;
/

### Notes
- **Job Not Running**: The script now includes checks for `job_queue_processes` and schema existence, plus full scheduler logging to diagnose failures.
- **Toad**: Enable DBMS Output (View > DBMS Output) and use the data grid for results.
- **Privileges**: Verify `DATAPUMP_EXP_FULL_DATABASE` and `SCHEDULER_ADMIN` roles.
- **Large Schemas**: Use `&4=Y` (multiple files) and a higher `&3` (parallel degree, e.g., 8) for SCHEMA4, SCHEMA7, SCHEMA9.
- **Debugging**: `auto_drop=FALSE` and `logging_level=FULL` help troubleshoot.

If the job still doesn’t run after diagnostics, please share the output of the monitoring query or job logs, and I’ll refine further!

This script should now compile and run in Toad without errors, meeting all requirements. Let me know if you encounter other issues or need further adjustments!

1. Reading Contents of Log Files in DATA_PUMP_DIR

Log files in DATA_PUMP_DIR (e.g., hr_export.log, finance_export.log) are generated by the Data Pump export job (as specified in the script with DBMS_DATAPUMP.ADD_FILE for filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE). These files contain detailed information about the export process (e.g., start/completion times, errors, objects exported). You can read these files using the rdsadmin.rds_file_util.read_text_file procedure, which is similar to how S3 task logs are read in the BDUMP directory (as shown in the original document).

Script to Read Contents of a Specific Log File in DATA_PUMP_DIR:

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- Read contents of a specific log file
-- Print every line of one Data Pump log file from DATA_PUMP_DIR.
DECLARE
    v_log_file VARCHAR2(100) := 'hr_export.log'; -- Replace with your log file name (e.g., 'finance_export.log')
BEGIN
    FOR rec IN (
        SELECT text
        FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', v_log_file))
    ) LOOP
        DBMS_OUTPUT.PUT_LINE(rec.text);
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error reading ' || v_log_file || ': ' || SQLERRM);
END;
/
  • Usage:
    • Replace hr_export.log with the name of the log file (from SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))).
    • Run in Toad’s SQL Editor (Ctrl+Enter or F5).
    • Output in the DBMS Output tab shows the log file contents (e.g., Data Pump job details, errors like "materialized views not exported").
    • Example output:
      text
      Starting "HR"."HR_EXPORT_20250909131700":  
      Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
      Completed 25 TABLE objects

Script to List and Read All Log Files in DATA_PUMP_DIR: This lists all log files matching your export pattern and reads their contents.

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- List and read all log files in DATA_PUMP_DIR
BEGIN
    -- Pass 1: summary listing of export logs, newest first.
    DBMS_OUTPUT.PUT_LINE('Log Files in DATA_PUMP_DIR:');
    FOR file_rec IN (SELECT filename, mtime
                     FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                     WHERE filename LIKE '%_export%.log'
                     ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('File: ' || file_rec.filename || ' | Modified: ' || file_rec.mtime);
    END LOOP;

    -- Pass 2: dump each log's contents. ORDER BY mtime DESC keeps the read
    -- order deterministic and consistent with the listing above (without it
    -- the files were read in arbitrary order).
    DBMS_OUTPUT.PUT_LINE('--- Log Contents ---');
    FOR file_rec IN (SELECT filename
                     FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                     WHERE filename LIKE '%_export%.log'
                     ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Reading: ' || file_rec.filename);
        FOR log_line IN (SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', file_rec.filename))) LOOP
            DBMS_OUTPUT.PUT_LINE(log_line.text);
        END LOOP;
        DBMS_OUTPUT.PUT_LINE('--- End of ' || file_rec.filename || ' ---');
    END LOOP;
EXCEPTION
    -- Report the first failure (e.g., missing EXECUTE on rds_file_util)
    -- instead of raising; later sections are skipped once an error occurs.
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
  • Usage:
    • Run in SQL Editor (F5).
    • Lists log files (e.g., hr_export.log, finance_export.log) and prints their contents.
    • Adjust the LIKE '%_export%.log' pattern if your log files have a different naming convention.

Notes:

  • Requires EXECUTE on rdsadmin.rds_file_util. If you get a privilege error, grant it as the master user:
    sql
    GRANT EXECUTE ON rdsadmin.rds_file_util TO YOUR_USER;
  • Log files may be large; consider limiting output or filtering specific lines (e.g., for errors) if needed.

2. Fetching File Sizes in GBs in DATA_PUMP_DIR

The rdsadmin.rds_file_util.listdir function provides file sizes in bytes. You can convert these to GBs (1 GB = 1024^3 bytes) for easier monitoring, especially for large dump files created with multiple files (%u pattern).

Script to List File Sizes in GBs:

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- List all files in DATA_PUMP_DIR with sizes in GBs
-- List every file in DATA_PUMP_DIR with its size in GB, plus a grand total.
DECLARE
    -- Local variable replaces the original's undeclared host bind variable
    -- (:total_size_gb), which fails in a plain anonymous block unless a
    -- SQL*Plus VARIABLE command was issued beforehand.
    v_total_size_gb NUMBER;
BEGIN
    DBMS_OUTPUT.PUT_LINE('Files in DATA_PUMP_DIR:');
    -- Per-file listing, newest first; bytes converted to GB (1024^3).
    FOR file_rec IN (SELECT filename, type, bytes, ROUND(bytes / 1024 / 1024 / 1024, 4) AS size_gb, mtime
                     FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                     ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('File: ' || RPAD(file_rec.filename, 30) || ' | Type: ' || RPAD(file_rec.type, 10) ||
                             ' | Size (GB): ' || TO_CHAR(file_rec.size_gb, 'FM9990.0000') || ' | Modified: ' || file_rec.mtime);
    END LOOP;

    -- Total size in GB. COALESCE makes an empty directory print 0 instead
    -- of blank (SUM over zero rows yields NULL).
    SELECT COALESCE(ROUND(SUM(bytes) / 1024 / 1024 / 1024, 4), 0) INTO v_total_size_gb
    FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'));
    DBMS_OUTPUT.PUT_LINE('Total Size (GB): ' || TO_CHAR(v_total_size_gb, 'FM9990.0000'));
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
  • Usage:
    • Run in SQL Editor (F5).
    • Output shows each file’s size in GBs and the total size (e.g., hr_export_u01.dmp | DUMP | Size (GB): 1.2345).
    • Example output:
      text
      File: hr_export_u01.dmp        | Type: DUMP       | Size (GB): 1.2345 | Modified: 09-SEP-25 13:17:00
      File: hr_export.log            | Type: LOG        | Size (GB): 0.0001 | Modified: 09-SEP-25 13:17:00
      Total Size (GB): 1.2346

Script to List Sizes for Specific Export Files: Filters for files matching your export pattern (e.g., hr_export*).

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- List sizes for specific export files
-- Report each export-related file in DATA_PUMP_DIR with its size in GB.
DECLARE
    v_line VARCHAR2(4000);
BEGIN
    DBMS_OUTPUT.PUT_LINE('Export Files in DATA_PUMP_DIR:');
    FOR f IN (
        SELECT filename,
               type,
               bytes,
               ROUND(bytes / 1024 / 1024 / 1024, 4) AS size_gb,
               mtime
        FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
        WHERE filename LIKE '%_export%'  -- Adjust pattern (e.g., '%_export_u%.dmp' for multiple files)
        ORDER BY mtime DESC
    ) LOOP
        v_line := 'File: ' || RPAD(f.filename, 30) ||
                  ' | Type: ' || RPAD(f.type, 10) ||
                  ' | Size (GB): ' || TO_CHAR(f.size_gb, 'FM9990.0000') ||
                  ' | Modified: ' || f.mtime;
        DBMS_OUTPUT.PUT_LINE(v_line);
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
  • Usage:
    • Adjust the LIKE clause (e.g., '%_export_u%.dmp' for multiple dump files, '%_export%.log' for logs).
    • Run in SQL Editor (F5).

3. Comprehensive Monitoring Scripts

These combine monitoring for scheduler jobs, Data Pump jobs, S3 upload tasks, and DATA_PUMP_DIR files (including log contents and sizes in GBs).

Script to Monitor All Aspects:

sql
-- Set formatting
SET LINESIZE 180;
SET PAGESIZE 100;
SET SERVEROUTPUT ON;

BEGIN
    -- Comprehensive monitoring pass: scheduler jobs, Data Pump jobs, files
    -- in DATA_PUMP_DIR (sizes in GB), log file contents, and recent RDS S3
    -- task logs in BDUMP. Run with SERVEROUTPUT enabled; requires SELECT on
    -- the DBA views and EXECUTE on rdsadmin.rds_file_util.

    -- Scheduler Jobs
    DBMS_OUTPUT.PUT_LINE('--- Scheduler Jobs ---');
    FOR rec IN (SELECT job_name, state, last_start_date, last_run_duration
                FROM dba_scheduler_jobs
                WHERE job_name LIKE 'EXPORT_%_JOB_%'
                ORDER BY last_start_date DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Job: ' || RPAD(rec.job_name, 30) || ' | State: ' || RPAD(rec.state, 10) || 
                             ' | Last Start: ' || rec.last_start_date || ' | Duration: ' || rec.last_run_duration);
    END LOOP;

    -- Scheduler Job Run Details (10 most recent runs only)
    DBMS_OUTPUT.PUT_LINE('--- Scheduler Job Run Details ---');
    FOR rec IN (SELECT job_name, status, error#, additional_info
                FROM dba_scheduler_job_run_details
                WHERE job_name LIKE 'EXPORT_%_JOB_%'
                ORDER BY log_date DESC FETCH FIRST 10 ROWS ONLY) LOOP
        DBMS_OUTPUT.PUT_LINE('Job: ' || RPAD(rec.job_name, 30) || ' | Status: ' || RPAD(rec.status, 10) || 
                             ' | Error#: ' || rec.error# || ' | Info: ' || SUBSTR(rec.additional_info, 1, 100));
    END LOOP;

    -- Data Pump Jobs (all states; degree = configured parallelism)
    DBMS_OUTPUT.PUT_LINE('--- Data Pump Jobs ---');
    FOR rec IN (SELECT owner_name, job_name, state, operation, job_mode, degree
                FROM dba_datapump_jobs
                ORDER BY job_name DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Job: ' || RPAD(rec.job_name, 30) || ' | Owner: ' || RPAD(rec.owner_name, 15) || 
                             ' | State: ' || rec.state || ' | Parallelism: ' || rec.degree);
    END LOOP;

    -- Files in DATA_PUMP_DIR with Sizes in GBs (export-related files only)
    DBMS_OUTPUT.PUT_LINE('--- Files in DATA_PUMP_DIR ---');
    FOR file_rec IN (SELECT filename, type, ROUND(bytes / 1024 / 1024 / 1024, 4) AS size_gb, mtime
                     FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                     WHERE filename LIKE '%_export%'
                     ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('File: ' || RPAD(file_rec.filename, 30) || ' | Type: ' || RPAD(file_rec.type, 10) || 
                             ' | Size (GB): ' || TO_CHAR(file_rec.size_gb, 'FM9990.0000') || ' | Modified: ' || file_rec.mtime);
    END LOOP;

    -- Total Size in GBs (nested block keeps the variable scoped to this step)
    DECLARE
        v_total_size_gb NUMBER;
    BEGIN
        -- NOTE(review): SUM over an empty directory yields NULL, so the
        -- printed total would be blank in that case — confirm acceptable.
        SELECT ROUND(SUM(bytes) / 1024 / 1024 / 1024, 4) INTO v_total_size_gb
        FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'));
        DBMS_OUTPUT.PUT_LINE('Total Size (GB): ' || TO_CHAR(v_total_size_gb, 'FM9990.0000'));
    END;

    -- Read Log Files in DATA_PUMP_DIR (full contents of each export log)
    DBMS_OUTPUT.PUT_LINE('--- Log File Contents ---');
    FOR file_rec IN (SELECT filename
                     FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                     WHERE filename LIKE '%_export%.log'
                     ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Reading: ' || file_rec.filename);
        FOR log_line IN (SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', file_rec.filename))) LOOP
            DBMS_OUTPUT.PUT_LINE(log_line.text);
        END LOOP;
        DBMS_OUTPUT.PUT_LINE('--- End of ' || file_rec.filename || ' ---');
    END LOOP;

    -- Recent S3 Task Logs in BDUMP (filenames only; read them separately)
    DBMS_OUTPUT.PUT_LINE('--- Recent S3 Task Logs in BDUMP ---');
    FOR rec IN (SELECT filename, mtime
                FROM TABLE(rdsadmin.rds_file_util.listdir('BDUMP'))
                WHERE filename LIKE 'dbtask-%.log'
                ORDER BY mtime DESC FETCH FIRST 10 ROWS ONLY) LOOP
        DBMS_OUTPUT.PUT_LINE('Task Log: ' || rec.filename || ' | Modified: ' || rec.mtime);
    END LOOP;
EXCEPTION
    -- Any error in the sections above jumps here: the report stops at the
    -- first failure and prints the error; later sections are not run.
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
  • Usage:
    • Run in SQL Editor (F5).
    • Outputs scheduler job status, Data Pump job status, file sizes in GBs, log file contents, and recent S3 task logs.

4. S3 Upload Task Monitoring (After Manual Upload)

Since you’re using Option 1 and manually uploading to S3 with rdsadmin.rdsadmin_s3_tasks.upload_to_s3, here’s a script to initiate and monitor uploads in parallel.

Script to Initiate and Monitor S3 Uploads:

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- Initiate S3 uploads for all schemas and monitor
DECLARE
    -- Submits one rdsadmin S3 upload task per schema, records the task IDs,
    -- then immediately reads each task's BDUMP log (logs may be incomplete
    -- while uploads are still running — re-read later for final status).
    v_schema_names VARCHAR2(4000) := 'HR,FINANCE'; -- Same schemas as export
    v_bucket_name VARCHAR2(100) := 'mys3bucket';
    v_use_multiple_files VARCHAR2(1) := 'Y'; -- Match export setting
    v_schema_list DBMS_UTILITY.UNCL_ARRAY;
    v_count NUMBER;
    -- NOTE(review): VARCHAR2(30) overflows for Oracle 12c+ long schema
    -- names (ORA-06502) — confirm schema-name lengths or widen to 128.
    v_schema_name VARCHAR2(30);
    v_prefix VARCHAR2(100);
    v_task_id VARCHAR2(100);
    -- Keyed by schema position so the monitoring loop can match task to schema.
    TYPE t_task_ids IS TABLE OF VARCHAR2(100) INDEX BY PLS_INTEGER;
    v_task_ids t_task_ids;
    v_status VARCHAR2(4000); -- NOTE(review): declared but never used here
BEGIN
    -- Submit uploads
    DBMS_UTILITY.COMMA_TO_TABLE(v_schema_names, v_count, v_schema_list);
    FOR i IN 1..v_count LOOP
        v_schema_name := TRIM(v_schema_list(i));
        -- Prefix must match the dump-file naming used at export time:
        -- '<schema>_export_u' for multi-file (%u) exports, '<schema>_export' otherwise.
        IF v_use_multiple_files = 'Y' THEN
            v_prefix := LOWER(v_schema_name) || '_export_u';
        ELSE
            v_prefix := LOWER(v_schema_name) || '_export';
        END IF;
        -- upload_to_s3 returns immediately; the upload runs in the background.
        v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
            p_bucket_name => v_bucket_name,
            p_prefix => v_prefix,
            p_s3_prefix => '',
            p_directory_name => 'DATA_PUMP_DIR'
        );
        v_task_ids(i) := v_task_id;
        DBMS_OUTPUT.PUT_LINE('S3 Upload Task ID for ' || v_schema_name || ': ' || v_task_id);
    END LOOP;

    -- Monitor upload tasks (optional polling)
    FOR i IN 1..v_count LOOP
        v_schema_name := TRIM(v_schema_list(i));
        DBMS_OUTPUT.PUT_LINE('Checking status for Task ID: ' || v_task_ids(i));
        -- Task log lives in BDUMP as dbtask-<task_id>.log; it may not exist
        -- yet if the task has not started, hence the per-task handler.
        BEGIN
            FOR log_line IN (SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-' || v_task_ids(i) || '.log'))) LOOP
                DBMS_OUTPUT.PUT_LINE(log_line.text);
            END LOOP;
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Error reading task log for ' || v_task_ids(i) || ': ' || SQLERRM);
        END;
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
  • Usage:
    • Run after export jobs complete.
    • Submits uploads in parallel (each upload_to_s3 call returns immediately).
    • Outputs task_id for each schema and reads task logs immediately (logs may not be complete if uploads are still running).
    • Re-run the log-reading part later for updated status:
      sql
      SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-1663788182757-49.log'));

Execution Workflow for Option 1

  1. Run Export Jobs:
    • Load schedule_export_multiple_schemas_no_s3.sql.
    • Hardcode parameters:
      sql
      v_schema_names VARCHAR2(4000) := 'HR,FINANCE';
      v_bucket_name VARCHAR2(100) := 'mys3bucket';
      v_parallel_degree NUMBER := 4;
      v_use_multiple_files VARCHAR2(1) := 'Y';
    • Execute with F5.
  2. Monitor Exports:
    • Use the comprehensive monitoring script above.
  3. Upload to S3:
    • Run the S3 upload script to submit uploads in parallel.
  4. Cleanup:
    • After verifying S3 uploads:
      sql
      DECLARE
          CURSOR c_files IS
              SELECT filename
              FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
              WHERE filename LIKE '%_export%';
      BEGIN
          FOR rec IN c_files LOOP
              UTL_FILE.FREMOVE('DATA_PUMP_DIR', rec.filename);
              DBMS_OUTPUT.PUT_LINE('Removed: ' || rec.filename);
          END LOOP;
      END;
      /

Troubleshooting

  • Log File Access:
    • If rdsadmin.rds_file_util.read_text_file fails, grant:
      sql
      GRANT EXECUTE ON rdsadmin.rds_file_util TO YOUR_USER;
  • Large Log Files:
    • Filter specific lines (e.g., errors) by modifying the read script:
      sql
      WHERE text LIKE '%ERROR%';
  • S3 Upload Failures:
    • Check IAM role and S3_INTEGRATION:
      sql
      SELECT * FROM v$option WHERE parameter = 'S3_INTEGRATION';

If you need automation (e.g., polling S3 tasks until completion) or additional monitoring features, let me know!

scheduler:

Monitoring Scripts for Oracle RDS Data Pump Export Job, S3 Upload, and Related Operations

Below, I'll provide SQL scripts for monitoring all aspects of the process based on the original document and our discussion. These include:

  • Fetching and monitoring S3 upload task IDs (including how to fetch if not captured initially).
  • Data Pump job monitoring.
  • File monitoring in DATA_PUMP_DIR (list files, sizes, and sizes of each file created).
  • Scheduler job monitoring (state, details, logs).
  • Cleanup scripts for DATA_PUMP_DIR.
  • Additional comprehensive queries for overall job monitoring (e.g., combining scheduler, Data Pump, and files).

All scripts are designed to be run in Toad for Oracle (or SQL*Plus) as the master user or a privileged user. Enable DBMS_OUTPUT in Toad (View > DBMS Output, click the green Play button) for output. Use SET LINESIZE 180 and SET PAGESIZE 100 for better formatting.

1. S3 Upload Task ID Monitoring

The rdsadmin.rdsadmin_s3_tasks.upload_to_s3 procedure returns a task_id immediately, and the upload runs in the background. If you didn't capture the task_id during execution, there's no direct system view to list all historical task IDs. However, you can fetch recent task logs from the BDUMP directory (where RDS stores task logs as dbtask-task_id.log), list recent files, and read them to identify upload tasks.

Script to Fetch Recent S3 Upload Task IDs (if not captured): This lists recent dbtask-*.log files in BDUMP (sorted by mtime), extracts potential task IDs from filenames, and reads the log content to confirm if it's an S3 upload task.

sql
-- Set formatting
SET LINESIZE 180;
SET PAGESIZE 100;

-- List recent task logs in BDUMP (sorted by most recent)
SELECT filename, type, bytes, mtime
FROM TABLE(rdsadmin.rds_file_util.listdir('BDUMP'))
WHERE filename LIKE 'dbtask-%.log'
ORDER BY mtime DESC;

-- To fetch and read a specific task log (replace 'task_id' with one from above, e.g., from filename 'dbtask-1663788182757-49.log')
-- Print every line of the BDUMP task log for the given RDS task ID.
DECLARE
    v_task_id  VARCHAR2(100) := '1663788182757-49'; -- Replace with actual task ID from filename
    v_log_name VARCHAR2(200);
BEGIN
    v_log_name := 'dbtask-' || v_task_id || '.log';
    FOR rec IN (
        SELECT text
        FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', v_log_name))
    ) LOOP
        DBMS_OUTPUT.PUT_LINE(rec.text);
    END LOOP;
END;
/
  • Usage: Run the first query to list recent logs (e.g., look for files created around the time of your upload). Extract the task ID from the filename (e.g., 1663788182757-49 from dbtask-1663788182757-49.log). Then run the second block to read the log and confirm it's an S3 upload (look for lines like "Upload to S3 started" or "Completed").
  • Effective Way if Not Captured: Always capture task_id in future runs by assigning it to a variable and outputting it (as in the upload script). If lost, use the above to scan recent logs (BDUMP retains logs for a period, but old ones may be purged).

Script to Monitor S3 Upload Task Status (if task_id known):

sql
-- Replace with your task_id
-- Read the RDS task log (dbtask-<task_id>.log) from BDUMP for one task.
DECLARE
    v_task_id VARCHAR2(100) := '1663788182757-49'; -- replace with your task_id
BEGIN
    FOR log_line IN (SELECT text FROM TABLE(rdsadmin.rds_file_util.read_text_file('BDUMP', 'dbtask-' || v_task_id || '.log'))) LOOP
        DBMS_OUTPUT.PUT_LINE(log_line.text);
    END LOOP;
EXCEPTION
    -- The log may not exist yet (task still queued) or may have been
    -- purged; report instead of raising, matching the other read blocks
    -- in this document.
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error reading task log dbtask-' || v_task_id || '.log: ' || SQLERRM);
END;
/
  • Output: Shows the full log, including status (e.g., "Starting upload", "Completed successfully", or errors).

2. Data Pump Job Monitoring

Monitor running or completed Data Pump jobs, including state and owner.

Script to Monitor Data Pump Jobs:

sql
-- Set formatting
SET LINESIZE 180;

-- Check executing Data Pump jobs
SELECT owner_name, job_name, state, operation, job_mode, degree
FROM dba_datapump_jobs
WHERE state = 'EXECUTING'
ORDER BY job_name;

-- Check all Data Pump jobs (including completed or failed)
SELECT owner_name, job_name, state, operation, job_mode, degree
FROM dba_datapump_jobs
ORDER BY job_name DESC;

-- Detailed view if v$datapump_job is accessible (may require additional privileges)
SELECT * FROM v$datapump_job;
  • Usage: Run periodically to see if jobs are EXECUTING, COMPLETED, or NOT RUNNING. The `degree` column shows parallelism.

3. File Size Monitoring in DATA_PUMP_DIR

Monitor files in DATA_PUMP_DIR, including sizes (in bytes) and total size.

Script to List Files and Sizes in DATA_PUMP_DIR:

sql
-- Set formatting
SET LINESIZE 180;

-- List all files in DATA_PUMP_DIR with sizes (bytes), type, and mtime
-- rds_file_util.listdir returns one row per directory entry; 'type'
-- distinguishes files from subdirectories.
SELECT filename, type, bytes AS size_bytes, ROUND(bytes / 1024 / 1024, 2) AS size_mb, mtime
FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
ORDER BY mtime DESC;

-- Total size of all files in DATA_PUMP_DIR
-- (useful for watching local storage pressure while dump files accumulate)
SELECT SUM(bytes) AS total_size_bytes, ROUND(SUM(bytes) / 1024 / 1024, 2) AS total_size_mb
FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'));
  • Usage: Run to see individual file sizes (size_mb) and total directory size. Filter for specific files, e.g., WHERE filename LIKE 'hr_export%'.

Script to Monitor Size of Each File Created: This filters for files matching your export prefix (e.g., hr_export*).

sql
-- Replace 'prefix' with your file prefix (e.g., 'hr_export')
-- NOTE(review): '_' is a single-character LIKE wildcard, so '%_export%'
-- matches more than a literal "_export"; use ESCAPE ('%\_export%' ESCAPE '\')
-- if file names could collide.
SELECT filename, type, bytes AS size_bytes, ROUND(bytes / 1024 / 1024, 2) AS size_mb, mtime
FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
WHERE filename LIKE '%_export%'  -- Adjust for your dump file pattern (e.g., '%export_u%')
ORDER BY mtime DESC;
  • Usage: Adjust the LIKE clause based on your dump file names (e.g., '%_export.dmp' for single files or '%_export_u%.dmp' for multiple).

4. Scheduler Job Monitoring

Monitor the DBMS_SCHEDULER jobs, including state, run details, and logs.

Script to Monitor Scheduler Jobs:

sql
-- Set formatting
SET LINESIZE 180;

-- Check state of scheduler jobs
-- (job names are EXPORT_<schema>_JOB_<timestamp>, created by the export script)
SELECT job_name, state, enabled, run_count, last_start_date, last_run_duration, next_run_date
FROM dba_scheduler_jobs
WHERE job_name LIKE 'EXPORT_%_JOB_%'
ORDER BY last_start_date DESC;

-- Detailed run history and logs for scheduler jobs
-- additional_info carries the error text for FAILED runs.
SELECT job_name, status, error#, additional_info
FROM dba_scheduler_job_run_details
WHERE job_name LIKE 'EXPORT_%_JOB_%'
ORDER BY log_date DESC;
  • Usage: Run to see if jobs are RUNNING, SUCCEEDED, or FAILED. The additional_info column shows any errors or output.

5. Cleanup Scripts

Clean up DATA_PUMP_DIR after verifying S3 uploads.

Script for Cleanup:

sql
-- List files before cleanup
-- (Confirm every dump/log has been uploaded to S3 before removing anything.)
SELECT * FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')) ORDER BY mtime;

-- Remove a specific file (repeat for each file)
-- UTL_FILE.FREMOVE raises an exception (ORA-29283) if the file is missing.
EXEC UTL_FILE.FREMOVE('DATA_PUMP_DIR', 'hr_export.dmp');

-- Bulk cleanup (remove all export files; run carefully!)
-- Each removal runs in its own sub-block so one failure (e.g. a file removed
-- by another session) does not abort the whole loop. The '_' in the LIKE
-- pattern is escaped: unescaped it is a single-character wildcard and would
-- over-match — dangerous for a deletion filter.
DECLARE
    CURSOR c_files IS
        SELECT filename
        FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
        WHERE filename LIKE '%\_export%' ESCAPE '\';
BEGIN
    FOR rec IN c_files LOOP
        BEGIN
            UTL_FILE.FREMOVE('DATA_PUMP_DIR', rec.filename);
            DBMS_OUTPUT.PUT_LINE('Removed: ' || rec.filename);
        EXCEPTION
            WHEN OTHERS THEN
                -- Report and continue with the remaining files.
                DBMS_OUTPUT.PUT_LINE('Failed to remove ' || rec.filename || ': ' || SQLERRM);
        END;
    END LOOP;
END;
/
  • Usage: Run the list query first to confirm files. Use FREMOVE for individual files or the bulk block for all matching files.

6. Comprehensive Job Monitoring Script (All in One)

This combines all monitoring into a single script that outputs key details.

Script for Overall Monitoring:

sql
-- Set formatting
SET LINESIZE 180;
SET SERVEROUTPUT ON;

-- One-shot snapshot of the whole export pipeline: scheduler jobs, Data Pump
-- jobs, files in DATA_PUMP_DIR, and the ten most recent S3 task logs.
BEGIN
    -- Scheduler jobs created by the export scripts (EXPORT_<schema>_JOB_<ts>)
    DBMS_OUTPUT.PUT_LINE('--- Scheduler Jobs ---');
    FOR rec IN (SELECT job_name, state, last_start_date, last_run_duration
                FROM dba_scheduler_jobs
                WHERE job_name LIKE 'EXPORT_%_JOB_%'
                ORDER BY last_start_date DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Job: ' || rec.job_name || ' | State: ' || rec.state || ' | Last Start: ' || rec.last_start_date || ' | Duration: ' || rec.last_run_duration);
    END LOOP;

    -- All Data Pump jobs still known to the dictionary
    DBMS_OUTPUT.PUT_LINE('--- Data Pump Jobs ---');
    FOR rec IN (SELECT owner_name, job_name, state
                FROM dba_datapump_jobs
                ORDER BY job_name DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('Job: ' || rec.job_name || ' | Owner: ' || rec.owner_name || ' | State: ' || rec.state);
    END LOOP;

    -- Current contents of DATA_PUMP_DIR with per-file size
    DBMS_OUTPUT.PUT_LINE('--- Files in DATA_PUMP_DIR ---');
    FOR rec IN (SELECT filename, type, ROUND(bytes / 1024 / 1024, 2) AS size_mb, mtime
                FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR'))
                ORDER BY mtime DESC) LOOP
        DBMS_OUTPUT.PUT_LINE('File: ' || rec.filename || ' | Type: ' || rec.type || ' | Size (MB): ' || rec.size_mb || ' | MTime: ' || rec.mtime);
    END LOOP;

    -- Latest RDS background-task logs (S3 upload/download tasks) in BDUMP
    DBMS_OUTPUT.PUT_LINE('--- Recent S3 Task Logs in BDUMP ---');
    FOR rec IN (SELECT filename, mtime
                FROM TABLE(rdsadmin.rds_file_util.listdir('BDUMP'))
                WHERE filename LIKE 'dbtask-%.log'
                ORDER BY mtime DESC FETCH FIRST 10 ROWS ONLY) LOOP
        DBMS_OUTPUT.PUT_LINE('Task Log: ' || rec.filename || ' | MTime: ' || rec.mtime);
    END LOOP;
END;
/
  • Usage: Run this script (F5) for a snapshot of all monitoring aspects. It outputs scheduler state, Data Pump state, file sizes, and recent task logs.

If you need modifications (e.g., alerts or automation), let me know!

-- Create a stored procedure for Data Pump export only.
-- Runs one DBMS_DATAPUMP SCHEMA-mode export per schema in the comma-separated
-- p_schema_names list, writing dump and log files to DATA_PUMP_DIR and
-- waiting for each job to finish before starting the next.
-- p_bucket_name is accepted for interface compatibility but is not used here:
-- the S3 upload is performed by a separate script.
CREATE OR REPLACE PROCEDURE export_schemas_to_dir (
    p_schema_names IN VARCHAR2,       -- comma-separated schema names
    p_bucket_name IN VARCHAR2,        -- unused here (kept for compatibility)
    p_parallel_degree IN NUMBER,      -- Data Pump parallelism per job
    p_use_multiple_files IN VARCHAR2  -- 'Y' => multi-file dump set via %U
) AUTHID CURRENT_USER AS
    hdl NUMBER;                       -- Data Pump job handle
    job_state VARCHAR2(30);           -- final state from WAIT_FOR_JOB
    dump_file VARCHAR2(100);
    dump_file_prefix VARCHAR2(100);
    log_file VARCHAR2(100);
    v_schema_name VARCHAR2(30);
    v_schema_list DBMS_UTILITY.UNCL_ARRAY;
    v_count NUMBER;
BEGIN
    DBMS_UTILITY.COMMA_TO_TABLE(p_schema_names, v_count, v_schema_list);
    FOR i IN 1..v_count LOOP
        v_schema_name := TRIM(v_schema_list(i));
        hdl := NULL;  -- a failure before OPEN must not detach a stale handle
        IF p_use_multiple_files = 'Y' THEN
            -- %U is the Data Pump substitution variable for a two-digit file
            -- sequence (01, 02, ...); printf-style patterns such as %02.0f
            -- are not expanded by DBMS_DATAPUMP.
            dump_file := LOWER(v_schema_name) || '_export_u%U.dmp';
            dump_file_prefix := LOWER(v_schema_name) || '_export_u';
        ELSE
            dump_file := LOWER(v_schema_name) || '_export.dmp';
            dump_file_prefix := LOWER(v_schema_name) || '_export';
        END IF;
        log_file := LOWER(v_schema_name) || '_export.log';
        hdl := DBMS_DATAPUMP.OPEN(
            operation => 'EXPORT',
            job_mode => 'SCHEMA',
            job_name => v_schema_name || '_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
        );
        DBMS_DATAPUMP.SET_PARALLEL(handle => hdl, degree => p_parallel_degree);
        -- Dump file(s) and log file in DATA_PUMP_DIR
        DBMS_DATAPUMP.ADD_FILE(
            handle => hdl,
            filename => dump_file,
            directory => 'DATA_PUMP_DIR',
            filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
        );
        DBMS_DATAPUMP.ADD_FILE(
            handle => hdl,
            filename => log_file,
            directory => 'DATA_PUMP_DIR',
            filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
        );
        -- Restrict the job to the current schema
        DBMS_DATAPUMP.METADATA_FILTER(
            handle => hdl,
            name => 'SCHEMA_LIST',
            value => '''' || v_schema_name || ''''
        );
        -- Exclude RDS-managed system objects (standard AWS RDS exclusion list)
        DBMS_DATAPUMP.METADATA_FILTER(
            handle => hdl,
            name => 'EXCLUDE_NAME_EXPR',
            value => q'[IN (SELECT NAME FROM sys.OBJ$ WHERE TYPE# IN (66,67,74,79,59,62,46) AND OWNER# IN (SELECT USER# FROM SYS.USER$ WHERE NAME IN ('RDSADMIN','SYS','SYSTEM','RDS_DATAGUARD','RDSSEC')))]',
            object_type => 'PROCOBJ'
        );
        DBMS_DATAPUMP.START_JOB(handle => hdl);
        DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Schema: ' || v_schema_name || ' | Dump File(s): ' || dump_file || ' | Parallelism: ' || p_parallel_degree);
        -- Block until this schema's export finishes before the next one starts
        DBMS_DATAPUMP.WAIT_FOR_JOB(handle => hdl, job_state => job_state);
        DBMS_OUTPUT.PUT_LINE('Export job completed for ' || v_schema_name || ' with state: ' || job_state);
        DBMS_OUTPUT.PUT_LINE('Files saved in DATA_PUMP_DIR: ' || dump_file_prefix || '*');
        DBMS_DATAPUMP.DETACH(handle => hdl);
        hdl := NULL;  -- detached; the exception handler must not detach again
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for schema ' || v_schema_name || ': ' || SQLERRM);
        -- Best-effort detach; hdl is NULL when OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION
                WHEN OTHERS THEN NULL;
            END;
        END IF;
        RAISE;
END;
/

-- Create DBMS_SCHEDULER jobs for export
-- One one-shot job per schema; each job invokes export_schemas_to_dir for a
-- single schema so exports run independently in the background.
DECLARE
    v_schema_names VARCHAR2(4000) := '&1';    -- comma-separated schema names
    v_bucket_name VARCHAR2(100) := '&2';      -- S3 bucket (passed through to the procedure)
    v_parallel_degree NUMBER := &3;           -- Data Pump parallelism per job
    v_use_multiple_files VARCHAR2(1) := '&4'; -- 'Y' => multi-file dump set
    v_schema_list DBMS_UTILITY.UNCL_ARRAY;
    v_count NUMBER;
    v_schema_name VARCHAR2(30);
BEGIN
    DBMS_UTILITY.COMMA_TO_TABLE(v_schema_names, v_count, v_schema_list);
    FOR i IN 1..v_count LOOP
        v_schema_name := TRIM(v_schema_list(i));
        -- NOTE(review): job_action is built by string concatenation from the
        -- substitution variables above; keep those inputs trusted (or wrap
        -- them with DBMS_ASSERT.ENQUOTE_LITERAL) to avoid PL/SQL injection.
        DBMS_SCHEDULER.CREATE_JOB (
            job_name => 'EXPORT_' || v_schema_name || '_JOB_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS'),
            job_type => 'PLSQL_BLOCK',
            job_action => 'BEGIN export_schemas_to_dir(p_schema_names => ''' || v_schema_name || ''', p_bucket_name => ''' || v_bucket_name || ''', p_parallel_degree => ' || v_parallel_degree || ', p_use_multiple_files => ''' || v_use_multiple_files || '''); END;',
            start_date => SYSTIMESTAMP,
            repeat_interval => NULL,  -- one-shot job
            enabled => TRUE,          -- starts as soon as it is created
            auto_drop => FALSE,       -- keep metadata so run history stays queryable
            comments => 'Scheduled job to export schema ' || v_schema_name || ' to DATA_PUMP_DIR'
        );
        DBMS_OUTPUT.PUT_LINE('Scheduled job created for schema: ' || v_schema_name);
    END LOOP;
END;
/

-- Separate script for S3 upload (run interactively)
-- Uploads each schema's dump/log files (local prefix <schema>_export) from
-- DATA_PUMP_DIR to the bucket root. upload_to_s3 is asynchronous; use the
-- returned task ID to follow dbtask-<id>.log in BDUMP.
DECLARE
    v_schema_names VARCHAR2(4000) := '&1'; -- comma-separated schema names
    v_bucket_name VARCHAR2(100) := '&2';   -- target S3 bucket
    v_schema_list DBMS_UTILITY.UNCL_ARRAY;
    v_count NUMBER;
    v_schema_name VARCHAR2(30);
    v_prefix VARCHAR2(100);                -- local file-name prefix to upload
    v_task_id VARCHAR2(100);               -- async task ID returned by upload_to_s3
BEGIN
    DBMS_UTILITY.COMMA_TO_TABLE(v_schema_names, v_count, v_schema_list);
    FOR i IN 1..v_count LOOP
        v_schema_name := TRIM(v_schema_list(i));
        v_prefix := LOWER(v_schema_name) || '_export';
        -- p_prefix filters the local files; p_s3_prefix ('' = bucket root)
        -- is the destination folder inside the bucket.
        v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
            p_bucket_name => v_bucket_name,
            p_prefix => v_prefix,
            p_s3_prefix => '',
            p_directory_name => 'DATA_PUMP_DIR'
        );
        DBMS_OUTPUT.PUT_LINE('S3 Upload Task ID for ' || v_schema_name || ': ' || v_task_id);
    END LOOP;
END;
/

To modify the small schemas export script to run individual Data Pump export jobs for each schema (SCHEMA_01, SCHEMA_02, SCHEMA_03, SCHEMA_07) instead of combining them into a single dump file (small_schemas.dmp), you can create separate DBMS_DATAPUMP jobs for each schema. This approach ensures each schema has its own dump file (e.g., schema_01.dmp, schema_02.dmp, etc.) and log file, which can be useful for modularity, easier troubleshooting, or parallel execution. Since you're in an Oracle RDS environment (where expdp binaries are unavailable), the scripts use the DBMS_DATAPUMP API, are metadata-only (CONTENT=METADATA_ONLY), exclude INDEX and STATISTICS, and integrate with S3 for dump file storage, as per your previous requirements.

Below, I’ll provide:

  1. A generic script to export a single schema, which you can reuse for each of SCHEMA_01, SCHEMA_02, SCHEMA_03, and SCHEMA_07.
  2. Individual scripts for each schema, with unique dump file names and S3 upload commands.
  3. Steps to run and monitor these jobs, considering your context of tablespace consolidation (40 old to 8 new) and Toad session disconnects.

Key Modifications

  • Individual Dump Files: Each schema gets its own dump file (e.g., schema_01.dmp) instead of a combined small_schemas.dmp.
  • Metadata-Only: Retains CONTENT => 'METADATA' and excludes INDEX, STATISTICS as per your original expdp example.
  • No Parallelism: Since these are small schemas, parallelism is set to 1 (default), unlike the large schemas (SCHEMA_04, SCHEMA_05, SCHEMA_06 with parallel=24).
  • S3 Integration: Each dump file is uploaded to a unique S3 path for clarity.
  • Background Execution: Scripts include DBMS_DATAPUMP.DETACH to ensure jobs run in the background if Toad disconnects due to a time limit.
  • Robustness: Error handling, job handle output for monitoring, and compatibility with RDS constraints.

Step 1: Generic Script for Single Schema Export

DECLARE
    -- Exports a single schema via DBMS_DATAPUMP, waits for completion, then
    -- starts an asynchronous S3 upload of the resulting dump file(s).
    hdl NUMBER;                    -- Data Pump job handle
    job_state VARCHAR2(30);        -- final status from WAIT_FOR_JOB
    schema_name VARCHAR2(30) := '&1'; -- schema to export (e.g., YOUR_SCHEMA_NAME)
    bucket_name VARCHAR2(100) := '&2'; -- target S3 bucket (e.g., mys3bucket)
    parallel_degree NUMBER := &3;  -- Data Pump parallelism (e.g., 4)
    use_multiple_files VARCHAR2(1) := '&4'; -- Y = multi-file dump set, N = single file
    dump_file VARCHAR2(100);
    dump_file_prefix VARCHAR2(100);
    log_file VARCHAR2(100) := LOWER(schema_name) || '_export.log';
    task_id VARCHAR2(100);         -- S3 upload task ID
BEGIN
    -- Choose the dump file template. %U is the Data Pump substitution
    -- variable for a two-digit file sequence; printf-style patterns such as
    -- %02.0f are not expanded by DBMS_DATAPUMP.
    IF use_multiple_files = 'Y' THEN
        dump_file := LOWER(schema_name) || '_export_u%U.dmp';
        dump_file_prefix := LOWER(schema_name) || '_export_u';
    ELSE
        dump_file := LOWER(schema_name) || '_export.dmp';
        dump_file_prefix := LOWER(schema_name) || '_export';
    END IF;

    -- Open export job for single schema
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => schema_name || '_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );

    -- Set parallelism for the export job
    DBMS_DATAPUMP.SET_PARALLEL(
        handle => hdl,
        degree => parallel_degree
    );

    -- Add dump file(s) to DATA_PUMP_DIR
    DBMS_DATAPUMP.ADD_FILE(
        handle => hdl,
        filename => dump_file,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
    );

    -- Add log file
    DBMS_DATAPUMP.ADD_FILE(
        handle => hdl,
        filename => log_file,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
    );

    -- Restrict the job to the single requested schema
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => hdl,
        name => 'SCHEMA_LIST',
        value => '''' || schema_name || ''''
    );

    -- Exclude RDS-managed system objects (standard AWS RDS exclusion list)
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => hdl,
        name => 'EXCLUDE_NAME_EXPR',
        value => q'[IN (SELECT NAME FROM sys.OBJ$ WHERE TYPE# IN (66,67,74,79,59,62,46) AND OWNER# IN (SELECT USER# FROM SYS.USER$ WHERE NAME IN ('RDSADMIN','SYS','SYSTEM','RDS_DATAGUARD','RDSSEC')))]',
        object_type => 'PROCOBJ'
    );

    -- Start the export job
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Schema: ' || schema_name || ' | Dump File(s): ' || dump_file || ' | Parallelism: ' || parallel_degree);

    -- Block until the export finishes so the upload only sees complete files
    DBMS_DATAPUMP.WAIT_FOR_JOB(
        handle => hdl,
        job_state => job_state
    );
    DBMS_OUTPUT.PUT_LINE('Export job completed with state: ' || job_state);

    -- Upload every file starting with dump_file_prefix to S3 (asynchronous;
    -- monitor dbtask-<task_id>.log in BDUMP for completion)
    task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => bucket_name,
        p_prefix => dump_file_prefix,
        p_s3_prefix => '',
        p_directory_name => 'DATA_PUMP_DIR'
    );
    DBMS_OUTPUT.PUT_LINE('S3 Upload Task ID: ' || task_id || ' | File Pattern: ' || dump_file_prefix || '* | Bucket: ' || bucket_name);

    -- Detach from the job
    DBMS_DATAPUMP.DETACH(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job completed for ' || schema_name || '. Detaching...');
    hdl := NULL;  -- detached; the exception handler must not detach again

EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for ' || schema_name || ': ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION
                WHEN OTHERS THEN NULL;
            END;
        END IF;
        RAISE;
END;
/

scheduler:


sql

DECLARE
    -- Metadata-only export of a single schema, detached immediately so the
    -- Data Pump job keeps running server-side if the client disconnects.
    hdl NUMBER;                    -- Data Pump job handle
    schema_name VARCHAR2(30) := '&1'; -- Replace with schema name (e.g., SCHEMA_01)
    dump_file VARCHAR2(100) := LOWER(schema_name) || '.dmp';
    log_file VARCHAR2(100) := LOWER(schema_name) || '_export.log';
BEGIN
    -- Open export job for single schema
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => schema_name || '_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );

    -- Add dump and log files
    DBMS_DATAPUMP.ADD_FILE(
        handle => hdl,
        filename => dump_file,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
    );
    DBMS_DATAPUMP.ADD_FILE(
        handle => hdl,
        filename => log_file,
        directory => 'DATA_PUMP_DIR',
        filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
    );

    -- Specify single schema
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => hdl,
        name => 'SCHEMA_LIST',
        value => '''' || schema_name || ''''
    );

    -- Metadata only (no table rows). The CONTENT parameter accepts
    -- ALL / DATA_ONLY / METADATA_ONLY; 'METADATA' is not a valid value.
    DBMS_DATAPUMP.SET_PARAMETER(
        handle => hdl,
        name => 'CONTENT',
        value => 'METADATA_ONLY'
    );

    -- Exclude INDEX and STATISTICS
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => hdl,
        name => 'EXCLUDE_PATH_EXPR',
        value => 'IN (''INDEX'', ''STATISTICS'')'
    );

    -- Start the job
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Schema: ' || schema_name || ' | Dump File: ' || dump_file);

    -- Detach to run in background
    DBMS_DATAPUMP.DETACH(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job started for ' || schema_name || '. Detaching...');
    hdl := NULL;  -- detached; the exception handler must not detach again

EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for ' || schema_name || ': ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION
                WHEN OTHERS THEN NULL;
            END;
        END IF;
        RAISE;
END;
/

Usage:

  • Run in SQL*Plus or Toad as rdsadm/frusi@db_name.
  • Replace &1 with the schema name (e.g., SCHEMA_01) or hardcode it.
  • Example: SQL> @export_single_schema.sql SCHEMA_01
  • Output: Job handle and dump file name (e.g., schema_01.dmp).

S3 Upload (run after job completes):

sql
-- upload_to_s3 is a FUNCTION returning a task ID (VARCHAR2); it cannot be
-- invoked as a procedure, so the return value must be captured. Note also
-- that p_prefix filters the LOCAL files to upload, while the destination
-- folder in the bucket belongs in p_s3_prefix.
DECLARE
    v_task_id VARCHAR2(100);
BEGIN
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_prefix => LOWER('&1'),                                 -- local file-name prefix
        p_s3_prefix => 'dmp_files/small/' || LOWER('&1') || '/', -- destination folder in S3
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Track the async task via dbtask-<id>.log in BDUMP
    DBMS_OUTPUT.PUT_LINE('S3 upload task ID: ' || v_task_id);
END;
/

Step 2: Individual Scripts for Each Schema

For convenience, here are specific scripts for SCHEMA_01, SCHEMA_02, SCHEMA_03, and SCHEMA_07, with unique dump files and S3 paths.

Export SCHEMA_01

sql
DECLARE
    -- Background metadata-only export of SCHEMA_01 (indexes/statistics excluded).
    hdl NUMBER;  -- Data Pump job handle
BEGIN
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => 'SCHEMA_01_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_01.dmp', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE);
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_01_export.log', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'SCHEMA_LIST', value => '''SCHEMA_01''');
    -- CONTENT accepts ALL / DATA_ONLY / METADATA_ONLY ('METADATA' is invalid)
    DBMS_DATAPUMP.SET_PARAMETER(handle => hdl, name => 'CONTENT', value => 'METADATA_ONLY');
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'EXCLUDE_PATH_EXPR', value => 'IN (''INDEX'', ''STATISTICS'')');
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Dump File: schema_01.dmp');
    -- Detach so the job keeps running if the session disconnects
    DBMS_DATAPUMP.DETACH(handle => hdl);
    hdl := NULL;  -- detached; the exception handler must not detach again
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for SCHEMA_01: ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION WHEN OTHERS THEN NULL; END;
        END IF;
        RAISE;
END;
/
-- Upload to S3.
-- upload_to_s3 is a FUNCTION returning a task ID, so the return value must
-- be captured; p_prefix selects the local files to upload while the
-- destination folder in the bucket goes in p_s3_prefix.
DECLARE
    v_task_id VARCHAR2(100);
BEGIN
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_prefix => 'schema_01',                     -- local file-name prefix
        p_s3_prefix => 'dmp_files/small/schema_01/', -- destination folder in S3
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Track the async task via dbtask-<id>.log in BDUMP
    DBMS_OUTPUT.PUT_LINE('S3 upload task ID: ' || v_task_id);
END;
/

Export SCHEMA_02

sql
DECLARE
    -- Background metadata-only export of SCHEMA_02 (indexes/statistics excluded).
    hdl NUMBER;  -- Data Pump job handle
BEGIN
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => 'SCHEMA_02_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_02.dmp', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE);
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_02_export.log', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'SCHEMA_LIST', value => '''SCHEMA_02''');
    -- CONTENT accepts ALL / DATA_ONLY / METADATA_ONLY ('METADATA' is invalid)
    DBMS_DATAPUMP.SET_PARAMETER(handle => hdl, name => 'CONTENT', value => 'METADATA_ONLY');
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'EXCLUDE_PATH_EXPR', value => 'IN (''INDEX'', ''STATISTICS'')');
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Dump File: schema_02.dmp');
    -- Detach so the job keeps running if the session disconnects
    DBMS_DATAPUMP.DETACH(handle => hdl);
    hdl := NULL;  -- detached; the exception handler must not detach again
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for SCHEMA_02: ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION WHEN OTHERS THEN NULL; END;
        END IF;
        RAISE;
END;
/
-- Upload to S3.
-- upload_to_s3 is a FUNCTION returning a task ID, so the return value must
-- be captured; p_prefix selects the local files, p_s3_prefix the S3 folder.
DECLARE
    v_task_id VARCHAR2(100);
BEGIN
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_prefix => 'schema_02',                     -- local file-name prefix
        p_s3_prefix => 'dmp_files/small/schema_02/', -- destination folder in S3
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Track the async task via dbtask-<id>.log in BDUMP
    DBMS_OUTPUT.PUT_LINE('S3 upload task ID: ' || v_task_id);
END;
/

Export SCHEMA_03

sql
DECLARE
    -- Background metadata-only export of SCHEMA_03 (indexes/statistics excluded).
    hdl NUMBER;  -- Data Pump job handle
BEGIN
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => 'SCHEMA_03_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_03.dmp', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE);
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_03_export.log', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'SCHEMA_LIST', value => '''SCHEMA_03''');
    -- CONTENT accepts ALL / DATA_ONLY / METADATA_ONLY ('METADATA' is invalid)
    DBMS_DATAPUMP.SET_PARAMETER(handle => hdl, name => 'CONTENT', value => 'METADATA_ONLY');
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'EXCLUDE_PATH_EXPR', value => 'IN (''INDEX'', ''STATISTICS'')');
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Dump File: schema_03.dmp');
    -- Detach so the job keeps running if the session disconnects
    DBMS_DATAPUMP.DETACH(handle => hdl);
    hdl := NULL;  -- detached; the exception handler must not detach again
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for SCHEMA_03: ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION WHEN OTHERS THEN NULL; END;
        END IF;
        RAISE;
END;
/
-- Upload to S3.
-- upload_to_s3 is a FUNCTION returning a task ID, so the return value must
-- be captured; p_prefix selects the local files, p_s3_prefix the S3 folder.
DECLARE
    v_task_id VARCHAR2(100);
BEGIN
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_prefix => 'schema_03',                     -- local file-name prefix
        p_s3_prefix => 'dmp_files/small/schema_03/', -- destination folder in S3
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Track the async task via dbtask-<id>.log in BDUMP
    DBMS_OUTPUT.PUT_LINE('S3 upload task ID: ' || v_task_id);
END;
/

Export SCHEMA_07

sql
DECLARE
    -- Background metadata-only export of SCHEMA_07 (indexes/statistics excluded).
    hdl NUMBER;  -- Data Pump job handle
BEGIN
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'EXPORT',
        job_mode => 'SCHEMA',
        job_name => 'SCHEMA_07_EXPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_07.dmp', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE);
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_07_export.log', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'SCHEMA_LIST', value => '''SCHEMA_07''');
    -- CONTENT accepts ALL / DATA_ONLY / METADATA_ONLY ('METADATA' is invalid)
    DBMS_DATAPUMP.SET_PARAMETER(handle => hdl, name => 'CONTENT', value => 'METADATA_ONLY');
    DBMS_DATAPUMP.METADATA_FILTER(handle => hdl, name => 'EXCLUDE_PATH_EXPR', value => 'IN (''INDEX'', ''STATISTICS'')');
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Dump File: schema_07.dmp');
    -- Detach so the job keeps running if the session disconnects
    DBMS_DATAPUMP.DETACH(handle => hdl);
    hdl := NULL;  -- detached; the exception handler must not detach again
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for SCHEMA_07: ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION WHEN OTHERS THEN NULL; END;
        END IF;
        RAISE;
END;
/
-- Upload to S3.
-- upload_to_s3 is a FUNCTION returning a task ID, so the return value must
-- be captured; p_prefix selects the local files, p_s3_prefix the S3 folder.
DECLARE
    v_task_id VARCHAR2(100);
BEGIN
    v_task_id := rdsadmin.rdsadmin_s3_tasks.upload_to_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_prefix => 'schema_07',                     -- local file-name prefix
        p_s3_prefix => 'dmp_files/small/schema_07/', -- destination folder in S3
        p_directory_name => 'DATA_PUMP_DIR'
    );
    -- Track the async task via dbtask-<id>.log in BDUMP
    DBMS_OUTPUT.PUT_LINE('S3 upload task ID: ' || v_task_id);
END;
/

Step 3: Running and Monitoring the Jobs

  1. Run Exports:
    • Execute each script in Toad or SQL*Plus as rdsadm/frusi@db_name.
    • Run sequentially or in parallel sessions (since these are small schemas, sequential is fine unless time is critical).
    • Note the job handle for each job (e.g., Job Handle: 1234 | Dump File: schema_01.dmp).
    • Jobs run in the background after DETACH, even if Toad disconnects due to a time limit.
  2. Monitor Jobs:
    • Use the monitoring scripts from previous responses to track progress:
      • Real-Time (Job Handle): Use monitor_datapump_job.sql with the job handle (replace :job_handle).
      • All Jobs (No Handle): Use monitor_datapump_views.sql to check dba_datapump_jobs:
        sql
        SELECT job_name, state, attached_sessions
        FROM dba_datapump_jobs
        WHERE state NOT IN ('NOT RUNNING')
        ORDER BY job_name;
      • Logged History: Use monitor_datapump_log.sql to log to datapump_monitor_log:
        sql
        SELECT job_name, state, percent_done, error_message
        FROM datapump_monitor_log
        WHERE log_time > SYSDATE - 1/24
        ORDER BY log_time DESC;
  3. Upload to S3:
    • Run the S3 upload block after each job completes (or monitor logs to confirm completion).
    • Example: After SCHEMA_01 export, run its S3 upload script.
  4. Verify Dump Files:
    sql
    SELECT filename FROM TABLE(rdsadmin.rds_file_util.listdir('DATA_PUMP_DIR')); -- (dba_directories lists directory objects, not files)

Step 4: Import Considerations

For importing these individual dump files, you’ll need corresponding import scripts with REMAP_TABLESPACE. Here’s a generic import script for a single schema (e.g., SCHEMA_01):

sql
DECLARE
    -- Downloads the SCHEMA_01 dump from S3 and imports it with tablespace
    -- remapping driven by the ts_mappings table (old_tablespace -> new_tablespace).
    hdl NUMBER;               -- Data Pump job handle
    v_task_id VARCHAR2(100);  -- S3 download task ID
BEGIN
    -- download_from_s3 is a FUNCTION returning a task ID; capture it.
    -- NOTE(review): the download runs asynchronously — verify the dump file
    -- exists in DATA_PUMP_DIR (check dbtask-<id>.log in BDUMP) before
    -- starting the import, or the job will not find it.
    v_task_id := rdsadmin.rdsadmin_s3_tasks.download_from_s3(
        p_bucket_name => 'your-rds-s3-bucket',
        p_s3_prefix => 'dmp_files/small/schema_01/',
        p_directory_name => 'DATA_PUMP_DIR'
    );
    DBMS_OUTPUT.PUT_LINE('S3 download task ID: ' || v_task_id);
    hdl := DBMS_DATAPUMP.OPEN(
        operation => 'IMPORT',
        job_mode => 'SCHEMA',
        job_name => 'SCHEMA_01_IMPORT_' || TO_CHAR(SYSDATE, 'YYYYMMDDHH24MISS')
    );
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_01.dmp', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE);
    DBMS_DATAPUMP.ADD_FILE(handle => hdl, filename => 'schema_01_import.log', directory => 'DATA_PUMP_DIR', filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE);
    -- One REMAP_TABLESPACE per mapping row (e.g. 40 old tablespaces -> 8 new)
    FOR rec IN (SELECT old_tablespace, new_tablespace FROM ts_mappings) LOOP
        DBMS_DATAPUMP.METADATA_REMAP(handle => hdl, name => 'REMAP_TABLESPACE', old_value => rec.old_tablespace, value => rec.new_tablespace);
    END LOOP;
    -- Recreate LOBs as SecureFiles on import
    DBMS_DATAPUMP.METADATA_TRANSFORM(handle => hdl, name => 'LOB_STORAGE', value => 'SECUREFILE');
    DBMS_DATAPUMP.START_JOB(handle => hdl);
    DBMS_OUTPUT.PUT_LINE('Job Handle: ' || hdl || ' | Dump File: schema_01.dmp');
    -- Detach so the import keeps running if the session disconnects
    DBMS_DATAPUMP.DETACH(handle => hdl);
    hdl := NULL;  -- detached; the exception handler must not detach again
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error for SCHEMA_01: ' || SQLERRM);
        -- Best-effort detach; hdl is NULL if OPEN never succeeded, and a
        -- failed DETACH must never mask the original error.
        IF hdl IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(handle => hdl);
            EXCEPTION WHEN OTHERS THEN NULL; END;
        END IF;
        RAISE;
END;
/

Repeat for Other Schemas:

  • Create similar scripts for SCHEMA_02, SCHEMA_03, SCHEMA_07, updating the dump file and S3 prefix (e.g., schema_02.dmp, dmp_files/small/schema_02/).

Additional Notes

  • Why Individual Exports?:
    • Modularity: Easier to manage, troubleshoot, or re-run specific schemas.
    • Flexibility: Import schemas independently if needed.
    • Smaller files: Metadata-only exports are typically small, so splitting doesn’t impact storage significantly.
  • RDS Considerations:
    • Ensure new tablespaces (e.g., TRU_DATA_01) exist on the target before import.
    • Verify S3 bucket permissions and IAM roles.
    • Monitor CloudWatch for FreeStorageSpace and CPUUtilization.
  • Toad Disconnect:
    • Jobs run in the background after DETACH, even if Toad times out.
    • Use monitoring scripts (from previous responses) to check status post-disconnect.
  • Validation:
    • After export, check DATA_PUMP_DIR for schema_01.dmp, etc.
    • After import, validate object placement:
      sql
      SELECT object_type, tablespace_name, COUNT(*)
      FROM dba_objects
      WHERE owner IN ('SCHEMA_01', 'SCHEMA_02', 'SCHEMA_03', 'SCHEMA_07')
      GROUP BY object_type, tablespace_name;

If you provide specific schema names or ts_mappings details, I can further customize the scripts. Let me know if you need import scripts for all schemas or additional monitoring steps!

No comments: