/*
* Script: ImportSchemaWithMultiDump.sql
* Purpose: Performs a Data Pump schema import in Oracle RDS 19.8 from multiple dump files
* matching a pattern (e.g., REVANTH_%U_meta_%.dmp) for a specified schema (e.g., HEMANT).
* Dynamically adds dump files, uses schema-specific log file naming, and includes robust
* error handling and job monitoring. Compatible with generate_tablespace_remaps.sql for
* tablespace remapping.
*
* Parameters:
* - v_directory: Oracle directory (e.g., DATA_PUMP_DIR)
* - v_schema_name: Schema to import (e.g., HEMANT)
* - v_dumpfile_pattern: Dump file pattern (e.g., REVANTH_%U_meta_%.dmp)
* - v_logfile: Log file name (e.g., HEMANT_import.log)
*
* Dependencies:
* - Privileges: EXECUTE ON DBMS_DATAPUMP, UTL_FILE, RDSADMIN; READ, WRITE ON DATA_PUMP_DIR
* - Used with: generate_tablespace_remaps.sql to generate REMAP_TABLESPACE statements
*
* Usage:
* 1. Update v_directory, v_schema_name, v_dumpfile_pattern.
* 2. Paste REMAP_TABLESPACE statements from generate_tablespace_remaps.sql if needed.
* 3. Run in SQL*Plus or SQL Developer: @ImportSchemaWithMultiDump.sql
* 4. Clean up log file using UTL_FILE.FREMOVE if desired.
*
* Notes:
* - Handles single or multiple dump files dynamically.
* - Case-sensitive pattern matching for dump files (e.g., REVANTH_ uppercase).
* - Compatible with Oracle RDS 19.8, no S3 dependency.
* - Verify dump files with RDSADMIN.RDS_FILE_UTIL.LISTDIR before running.
*
* Created: 2025-09-16
* Author: Grok (xAI)
*/
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
-- Configurable parameters
v_directory VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Directory name
v_schema_name VARCHAR2(128) := 'HEMANT'; -- Schema to import
-- NOTE(review): this pattern is only used in log/error messages; the actual
-- file match below is a hard-coded regular expression — keep the two in sync.
v_dumpfile_pattern VARCHAR2(256) := 'REVANTH_%U_meta_%.dmp'; -- Dump file pattern
v_logfile VARCHAR2(256) := v_schema_name || '_import.log'; -- Schema-specific log file
v_file_exists BOOLEAN;
v_file_length NUMBER;
v_block_size NUMBER;
-- Data Pump variables
ind NUMBER; -- Loop index
spos NUMBER; -- String starting position
slen NUMBER; -- String length for output
h1 NUMBER; -- Data Pump job handle
percent_done NUMBER; -- Percentage of job complete
job_state VARCHAR2(30); -- To keep track of job state
le ku$_LogEntry; -- For WIP and error messages
js ku$_JobStatus; -- The job status from get_status
jd ku$_JobDesc; -- The job description from get_status
sts ku$_Status; -- The status object returned by get_status
-- Dump file collection
TYPE t_file_tab IS TABLE OF VARCHAR2(256);
v_dumpfiles t_file_tab;
BEGIN
-- Check if log file exists and remove it (FGETATTR sets fexists either way)
UTL_FILE.FGETATTR(
location => v_directory,
filename => v_logfile,
fexists => v_file_exists,
file_length => v_file_length,
block_size => v_block_size
);
IF v_file_exists THEN
UTL_FILE.FREMOVE(v_directory, v_logfile);
DBMS_OUTPUT.PUT_LINE('Existing log file ' || v_logfile || ' deleted');
END IF;
-- Find all dump files matching the pattern
SELECT filename
BULK COLLECT INTO v_dumpfiles
FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR(v_directory))
WHERE type = 'file'
AND REGEXP_LIKE(filename, '^REVANTH_[0-9]+_meta_.*\.dmp$'); -- Match pattern with numeric prefix
IF v_dumpfiles.COUNT = 0 THEN
RAISE_APPLICATION_ERROR(-20001, 'No dump files matching ' || v_dumpfile_pattern || ' found in ' || v_directory);
END IF;
-- Log found dump files
DBMS_OUTPUT.PUT_LINE('Found ' || v_dumpfiles.COUNT || ' dump file(s):');
IF v_dumpfiles.COUNT = 1 THEN
DBMS_OUTPUT.PUT_LINE(' (Single file mode)');
ELSE
DBMS_OUTPUT.PUT_LINE(' (Multiple files mode)');
END IF;
FOR i IN 1 .. v_dumpfiles.COUNT LOOP
DBMS_OUTPUT.PUT_LINE(' - ' || v_dumpfiles(i));
END LOOP;
-- Open Data Pump job (job_name NULL lets Oracle generate a unique name)
h1 := DBMS_DATAPUMP.OPEN(operation => 'IMPORT', job_mode => 'SCHEMA', job_name => NULL);
DBMS_OUTPUT.PUT_LINE('Opened Data Pump job');
-- Add all dump files
FOR i IN 1 .. v_dumpfiles.COUNT LOOP
DBMS_DATAPUMP.ADD_FILE(
handle => h1,
filename => v_dumpfiles(i),
directory => v_directory,
filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
);
DBMS_OUTPUT.PUT_LINE('Added dump file: ' || v_dumpfiles(i));
END LOOP;
-- Add log file
DBMS_DATAPUMP.ADD_FILE(
handle => h1,
filename => v_logfile,
directory => v_directory,
filetype => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
);
-- Set parameter for table exists action
-- ('REPLACE' drops and recreates any pre-existing target tables)
DBMS_DATAPUMP.SET_PARAMETER(h1, 'TABLE_EXISTS_ACTION', 'REPLACE');
-- Apply schema filter (value must be a quoted, comma-separated name list)
DBMS_DATAPUMP.METADATA_FILTER(
handle => h1,
name => 'SCHEMA_LIST',
value => '''' || UPPER(v_schema_name) || ''''
);
-- Start job with error handling
BEGIN
DBMS_DATAPUMP.START_JOB(h1);
DBMS_OUTPUT.PUT_LINE('Data Pump job started successfully');
EXCEPTION
WHEN OTHERS THEN
-- SUCCESS_WITH_INFO means the job started but has warnings attached;
-- surface them and continue. Anything else is re-raised.
IF SQLCODE = DBMS_DATAPUMP.SUCCESS_WITH_INFO_NUM THEN
DBMS_OUTPUT.PUT_LINE('Data Pump job started with info available:');
DBMS_DATAPUMP.GET_STATUS(
h1,
DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR,
0,
job_state,
sts
);
IF BITAND(sts.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
le := sts.error;
IF le IS NOT NULL THEN
ind := le.FIRST;
WHILE ind IS NOT NULL LOOP
DBMS_OUTPUT.PUT_LINE(le(ind).LogText);
ind := le.NEXT(ind);
END LOOP;
END IF;
END IF;
ELSE
RAISE;
END IF;
END;
-- Monitor job progress until it finishes or is stopped
percent_done := 0;
job_state := 'UNDEFINED';
WHILE (job_state != 'COMPLETED') AND (job_state != 'STOPPED') LOOP
DBMS_DATAPUMP.GET_STATUS(
h1,
DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR +
DBMS_DATAPUMP.KU$_STATUS_JOB_STATUS +
DBMS_DATAPUMP.KU$_STATUS_WIP,
-1,
job_state,
sts
);
js := sts.job_status;
-- If the percentage done changed, display the new value
IF js.percent_done != percent_done THEN
DBMS_OUTPUT.PUT_LINE('*** Job percent done = ' || TO_CHAR(js.percent_done));
percent_done := js.percent_done;
END IF;
-- Display WIP or error messages (WIP takes precedence when both are set)
IF BITAND(sts.mask, DBMS_DATAPUMP.KU$_STATUS_WIP) != 0 THEN
le := sts.wip;
ELSIF BITAND(sts.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
le := sts.error;
ELSE
le := NULL;
END IF;
IF le IS NOT NULL THEN
ind := le.FIRST;
WHILE ind IS NOT NULL LOOP
DBMS_OUTPUT.PUT_LINE(le(ind).LogText);
ind := le.NEXT(ind);
END LOOP;
END IF;
-- NOTE(review): DBMS_LOCK.SLEEP needs an explicit EXECUTE grant on RDS;
-- DBMS_SESSION.SLEEP is an alternative on 18c+ — confirm for this instance.
DBMS_LOCK.SLEEP(1); -- Avoid tight loop
END LOOP;
DBMS_OUTPUT.PUT_LINE('Job has completed');
DBMS_OUTPUT.PUT_LINE('Final job state = ' || job_state);
DBMS_DATAPUMP.DETACH(h1);
EXCEPTION
WHEN OTHERS THEN
-- Best-effort error reporting: dump any queued Data Pump errors
-- (in 255-char chunks to stay under PUT_LINE limits), detach, re-raise.
DBMS_OUTPUT.PUT_LINE('Exception in Data Pump job: ' || SQLERRM);
IF h1 IS NOT NULL THEN
DBMS_DATAPUMP.GET_STATUS(
h1,
DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR,
0,
job_state,
sts
);
IF BITAND(sts.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
le := sts.error;
IF le IS NOT NULL THEN
ind := le.FIRST;
WHILE ind IS NOT NULL LOOP
spos := 1;
slen := LENGTH(le(ind).LogText);
IF slen > 255 THEN
slen := 255;
END IF;
WHILE slen > 0 LOOP
DBMS_OUTPUT.PUT_LINE(SUBSTR(le(ind).LogText, spos, slen));
spos := spos + 255;
slen := LENGTH(le(ind).LogText) + 1 - spos;
END LOOP;
ind := le.NEXT(ind);
END LOOP;
END IF;
END IF;
DBMS_DATAPUMP.DETACH(h1);
END IF;
RAISE;
END;
/
============================================================
UTL_FILE.GET_LINE(v_file, v_line);
v_line := SUBSTR(v_line, 1, 32767); -- Prevent buffer overflow
IF REGEXP_LIKE(UPPER(v_line), 'TABLESPACE\s+[''"]?([A-Z0-9_]+)[''"]?') THEN
v_tablespace := REGEXP_SUBSTR(UPPER(v_line), 'TABLESPACE\s+[''"]?([A-Z0-9_]{1,30})[''"]?', 1, 1, NULL, 1);
IF v_tablespace IS NOT NULL AND v_tablespace LIKE '||%' THEN
DBMS_OUTPUT.PUT_LINE('Skipped placeholder in line: ' || SUBSTR(v_line, 1, 200));
END IF;
IF v_tablespace IS NOT NULL AND LENGTH(v_tablespace) <= 30 AND v_tablespace NOT LIKE '||%' THEN
IF NOT v_ts_tab.EXISTS(v_tablespace) THEN
v_ts_tab(v_tablespace) := v_tablespace;
DBMS_OUTPUT.PUT_LINE('Found tablespace: ' || v_tablespace);
END IF;
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Search an alert-log copy for keyword matches, printing N lines of
    -- context before and after each match (grep -B/-A style).
    v_keywords        VARCHAR2(100) := 'ERROR|ORA-|HEMANT'; -- Regex alternation of keywords
    v_context_lines   NUMBER := 2;                          -- Lines of context before/after
    TYPE t_lines IS TABLE OF VARCHAR2(4000) INDEX BY PLS_INTEGER;
    v_prev_lines      t_lines;          -- Rolling buffer of PREVIOUS lines only
    v_buf_count       PLS_INTEGER := 0; -- Lines currently held in the buffer
    v_line_count      NUMBER := 0;      -- Total lines processed
    v_context_counter NUMBER := 0;      -- Remaining "after" lines to print
    v_match_found     BOOLEAN := FALSE; -- Inside a match's context window?
BEGIN
    FOR rec IN (
        SELECT column_value AS line_text, ROWNUM AS line_num
        FROM TABLE(rdsadmin.rds_file_util.read_text_file('DATA_PUMP_DIR', 'alert_copy.log'))
        WHERE ROWNUM <= 300 -- Limit to 300 lines
    ) LOOP
        v_line_count := v_line_count + 1;
        IF REGEXP_LIKE(rec.line_text, v_keywords, 'i') THEN
            -- Print buffered "before" context unless we are still inside the
            -- context window of a previous match (avoids duplicate output).
            IF NOT v_match_found THEN
                FOR i IN 1 .. v_buf_count LOOP
                    -- Oldest buffered entry is v_buf_count lines before current.
                    DBMS_OUTPUT.PUT_LINE('Before (line ' || (rec.line_num - v_buf_count + i - 1) || '): ' || v_prev_lines(i));
                END LOOP;
            END IF;
            DBMS_OUTPUT.PUT_LINE('Match (line ' || rec.line_num || '): ' || rec.line_text);
            v_match_found := TRUE;
            v_context_counter := v_context_lines;
        ELSIF v_match_found AND v_context_counter > 0 THEN
            DBMS_OUTPUT.PUT_LINE('After (line ' || rec.line_num || '): ' || rec.line_text);
            v_context_counter := v_context_counter - 1;
        ELSE
            v_match_found := FALSE;
        END IF;
        -- Update the rolling buffer AFTER the match check so it only ever
        -- holds lines strictly before the current one. (The original pushed
        -- the current line first, so "before" context included the matching
        -- line itself, dropped the earliest context line, and the printed
        -- line numbers were off by one.)
        IF v_buf_count < v_context_lines THEN
            v_buf_count := v_buf_count + 1;
        ELSE
            FOR i IN 1 .. v_context_lines - 1 LOOP
                v_prev_lines(i) := v_prev_lines(i + 1);
            END LOOP;
        END IF;
        v_prev_lines(v_buf_count) := rec.line_text;
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Processed ' || v_line_count || ' lines from alert_copy.log.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
        RAISE;
END;
/
-- Count objects owned by HEMANT by type; table/index (sub)partitions are
-- reported as their own pseudo-types so totals can be compared pre/post import.
WITH schema_objects AS (
    SELECT object_type
    FROM dba_objects
    WHERE owner = 'HEMANT' AND object_name NOT LIKE 'BIN$%' AND temporary = 'N'
    UNION ALL
    SELECT 'TABLE PARTITION' AS object_type
    FROM dba_tab_partitions
    WHERE table_owner = 'HEMANT' AND table_name NOT LIKE 'BIN$%'
    UNION ALL
    SELECT 'TABLE SUBPARTITION' AS object_type
    FROM dba_tab_subpartitions
    WHERE table_owner = 'HEMANT' AND table_name NOT LIKE 'BIN$%'
    UNION ALL
    SELECT 'INDEX PARTITION' AS object_type
    FROM dba_ind_partitions
    WHERE index_owner = 'HEMANT' AND index_name NOT LIKE 'BIN$%'
    UNION ALL
    SELECT 'INDEX SUBPARTITION' AS object_type
    FROM dba_ind_subpartitions
    WHERE index_owner = 'HEMANT' AND index_name NOT LIKE 'BIN$%'
)
SELECT object_type, COUNT(*) AS object_count
FROM schema_objects
GROUP BY object_type
ORDER BY object_type;
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Drops every object in the CURRENT schema (run connected as the target
    -- user, e.g. HEMANT) after killing that user's sessions.
    -- Each DROP runs in its own sub-block so a single failure (e.g. a trigger
    -- that already vanished with its table) no longer aborts the whole run.
    PROCEDURE exec_drop(p_sql IN VARCHAR2, p_ok_msg IN VARCHAR2) IS
    BEGIN
        EXECUTE IMMEDIATE p_sql;
        DBMS_OUTPUT.PUT_LINE(p_ok_msg);
    EXCEPTION
        WHEN OTHERS THEN
            DBMS_OUTPUT.PUT_LINE('Error running [' || p_sql || ']: ' || SQLERRM);
    END;
BEGIN
    -- Kill active sessions to avoid ORA-00054 (resource busy)
    FOR rec IN (SELECT sid, serial# FROM v$session WHERE username = 'HEMANT' AND status != 'KILLED') LOOP
        BEGIN
            rdsadmin.rdsadmin_util.kill(rec.sid, rec.serial#, 'IMMEDIATE');
            DBMS_OUTPUT.PUT_LINE('Killed session: SID=' || rec.sid || ', SERIAL#=' || rec.serial#);
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Error killing SID=' || rec.sid || ': ' || SQLERRM);
        END;
    END LOOP;
    -- Drop materialized views BEFORE tables: an MV container table cannot be
    -- removed with DROP TABLE (ORA-12083), so the original order failed here.
    FOR rec IN (SELECT mview_name FROM user_mviews WHERE mview_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP MATERIALIZED VIEW "' || rec.mview_name || '"',
                  'Dropped materialized view: ' || rec.mview_name);
    END LOOP;
    -- Drop tables (includes partitions, subpartitions, constraints, indexes;
    -- identifiers are double-quoted so mixed-case names are handled exactly)
    FOR rec IN (SELECT table_name FROM user_tables WHERE table_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP TABLE "' || rec.table_name || '" CASCADE CONSTRAINTS PURGE',
                  'Dropped table: ' || rec.table_name);
    END LOOP;
    -- Drop views
    FOR rec IN (SELECT view_name FROM user_views WHERE view_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP VIEW "' || rec.view_name || '"', 'Dropped view: ' || rec.view_name);
    END LOOP;
    -- Drop sequences
    FOR rec IN (SELECT sequence_name FROM user_sequences WHERE sequence_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP SEQUENCE "' || rec.sequence_name || '"', 'Dropped sequence: ' || rec.sequence_name);
    END LOOP;
    -- Drop stored PL/SQL via USER_OBJECTS: USER_PROCEDURES lists one row per
    -- package subprogram, which made the original issue duplicate DROP PACKAGE
    -- statements (and miss packages with no standalone-style rows).
    FOR rec IN (SELECT object_name, object_type
                FROM user_objects
                WHERE object_type IN ('PROCEDURE', 'FUNCTION', 'PACKAGE')
                  AND object_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP ' || rec.object_type || ' "' || rec.object_name || '"',
                  'Dropped ' || LOWER(rec.object_type) || ': ' || rec.object_name);
    END LOOP;
    -- Drop any triggers that remain (most disappear with their tables)
    FOR rec IN (SELECT trigger_name FROM user_triggers WHERE trigger_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP TRIGGER "' || rec.trigger_name || '"', 'Dropped trigger: ' || rec.trigger_name);
    END LOOP;
    -- Drop synonyms
    FOR rec IN (SELECT synonym_name FROM user_synonyms WHERE synonym_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP SYNONYM "' || rec.synonym_name || '"', 'Dropped synonym: ' || rec.synonym_name);
    END LOOP;
    -- Drop types (FORCE removes them even when dependent objects exist)
    FOR rec IN (SELECT type_name FROM user_types WHERE type_name NOT LIKE 'BIN$%') LOOP
        exec_drop('DROP TYPE "' || rec.type_name || '" FORCE', 'Dropped type: ' || rec.type_name);
    END LOOP;
    -- Purge recycle bin to ensure clean metadata export
    EXECUTE IMMEDIATE 'PURGE RECYCLEBIN';
    DBMS_OUTPUT.PUT_LINE('Purged recycle bin.');
    DBMS_OUTPUT.PUT_LINE('All objects dropped successfully.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
        RAISE;
END;
/
BEGIN
    -- Step 1: Clear any stale pending area (to avoid conflicts)
    DBMS_RESOURCE_MANAGER.CLEAR_PENDING_AREA;
    -- Step 2: Create a pending area for the changes
    DBMS_RESOURCE_MANAGER.CREATE_PENDING_AREA;
    -- Step 3: Directive for MGMT_PL: 30% CPU at management level 1.
    -- NOTE(review): CREATE_PLAN_DIRECTIVE raises an error if a directive for
    -- this group already exists on PLAN_1; use UPDATE_PLAN_DIRECTIVE then.
    DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(
        plan             => 'PLAN_1',
        group_or_subplan => 'MGMT_PL',
        comment          => 'Allocate 30% CPU to MGMT_PL consumer group',
        mgmt_p1          => 30 -- 30% CPU at priority level 1
    );
    -- Step 4: Ensure OTHER_GROUPS has a directive (required in every plan)
    DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(
        plan             => 'PLAN_1',
        group_or_subplan => 'OTHER_GROUPS',
        comment          => 'Default group for unassigned sessions',
        mgmt_p1          => 70 -- Remaining 70% CPU for everyone else
    );
    -- Step 5: Validate the pending changes
    DBMS_RESOURCE_MANAGER.VALIDATE_PENDING_AREA;
    -- Step 6: Submit the pending changes
    DBMS_RESOURCE_MANAGER.SUBMIT_PENDING_AREA;
    -- Step 7: Activate the plan. The original prefixed this call with the
    -- SQL*Plus-only "EXEC" keyword, which is invalid inside a PL/SQL block
    -- and made the whole block fail to compile.
    DBMS_RESOURCE_MANAGER.SWITCH_PLAN(plan_name => 'PLAN_1');
END;
/
Plain SQL Query
This query:
- Prompts for a comma-separated list of old tablespaces.
- Splits the input using REGEXP_SUBSTR (Oracle 11g+) to treat each element as a row.
- Joins with ts_mappings to find the corresponding new_tablespace values.
- Generates DBMS_DATAPUMP.METADATA_REMAP statements, or a "No mappings found" message when nothing matches.
-- Prompt for the comma-separated tablespace list (SQL*Plus only; the
-- &v_old_tablespaces substitution below is expanded before parsing).
ACCEPT v_old_tablespaces CHAR PROMPT 'Enter old tablespace names (comma-separated, e.g., FN2_IDX_01,EDI_BIZOPS_DATA,FN2_DATA_01): '
WITH split_tablespaces AS (
-- Split comma-separated input into rows
-- (CONNECT BY + REGEXP_SUBSTR row-generator idiom, Oracle 11g+)
SELECT TRIM(REGEXP_SUBSTR(UPPER('&v_old_tablespaces'), '[^,]+', 1, LEVEL)) AS old_tablespace
FROM dual
CONNECT BY REGEXP_SUBSTR(UPPER('&v_old_tablespaces'), '[^,]+', 1, LEVEL) IS NOT NULL
)
-- One METADATA_REMAP call per tablespace that has a mapping row
SELECT 'DBMS_DATAPUMP.METADATA_REMAP(handle => handle, name => ''REMAP_TABLESPACE'', old_value => ''' || t.old_tablespace || ''', value => ''' || m.new_tablespace || ''');' AS remap_statement
FROM split_tablespaces t
JOIN ts_mappings m ON t.old_tablespace = m.old_tablespace
UNION ALL
-- Fallback row: emitted only when NONE of the entered names has a mapping.
-- NOTE(review): names that are individually unmapped are silently omitted
-- from the output when at least one other name does map — verify intent.
SELECT 'No mappings found for old tablespaces: ' || UPPER('&v_old_tablespaces')
FROM dual
WHERE NOT EXISTS (
SELECT 1
FROM ts_mappings m
JOIN split_tablespaces t ON t.old_tablespace = m.old_tablespace
)
ORDER BY remap_statement;

Non-Interactive Version (Hardcoded Value)
For environments like SQL Developer that do not support ACCEPT, hardcode the old tablespaces:
-- Same remap generator, with the tablespace list hardcoded for clients
-- (e.g. SQL Developer) that lack SQL*Plus ACCEPT support.
WITH ts_input AS (
    -- Row-generate the comma-separated list (CONNECT BY + REGEXP_SUBSTR idiom)
    SELECT TRIM(REGEXP_SUBSTR(UPPER('FN2_IDX_01,EDI_BIZOPS_DATA,FN2_DATA_01'), '[^,]+', 1, LEVEL)) AS old_tablespace
    FROM dual
    CONNECT BY REGEXP_SUBSTR(UPPER('FN2_IDX_01,EDI_BIZOPS_DATA,FN2_DATA_01'), '[^,]+', 1, LEVEL) IS NOT NULL
)
-- Emit one METADATA_REMAP call per mapped tablespace
SELECT 'DBMS_DATAPUMP.METADATA_REMAP(handle => handle, name => ''REMAP_TABLESPACE'', old_value => ''' || src.old_tablespace || ''', value => ''' || map.new_tablespace || ''');' AS remap_statement
FROM ts_input src
INNER JOIN ts_mappings map
    ON src.old_tablespace = map.old_tablespace
UNION ALL
-- Fallback row when none of the listed tablespaces has a mapping
SELECT 'No mappings found for old tablespaces: ' || UPPER('FN2_IDX_01,EDI_BIZOPS_DATA,FN2_DATA_01')
FROM dual
WHERE NOT EXISTS (
    SELECT 1
    FROM ts_mappings map
    INNER JOIN ts_input src
        ON src.old_tablespace = map.old_tablespace
)
ORDER BY remap_statement;

Sample ts_mappings Data
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Kill every session belonging to v_user via the RDS admin API;
    -- when v_user is NULL, every non-system user session is targeted.
    v_user VARCHAR2(128) := 'HEMANT'; -- Target user; NULL means all users
BEGIN
    FOR s IN (
        SELECT sid, serial#
        FROM v$session
        WHERE username IS NOT NULL
          AND username NOT IN ('SYS', 'RDSADMIN', 'RDSREPLADMIN')
          AND (username = v_user OR v_user IS NULL)
    ) LOOP
        -- Each kill runs in its own sub-block so one failure
        -- does not stop the sweep over the remaining sessions.
        BEGIN
            rdsadmin.rdsadmin_util.kill(
                sid    => s.sid,
                serial => s.serial#,
                method => 'IMMEDIATE'
            );
            DBMS_OUTPUT.PUT_LINE('Killed session: SID=' || s.sid || ', SERIAL#=' || s.serial#);
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Error killing SID=' || s.sid || ', SERIAL#=' || s.serial# || ': ' || SQLERRM);
        END;
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Mass kill completed.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
        RAISE;
END;
/
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Kill foreground sessions for v_user (or, when NULL, all non-system
    -- users), re-checking each session still exists before killing it.
    v_user VARCHAR2(128) := 'HEMANT'; -- Target user; NULL means all users
BEGIN
    FOR sess IN (
        SELECT sid, serial#
        FROM v$session
        WHERE username IS NOT NULL
          AND username NOT IN ('SYS', 'RDSADMIN', 'RDSREPLADMIN', 'RDS_DATAGUARD')
          AND type != 'BACKGROUND' -- Foreground sessions only
          AND status != 'KILLED'   -- Already-killed sessions are skipped
          AND (username = v_user OR v_user IS NULL)
          -- Optional filter for idle sessions only:
          -- AND last_call_et > 3600 -- Idle > 60 minutes
    ) LOOP
        DECLARE
            v_alive NUMBER;
        BEGIN
            -- Re-validate: the session may have ended since the outer query ran.
            SELECT COUNT(*)
            INTO v_alive
            FROM v$session
            WHERE sid = sess.sid
              AND serial# = sess.serial#
              AND status != 'KILLED';
            IF v_alive = 0 THEN
                DBMS_OUTPUT.PUT_LINE('Skipped: Session SID=' || sess.sid || ', SERIAL#=' || sess.serial# || ' no longer exists.');
            ELSE
                rdsadmin.rdsadmin_util.kill(
                    sid    => sess.sid,
                    serial => sess.serial#,
                    method => 'IMMEDIATE' -- Fall back to 'PROCESS' if this fails
                );
                DBMS_OUTPUT.PUT_LINE('Killed session: SID=' || sess.sid || ', SERIAL#=' || sess.serial#);
                DBMS_LOCK.SLEEP(0.5); -- Brief pause between kills to reduce contention
            END IF;
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Error killing SID=' || sess.sid || ', SERIAL#=' || sess.serial# || ': ' || SQLERRM);
        END;
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Mass kill completed.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
        RAISE;
END;
/
/*
* Script: ForceKillKilledSessions.sql
* Purpose: Forcefully terminates sessions marked as 'KILLED' for a specified user (e.g., HEMANT)
* in Oracle RDS 19.8 using rdsadmin.rdsadmin_util.kill with PROCESS method.
* Retries with IMMEDIATE method for any remaining sessions. Includes error handling
* and logging for safe execution in RDS environments.
*
* Parameters:
* - v_user: Target user (e.g., HEMANT); set to NULL to target all non-system users
*
* Dependencies:
* - Privileges: EXECUTE ON RDSADMIN
* - Related: Used in context of Data Pump operations to clear sessions before import
*
* Usage:
* 1. Update v_user to target specific user (e.g., HEMANT) or NULL for all.
* 2. Run in SQL*Plus or SQL Developer: @ForceKillKilledSessions.sql
* 3. Verify sessions are cleared with V$SESSION query.
*
* Notes:
* - PROCESS method kills the OS process, use cautiously to avoid data corruption.
* - Excludes SYS, RDSADMIN, RDSREPLADMIN to prevent system issues.
* - Run in test environment first; verify with preview query before executing.
*
* Created: 2025-09-16
* Author: Grok (xAI)
*/
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Forcefully terminate sessions stuck in KILLED state: first with the
    -- PROCESS method (kills the OS process), then a retry pass with IMMEDIATE.
    v_user VARCHAR2(128) := 'HEMANT'; -- Target user; set to NULL for all users

    -- One sweep over KILLED sessions with the given method/label.
    -- Factored out because the original duplicated this loop verbatim
    -- for the 'PROCESS' and 'IMMEDIATE' passes.
    PROCEDURE kill_pass(p_method IN VARCHAR2, p_label IN VARCHAR2) IS
    BEGIN
        FOR rec IN (
            SELECT sid, serial#
            FROM v$session
            WHERE status = 'KILLED'
              AND username IS NOT NULL
              AND username NOT IN ('SYS', 'RDSADMIN', 'RDSREPLADMIN')
              AND (username = v_user OR v_user IS NULL)
        ) LOOP
            BEGIN
                rdsadmin.rdsadmin_util.kill(
                    sid    => rec.sid,
                    serial => rec.serial#,
                    method => p_method
                );
                DBMS_OUTPUT.PUT_LINE(p_label || ' session: SID=' || rec.sid || ', SERIAL#=' || rec.serial# || ' (' || p_method || ')');
            EXCEPTION
                WHEN OTHERS THEN
                    -- One failed kill must not stop the rest of the pass.
                    DBMS_OUTPUT.PUT_LINE('Error (' || p_method || ') killing SID=' || rec.sid || ', SERIAL#=' || rec.serial# || ': ' || SQLERRM);
            END;
        END LOOP;
    END;
BEGIN
    -- Pass 1: kill OS processes (forceful; use cautiously on shared hosts)
    kill_pass('PROCESS', 'Force killed');
    -- Pass 2: retry anything still marked KILLED with IMMEDIATE
    kill_pass('IMMEDIATE', 'Retried killing');
    DBMS_OUTPUT.PUT_LINE('Mass kill of KILLED sessions completed.');
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Unexpected error: ' || SQLERRM);
        RAISE;
END;
/
/*
* Script: GenerateSchemaDDLFromDumpFiles.sql
* Purpose: Generates a SQL file (e.g., HEMANT_ddl_preview.sql) containing DDL for a specified schema
* from one or more Data Pump dump files matching a pattern (e.g., REVANTH_%U_meta_%U.dmp)
* in Oracle RDS 19.8. Supports single or multiple dump files, robust error handling,
* and file cleanup. Used with generate_tablespace_remaps.sql for tablespace remapping.
*
* Parameters:
* - v_directory: Oracle directory (e.g., DATA_PUMP_DIR)
* - v_schema_name: Schema to process (e.g., HEMANT)
* - v_dumpfile_pattern: Dump file pattern (e.g., REVANTH_%U_meta_%.dmp)
* - v_sqlfile: Output SQL file (e.g., HEMANT_ddl_preview.sql)
* - v_logfile: Log file (e.g., HEMANT_sqlfile_gen.log)
* - v_job_name: Data Pump job name (e.g., SQLFILE_EXP_HEMANT)
*
* Dependencies:
* - Privileges: EXECUTE on UTL_FILE, DBMS_DATAPUMP, RDSADMIN; READ, WRITE on DATA_PUMP_DIR
* - Used with: generate_tablespace_remaps.sql to extract REMAP_TABLESPACES statements
*
* Usage:
* 1. Update configurable parameters (v_directory, v_schema_name, v_dumpfile_pattern).
* 2. Run in SQL*Plus or SQL Developer: @GenerateSchemaDDLFromDumpFiles.sql
* 3. Use output SQL file with generate_tablespace_remaps.sql for import remapping.
* 4. Clean up SQL and log files afterward using UTL_FILE.FREMOVE.
*
* Notes:
* - Handles single or multiple dump files dynamically.
* - Case-sensitive pattern matching for dump files (e.g., REVANTH_ uppercase, meta_ lowercase).
* - No S3 dependency, fully compatible with Oracle RDS 19.8.
* - Verify dump files with RDSADMIN.RDS_FILE_UTIL.LISTDIR before running.
*
* Created: 2025-09-16
* Author: Grok (xAI)
*/
SET SERVEROUTPUT ON SIZE UNLIMITED;
DECLARE
    -- Generate a DDL-preview SQL file for one schema from Data Pump dump
    -- files via a SQL_FILE job; nothing is actually imported.
    -- Configurable parameters
    v_directory        VARCHAR2(128) := 'DATA_PUMP_DIR'; -- Directory name
    v_schema_name      VARCHAR2(128) := 'HEMANT';        -- Schema to process
    -- NOTE(review): this pattern is only used in messages; the actual file
    -- match below is a hard-coded regex (and requires exactly two digits
    -- after REVANTH_, unlike the import script's [0-9]+) — keep them in sync.
    v_dumpfile_pattern VARCHAR2(256) := 'REVANTH_%U_meta_%.dmp';
    v_sqlfile          VARCHAR2(256) := v_schema_name || '_ddl_preview.sql'; -- e.g., HEMANT_ddl_preview.sql
    v_logfile          VARCHAR2(256) := v_schema_name || '_sqlfile_gen.log'; -- e.g., HEMANT_sqlfile_gen.log
    v_job_name         VARCHAR2(128) := 'SQLFILE_EXP_' || v_schema_name;     -- e.g., SQLFILE_EXP_HEMANT
    v_file_exists      BOOLEAN;
    v_file_length      NUMBER;
    v_block_size       NUMBER;
    v_handle           NUMBER;        -- Data Pump job handle (NULL until OPEN succeeds)
    v_job_state        VARCHAR2(30);
    v_status           ku$_Status;
    v_log_entry        ku$_LogEntry;
    v_ind              NUMBER;
    TYPE t_file_tab IS TABLE OF VARCHAR2(256);
    v_dumpfiles        t_file_tab;
BEGIN
    -- Remove a stale log file if present
    UTL_FILE.FGETATTR(
        location    => v_directory,
        filename    => v_logfile,
        fexists     => v_file_exists,
        file_length => v_file_length,
        block_size  => v_block_size
    );
    IF v_file_exists THEN
        UTL_FILE.FREMOVE(v_directory, v_logfile);
        DBMS_OUTPUT.PUT_LINE('Existing log file ' || v_logfile || ' deleted');
    END IF;
    -- Remove a stale SQL file to avoid conflicts with the new job
    UTL_FILE.FGETATTR(
        location    => v_directory,
        filename    => v_sqlfile,
        fexists     => v_file_exists,
        file_length => v_file_length,
        block_size  => v_block_size
    );
    IF v_file_exists THEN
        UTL_FILE.FREMOVE(v_directory, v_sqlfile);
        DBMS_OUTPUT.PUT_LINE('Existing SQL file ' || v_sqlfile || ' deleted');
    END IF;
    -- Find all dump files matching the pattern (case-sensitive)
    SELECT filename
    BULK COLLECT INTO v_dumpfiles
    FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR(v_directory))
    WHERE type = 'file'
    AND REGEXP_LIKE(filename, '^REVANTH_[0-9]{2}_meta_[0-9]+\.dmp$'); -- Match numeric timestamp
    IF v_dumpfiles.COUNT = 0 THEN
        RAISE_APPLICATION_ERROR(-20001, 'No dump files matching ' || v_dumpfile_pattern || ' found in ' || v_directory);
    END IF;
    -- Log found dump files
    DBMS_OUTPUT.PUT_LINE('Found ' || v_dumpfiles.COUNT || ' dump file(s):');
    IF v_dumpfiles.COUNT = 1 THEN
        DBMS_OUTPUT.PUT_LINE(' (Single file mode)');
    ELSE
        DBMS_OUTPUT.PUT_LINE(' (Multiple files mode)');
    END IF;
    FOR i IN 1 .. v_dumpfiles.COUNT LOOP
        DBMS_OUTPUT.PUT_LINE(' - ' || v_dumpfiles(i));
    END LOOP;
    -- Open Data Pump SQL_FILE job (writes DDL to a file, imports nothing)
    v_handle := DBMS_DATAPUMP.OPEN(
        operation => 'SQL_FILE',
        job_mode  => 'SCHEMA',
        job_name  => UPPER(v_job_name)
    );
    DBMS_OUTPUT.PUT_LINE('Opened Data Pump job: ' || v_job_name);
    -- Set parallel degree
    DBMS_DATAPUMP.SET_PARALLEL(
        handle => v_handle,
        degree => 2
    );
    -- Add all dump files
    FOR i IN 1 .. v_dumpfiles.COUNT LOOP
        DBMS_DATAPUMP.ADD_FILE(
            handle    => v_handle,
            filename  => v_dumpfiles(i),
            directory => v_directory,
            filetype  => DBMS_DATAPUMP.KU$_FILE_TYPE_DUMP_FILE
        );
        DBMS_OUTPUT.PUT_LINE('Added dump file: ' || v_dumpfiles(i));
    END LOOP;
    -- Add the output SQL file
    DBMS_DATAPUMP.ADD_FILE(
        handle    => v_handle,
        filename  => v_sqlfile,
        directory => v_directory,
        filetype  => DBMS_DATAPUMP.KU$_FILE_TYPE_SQL_FILE
    );
    -- Add log file
    DBMS_DATAPUMP.ADD_FILE(
        handle    => v_handle,
        filename  => v_logfile,
        directory => v_directory,
        filetype  => DBMS_DATAPUMP.KU$_FILE_TYPE_LOG_FILE
    );
    -- Apply schema filter (value must be a quoted name list)
    DBMS_DATAPUMP.METADATA_FILTER(
        handle => v_handle,
        name   => 'SCHEMA_LIST',
        value  => '''' || UPPER(v_schema_name) || ''''
    );
    -- Start job
    DBMS_DATAPUMP.START_JOB(
        handle       => v_handle,
        skip_current => 0,
        abort_step   => 0
    );
    DBMS_OUTPUT.PUT_LINE('Started Data Pump job to generate ' || v_sqlfile);
    -- Monitor job until it completes or is stopped
    v_job_state := 'UNDEFINED';
    WHILE v_job_state NOT IN ('COMPLETED', 'STOPPED') LOOP
        DBMS_DATAPUMP.GET_STATUS(
            handle    => v_handle,
            mask      => DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR + DBMS_DATAPUMP.KU$_STATUS_JOB_STATUS,
            timeout   => -1,
            job_state => v_job_state,
            status    => v_status
        );
        IF BITAND(v_status.mask, DBMS_DATAPUMP.KU$_STATUS_JOB_ERROR) != 0 THEN
            v_log_entry := v_status.error;
            IF v_log_entry IS NOT NULL THEN
                v_ind := v_log_entry.FIRST;
                WHILE v_ind IS NOT NULL LOOP
                    DBMS_OUTPUT.PUT_LINE('Error: ' || v_log_entry(v_ind).LogText);
                    v_ind := v_log_entry.NEXT(v_ind);
                END LOOP;
            END IF;
        END IF;
        DBMS_LOCK.SLEEP(1); -- Avoid tight loop
    END LOOP;
    DBMS_OUTPUT.PUT_LINE('Job completed with state: ' || v_job_state);
    DBMS_DATAPUMP.DETACH(v_handle);
EXCEPTION
    WHEN OTHERS THEN
        -- BUG FIX: the original tested "v_handle != 0", but v_handle is NULL
        -- until OPEN succeeds, so the comparison evaluated to NULL and DETACH
        -- was never reached; also the comparison was backwards in spirit —
        -- we must detach only when a handle actually exists.
        IF v_handle IS NOT NULL THEN
            BEGIN
                DBMS_DATAPUMP.DETACH(v_handle);
            EXCEPTION
                WHEN OTHERS THEN
                    NULL; -- Do not let DETACH mask the original error
            END;
        END IF;
        DBMS_OUTPUT.PUT_LINE('Error in Data Pump job: ' || SQLERRM);
        RAISE;
END;
/
Clean Up:
BEGIN
    -- Delete the DDL-preview and log files produced by the SQL_FILE job.
    FOR f IN (
        SELECT filename
        FROM TABLE(RDSADMIN.RDS_FILE_UTIL.LISTDIR('DATA_PUMP_DIR'))
        WHERE type = 'file' AND (filename = 'HEMANT_ddl_preview.sql' OR filename = 'HEMANT_sqlfile_gen.log')
    ) LOOP
        -- Inner block so one failed delete does not abort the loop.
        -- (The original placed an EXCEPTION section directly inside the FOR
        -- loop with no inner BEGIN and no END LOOP, which does not compile.)
        BEGIN
            UTL_FILE.FREMOVE('DATA_PUMP_DIR', f.filename);
            DBMS_OUTPUT.PUT_LINE('File ' || f.filename || ' deleted');
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Error deleting ' || f.filename || ': ' || SQLERRM);
        END;
    END LOOP;
END;
/
No comments:
Post a Comment