-- Generate DROP CONSTRAINT statements for one table.
-- CASCADE is appended to PRIMARY KEY / UNIQUE constraints because Oracle
-- refuses to drop a key that foreign keys still reference (ORA-02273)
-- unless CASCADE is given. Foreign keys ('R') never need CASCADE — the
-- original attached it to the wrong constraint type.
SELECT
    'ALTER TABLE ' || owner || '.' || table_name ||
    ' DROP CONSTRAINT ' || constraint_name ||
    CASE
        WHEN constraint_type IN ('P', 'U') THEN ' CASCADE'
    END || ';' AS drop_command
FROM
    all_constraints
WHERE
    owner = 'SCHEMA_NAME'          -- 🚨 IMPORTANT: Replace 'SCHEMA_NAME'
    AND table_name = 'TABLE_NAME'  -- 🚨 IMPORTANT: Replace 'TABLE_NAME'
    AND constraint_type IN ('P', 'R', 'U', 'C') -- P=PK, R=FK, U=Unique, C=Check
-- Emit FK drops first so running the script top-to-bottom never hits a
-- dependent-constraint error when the PK/UK drops follow.
ORDER BY CASE constraint_type WHEN 'R' THEN 1 ELSE 2 END, constraint_name;
====
-- Split a partitioned table into 5 AWS DMS tasks of roughly equal row
-- counts. Emits one row per partition: its assigned task, a per-partition
-- COUNT(*) verification query, and a DMS selection-rule JSON document that
-- filters by partition name.
WITH part AS (
    -- One row per partition; NVL guards partitions with no optimizer stats.
    SELECT
        table_owner AS schema_name,
        table_name,
        partition_name,
        partition_position,
        NVL(num_rows, 0) AS row_count,
        SUM(NVL(num_rows, 0)) OVER () AS total_rows
    FROM dba_tab_partitions
    WHERE table_owner = 'YOUR_SCHEMA_NAME' AND table_name = 'YOUR_TABLE_NAME'
),
cum AS (
    -- Running row-count total in partition order.
    SELECT
        schema_name,
        table_name,
        partition_name,
        partition_position,
        row_count,
        SUM(row_count) OVER (ORDER BY partition_position) AS cumulative_rows,
        total_rows
    FROM part
),
assigned AS (
    -- Bucket each partition by the midpoint of its cumulative span so a
    -- partition straddling a task boundary lands in the nearer task.
    -- GREATEST(..., 1): leading empty partitions (row_count = 0) would
    -- otherwise compute task_id 0. The literal 5 is the task count — keep
    -- it in sync with the number of DMS tasks you actually run.
    SELECT
        schema_name,
        table_name,
        partition_name,
        partition_position,
        row_count,
        GREATEST(1, CEIL((cumulative_rows - row_count / 2) / (total_rows / 5))) AS task_id
    FROM cum
    WHERE total_rows > 0 -- avoids divide-by-zero on empty / stats-less tables
)
SELECT
    task_id,
    partition_name,
    row_count,
    SUM(row_count) OVER (PARTITION BY task_id) AS task_row_count,
    'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || schema_name || '.' || table_name ||
    ' a PARTITION (' || partition_name || ');' AS count_query,
    '{
"rules": [{
"rule-type": "selection",
"rule-id": "' || task_id || '_' || partition_position || '",
"rule-name": "' || task_id || '_' || partition_name || '",
"object-locator": {
"schema-name": "' || schema_name || '",
"table-name": "' || table_name || '"
},
"rule-action": "include",
"filters": [{
"filter-type": "source",
"partition-name": "' || partition_name || '"
}]
}]
}' AS dms_mapping
FROM assigned
ORDER BY task_id, partition_position;
===
Query 2: split the table into DMS tasks by obj_id ranges (results stored in a temporary table)
-- Scratch table holding the generated task ranges and DMS mappings.
BEGIN
    -- q'[...]' quoting keeps the dynamic DDL readable without doubling
    -- quotes; the statement text itself is unchanged.
    EXECUTE IMMEDIATE q'[
CREATE GLOBAL TEMPORARY TABLE dms_task_ranges (
task_id NUMBER,
start_value NUMBER,
end_value NUMBER,
row_count NUMBER,
count_query VARCHAR2(4000),
dms_mapping CLOB
) ON COMMIT PRESERVE ROWS]';
EXCEPTION
    WHEN OTHERS THEN
        -- ORA-00955 "name is already used": the GTT survives from an
        -- earlier run, which is fine. Anything else propagates.
        IF SQLCODE = -955 THEN
            NULL;
        ELSE
            RAISE;
        END IF;
END;
/
DECLARE
    ----------------------------------------------------------------------
    -- Split SCHEMA.TABLE into v_num_tasks contiguous obj_id ranges with
    -- roughly equal row counts, for parallel AWS DMS full-load tasks.
    -- For each task this stores, in DMS_TASK_RANGES (created by the block
    -- above): the range bounds, a copy-pasteable verification COUNT(*)
    -- query, and the DMS table-mapping JSON document.
    --
    -- Range sources, in order of preference:
    --   1. Partition HIGH_VALUE boundaries (table range-partitioned on
    --      obj_id). NOTE(review): the HIGH_VALUE decode assumes numeric
    --      literals — MAXVALUE or interval partitions would break
    --      TO_NUMBER and drop us into the sampling fallback through the
    --      exception handler.
    --   2. Sampled COUNT(*) over fixed-width obj_id windows.
    ----------------------------------------------------------------------
    v_schema_name    VARCHAR2(30) := 'YOUR_SCHEMA_NAME'; -- Replace with schema
    v_table_name     VARCHAR2(30) := 'YOUR_TABLE_NAME';  -- Replace with table
    v_num_tasks      NUMBER := 5; -- Number of DMS tasks
    v_min_id         NUMBER;
    v_max_id         NUMBER;
    v_total_rows     NUMBER;
    v_target_rows    NUMBER;      -- rows per task (total / tasks, rounded up)
    v_sample_size    NUMBER;      -- obj_id window width for the sampling path
    v_sql            VARCHAR2(4000);
    TYPE range_rec IS RECORD (
        start_id  NUMBER,
        end_id    NUMBER,
        row_count NUMBER
    );
    TYPE range_tab IS TABLE OF range_rec;
    v_ranges         range_tab := range_tab();
    v_current_id     NUMBER;
    v_count          NUMBER;
    v_task_id        NUMBER := 1;
    v_task_rows      NUMBER := 0;
    v_task_start_id  NUMBER;
    v_task_end_id    NUMBER;      -- upper bound of the task being emitted
    v_is_partitioned BOOLEAN := FALSE;
    v_error_msg      VARCHAR2(4000);
    v_gap_count      NUMBER;
BEGIN
    -- Ensure fresh statistics. Best effort: stale stats only degrade the
    -- partition row counts, so a failure here must not abort the run.
    BEGIN
        DBMS_STATS.GATHER_TABLE_STATS(
            ownname     => v_schema_name,
            tabname     => v_table_name,
            granularity => 'PARTITION',
            cascade     => TRUE
        );
        DBMS_OUTPUT.PUT_LINE('Statistics refreshed for ' || v_schema_name || '.' || v_table_name);
    EXCEPTION
        WHEN OTHERS THEN
            v_error_msg := 'Warning: Stats gathering failed - ' || SQLERRM;
            DBMS_OUTPUT.PUT_LINE(v_error_msg);
    END;
    -- Get min/max obj_id and total rows. Nothing below can work without
    -- these, so bail out on failure.
    BEGIN
        EXECUTE IMMEDIATE
            'SELECT MIN(obj_id), MAX(obj_id), COUNT(*) FROM ' || v_schema_name || '.' || v_table_name
            INTO v_min_id, v_max_id, v_total_rows;
        SELECT COUNT(*) INTO v_count
        FROM dba_tab_partitions
        WHERE table_owner = v_schema_name AND table_name = v_table_name;
        v_is_partitioned := v_count > 0;
        DBMS_OUTPUT.PUT_LINE('Table is ' || (CASE WHEN v_is_partitioned THEN 'partitioned' ELSE 'non-partitioned' END));
        DBMS_OUTPUT.PUT_LINE('Total Rows: ' || v_total_rows || ', Min obj_id: ' || v_min_id || ', Max obj_id: ' || v_max_id);
    EXCEPTION
        WHEN OTHERS THEN
            v_error_msg := 'Error: Failed to get table metadata - ' || SQLERRM;
            DBMS_OUTPUT.PUT_LINE(v_error_msg);
            RETURN;
    END;
    -- Calculate target rows per task
    v_target_rows := CEIL(v_total_rows / v_num_tasks);
    DBMS_OUTPUT.PUT_LINE('Target Rows per Task: ' || v_target_rows);
    -- Check for data in a known obj_id gap.
    -- NOTE(review): the 402000001..402200000 bounds look environment
    -- specific — confirm they match a real id gap in your table before
    -- trusting this warning.
    BEGIN
        v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
                 ' a WHERE obj_id BETWEEN 402000001 AND 402200000';
        EXECUTE IMMEDIATE v_sql INTO v_gap_count;
        IF v_gap_count > 0 THEN
            DBMS_OUTPUT.PUT_LINE('Warning: Found ' || v_gap_count || ' rows in gap 402000001–402200000');
        END IF;
    EXCEPTION
        WHEN OTHERS THEN
            DBMS_OUTPUT.PUT_LINE('Warning: Gap check failed - ' || SQLERRM);
    END;
    IF v_is_partitioned THEN
        -- Try to use partition boundaries as ready-made ranges.
        DECLARE
            TYPE part_rec IS RECORD (
                start_id  NUMBER,
                end_id    NUMBER,
                row_count NUMBER
            );
            TYPE part_tab IS TABLE OF part_rec;
            v_partitions part_tab;
        BEGIN
            -- HIGH_VALUE is a LONG column; DBMS_XMLGEN + EXTRACTVALUE is
            -- the standard SQL-level workaround to read it as text. Each
            -- partition's range starts one past the previous HIGH_VALUE.
            SELECT
                CASE WHEN partition_position = 1 THEN v_min_id
                ELSE LAG(TO_NUMBER(EXTRACTVALUE(DBMS_XMLGEN.GETXMLTYPE(
                    'SELECT high_value FROM dba_tab_partitions WHERE table_owner = ''' || table_owner ||
                    ''' AND table_name = ''' || table_name ||
                    ''' AND partition_name = ''' || partition_name || ''''),
                    '//text()'))) OVER (ORDER BY partition_position) + 1
                END,
                TO_NUMBER(EXTRACTVALUE(DBMS_XMLGEN.GETXMLTYPE(
                    'SELECT high_value FROM dba_tab_partitions WHERE table_owner = ''' || table_owner ||
                    ''' AND table_name = ''' || table_name ||
                    ''' AND partition_name = ''' || partition_name || ''''),
                    '//text()')),
                NVL(num_rows, 0)
            BULK COLLECT INTO v_partitions
            FROM dba_tab_partitions
            WHERE table_owner = v_schema_name AND table_name = v_table_name
            ORDER BY partition_position;
            IF v_partitions.COUNT > 0 THEN
                FOR i IN 1..v_partitions.COUNT LOOP
                    v_ranges.EXTEND;
                    -- Field-by-field assignment: positional record
                    -- constructors require Oracle 18c or later.
                    v_ranges(v_ranges.COUNT).start_id  := v_partitions(i).start_id;
                    v_ranges(v_ranges.COUNT).end_id    := v_partitions(i).end_id;
                    v_ranges(v_ranges.COUNT).row_count := v_partitions(i).row_count;
                END LOOP;
                DBMS_OUTPUT.PUT_LINE('Using ' || v_partitions.COUNT || ' partition boundaries');
            END IF;
        EXCEPTION
            WHEN OTHERS THEN
                DBMS_OUTPUT.PUT_LINE('Warning: Failed to use partition boundaries - ' || SQLERRM);
                v_is_partitioned := FALSE;
        END;
    END IF;
    -- Fall back to sampling if not partitioned or partition boundaries fail
    IF NOT v_is_partitioned OR v_ranges.COUNT = 0 THEN
        -- Dynamic sample size: ~0.5% of ID range or 100,000, whichever is larger
        v_sample_size := GREATEST(100000, CEIL((v_max_id - v_min_id) / 200));
        DBMS_OUTPUT.PUT_LINE('Sampling with size: ' || v_sample_size);
        v_current_id := v_min_id;
        WHILE v_current_id <= v_max_id LOOP
            v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
                     ' a WHERE obj_id BETWEEN :1 AND :2';
            BEGIN
                EXECUTE IMMEDIATE v_sql INTO v_count
                    USING v_current_id, LEAST(v_current_id + v_sample_size - 1, v_max_id);
                v_ranges.EXTEND;
                v_ranges(v_ranges.COUNT).start_id  := v_current_id;
                v_ranges(v_ranges.COUNT).end_id    := LEAST(v_current_id + v_sample_size - 1, v_max_id);
                v_ranges(v_ranges.COUNT).row_count := v_count;
            EXCEPTION
                WHEN OTHERS THEN
                    DBMS_OUTPUT.PUT_LINE('Error sampling range ' || v_current_id || ': ' || SQLERRM);
            END;
            -- Advance OUTSIDE the inner block: the original incremented
            -- before the exception handler, so a failed sample left
            -- v_current_id unchanged and the WHILE loop spun forever.
            v_current_id := v_current_id + v_sample_size;
        END LOOP;
    END IF;
    -- Distribute ranges into exactly v_num_tasks tasks. Tasks 1..n-1 close
    -- once they reach the target size; the final task absorbs everything
    -- remaining, so no obj_id is left unassigned and its row_count stays
    -- accurate (the original closed task n on its first range instead).
    v_task_start_id := v_min_id;
    v_task_rows := 0;
    FOR i IN 1..v_ranges.COUNT LOOP
        v_task_rows := v_task_rows + v_ranges(i).row_count;
        IF (v_task_rows >= v_target_rows AND v_task_id < v_num_tasks)
           OR i = v_ranges.COUNT THEN
            v_task_end_id := CASE WHEN v_task_id = v_num_tasks THEN v_max_id
                                  ELSE v_ranges(i).end_id END;
            -- count_query keeps a trailing ';' because it is stored for
            -- copy-paste into SQL*Plus; it is never executed here.
            v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
                     ' a WHERE obj_id BETWEEN ' || v_task_start_id || ' AND ' || v_task_end_id || ';';
            INSERT INTO dms_task_ranges (
                task_id, start_value, end_value, row_count, count_query, dms_mapping
            )
            VALUES (
                v_task_id,
                v_task_start_id,
                v_task_end_id,
                v_task_rows,
                v_sql,
                '{
"rules": [{
"rule-type": "selection",
"rule-id": "' || v_task_id || '",
"rule-name": "' || v_task_id || '",
"object-locator": {
"schema-name": "' || v_schema_name || '",
"table-name": "' || v_table_name || '"
},
"rule-action": "include",
"filters": [{
"filter-type": "source",
"column-name": "obj_id",
"filter-conditions": [{
"filter-operator": "between",
"start-value": "' || v_task_start_id || '",
"end-value": "' || v_task_end_id || '"
}]
}]
}]
}'
            );
            v_task_id := v_task_id + 1;
            v_task_rows := 0;
            v_task_start_id := v_ranges(i).end_id + 1;
            EXIT WHEN v_task_id > v_num_tasks;
        END IF;
    END LOOP;
    -- Safety net: if fewer tasks than expected were emitted and ids remain
    -- uncovered, collect the remainder into one final task.
    IF v_task_id <= v_num_tasks AND v_task_start_id <= v_max_id THEN
        -- Build the executable statement WITHOUT a trailing ';': EXECUTE
        -- IMMEDIATE rejects one with ORA-00911 (the original appended ';'
        -- before executing, so this branch always failed).
        v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
                 ' a WHERE obj_id BETWEEN ' || v_task_start_id || ' AND ' || v_max_id;
        EXECUTE IMMEDIATE v_sql INTO v_count;
        INSERT INTO dms_task_ranges (
            task_id, start_value, end_value, row_count, count_query, dms_mapping
        )
        VALUES (
            v_task_id,
            v_task_start_id,
            v_max_id,
            v_count,
            v_sql || ';', -- stored form keeps the copy-paste-friendly ';'
            '{
"rules": [{
"rule-type": "selection",
"rule-id": "' || v_task_id || '",
"rule-name": "' || v_task_id || '",
"object-locator": {
"schema-name": "' || v_schema_name || '",
"table-name": "' || v_table_name || '"
},
"rule-action": "include",
"filters": [{
"filter-type": "source",
"column-name": "obj_id",
"filter-conditions": [{
"filter-operator": "between",
"start-value": "' || v_task_start_id || '",
"end-value": "' || v_max_id || '"
}]
}]
}]
}'
        );
    END IF;
    COMMIT;
    -- Output results
    DBMS_OUTPUT.PUT_LINE('Results stored in DMS_TASK_RANGES table.');
    DBMS_OUTPUT.PUT_LINE('Query with: SELECT task_id, start_value, end_value, row_count, count_query FROM dms_task_ranges ORDER BY task_id;');
EXCEPTION
    WHEN OTHERS THEN
        v_error_msg := 'Fatal Error: ' || SQLERRM;
        DBMS_OUTPUT.PUT_LINE(v_error_msg);
END;
/
-- Query to view results
-- DMS_TASK_RANGES is a global temporary table created ON COMMIT PRESERVE
-- ROWS, so the rows inserted by the block above last only for the current
-- session — run this SELECT from the same session that populated it.
SELECT
task_id,
start_value,
end_value,
row_count,
count_query,
dms_mapping
FROM dms_task_ranges
ORDER BY task_id;
===
DECLARE
    ----------------------------------------------------------------------
    -- Sanity check: count the rows in each supplied obj_id range (e.g. the
    -- ranges assigned to DMS tasks) and print a small report.
    -- The &1..&10 placeholders are SQL*Plus substitution variables and are
    -- prompted for at run time; replace them with literals if you are not
    -- running under SQL*Plus / SQLcl.
    -- NOTE(review): the range_rec(...) constructor syntax (a qualified
    -- expression) requires Oracle 18c or later.
    ----------------------------------------------------------------------
    v_schema_name VARCHAR2(30) := 'YOUR_SCHEMA_NAME'; -- Replace with your schema
    v_table_name  VARCHAR2(30) := 'YOUR_TABLE_NAME';  -- Replace with your table
    TYPE range_rec IS RECORD (
        start_value NUMBER,
        end_value   NUMBER
    );
    TYPE range_tab IS TABLE OF range_rec;
    v_ranges range_tab := range_tab(
        range_rec(&1, &2), -- Replace with your ranges, e.g., range_rec(1, 250000)
        range_rec(&3, &4), -- Add more as needed
        range_rec(&5, &6),
        range_rec(&7, &8),
        range_rec(&9, &10)
    );
    v_count NUMBER;
    v_sql   VARCHAR2(4000);
BEGIN
    -- The statement text is loop-invariant (bind variables carry the range
    -- bounds), so build it once instead of re-concatenating per iteration.
    v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
             ' a WHERE obj_id BETWEEN :1 AND :2';
    DBMS_OUTPUT.PUT_LINE('Range Start | Range End | Row Count');
    DBMS_OUTPUT.PUT_LINE('------------|-----------|----------');
    FOR i IN 1..v_ranges.COUNT LOOP
        EXECUTE IMMEDIATE v_sql INTO v_count USING v_ranges(i).start_value, v_ranges(i).end_value;
        DBMS_OUTPUT.PUT_LINE(
            RPAD(v_ranges(i).start_value, 11) || ' | ' ||
            RPAD(v_ranges(i).end_value, 9) || ' | ' || v_count
        );
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
=========
-- Ensure fresh statistics
-- The num_rows values read from dba_tab_partitions below come from these
-- optimizer statistics; granularity => 'PARTITION' limits the work to
-- partition-level stats.
-- NOTE(review): unlike the splitter block, this call passes no cascade
-- parameter, so index-stats gathering follows the DBMS_STATS default —
-- confirm that is intended.
BEGIN
DBMS_STATS.GATHER_TABLE_STATS(
ownname => 'YOUR_SCHEMA_NAME', -- Replace with your schema
tabname => 'YOUR_TABLE_NAME', -- Replace with your table
granularity => 'PARTITION'
);
END;
/
-- Get partition details and approximate row counts.
-- (num_rows comes from optimizer statistics — gather them first for
-- accuracy.)
--
-- Rewritten without GROUP BY: the original grouped on p.high_value, which
-- is a LONG column and illegal in GROUP BY (ORA-00997), and ordered by
-- p.partition_position, which was not a GROUP BY expression (ORA-00979) —
-- so the query could not run at all. A correlated scalar subquery sums the
-- segment size per partition instead.
SELECT
    p.table_owner AS schema_name,
    p.table_name,
    p.partition_name,
    p.high_value AS partition_range,
    p.num_rows AS approx_row_count,
    ROUND((SELECT SUM(s.bytes)
           FROM dba_segments s
           WHERE s.owner = p.table_owner
             AND s.segment_name = p.table_name
             AND s.partition_name = p.partition_name
          ) / 1024 / 1024 / 1024, 2) AS approx_size_gb
FROM
    dba_tab_partitions p
WHERE
    p.table_owner = 'YOUR_SCHEMA_NAME'
    AND p.table_name = 'YOUR_TABLE_NAME'
ORDER BY
    p.partition_position;
===========
DECLARE
    ----------------------------------------------------------------------
    -- Print obj_id range boundaries and DMS JSON mappings that split
    -- SCHEMA.TABLE into v_num_tasks tasks of roughly equal row counts,
    -- based on sampled COUNT(*) over fixed-width obj_id windows.
    -- (Prototype of the DMS_TASK_RANGES-based block above; output goes to
    -- DBMS_OUTPUT only.)
    ----------------------------------------------------------------------
    v_schema_name VARCHAR2(30) := 'YOUR_SCHEMA_NAME'; -- Replace with schema
    v_table_name  VARCHAR2(30) := 'YOUR_TABLE_NAME';  -- Replace with table
    v_num_tasks   NUMBER := 5; -- Number of DMS tasks
    c_chunk_size  CONSTANT NUMBER := 100000; -- sampling window width (ids)
    v_min_id      NUMBER;
    v_max_id      NUMBER;
    v_total_rows  NUMBER;
    v_target_rows NUMBER;
    v_sql         VARCHAR2(4000);
    TYPE range_rec IS RECORD (
        start_id  NUMBER,
        end_id    NUMBER,
        row_count NUMBER
    );
    TYPE range_tab IS TABLE OF range_rec;
    v_ranges     range_tab := range_tab();
    v_current_id NUMBER;
    v_count      NUMBER;
    v_task_id    NUMBER;
BEGIN
    -- Get min and max obj_id plus the total row count.
    EXECUTE IMMEDIATE
        'SELECT MIN(obj_id), MAX(obj_id), COUNT(*) FROM ' || v_schema_name || '.' || v_table_name
        INTO v_min_id, v_max_id, v_total_rows;
    -- Target rows per task
    v_target_rows := CEIL(v_total_rows / v_num_tasks);
    -- Sample row counts per c_chunk_size-wide window. The statement text is
    -- loop-invariant (bind variables carry the bounds), so build it once.
    v_sql := 'SELECT /*+ parallel (a,24) */ COUNT(*) FROM ' || v_schema_name || '.' || v_table_name ||
             ' a WHERE obj_id BETWEEN :1 AND :2';
    v_current_id := v_min_id;
    WHILE v_current_id <= v_max_id LOOP
        EXECUTE IMMEDIATE v_sql INTO v_count
            USING v_current_id, LEAST(v_current_id + c_chunk_size - 1, v_max_id);
        v_ranges.EXTEND;
        -- Field-by-field assignment: positional record constructors
        -- require Oracle 18c or later.
        v_ranges(v_ranges.COUNT).start_id  := v_current_id;
        v_ranges(v_ranges.COUNT).end_id    := LEAST(v_current_id + c_chunk_size - 1, v_max_id);
        v_ranges(v_ranges.COUNT).row_count := v_count;
        v_current_id := v_current_id + c_chunk_size;
    END LOOP;
    -- Distribute windows into tasks. Tasks 1..n-1 close once they reach the
    -- target size; the last task absorbs all remaining windows. The
    -- original exited as soon as v_num_tasks tasks were printed, silently
    -- leaving any trailing obj_id ranges unassigned — fatal for a
    -- migration split.
    DBMS_OUTPUT.PUT_LINE('Task | Start ID | End ID | Row Count');
    DBMS_OUTPUT.PUT_LINE('-----|----------|--------|----------');
    v_task_id := 1;
    v_count := 0;             -- rows accumulated for the current task
    v_current_id := v_min_id; -- start id of the current task
    FOR i IN 1..v_ranges.COUNT LOOP
        v_count := v_count + v_ranges(i).row_count;
        IF (v_count >= v_target_rows AND v_task_id < v_num_tasks)
           OR i = v_ranges.COUNT THEN
            DBMS_OUTPUT.PUT_LINE(
                'Task ' || v_task_id || ' | ' ||
                RPAD(v_current_id, 8) || ' | ' ||
                RPAD(v_ranges(i).end_id, 6) || ' | ' || v_count
            );
            -- Output DMS JSON mapping
            DBMS_OUTPUT.PUT_LINE('DMS Mapping for Task ' || v_task_id || ':');
            DBMS_OUTPUT.PUT_LINE('{
"rules": [{
"rule-type": "selection",
"rule-id": "' || v_task_id || '",
"rule-name": "' || v_task_id || '",
"object-locator": {
"schema-name": "' || v_schema_name || '",
"table-name": "' || v_table_name || '"
},
"rule-action": "include",
"filters": [{
"filter-type": "source",
"column-name": "obj_id",
"filter-conditions": [{
"filter-operator": "between",
"start-value": "' || v_current_id || '",
"end-value": "' || v_ranges(i).end_id || '"
}]
}]
}]
}');
            v_task_id := v_task_id + 1;
            v_count := 0;
            v_current_id := v_ranges(i).end_id + 1;
        END IF;
    END LOOP;
EXCEPTION
    WHEN OTHERS THEN
        DBMS_OUTPUT.PUT_LINE('Error: ' || SQLERRM);
END;
/
No comments:
Post a Comment