Sunday, December 7, 2025

3 Ways to Handle Oracle Object ID Drift

Handling Oracle Object ID Drift: A Comprehensive Guide

If you maintain a metadata table (e.g., DB_OBJECT_ID_CAPTURED) that tracks Oracle Object IDs, you know the struggle. Every time partition maintenance occurs—splits, merges, truncates—the DATA_OBJECT_ID changes. If your downstream jobs rely on these static IDs, they fail.

Keeping this table in sync manually is error-prone. You need a way to filter out logical "Composite" partitions and track only the physical segments that hold data.

Here are the three industry-standard solutions to solve this, ranked from the best architectural fix to the best maintenance script.


Solution 1: The Architectural Fix (The View)

Best for: Systems where historical ID tracking is not required. The Concept: Instead of fighting to keep a physical table in sync, replace it with a View. The View passes the query directly to the live system dictionary (ALL_OBJECTS), ensuring your IDs are always 100% real-time.

SQL
-- 1. Backup the old table (keeps the historical IDs around for later comparison)
ALTER TABLE db_object_id_captured RENAME TO db_object_id_captured_old;

-- 2. Create the View (Replaces the Table)
--    The view delegates straight to ALL_OBJECTS, so the IDs it exposes can never drift.
CREATE OR REPLACE VIEW db_object_id_captured AS
SELECT 
    279 AS PUR_ID, -- NOTE: Hardcoded Group ID (Adjust as needed)
    object_id, 
    data_object_id, 
    object_name AS table_name, 
    subobject_name AS partition_name, -- holds partition OR subpartition name
    object_type,
    owner
FROM all_objects 
WHERE owner = 'YOUR_SCHEMA'
  AND object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
  -- This filter ensures we only see physical data segments (Composite = NO)
  AND data_object_id IS NOT NULL; 

Solution 2: The Real-Time Fix (The Trigger)

Best for: Applications that require a physical table but need instant updates. The Concept: A DDL Trigger fires the millisecond a CREATE or ALTER command finishes, instantly updating your capture table.

SQL
CREATE OR REPLACE TRIGGER trg_sync_object_ids
AFTER DDL ON SCHEMA
-- Keeps DB_OBJECT_ID_CAPTURED in sync the moment a DDL statement completes.
-- Scope: physical table segments only (table / partition / subpartition).
DECLARE
    v_obj_name VARCHAR2(128);
    v_obj_type VARCHAR2(128);
BEGIN
    v_obj_name := ora_dict_obj_name;
    v_obj_type := ora_dict_obj_type;

    -- FIX: 'INDEX' removed from the list — index names would otherwise be
    -- MERGEd into TABLE_NAME and corrupt the capture table.
    IF v_obj_type IN ('TABLE','TABLE PARTITION','TABLE SUBPARTITION') THEN
        MERGE INTO db_object_id_captured target
        USING (
            SELECT object_id, data_object_id, object_name, subobject_name, object_type
            FROM all_objects
            WHERE object_name = v_obj_name
              AND owner = ora_dict_obj_owner
              -- FIX: same COMPOSITE='NO' rule as the batch script — track only
              -- physical segments, never logical composite parents.
              AND object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
              AND data_object_id IS NOT NULL
        ) source
        -- FIX: match on pur_id and object_type too, so rows belonging to other
        -- groups (or a partition vs. subpartition of the same name) are untouched.
        ON (target.pur_id = 279
            AND target.table_name = source.object_name 
            AND NVL(target.partition_name, '###') = NVL(source.subobject_name, '###')
            AND target.object_type = source.object_type)
        WHEN MATCHED THEN
            UPDATE SET target.object_id = source.object_id, 
                       target.data_object_id = source.data_object_id
        WHEN NOT MATCHED THEN
            INSERT (pur_id, table_name, partition_name, object_type, object_id, data_object_id)
            VALUES (279, source.object_name, source.subobject_name, source.object_type, source.object_id, source.data_object_id);
    END IF;
END;
/

Solution 3: The Batch Fix (The Master Script)

Best for: Controlled environments where you want to review drift before applying changes. The Concept: A robust PL/SQL block with two modes: Preview (Health Check) and Execute (Apply). It handles the complex logic of matching Partitions vs. Subpartitions and excluding Composite parents.

SQL
-- Batch drift handler: PREVIEW mode prints differences only; EXECUTE mode applies them via MERGE.
SET SERVEROUTPUT ON;

DECLARE
    -- CONFIGURATION
    v_pur_id          NUMBER       := 279;             
    v_schema_owner    VARCHAR2(50) := 'YOUR_SCHEMA';   
    v_table_filter    VARCHAR2(50) := NULL; -- Set NULL for ALL tables
    
    -- MODE: FALSE = Preview (Health Check), TRUE = Execute (Apply Changes)
    v_apply_changes   BOOLEAN      := FALSE;           
    v_rows_merged     NUMBER       := 0;
BEGIN
    DBMS_OUTPUT.PUT_LINE('MODE: ' || CASE WHEN v_apply_changes THEN 'EXECUTE' ELSE 'PREVIEW' END);

    IF NOT v_apply_changes THEN
        -- PREVIEW LOGIC: full outer join live dictionary vs captured table,
        -- report only rows that differ (ORPHAN / NEW / DRIFT).
        FOR r IN (
            WITH captured_db AS (
                SELECT table_name, partition_name, object_id, data_object_id
                FROM db_object_id_captured
                WHERE pur_id = v_pur_id
                  AND (v_table_filter IS NULL OR table_name = v_table_filter)
            ),
            live_db AS (
                SELECT o.object_name, o.subobject_name, o.object_type, o.object_id, o.data_object_id
                FROM all_objects o
                WHERE o.owner = v_schema_owner
                  AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
                  AND o.data_object_id IS NOT NULL -- Filters out Composite Partitions
                  AND o.object_name IN (SELECT DISTINCT table_name FROM captured_db)
            )
            SELECT 
                NVL(live.object_name, cap.table_name) AS t_name,
                NVL(live.subobject_name, cap.partition_name) AS p_name,
                CASE 
                    WHEN live.object_id IS NULL THEN 'ORPHAN (Safe to Delete)'
                    WHEN cap.object_id IS NULL THEN 'NEW (Will Insert)'
                    WHEN live.object_id != cap.object_id OR 
                         NVL(live.data_object_id,-1) != NVL(cap.data_object_id,-1) THEN 'DRIFT (Will Update)'
                    ELSE 'SYNCED'
                END AS status,
                cap.object_id as old_id, live.object_id as new_id
            FROM live_db live
            FULL OUTER JOIN captured_db cap
              ON live.object_name = cap.table_name
              AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
            WHERE (live.object_id IS NULL) OR (cap.object_id IS NULL) 
               OR (live.object_id != cap.object_id) 
               OR (NVL(live.data_object_id,-1) != NVL(cap.data_object_id,-1))
            ORDER BY 1, 2
        ) LOOP
            DBMS_OUTPUT.PUT_LINE('[' || r.status || '] ' || r.t_name || ' : ' || r.p_name || 
                                 ' (Old: ' || r.old_id || ' -> New: ' || r.new_id || ')');
        END LOOP;
    ELSE
        -- EXECUTE LOGIC (MERGE): inserts NEW rows, updates DRIFT rows.
        -- Note: ORPHAN rows are reported in preview but NOT deleted here.
        MERGE INTO db_object_id_captured target
        USING (
            SELECT v_pur_id AS pur_id, o.object_id, o.data_object_id, o.object_name, o.subobject_name, o.object_type
            FROM all_objects o
            WHERE o.owner = v_schema_owner
              AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
              AND o.data_object_id IS NOT NULL
              AND (v_table_filter IS NULL OR o.object_name = v_table_filter)
              AND o.object_name IN (SELECT DISTINCT table_name FROM db_object_id_captured WHERE pur_id = v_pur_id)
        ) source
        ON (
            target.pur_id          = source.pur_id
            AND target.table_name  = source.object_name
            AND NVL(target.partition_name, '###') = NVL(source.subobject_name, '###')
            AND target.object_type = source.object_type
        )
        WHEN MATCHED THEN
            UPDATE SET target.object_id = source.object_id, 
                       target.data_object_id = source.data_object_id
            -- DECODE treats two NULLs as equal, so unchanged rows are skipped (NULL-safe drift test)
            WHERE DECODE(target.object_id, source.object_id, 0, 1) = 1 
               OR DECODE(target.data_object_id, source.data_object_id, 0, 1) = 1
        WHEN NOT MATCHED THEN
            INSERT (pur_id, table_name, partition_name, object_type, object_id, data_object_id)
            VALUES (source.pur_id, source.object_name, source.subobject_name, source.object_type, source.object_id, source.data_object_id);
            
        v_rows_merged := SQL%ROWCOUNT;
        COMMIT;
        DBMS_OUTPUT.PUT_LINE('SUCCESS. Rows Synced: ' || v_rows_merged);
    END IF;
END;
/

Appendix: Manual Verification Cheat Sheet

If you need to verify a specific table manually, run these 3 statements in order.

1. Check Live Data (Source)

SQL
-- Live dictionary values for one table (physical segments only).
SELECT o.OBJECT_ID,
       o.DATA_OBJECT_ID,
       o.OBJECT_NAME,
       o.SUBOBJECT_NAME
FROM ALL_OBJECTS o
WHERE o.OWNER = 'YOUR_SCHEMA_NAME'
  AND o.OBJECT_NAME = 'YOUR_TABLE_NAME'
  AND o.DATA_OBJECT_ID IS NOT NULL -- excludes logical composite parents
ORDER BY o.OBJECT_NAME, o.SUBOBJECT_NAME;

2. Check Captured Data (Target)

SQL
-- Captured (target) values for one table — the OLD IDs.
-- NOTE(review): SELECT * is acceptable for this ad-hoc check; list columns
-- explicitly if this query is ever embedded in code.
SELECT * FROM DB_OBJECT_ID_CAPTURED 
WHERE PUR_ID = 279 AND TABLE_NAME = 'YOUR_TABLE_NAME';

3. Run the Safe Update

SQL
-- Template: substitute every <placeholder> before running (values from steps 1 and 2).
UPDATE DB_OBJECT_ID_CAPTURED
SET OBJECT_ID      = <NEW_OBJECT_ID>,
    DATA_OBJECT_ID = <NEW_DATA_OBJECT_ID>
WHERE PUR_ID = 279
  AND TABLE_NAME = 'YOUR_TABLE_NAME'
  -- Identify the (sub)partition; NVL handles non-partitioned tables
  AND NVL(PARTITION_NAME, '###') = NVL('<PARTITION_NAME>', '###')
  -- Lock the type so a partition row is never confused with a subpartition row
  AND OBJECT_TYPE = '<OBJECT_TYPE>'
  -- Safety Check: only update if the row still holds the expected OLD values
  AND OBJECT_ID = <OLD_OBJECT_ID>
  AND NVL(DATA_OBJECT_ID, -1) = NVL(<OLD_DATA_OBJECT_ID>, -1);

all_objects

 

Here are the 3 individual, standardized SQL statements.

These use the "Best Practice" logic we established:

  1. Composite Exclusion: Uses DATA_OBJECT_ID IS NOT NULL (Much faster/cleaner than joining ALL_TAB_PARTITIONS).

  2. Subpartition Support: Automatically includes TABLE SUBPARTITION.

  3. Safety: The Update statement includes the Old Values and Object Type to ensure you never update the wrong row.

1. The "Live Data" Query (Source)

Run this to get the New values from the database.

  • Note: This replaces your complex UNION ALL query. It automatically filters out logical "Parent" partitions (Composite=YES) by ensuring DATA_OBJECT_ID exists.

-- Live-side lookup: physical segments (table / partition / subpartition) only.
SELECT
    o.OBJECT_ID,
    o.DATA_OBJECT_ID,
    o.OBJECT_NAME    AS TABLE_NAME,
    o.SUBOBJECT_NAME AS PARTITION_NAME, -- partition or subpartition name
    o.OBJECT_TYPE
FROM ALL_OBJECTS o
WHERE o.OBJECT_TYPE IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
  AND o.OWNER = 'YOUR_SCHEMA_NAME'       -- (1) change owner
  AND o.OBJECT_NAME = 'YOUR_TABLE_NAME'  -- (2) change table name
  AND o.DATA_OBJECT_ID IS NOT NULL       -- (3) equivalent to COMPOSITE = 'NO'
ORDER BY o.OBJECT_NAME, o.OBJECT_TYPE, o.SUBOBJECT_NAME;


2. The "Captured Data" Query (Target)

Run this to see what you currently have stored (the Old values).

-- Captured-side lookup — the OLD values currently stored for this group/table.
-- NOTE(review): SELECT * is fine for a manual check; name the columns if reused in code.
SELECT * FROM DB_OBJECT_ID_CAPTURED 
WHERE PUR_ID = 279 
  AND TABLE_NAME = 'YOUR_TABLE_NAME'
ORDER BY 
    TABLE_NAME, 
    OBJECT_TYPE, 
    PARTITION_NAME;

3. The "Fix" Statement (Update)

Use the values from Statement 1 (Live) to update Statement 2 (Target).

  • Replace the placeholders <...> with the actual numbers/names.


-- Template: substitute every <placeholder> with the values from Steps 1 and 2 before running.
UPDATE DB_OBJECT_ID_CAPTURED
SET OBJECT_ID      = <NEW_OBJECT_ID_FROM_STEP_1>,
    DATA_OBJECT_ID = <NEW_DATA_OBJECT_ID_FROM_STEP_1>
WHERE PUR_ID = 279
  AND TABLE_NAME = 'YOUR_TABLE_NAME'
  
  -- 1. IDENTIFY THE PARTITION (Handle NULLs for non-partitioned tables)
  AND NVL(PARTITION_NAME, '###') = NVL('<PARTITION_NAME_FROM_STEP_1>', '###')
  
  -- 2. LOCK THE TYPE (Ensures you don't mix up Partition vs Subpartition)
  AND OBJECT_TYPE = '<OBJECT_TYPE_FROM_STEP_1>' 
  -- 3. SAFETY CHECK (Only update if it matches the OLD values from Step 2)
  -- NOTE(review): if the stored DATA_OBJECT_ID can be NULL, rewrite the last
  -- predicate as NVL(DATA_OBJECT_ID,-1) = NVL(<...>,-1), otherwise it matches no rows.
  AND OBJECT_ID = <OLD_OBJECT_ID_FROM_STEP_2>
  AND DATA_OBJECT_ID = <OLD_DATA_OBJECT_ID_FROM_STEP_2>;

=================================================

Master Script:

/*

This is the most seamless approach. We will wrap the logic in a standard PL/SQL block.

How this works:

  1. Run it as is: It acts exactly like your "Health Check" query (Preview Mode). It prints the changes to the DBMS_OUTPUT window.

  2. Change one word: Change v_apply_changes to TRUE and run it again. It executes the Merge.

This guarantees that what you see in the report is exactly what gets applied.

The "All-in-One" Script

*/
-- "All-in-One" drift script: Part A previews differences, Part B applies them via MERGE.
SET SERVEROUTPUT ON;

DECLARE
    -- =============================================================
    -- 1. CONFIGURATION SECTION
    -- =============================================================
    v_pur_id         NUMBER       := 279;             -- Your Group ID
    v_schema_owner   VARCHAR2(50) := 'YOUR_SCHEMA';   -- Your Schema
    
    -- OPTIONAL FILTERS (Leave NULL to ignore)
    v_table_filter   VARCHAR2(50) := NULL;            -- e.g. 'MY_BIG_TABLE'
    v_part_filter    VARCHAR2(50) := NULL;            -- e.g. 'P_2025_JAN'
    
    -- ACTION: FALSE = Preview, TRUE = Execute
    v_apply_changes  BOOLEAN      := FALSE;           
    -- =============================================================

    v_rows_merged    NUMBER := 0;
BEGIN
    DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');
    DBMS_OUTPUT.PUT_LINE('MODE:      ' || CASE WHEN v_apply_changes THEN 'EXECUTE' ELSE 'PREVIEW' END);
    DBMS_OUTPUT.PUT_LINE('TABLE:     ' || NVL(v_table_filter, 'ALL'));
    DBMS_OUTPUT.PUT_LINE('PARTITION: ' || NVL(v_part_filter, 'ALL'));
    DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');

    -- =============================================================
    -- PART A: PREVIEW MODE (read-only; prints ORPHAN / NEW / DRIFT rows)
    -- =============================================================
    IF NOT v_apply_changes THEN
        FOR r IN (
            WITH captured_db AS (
                SELECT table_name, partition_name, object_id, data_object_id
                FROM db_object_id_captured
                WHERE pur_id = v_pur_id
                  AND (v_table_filter IS NULL OR table_name = v_table_filter)
                  AND (v_part_filter IS NULL OR partition_name = v_part_filter)
            ),
            live_db AS (
                SELECT o.object_name, o.subobject_name, o.object_type, o.object_id, o.data_object_id
                FROM all_objects o
                WHERE o.owner = v_schema_owner
                  AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
                  AND o.data_object_id IS NOT NULL -- COMPOSITE=NO Check
                  
                  -- Filters
                  AND (v_table_filter IS NULL OR o.object_name = v_table_filter)
                  AND (v_part_filter IS NULL OR o.subobject_name = v_part_filter)

                  -- Safety Join
                  AND o.object_name IN (SELECT DISTINCT table_name FROM captured_db)
            )
            SELECT 
                NVL(live.object_name, cap.table_name) AS t_name,
                NVL(live.subobject_name, cap.partition_name) AS p_name,
                CASE 
                    WHEN live.object_id IS NULL THEN 'ORPHAN (Safe to Delete)'
                    WHEN cap.object_id IS NULL THEN 'NEW (Will Insert)'
                    WHEN live.object_id != cap.object_id OR 
                         NVL(live.data_object_id,-1) != NVL(cap.data_object_id,-1) THEN 'DRIFT (Will Update)'
                    ELSE 'SYNCED'
                END AS status,
                cap.object_id as old_id, live.object_id as new_id
            FROM live_db live
            FULL OUTER JOIN captured_db cap
              ON live.object_name = cap.table_name
              AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
            WHERE (live.object_id IS NULL) OR (cap.object_id IS NULL) 
               OR (live.object_id != cap.object_id) 
               OR (NVL(live.data_object_id,-1) != NVL(cap.data_object_id,-1))
            ORDER BY 1, 2
        ) LOOP
            DBMS_OUTPUT.PUT_LINE('[' || r.status || '] ' || r.t_name || ' : ' || r.p_name || 
                                 ' (Old: ' || r.old_id || ' -> New: ' || r.new_id || ')');
        END LOOP;
        
        DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');

    -- =============================================================
    -- PART B: EXECUTE MODE (inserts NEW, updates DRIFT; does NOT delete ORPHANs)
    -- =============================================================
    ELSE
        MERGE INTO db_object_id_captured target
        USING (
            SELECT 
                v_pur_id AS pur_id,
                o.object_id, 
                o.data_object_id, 
                o.object_name, 
                o.subobject_name, 
                o.object_type
            FROM all_objects o
            WHERE o.owner = v_schema_owner
              AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
              AND o.data_object_id IS NOT NULL 
              
              -- Apply Filters
              AND (v_table_filter IS NULL OR o.object_name = v_table_filter)
              AND (v_part_filter IS NULL OR o.subobject_name = v_part_filter)

              AND o.object_name IN (
                  SELECT DISTINCT table_name FROM db_object_id_captured WHERE pur_id = v_pur_id
              )
        ) source
        ON (
            target.pur_id          = source.pur_id
            AND target.table_name  = source.object_name
            AND NVL(target.partition_name, '###') = NVL(source.subobject_name, '###')
            AND target.object_type = source.object_type
        )
        WHEN MATCHED THEN
            UPDATE SET 
                target.object_id      = source.object_id,
                target.data_object_id = source.data_object_id,
                -- NOTE(review): requires a LAST_UPDATED column on db_object_id_captured;
                -- the earlier scripts in this post do not reference it — confirm it exists.
                target.last_updated   = SYSDATE
            -- DECODE treats two NULLs as equal → NULL-safe "only if drifted" guard
            WHERE DECODE(target.object_id, source.object_id, 0, 1) = 1 
               OR DECODE(target.data_object_id, source.data_object_id, 0, 1) = 1
        WHEN NOT MATCHED THEN
            INSERT (pur_id, table_name, partition_name, object_type, object_id, data_object_id, last_updated)
            VALUES (source.pur_id, source.object_name, source.subobject_name, source.object_type, source.object_id, source.data_object_id, SYSDATE);
            
        v_rows_merged := SQL%ROWCOUNT;
        COMMIT;
        DBMS_OUTPUT.PUT_LINE('SUCCESS. Rows Synced: ' || v_rows_merged);
    END IF;
END;
/

=======================================================

Script 2

/* Here is the updated Robust Health Check script.

I have decoupled the filter logic. Now, the captured_db block is completely independent, allowing you to manually type whatever WHERE clause you need (single ID, multiple IDs, date ranges, etc.).

The script is smart enough to use your manual list to filter the live_db query automatically, keeping performance high.

*/


-- Read-only health check: compares captured IDs (your table) against live IDs (ALL_OBJECTS)
-- and reports ORPHAN / NEW / DRIFT rows. Edit only section 1; sections 2-3 adapt automatically.
WITH 
-- =================================================================
-- 1. YOUR MANUAL FILTER (Edit this section)
-- =================================================================
captured_db AS (
    SELECT 
        table_name, 
        partition_name, 
        object_type, 
        object_id, 
        data_object_id
    FROM db_object_id_captured
    WHERE pur_id = 279  -- <--- MANUALLY EDIT YOUR CONDITION HERE
    -- Examples: 
    -- WHERE pur_id IN (279, 280)
    -- WHERE table_name LIKE 'STG_%'
),

-- =================================================================
-- 2. LIVE DB FETCH (Automatically looks at tables from step 1)
-- =================================================================
live_db AS (
    SELECT 
        o.object_name, 
        o.subobject_name, 
        o.object_type, 
        o.object_id, 
        o.data_object_id
    FROM all_objects o
    WHERE o.owner = 'YOUR_SCHEMA_NAME' -- <--- UPDATE SCHEMA OWNER
      AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
      AND o.data_object_id IS NOT NULL -- Matches "COMPOSITE='NO'" (Physical segments only)
      
      -- OPTIMIZATION: Only looks for tables found in your manual list above
      AND o.object_name IN (SELECT DISTINCT table_name FROM captured_db)
)

-- =================================================================
-- 3. THE COMPARISON REPORT (Full Health Check)
-- =================================================================
SELECT 
    NVL(live.object_name, cap.table_name) AS TABLE_NAME,
    NVL(live.subobject_name, cap.partition_name) AS PARTITION_NAME,
    
    CASE 
        WHEN live.object_id IS NULL THEN 'ORPHAN (In Table, Deleted from DB)'
        WHEN cap.object_id IS NULL THEN 'NEW (In DB, Missing in Table)'
        WHEN live.object_id != cap.object_id OR 
             NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1) 
             THEN 'DRIFT (IDs Changed)'
        ELSE 'SYNCED'
    END AS STATUS,

    cap.object_id AS OLD_ID,
    live.object_id AS NEW_ID
    
FROM live_db live
FULL OUTER JOIN captured_db cap
  ON live.object_name = cap.table_name
  AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
  -- Type must match too: a partition row can never pair with a subpartition row
  AND live.object_type = cap.object_type

-- Optional: Filter to show only issues
WHERE (live.object_id IS NULL) 
   OR (cap.object_id IS NULL) 
   OR (live.object_id != cap.object_id)
   OR (NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1))

ORDER BY 1, 2;

====================================================

Script 3:
======

Part 1: The Preview Report (Read-Only)

Run this query first. It joins your table with the live database using the COMPOSITE='NO' logic (Physical Segments only) and tells you the status of every object.

SQL

-- Read-only preview: compares live ALL_OBJECTS IDs vs captured IDs and reports status.
-- FIX: implicit comma joins (FROM t, config) replaced with explicit CROSS JOIN,
-- and all column references qualified — comma joins hide dropped conditions.
WITH config AS (
    SELECT 279 AS filter_pur_id, 'YOUR_SCHEMA_NAME' AS filter_owner FROM dual
),
live_data AS (
    -- Equivalent to the "Composite = NO" query, but faster
    SELECT o.object_name, o.subobject_name, o.object_type, o.object_id, o.data_object_id
    FROM all_objects o
    CROSS JOIN config c -- single-row config; cross join is intentional
    WHERE o.owner = c.filter_owner
      AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
      AND o.data_object_id IS NOT NULL -- This filters out COMPOSITE partitions
),
captured_data AS (
    SELECT t.table_name, t.partition_name, t.object_type, t.object_id, t.data_object_id
    FROM db_object_id_captured t
    CROSS JOIN config c
    WHERE t.pur_id = c.filter_pur_id
)
SELECT 
    NVL(live.object_name, cap.table_name) AS TABLE_NAME,
    NVL(live.subobject_name, cap.partition_name) AS PARTITION_NAME,
    
    -- LIVE VALUES
    live.object_id AS LIVE_OBJ_ID,
    live.data_object_id AS LIVE_DATA_ID,
    
    -- CAPTURED VALUES
    cap.object_id AS DB_CAPTURED_OBJ_ID,
    cap.data_object_id AS DB_CAPTURED_DATA_ID,
    
    -- STATUS CALCULATION
    CASE 
        WHEN cap.object_id IS NULL THEN 'NEW (Insert Needed)'
        WHEN live.object_id IS NULL THEN 'ORPHAN (Delete Needed)'
        WHEN live.object_id != cap.object_id OR 
             NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1) 
             THEN 'NEEDS TO BE UPDATED'
        ELSE 'THEY ARE IN SYNC'
    END AS STATUS
FROM live_data live
FULL OUTER JOIN captured_data cap
  ON live.object_name = cap.table_name
  AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
  AND live.object_type = cap.object_type
WHERE 
    -- Filter to show only specific table if needed, or leave commented for all
    -- live.object_name = 'YOUR_TABLE_NAME' AND 
    (live.object_id != cap.object_id OR 
     NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1) OR 
     cap.object_id IS NULL OR live.object_id IS NULL)
ORDER BY 1, 2;


====

This is the "Best of the Best" approach. It strictly follows your logic (validating COMPOSITE='NO') and gives you exactly what you asked for:

  1. Part 1: A clean Report (Preview) showing the comparison side-by-side.

  2. Part 2: A generator that writes the SQL statements for you. It produces the exact UPDATE scripts with the specific WHERE clauses you requested.


Part 1: The Preview Report (Read-Only)

Run this query first. It joins your table with the live database using the COMPOSITE='NO' logic (Physical Segments only) and tells you the status of every object.

SQL
-- Read-only preview: compares live ALL_OBJECTS IDs vs captured IDs and reports status.
-- FIX: implicit comma joins (FROM t, config) replaced with explicit CROSS JOIN,
-- and all column references qualified — comma joins hide dropped conditions.
WITH config AS (
    SELECT 279 AS filter_pur_id, 'YOUR_SCHEMA_NAME' AS filter_owner FROM dual
),
live_data AS (
    -- Equivalent to the "Composite = NO" query, but faster
    SELECT o.object_name, o.subobject_name, o.object_type, o.object_id, o.data_object_id
    FROM all_objects o
    CROSS JOIN config c -- single-row config; cross join is intentional
    WHERE o.owner = c.filter_owner
      AND o.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
      AND o.data_object_id IS NOT NULL -- This filters out COMPOSITE partitions
),
captured_data AS (
    SELECT t.table_name, t.partition_name, t.object_type, t.object_id, t.data_object_id
    FROM db_object_id_captured t
    CROSS JOIN config c
    WHERE t.pur_id = c.filter_pur_id
)
SELECT 
    NVL(live.object_name, cap.table_name) AS TABLE_NAME,
    NVL(live.subobject_name, cap.partition_name) AS PARTITION_NAME,
    
    -- LIVE VALUES
    live.object_id AS LIVE_OBJ_ID,
    live.data_object_id AS LIVE_DATA_ID,
    
    -- CAPTURED VALUES
    cap.object_id AS DB_CAPTURED_OBJ_ID,
    cap.data_object_id AS DB_CAPTURED_DATA_ID,
    
    -- STATUS CALCULATION
    CASE 
        WHEN cap.object_id IS NULL THEN 'NEW (Insert Needed)'
        WHEN live.object_id IS NULL THEN 'ORPHAN (Delete Needed)'
        WHEN live.object_id != cap.object_id OR 
             NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1) 
             THEN 'NEEDS TO BE UPDATED'
        ELSE 'THEY ARE IN SYNC'
    END AS STATUS
FROM live_data live
FULL OUTER JOIN captured_data cap
  ON live.object_name = cap.table_name
  AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
  AND live.object_type = cap.object_type
WHERE 
    -- Filter to show only specific table if needed, or leave commented for all
    -- live.object_name = 'YOUR_TABLE_NAME' AND 
    (live.object_id != cap.object_id OR 
     NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1) OR 
     cap.object_id IS NULL OR live.object_id IS NULL)
ORDER BY 1, 2;

Part 2: The SQL Generator (The "Fix" Script)

This PL/SQL block does not execute changes. Instead, it prints the specific UPDATE statements you asked for to the "Dbms Output" window. You can then copy-paste them and run them manually.

  • It checks COMPOSITE='NO' logic.

  • It includes the Old Values in the WHERE clause as requested.


    -- Generator: prints ready-to-run UPDATE statements to DBMS_OUTPUT;
    -- it makes NO changes itself — copy/paste the output and run it manually.
    SET SERVEROUTPUT ON SIZE 100000;
    DECLARE
        v_pur_id       NUMBER       := 279;
        v_schema_owner VARCHAR2(50) := 'YOUR_SCHEMA_NAME';
    BEGIN
        DBMS_OUTPUT.PUT_LINE('-- GENERATED UPDATE STATEMENTS (INCLUDES SUBPARTITIONS) --');
        
        FOR r IN (
            SELECT 
                live.object_name,
                live.subobject_name,
                live.object_type,
                -- New Values (From Live DB)
                live.object_id AS new_obj_id,
                live.data_object_id AS new_data_id,
                -- Old Values (From Your Table)
                cap.object_id AS old_obj_id,
                cap.data_object_id AS old_data_id
            FROM all_objects live
            JOIN db_object_id_captured cap
              ON live.object_name = cap.table_name
              -- Matches Partition OR Subpartition Name automatically
              AND NVL(live.subobject_name, '###') = NVL(cap.partition_name, '###')
              AND live.object_type = cap.object_type
            WHERE live.owner = v_schema_owner
              AND cap.pur_id = v_pur_id
              AND live.data_object_id IS NOT NULL -- Composite=NO Logic
              AND live.object_type IN ('TABLE', 'TABLE PARTITION', 'TABLE SUBPARTITION')
              
              -- CRITICAL: Only generate updates if IDs drifted
              AND (live.object_id != cap.object_id OR 
                   NVL(live.data_object_id, -1) != NVL(cap.data_object_id, -1))
            ORDER BY live.object_name, live.subobject_name
        ) LOOP
            -- 1. HEADER (SET clause with the new live values)
            DBMS_OUTPUT.PUT_LINE('UPDATE DB_OBJECT_ID_CAPTURED');
            DBMS_OUTPUT.PUT_LINE('SET OBJECT_ID = ' || r.new_obj_id || 
                                 ', DATA_OBJECT_ID = ' || r.new_data_id);
            
            -- 2. IDENTIFIERS (The "Where" Clause)
            DBMS_OUTPUT.PUT_LINE('WHERE PUR_ID = ' || v_pur_id);
            DBMS_OUTPUT.PUT_LINE('  AND TABLE_NAME = ''' || r.object_name || '''');
            
            -- Extended Logic: Explicitly handle Partitions vs Subpartitions in the WHERE clause
            IF r.subobject_name IS NOT NULL THEN
                 DBMS_OUTPUT.PUT_LINE('  AND PARTITION_NAME = ''' || r.subobject_name || '''');
            ELSE
                 DBMS_OUTPUT.PUT_LINE('  AND PARTITION_NAME IS NULL');
            END IF;
            -- [NEW] Explicitly lock the Object Type (Table vs Partition vs Subpartition)
            DBMS_OUTPUT.PUT_LINE('  AND OBJECT_TYPE = ''' || r.object_type || '''');
            -- 3. SAFETY CHECK (Old Values; NULL-valued DATA_OBJECT_ID gets an IS NULL predicate)
            DBMS_OUTPUT.PUT_LINE('  AND OBJECT_ID = ' || r.old_obj_id);
            
            IF r.old_data_id IS NOT NULL THEN
                DBMS_OUTPUT.PUT_LINE('  AND DATA_OBJECT_ID = ' || r.old_data_id || ';');
            ELSE
                DBMS_OUTPUT.PUT_LINE('  AND DATA_OBJECT_ID IS NULL;');
            END IF;
            DBMS_OUTPUT.PUT_LINE('------------------------------------------');
        END LOOP;
    END;
    /



Saturday, December 6, 2025

GATHER STATS

1. The Setup (Run Once)

Destructive but re-runnable DDL: safe to execute repeatedly, but note it drops the table and all previously logged data each time.

SQL
/********************************************************************
   NUCLEAR CLEAN & REBUILD stats_job_log
   → Drops everything if exists
   → Recreates table, identity column starting at 1
   → Recreates index + view
   → 100% safe, re-runnable forever
********************************************************************/

-- Drop the old log table if present; swallow only ORA-00942 (table not found).
BEGIN
    -- 1. Drop the table if it exists (including all data, constraints, triggers)
    EXECUTE IMMEDIATE 'DROP TABLE stats_job_log PURGE';
    DBMS_OUTPUT.PUT_LINE('Old stats_job_log table dropped (if existed).');
EXCEPTION
    WHEN OTHERS THEN
        IF SQLCODE != -942 THEN RAISE; END IF;  -- -942 = table does not exist → fine
        -- Reached only on -942; any other error re-raises above.
        DBMS_OUTPUT.PUT_LINE('No existing table to drop.');
END;
/

-- 2. Create fresh table with identity starting at 1.
--    FIX: plain DDL, not EXECUTE IMMEDIATE — EXECUTE IMMEDIATE is a PL/SQL
--    statement and cannot run as a stand-alone SQL*Plus command. Likewise,
--    bare DBMS_OUTPUT.PUT_LINE is invalid at script level; PROMPT is the
--    SQL*Plus way to print a message.
CREATE TABLE stats_job_log (
    log_id      NUMBER GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1),
    job_name    VARCHAR2(128),
    log_time    TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    message     VARCHAR2(4000),
    CONSTRAINT pk_stats_job_log PRIMARY KEY (log_id)
);

PROMPT Fresh stats_job_log table created with identity starting at 1

-- 3. Index (fast log tailing). ORA-00955 ("name is already used") is tolerated
--    so the script stays re-runnable even if the drop step was skipped.
BEGIN
    EXECUTE IMMEDIATE '
        CREATE INDEX ix_stats_job_log_time 
        ON stats_job_log(log_time DESC, job_name)';
    DBMS_OUTPUT.PUT_LINE('Index created');
EXCEPTION WHEN OTHERS THEN
    IF SQLCODE != -955 THEN RAISE; END IF;  -- index already exists (should never happen)
END;
/

-- 4. Friendly view for tailing the log (newest entries first).
CREATE OR REPLACE VIEW v_stats_gather_log AS
SELECT * FROM stats_job_log ORDER BY log_id DESC;

-- FIX: bare DBMS_OUTPUT.PUT_LINE is invalid outside a PL/SQL block; PROMPT
-- is the SQL*Plus command for script-level messages.
PROMPT View v_stats_gather_log created

-- 5. Confirm the identity column is in place.
--    FIX: the original referenced a non-existent sequence (stats_job_log_seq —
--    identity columns get a system-generated ISEQ$$ sequence) and used the
--    invalid syntax "EXECUTE IMMEDIATE ... INTO v FROM dual". We check the
--    dictionary instead, which also avoids consuming a sequence value.
DECLARE
    v_seq_name VARCHAR2(128);
BEGIN
    SELECT sequence_name
    INTO v_seq_name
    FROM user_tab_identity_cols
    WHERE table_name = 'STATS_JOB_LOG'
      AND column_name = 'LOG_ID';

    DBMS_OUTPUT.PUT_LINE('Identity sequence ' || v_seq_name || ' ready – next log_id will be 1');
EXCEPTION WHEN OTHERS THEN
    -- Surface the problem instead of swallowing it silently.
    DBMS_OUTPUT.PUT_LINE('WARNING: identity column not found – check table creation step.');
END;
/

PROMPT NUCLEAR REBUILD COMPLETE – table is pristine and starts at ID=1

2. The Master Script (Run Anywhere)

Fixed the local function scope issue.

SQL
-- Submits a background DBMS_SCHEDULER job that gathers optimizer stats for a
-- CSV list of tables, logging progress to stats_job_log via an autonomous logger.
DECLARE
    -- CONFIGURATION SECTION
    -- NOTE(review): v_owner / v_table_list are concatenated into dynamic PL/SQL
    -- below — keep them trusted, config-only values (never user input).
    v_owner      VARCHAR2(128) := 'YOUR_SCHEMA';    -- <== CHANGE THIS
    v_table_list VARCHAR2(4000) := 'FACT_SALES,FACT_ORDERS,DIM_CUSTOMER'; -- <== CHANGE THIS
    
    v_job_name   VARCHAR2(128) := 'GATHER_STATS_' || TO_CHAR(SYSDATE, 'YYYYMMDD_HH24MISS');
    v_plsql      CLOB;
BEGIN
    -- Dynamic PL/SQL Block (the q'[...]' splices embed the config values as literals)
    v_plsql := q'[
    DECLARE
        v_job VARCHAR2(128) := ']' || v_job_name || q'[';

        -- Autonomous logger: Writes to table even if stats fail
        PROCEDURE log(p_msg VARCHAR2) IS
            PRAGMA AUTONOMOUS_TRANSACTION; 
        BEGIN
            INSERT INTO stats_job_log(job_name, message)
            VALUES (v_job, SUBSTR(p_msg,1,4000));
            COMMIT;
        END;

    BEGIN
        log('STARTED – Job: ' || v_job);

        -- Loop through tables using pure SQL parsing (Scope Safe)
        FOR rec IN (
            SELECT t.table_name
            FROM   all_tables t
            WHERE  t.owner = ']' || v_owner || q'['
            AND    t.table_name IN (
                    SELECT TRIM(REGEXP_SUBSTR(']' || v_table_list || q'[', '[^,]+', 1, LEVEL))
                    FROM dual
                    CONNECT BY REGEXP_SUBSTR(']' || v_table_list || q'[', '[^,]+', 1, LEVEL) IS NOT NULL
            )
        )
        LOOP
            log('Processing ' || rec.table_name || ' ...');
            
            BEGIN
                -- Industry Standard Stats Gathering
                DBMS_STATS.GATHER_TABLE_STATS(
                    ownname          => ']' || v_owner || q'[',
                    tabname          => rec.table_name,
                    estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE, -- Gold Standard
                    method_opt       => 'FOR ALL COLUMNS SIZE AUTO',
                    degree           => DBMS_STATS.AUTO_DEGREE,      -- Load Aware
                    cascade          => TRUE,
                    granularity      => 'AUTO',
                    no_invalidate    => DBMS_STATS.AUTO_INVALIDATE,
                    force            => TRUE
                );
                log('SUCCESS → ' || rec.table_name);
            EXCEPTION WHEN OTHERS THEN
                -- Catch error, log it, but CONTINUE to next table
                log('FAILED → ' || rec.table_name || ': ' || SQLERRM);
            END;
        END LOOP;

        log('COMPLETED – Run Finished');
    END;]';

    -- Cleanup old job if exists (Idempotency). Note: job names include a
    -- timestamp, so this drop is a no-op except on same-second reruns.
    BEGIN DBMS_SCHEDULER.DROP_JOB(v_job_name, force => TRUE); EXCEPTION WHEN OTHERS THEN NULL; END;

    -- Submit Background Job (auto_drop removes the job definition after it runs)
    DBMS_SCHEDULER.CREATE_JOB(
        job_name   => v_job_name,
        job_type   => 'PLSQL_BLOCK',
        job_action => v_plsql,
        enabled    => TRUE,
        auto_drop  => TRUE,
        comments   => 'Bullet-proof stats gather (No APEX)'
    );

    DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');
    DBMS_OUTPUT.PUT_LINE('Job Submitted: ' || v_job_name);
    DBMS_OUTPUT.PUT_LINE('Monitor Live:  SELECT * FROM v_stats_gather_log WHERE ROWNUM <= 20;');
    DBMS_OUTPUT.PUT_LINE('--------------------------------------------------');
END;
/


-- Job health dashboard: definition from dba_scheduler_jobs joined to the
-- single most recent run from dba_scheduler_job_run_details.
SELECT 
    j.job_name,
    j.enabled,
    j.state                                            AS job_state,
    TO_CHAR(j.last_start_date,  'YYYY-MM-DD HH24:MI')  AS last_started,
    TO_CHAR(j.next_run_date,    'YYYY-MM-DD HH24:MI')  AS next_scheduled,
    d.status                                           AS last_run_status,
    d.last_run_time,
    d.duration_mins                                    AS last_run_minutes,
    d.error_code,
    CASE 
        WHEN d.status = 'SUCCEEDED' THEN 'OK'
        WHEN d.status = 'FAILED'    THEN 'ERROR'
        WHEN d.status IS NULL       THEN 'Never run'
        ELSE d.status
    END                                                AS health,
    -- FIX: actual_start_date is a TIMESTAMP; SYSDATE minus a timestamp
    -- yields an INTERVAL, which ROUND rejects (ORA-00932). CAST the NVL
    -- result to DATE so the subtraction produces a NUMBER of days.
    ROUND(SYSDATE - CAST(NVL(d.last_success_date, DATE '1900-01-01') AS DATE), 1) 
                                                       AS days_since_last_success,
    j.run_count,
    j.failure_count
FROM dba_scheduler_jobs j
LEFT JOIN (
    ------------------------------------------------------------------
    -- Latest run + safe duration + last successful run date
    ------------------------------------------------------------------
    SELECT 
        job_name,
        status,
        error#                                                          AS error_code,
        TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS')             AS last_run_time,
        -- DATE casts keep the duration math in NUMBER space on every version
        ROUND(
            (CAST(log_date          AS DATE) - 
             CAST(actual_start_date AS DATE)) * 1440
        , 2)                                                            AS duration_mins,
        -- Date of the most recent successful run (for "days since" column)
        MAX(CASE WHEN status = 'SUCCEEDED' THEN actual_start_date END)
            OVER (PARTITION BY job_name)                                 AS last_success_date,
        ROW_NUMBER() OVER (PARTITION BY job_name 
                           ORDER BY actual_start_date DESC)               AS rn
    FROM dba_scheduler_job_run_details
) d ON j.job_name = d.job_name AND d.rn = 1
WHERE j.job_name LIKE 'GATHER_STATS_%'
ORDER BY days_since_last_success DESC NULLS LAST, j.job_name;

1. The Fixed "Smart Summary" (Run this now)

This version explicitly checks for the STOPPED status so you see "KILLED" in big letters.

SQL
-- Smart summary of the last 20 scheduler runs, with STOPPED surfaced as
-- 'KILLED' so manually-stopped jobs are obvious.
SELECT 
    job_name,
    TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS started_at,
    
    -- STATUS LOGIC: explicitly check for STOPPED (manual STOP_JOB)
    CASE 
        WHEN status = 'STOPPED' THEN 'KILLED'   -- <== The fix
        WHEN status = 'FAILED'  THEN 'FAILED'
        WHEN status = 'SUCCEEDED' THEN 'SUCCESS'
        WHEN status IS NULL THEN '🏃 RUNNING'      -- NOTE(review): run_details rows normally exist only for completed runs, so this branch may be dead — confirm
        ELSE status
    END AS current_status,

    -- DURATION in minutes from start to log_date (the run's end record).
    -- NOTE(review): this ALWAYS measures to log_date — it does not fall
    -- back to SYSDATE for in-flight jobs, despite earlier descriptions.
    -- DATE casts keep the subtraction in NUMBER space (avoids ORA-00932).
    ROUND(
        (CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440
    , 2) AS duration_mins,

    error# AS error_code,
    additional_info

FROM dba_scheduler_job_run_details
WHERE job_name LIKE 'GATHER_STATS_%'
ORDER BY actual_start_date DESC
FETCH FIRST 20 ROWS ONLY;

2. If you are using your Custom Log Table (stats_job_log)

Your custom log table doesn't know the job was killed by the scheduler (because the script stopped running before it could write "Failed").

To fix this in the custom log view, we cross-reference it with the scheduler history. Run this to see the truth:

SQL
-- Cross-reference the custom log with scheduler history so killed jobs
-- (which never wrote a 'FAILED' log row) show their true outcome.
SELECT 
    l.job_name,
    TO_CHAR(MIN(l.log_time), 'HH24:MI:SS') as start_time,
    
    -- The Truth Serum: Check what actually happened to the Scheduler Job.
    -- FIX: the original scalar subquery had no row limit, so any job name
    -- with more than one historical run raised ORA-01427 (single-row
    -- subquery returns more than one row). Take only the latest run.
    COALESCE(
        (SELECT CASE 
            WHEN status = 'STOPPED' THEN 'KILLED MANUALLY'
            WHEN status = 'FAILED' THEN 'FAILED'
            WHEN status = 'SUCCEEDED' THEN 'SUCCESS'
         END
         FROM dba_scheduler_job_run_details d 
         WHERE d.job_name = l.job_name
         ORDER BY d.actual_start_date DESC
         FETCH FIRST 1 ROW ONLY), 
         '🏃 RUNNING'
    ) as real_status,

    -- Show the last message (likely "Processing Table X...")
    SUBSTR(MAX(l.message) KEEP (DENSE_RANK LAST ORDER BY l.log_id), 1, 60) as last_log_entry

FROM stats_job_log l
GROUP BY l.job_name
ORDER BY MIN(l.log_time) DESC;

3. Monitoring (The Control Room)

Use these to track your job.

The "Live Tail" (Watch it happen)

SQL
-- Live tail: timestamped log lines for the most recent job, newest first.
WITH latest_job AS (
    -- Job names embed a timestamp, so MAX() picks the most recent run
    SELECT MAX(job_name) AS job_name FROM stats_job_log
)
SELECT TO_CHAR(v.log_time, 'HH24:MI:SS') || ' | ' || v.message AS live_log
FROM   v_stats_gather_log v
JOIN   latest_job lj ON v.job_name = lj.job_name
ORDER BY v.log_id DESC;

The "Executive Summary" (Pass/Fail count)

SQL
-- Pass/fail counts plus total runtime for the most recent job.
SELECT 
    job_name,
    COUNT(CASE WHEN message LIKE 'SUCCESS%' THEN 1 END) AS tables_analyzed,
    COUNT(CASE WHEN message LIKE 'FAILED%' THEN 1 END)  AS tables_failed,
    -- FIX: if log_time is a TIMESTAMP, MAX - MIN yields an INTERVAL and
    -- "* 1440" raises ORA-00932. CAST both sides to DATE so the
    -- subtraction yields a NUMBER of days (matches the file's other
    -- "ORA-00932-proof" queries).
    ROUND((CAST(MAX(log_time) AS DATE) - CAST(MIN(log_time) AS DATE)) * 1440, 2) AS duration_mins
FROM stats_job_log
WHERE job_name = (SELECT MAX(job_name) FROM stats_job_log)
GROUP BY job_name;


Here are the best "Industry Standard" queries to utilize them.

1. DBA_SCHEDULER_JOBS (The Configuration)

Use this to check "Is my job set up correctly and scheduled to run?" It tells you if the job exists, if it is enabled, and when it will fire next.

SQL
-- Configuration check: does the job exist, is it enabled, when does it fire?
SELECT job_name
     , enabled
     , state            -- 'SCHEDULED', 'RUNNING', 'DISABLED'
     , job_action       -- the code/script the job executes
     , last_start_date  -- most recent start time
     , next_run_date    -- next scheduled fire time (critical for recurring jobs)
     , failure_count    -- consecutive failures so far
     , retry_count
  FROM dba_scheduler_jobs
 WHERE job_name LIKE 'GATHER_STATS_%'
 ORDER BY job_name;

2. DBA_SCHEDULER_JOB_RUN_DETAILS (The History)

Use this to check "Did it work yesterday, and how long did it take?" It logs every single execution.

SQL
-- Execution history: every run of the stats jobs with status and duration.
SELECT 
    log_id,
    job_name,
    status,                                     -- 'SUCCEEDED', 'FAILED', 'STOPPED'
    TO_CHAR(actual_start_date, 'DD-MON HH24:MI') AS start_time,
    
    -- Duration in minutes from the INTERVAL DAY TO SECOND run_duration.
    -- FIX: the original extracted only HOUR/MINUTE/SECOND, silently
    -- dropping the DAY component for any run longer than 24 hours; it
    -- also used TRUNC while promising a "precise" duration — ROUND keeps
    -- the two decimal places honest.
    ROUND(
        EXTRACT(DAY FROM run_duration) * 1440 +
        EXTRACT(HOUR FROM run_duration) * 60 +
        EXTRACT(MINUTE FROM run_duration) +
        EXTRACT(SECOND FROM run_duration) / 60, 2
    ) AS duration_mins,
    
    output,         -- Any standard output (rarely used if using custom logs)
    error#,         -- Oracle Error Code (e.g., 1652)
    additional_info -- Crucial for debugging failures
FROM dba_scheduler_job_run_details
WHERE job_name LIKE 'GATHER_STATS_%'
ORDER BY actual_start_date DESC;

3. The "Master View" (Join Query)

Pro Tip: This is the most useful query for a DBA. It joins the two views to show you the Job Definition alongside its Most Recent Status.

SQL
-- Master view: job definition alongside its single most recent run.
SELECT 
    j.job_name,
    j.enabled,
    j.state,
    j.next_run_date,
    d.status AS last_run_status,
    d.duration_mins AS last_run_duration,
    d.actual_start_date AS last_run_time
FROM dba_scheduler_jobs j
LEFT JOIN (
    -- Get only the most recent run for each job
    SELECT job_name, status, actual_start_date,
           -- FIX: the original summed only MINUTE + SECOND/60, ignoring
           -- the HOUR and DAY parts of run_duration — understating every
           -- run of an hour or more. Include all components.
           ROUND(EXTRACT(DAY FROM run_duration) * 1440 +
                 EXTRACT(HOUR FROM run_duration) * 60 +
                 EXTRACT(MINUTE FROM run_duration) +
                 EXTRACT(SECOND FROM run_duration) / 60, 2) AS duration_mins,
           ROW_NUMBER() OVER (PARTITION BY job_name ORDER BY actual_start_date DESC) AS rn
    FROM dba_scheduler_job_run_details
) d ON j.job_name = d.job_name AND d.rn = 1
WHERE j.job_name LIKE 'GATHER_STATS_%';

Key Takeaway:

  • If you create a job with auto_drop => TRUE (like in our script), it disappears from dba_scheduler_jobs the moment it finishes.

  • However, its history remains in dba_scheduler_job_run_details so you can verify it ran.




    -- 3.3 DBA_SCHEDULER_JOBS – current job definition (100% safe columns) SELECT job_name, enabled, state, last_start_date, next_run_date, run_count, failure_count, retry_count FROM dba_scheduler_jobs WHERE job_name LIKE 'GATHER_STATS_%' ORDER BY job_name;

    -- 3.4 DBA_SCHEDULER_JOB_RUN_DETAILS – execution history (100% safe columns) SELECT job_name, status, TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS start_time, TO_CHAR(log_date, 'YYYY-MM-DD HH24:MI:SS') AS end_time, ROUND((log_date - actual_start_date) * 1440, 2) AS duration_minutes, error#, additional_info FROM dba_scheduler_job_run_details WHERE job_name LIKE 'GATHER_STATS_%' ORDER BY actual_start_date DESC;

    -- 3.5 Master view – latest run + job status in one shot SELECT j.job_name, j.enabled, j.state, j.next_run_date, d.status AS last_run_status, d.duration_minutes AS last_run_mins, d.start_time AS last_run_time FROM dba_scheduler_jobs j LEFT JOIN ( SELECT job_name, status, ROUND((log_date - actual_start_date)*1440,2) AS duration_minutes, TO_CHAR(actual_start_date,'YYYY-MM-DD HH24:MI:SS') AS start_time, ROW_NUMBER() OVER (PARTITION BY job_name ORDER BY actual_start_date DESC) rn FROM dba_scheduler_job_run_details ) d ON j.job_name = d.job_name AND d.rn = 1 WHERE j.job_name LIKE 'GATHER_STATS_%';

    -- 3.6 Is a stats job running right now? SELECT 'RUNNING → ' || job_name || ' for ' || ROUND((SYSTIMESTAMP - actual_start_date)*1440,1) || ' minutes' AS status FROM dba_scheduler_job_run_details WHERE status = 'RUNNING' AND job_name LIKE 'GATHER_STATS_%' UNION ALL SELECT 'No stats job currently running' FROM dual WHERE NOT EXISTS (SELECT 1 FROM dba_scheduler_job_run_details WHERE status = 'RUNNING' AND job_name LIKE 'GATHER_STATS_%');



    Fixed version:


    /********************************************************************
      FINAL CANVAS – 100% ORA-00932-PROOF DURATION CALCULATIONS
      Works on 11g / 12c / 19c / 21c / 23ai / RDS / everywhere
      FIX: this canvas was pasted as one enormous physical line, so the
      first "--" comment swallowed every statement after it (dead code);
      line breaks restored so each query actually executes.
    ********************************************************************/

    ------------------------------------------------------------------
    -- 1. JOB HISTORY – perfect duration, no interval errors ever
    ------------------------------------------------------------------
    SELECT job_name,
           status,
           TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS start_time,
           TO_CHAR(log_date, 'YYYY-MM-DD HH24:MI:SS')          AS end_time,
           ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 2) AS duration_minutes,
           error#,
           SUBSTR(additional_info, 1, 300) AS info
    FROM dba_scheduler_job_run_details
    WHERE job_name LIKE 'GATHER_STATS_%'
    ORDER BY actual_start_date DESC;

    ------------------------------------------------------------------
    -- 2. IS ANYTHING RUNNING RIGHT NOW? – live elapsed time
    ------------------------------------------------------------------
    SELECT 'RUNNING → ' || job_name || ' for '
           || ROUND((CAST(SYSTIMESTAMP AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 1)
           || ' minutes' AS live_status
    FROM dba_scheduler_job_run_details
    WHERE status = 'RUNNING'
      AND job_name LIKE 'GATHER_STATS_%'
    UNION ALL
    SELECT 'No stats job currently running'
    FROM dual
    WHERE NOT EXISTS (
        SELECT 1
        FROM dba_scheduler_job_run_details
        WHERE status = 'RUNNING'
          AND job_name LIKE 'GATHER_STATS_%'
    );

    ------------------------------------------------------------------
    -- 3. EXECUTIVE SUMMARY – last 20 runs, clean and fast
    ------------------------------------------------------------------
    SELECT job_name,
           TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS started,
           status,
           ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 1) AS minutes,
           error#
    FROM dba_scheduler_job_run_details
    WHERE job_name LIKE 'GATHER_STATS_%'
    ORDER BY actual_start_date DESC
    FETCH FIRST 20 ROWS ONLY;

    ------------------------------------------------------------------
    -- 4. MASTER VIEW – current job state + most recent run result
    ------------------------------------------------------------------
    SELECT j.job_name,
           j.enabled,
           j.state,
           j.next_run_date,
           d.status           AS last_run_status,
           d.duration_minutes AS last_run_mins,
           d.started
    FROM dba_scheduler_jobs j
    LEFT JOIN (
        SELECT job_name,
               status,
               TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS started,
               ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 1) AS duration_minutes,
               ROW_NUMBER() OVER (PARTITION BY job_name ORDER BY actual_start_date DESC) rn
        FROM dba_scheduler_job_run_details
    ) d ON j.job_name = d.job_name AND d.rn = 1
    WHERE j.job_name LIKE 'GATHER_STATS_%'
    ORDER BY j.job_name;

    ------------------------------------------------------------------
    -- 5. BONUS: stats_job_log duration summary (your custom log)
    ------------------------------------------------------------------
    SELECT job_name,
           TO_CHAR(MIN(log_time), 'YYYY-MM-DD HH24:MI:SS') AS started,
           TO_CHAR(MAX(log_time), 'YYYY-MM-DD HH24:MI:SS') AS finished,
           -- DATE casts added for consistency with the canvas's ORA-00932-proof claim
           ROUND((CAST(MAX(log_time) AS DATE) - CAST(MIN(log_time) AS DATE)) * 1440, 2) AS duration_minutes,
           COUNT(CASE WHEN message LIKE 'SUCCESS%' THEN 1 END) AS success,
           COUNT(CASE WHEN message LIKE 'FAILED%' THEN 1 END)  AS failed
    FROM stats_job_log
    GROUP BY job_name
    ORDER BY MIN(log_time) DESC;



    More enhancements:

    /********************************************************************
      FINAL CANVAS – 100% ORA-00932-PROOF & VERSION SAFE
      Works on 11g / 12c / 19c / 21c / 23ai / RDS / everywhere
      FIX: pasted as two enormous physical lines, so the first "--"
      comment swallowed every statement after it (dead code); line
      breaks restored so each query actually executes.
    ********************************************************************/

    ------------------------------------------------------------------
    -- 1. JOB HISTORY – Perfect duration, safety cast applied
    ------------------------------------------------------------------
    SELECT job_name,
           status,
           TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS start_time,
           TO_CHAR(log_date, 'YYYY-MM-DD HH24:MI:SS')          AS end_time,
           -- Cast both to DATE so the result is a NUMBER of days
           ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 2) AS duration_minutes,
           error#,
           SUBSTR(additional_info, 1, 300) AS info
    FROM dba_scheduler_job_run_details
    WHERE job_name LIKE 'GATHER_STATS_%'
    ORDER BY actual_start_date DESC;

    ------------------------------------------------------------------
    -- 2. IS ANYTHING RUNNING RIGHT NOW? – Using correct view
    ------------------------------------------------------------------
    -- Query dba_scheduler_jobs (live status) instead of run_details (history)
    SELECT 'RUNNING → ' || job_name || ' for '
           || ROUND((SYSDATE - CAST(last_start_date AS DATE)) * 1440, 1)
           || ' minutes' AS live_status
    FROM dba_scheduler_jobs
    WHERE state = 'RUNNING'
      AND job_name LIKE 'GATHER_STATS_%'
    UNION ALL
    SELECT 'No stats job currently running'
    FROM dual
    WHERE NOT EXISTS (
        SELECT 1
        FROM dba_scheduler_jobs
        WHERE state = 'RUNNING'
          AND job_name LIKE 'GATHER_STATS_%'
    );

    ------------------------------------------------------------------
    -- 3. EXECUTIVE SUMMARY – 11g Compatible (No FETCH FIRST)
    ------------------------------------------------------------------
    -- ROWNUM wrapper supports Oracle 11g
    SELECT *
    FROM (
        SELECT job_name,
               TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS started,
               status,
               ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 1) AS minutes,
               error#
        FROM dba_scheduler_job_run_details
        WHERE job_name LIKE 'GATHER_STATS_%'
        ORDER BY actual_start_date DESC
    )
    WHERE ROWNUM <= 20;

    ------------------------------------------------------------------
    -- 4. MASTER VIEW – Current state + Last run details
    ------------------------------------------------------------------
    SELECT j.job_name,
           j.enabled,
           j.state,
           j.next_run_date,
           d.status           AS last_run_status,
           d.duration_minutes AS last_run_mins,
           d.started
    FROM dba_scheduler_jobs j
    LEFT JOIN (
        SELECT job_name,
               status,
               TO_CHAR(actual_start_date, 'YYYY-MM-DD HH24:MI:SS') AS started,
               -- Cast to DATE ensures number math
               ROUND((CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440, 1) AS duration_minutes,
               ROW_NUMBER() OVER (PARTITION BY job_name ORDER BY actual_start_date DESC) rn
        FROM dba_scheduler_job_run_details
    ) d ON j.job_name = d.job_name AND d.rn = 1
    WHERE j.job_name LIKE 'GATHER_STATS_%'
    ORDER BY j.job_name;

    ------------------------------------------------------------------
    -- 5. BONUS: Custom Log Summary (Fixed ORA-00932)
    ------------------------------------------------------------------


    SELECT log_time, -- Calculate how many minutes ago this specific line happened ROUND((CAST(SYSTIMESTAMP AS DATE) - CAST(log_time AS DATE)) * 1440, 1) || ' mins ago' as age, message FROM stats_job_log WHERE job_name = (SELECT MAX(job_name) FROM stats_job_log) -- Get only the latest job ORDER BY log_id DESC;

    SELECT job_name, TO_CHAR(MIN(log_time), 'YYYY-MM-DD HH24:MI:SS') AS started_at, -- CHECK STATUS: Look at the very last message written CASE WHEN MAX(message) KEEP (DENSE_RANK LAST ORDER BY log_id) LIKE 'COMPLETED%' THEN 'FINISHED' WHEN MAX(message) KEEP (DENSE_RANK LAST ORDER BY log_id) LIKE 'FAILED%' THEN 'FAILED' ELSE 'RUNNING' END AS current_status, -- DURATION: If Running, calc from NOW. If Finished, calc from End Time. ROUND( (CAST( CASE WHEN MAX(message) KEEP (DENSE_RANK LAST ORDER BY log_id) LIKE 'COMPLETED%' THEN MAX(log_time) ELSE SYSTIMESTAMP -- Use "Now" if still running END AS DATE) - CAST(MIN(log_time) AS DATE)) * 1440 , 2) AS duration_mins, -- Progress Counters COUNT(CASE WHEN message LIKE 'SUCCESS%' THEN 1 END) AS tables_done, -- Show me the last thing it wrote (e.g., "Processing Table B...") SUBSTR(MAX(message) KEEP (DENSE_RANK LAST ORDER BY log_id), 1, 50) AS last_activity FROM stats_job_log GROUP BY job_name ORDER BY MIN(log_time) DESC;


    ------------------------------------------------------------------ -- PERFECT CUSTOM LOG SUMMARY – works whether job is running or not ------------------------------------------------------------------ SELECT job_name, -- 1. When did it start? TO_CHAR(MIN(log_time), 'YYYY-MM-DD HH24:MI:SS') AS started, -- 2. Did it finish? If yes → show finish time, if not → show "Still running" MAX(CASE WHEN message LIKE 'COMPLETED%' THEN TO_CHAR(log_time, 'YYYY-MM-DD HH24:MI:SS') END) AS finished, -- 3. Current status (the single most important column) CASE WHEN MAX(CASE WHEN message LIKE 'COMPLETED%' THEN 1 END) = 1 THEN 'Completed' WHEN MAX(CASE WHEN message LIKE 'STARTED%' THEN 1 END) = 1 THEN 'In Progress' ELSE 'Unknown' END AS status, -- 4. How long has it been running / how long did it take? ROUND( (CAST(SYSTIMESTAMP AS DATE) - CAST(MIN(log_time) AS DATE)) * 1440 , 1) AS elapsed_minutes, -- 5. If finished → exact duration, if still running → null CASE WHEN MAX(CASE WHEN message LIKE 'COMPLETED%' THEN 1 END) = 1 THEN ROUND( (CAST(MAX(log_time) AS DATE) - CAST(MIN(log_time) AS DATE)) *1440 , 2) END AS duration_if_finished_mins, -- 6. Table counts COUNT(CASE WHEN message LIKE 'SUCCESS %' THEN 1 END) AS tables_success, COUNT(CASE WHEN message LIKE 'FAILED %' THEN 1 END) AS tables_failed, COUNT(*) AS total_messages FROM stats_job_log GROUP BY job_name ORDER BY MIN(log_time) DESC;

    More:


    SELECT j.job_name, j.enabled, j.state AS job_state, TO_CHAR(j.last_start_date, 'YYYY-MM-DD HH24:MI') AS last_started, TO_CHAR(j.next_run_date, 'YYYY-MM-DD HH24:MI') AS next_scheduled, d.status AS last_run_status, TO_CHAR(d.last_run_time_raw, 'YYYY-MM-DD HH24:MI:SS') AS last_run_time, d.duration_mins AS last_run_minutes, d.error_code, CASE WHEN d.status = 'SUCCEEDED' THEN 'OK' WHEN d.status = 'FAILED' THEN 'ERROR' WHEN d.status IS NULL THEN 'Never run' ELSE d.status END AS health, ------------------------------------------------------------------ -- THE FIX: Explicitly CAST the result of NVL to DATE ------------------------------------------------------------------ ROUND( SYSDATE - CAST( NVL(d.last_success_date, TO_DATE('1900-01-01','YYYY-MM-DD')) AS DATE ) , 1) AS days_since_last_success, j.run_count, j.failure_count FROM dba_scheduler_jobs j LEFT JOIN ( SELECT job_name, status, error# AS error_code, actual_start_date AS last_run_time_raw, -- Duration Calculation (Safe) ROUND( (CAST(log_date AS DATE) - CAST(actual_start_date AS DATE)) * 1440 , 2) AS duration_mins, -- Last Success Date (Raw) MAX(CASE WHEN status = 'SUCCEEDED' THEN actual_start_date END) OVER (PARTITION BY job_name) AS last_success_date, ROW_NUMBER() OVER (PARTITION BY job_name ORDER BY actual_start_date DESC) AS rn FROM dba_scheduler_job_run_details ) d ON j.job_name = d.job_name AND d.rn = 1 WHERE j.job_name LIKE 'GATHER_STATS_%' ORDER BY days_since_last_success DESC NULLS LAST, j.job_name;


    =========


    SET SERVEROUTPUT ON; DECLARE v_count NUMBER := 0; BEGIN DBMS_OUTPUT.PUT_LINE('--- EMERGENCY STOP STARTED ---'); -- 1. KILL RUNNING JOBS FOR r IN ( SELECT job_name FROM dba_scheduler_running_jobs WHERE job_name LIKE 'GATHER_STATS_%' ) LOOP BEGIN DBMS_SCHEDULER.STOP_JOB(job_name => r.job_name, force => TRUE); DBMS_OUTPUT.PUT_LINE('KILLED Running Job: ' || r.job_name); v_count := v_count + 1; EXCEPTION WHEN OTHERS THEN DBMS_OUTPUT.PUT_LINE('Could not stop ' || r.job_name || ': ' || SQLERRM); END; END LOOP; -- 2. CLEAN UP DEFINITIONS (Just in case) FOR r IN ( SELECT job_name FROM dba_scheduler_jobs WHERE job_name LIKE 'GATHER_STATS_%' ) LOOP BEGIN DBMS_SCHEDULER.DROP_JOB(job_name => r.job_name, force => TRUE); DBMS_OUTPUT.PUT_LINE('DROPPED Job Definition: ' || r.job_name); EXCEPTION WHEN OTHERS THEN NULL; -- Ignore if it was already dropped by the STOP command END; END LOOP; IF v_count = 0 THEN DBMS_OUTPUT.PUT_LINE('No active GATHER_STATS jobs found.'); ELSE DBMS_OUTPUT.PUT_LINE('--- STOP COMPLETED ---'); END IF; END; /