HWM ( High Water Mark )

Query to Resize Datafiles:

-- Generate "alter database datafile ... resize" commands down to each datafile's
-- high-water mark (highest used block), skipping SYSTEM/SYSAUX/UNDO tablespaces.
-- NOTE(review): assumes an 8 KB block size (8192) — confirm db_block_size first.
-- NOTE(review): "MB_WILL_BE_SAVED_BY_COMMAND" actually shows the file's current
-- size in MB (bytes/1024/1024), not the amount that would be reclaimed.
select 'alter database datafile '''||file_name||''' resize '||ceil( (nvl(hwm,1)*8192)/1024/1024 )||'m;' as cmd, bytes/1024/1024 "MB_WILL_BE_SAVED_BY_COMMAND" from dba_data_files a, ( select file_id, max(block_id+blocks-1) as hwm from dba_extents group by file_id ) b where a.file_id = b.file_id(+) and ceil(blocks*8192/1024/1024)- ceil((nvl(hwm,1)* 8192)/1024/1024 ) > 0 and a.tablespace_name NOT IN ('SYSAUX','SYSTEM','UNDOTB1','UNDOTB2','UNDOTB3','UNDOTB4');


Other Query to Resize Datafile

set linesize 1000 pagesize 0 feedback off trimspool on
with
hwm as (
  -- get highest block id from each datafiles ( from x$ktfbue as we don't need all joins from dba_extents )
  select /*+ materialize */ ktfbuesegtsn ts#,ktfbuefno relative_fno,max(ktfbuebno+ktfbueblks-1) hwm_blocks
  from sys.x$ktfbue group by ktfbuefno,ktfbuesegtsn
),
hwmts as (
  -- join ts# with tablespace_name
  select name tablespace_name,relative_fno,hwm_blocks
  from hwm join v$tablespace using(ts#)
),
hwmdf as (
  -- join with datafiles, put 5M minimum for datafiles with no extents
  select file_name,nvl(hwm_blocks*(bytes/blocks),5*1024*1024) hwm_bytes,bytes,autoextensible,maxbytes
  from hwmts right join dba_data_files using(tablespace_name,relative_fno)
)
select
case when autoextensible='YES' and maxbytes>=bytes
then -- we generate resize statements only if autoextensible can grow back to current size
  '/* reclaim '||to_char(ceil((bytes-hwm_bytes)/1024/1024),999999)
   ||'M from '||to_char(ceil(bytes/1024/1024),999999)||'M */ '
   ||'alter database datafile '''||file_name||''' resize '||ceil(hwm_bytes/1024/1024)||'M;'
else -- generate only a comment when autoextensible is off
  '/* reclaim '||to_char(ceil((bytes-hwm_bytes)/1024/1024),999999)
   ||'M from '||to_char(ceil(bytes/1024/1024),999999)
   ||'M after setting autoextensible maxsize higher than current size for file '
   || file_name||' */'
end SQL
from hwmdf
where
bytes-hwm_bytes>1024*1024*1024 -- resize only if at least 1GB can be reclaimed
and file_name not like '%undo%'
order by bytes-hwm_bytes desc
/

Redo Logs and Archivelogs

+++++++++ Per Day Archive ++++++

-- Daily archive-log generation: log switches per day and the estimated daily
-- volume (switch count * average online redo log size).
-- rownum < 10 keeps the 9 most recent days: the inline view A is already
-- sorted DESC, so rownum takes the latest days before the final sort.
-- Fix: removed stray text ("per hour :") fused after the terminating semicolon
-- and the commented-out thread# fragments.
SELECT A.*, Round(A.Count#*B.AVG#/1024/1024) Daily_Avg_MB
FROM
(SELECT To_Char(First_Time,'YYYY-MM-DD') DAY, Count(1) Count#, Min(RECID) Min#, Max(RECID) Max#
 FROM v$log_history GROUP BY To_Char(First_Time,'YYYY-MM-DD') ORDER BY 1 DESC) A,
(SELECT Avg(BYTES) AVG#, Count(1) Count#, Max(BYTES) Max_Bytes, Min(BYTES) Min_Bytes FROM v$log) B
WHERE rownum < 10
ORDER BY 1 DESC;


+++++++++ Per Day Archives 30 Days ++++++

SELECT A.*,
Round(A.Count#*B.AVG#/1024/1024) Daily_Avg_Mb
 FROM
 (
 SELECT
 To_Char(First_Time,'YYYY-MM-DD') DAY,
 Count(1) Count#,
 Min(RECID) Min#,
 Max(RECID) Max#
 FROM
 gv$log_history
 GROUP BY
 To_Char(First_Time,'YYYY-MM-DD')
 ORDER
 BY 1 DESC
) A,
 (
 SELECT
 Avg(BYTES) AVG#,
 Count(1) Count#,
 Max(BYTES) Max_Bytes,
 Min(BYTES) Min_Bytes
 FROM
 gv$log
 ) B
 ;

++++++++++++++++++ Hourly Archive Generation ++++++++++++++

-- Hourly archive-log generation matrix for one day: one column per hour (00-23)
-- counting log switches, plus the daily total.
-- Fix: trunc(first_time)='24-SEP-2019' compared a DATE to a string (implicit,
-- NLS_DATE_FORMAT-dependent conversion) and wrapped the column in trunc().
-- Replaced with an explicit, sargable half-open date range.
select 
to_char(first_time,'YY-MM-DD') day, 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'00',1,0)),'999') "00", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'01',1,0)),'999') "01", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'02',1,0)),'999') "02", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'03',1,0)),'999') "03", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'04',1,0)),'999') "04", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'05',1,0)),'999') "05", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'06',1,0)),'999') "06", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'07',1,0)),'999') "07", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'08',1,0)),'999') "08", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'09',1,0)),'999') "09", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'10',1,0)),'999') "10", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'11',1,0)),'999') "11", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'12',1,0)),'999') "12", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'13',1,0)),'999') "13", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'14',1,0)),'999') "14", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'15',1,0)),'999') "15", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'16',1,0)),'999') "16", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'17',1,0)),'999') "17", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'18',1,0)),'999') "18", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'19',1,0)),'999') "19", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'20',1,0)),'999') "20", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'21',1,0)),'999') "21", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'22',1,0)),'999') "22", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'23',1,0)),'999') "23", 
COUNT(*) TOT 
from v$log_history
where first_time >= to_date('24-SEP-2019','DD-MON-YYYY')
  and first_time <  to_date('24-SEP-2019','DD-MON-YYYY') + 1
group by to_char(first_time,'YY-MM-DD') 
order by day ;

+++++++++++++++ Current Session Generating Archives ++++++++++++++++++

select s.sid, n.name, s.value, sn.username, sn.program, sn.type, sn.module
from v$sesstat s
join v$statname n on n.statistic# = s.statistic#
join v$session sn on sn.sid = s.sid
where name like '%redo entries%' and TYPE not like '%BACKGROUND%'
order by value desc;

+++++++++++++++++++ If session is Generating Huge archives +++++++++++++++

Step 1: Find the session

-- Per-snapshot block-change activity by segment (AWR), to find which objects
-- drove redo generation in the given window.
-- NOTE(review): the alias "maxchages" is a typo for "maxchanges" but is kept,
-- since it is the query's output column name. Ordering ASC puts the busiest
-- objects at the END of the output.
SELECT to_char(begin_interval_time,'YYYY_MM_DD HH24:MI') snap_time,
        dhsso.object_name,
        sum(db_block_changes_delta) as maxchages
  FROM dba_hist_seg_stat dhss,
         dba_hist_seg_stat_obj dhsso,
         dba_hist_snapshot dhs
  WHERE dhs.snap_id = dhss.snap_id
    AND dhs.instance_number = dhss.instance_number
    AND dhss.obj# = dhsso.obj#
    AND dhss.dataobj# = dhsso.dataobj#
    -- adjust this window to the period being investigated
    AND begin_interval_time BETWEEN to_date('2013_05_22 17','YYYY_MM_DD HH24')
                                           AND to_date('2013_05_22 21','YYYY_MM_DD HH24')
  GROUP BY to_char(begin_interval_time,'YYYY_MM_DD HH24:MI'),
           dhsso.object_name order by maxchages asc;

Step 2:
Once you know the objects, get the SQL information related to those objects:
The below are the SQLs causing more changes to MGMT_POLICY_ASSOC_CFG%.

SELECT to_char(begin_interval_time,'YYYY_MM_DD HH24:MI'),
         dbms_lob.substr(sql_text,4000,1),
         dhss.instance_number,
         dhss.sql_id,executions_delta,rows_processed_delta
  FROM dba_hist_sqlstat dhss,
         dba_hist_snapshot dhs,
         dba_hist_sqltext dhst
  WHERE upper(dhst.sql_text) LIKE '%MGMT_POLICY_ASSOC_CFG%'
    AND dhss.snap_id=dhs.snap_id
    AND dhss.instance_Number=dhs.instance_number
 AND begin_interval_time BETWEEN to_date('2013_05_22 17','YYYY_MM_DD HH24')
                                           AND to_date('2013_05_22 21','YYYY_MM_DD HH24')
    AND dhss.sql_id = dhst.sql_id;

Step 3:

 Once you know the SQL ID , get the program and userid who is running it and intimate the user to take care of this query / Program.As a DBA , you can increase the archive log backup frequency of the database / once you get the permission to kill the program /sql you can kill them as well.
Know which program is causing huge redo using the SQL IDs.
    
-- ASH history: which program/user ran the given SQL IDs in the snapshot range.
-- Fix: removed pasted SQL*Plus prompt numbers ("2 3 4 5 6") that had been
-- captured inside the statement, making it invalid.
SELECT instance_number, to_char(sample_time,'yyyy_mm_dd hh24:mi:ss'),
       user_id,
       program
  FROM dba_hist_active_sess_history
  WHERE sql_id in ('5xmfm787py0a3','bq28gpz5w5qqg','71t5d196acgr9','a8d5br4z9m251')
    AND snap_id BETWEEN 31856 AND 31860;

+++++++++++++++++++ Other Method ++++++++++++++++++++++++++

Find a session which are generating more archive logs

To find sessions generating lots of redo, you can use either of the following methods. Both methods examine the amount of undo generated. When a transaction generates undo, it will automatically generate redo as well.

The methods are:

1) Query V$SESS_IO. This view contains the column BLOCK_CHANGES which indicates how much blocks have been changed by the session. High values indicate a session generating lots of redo.

The query you can use is:

SELECT s.sid, s.serial#, s.username, s.program, i.block_changes FROM v$session s, v$sess_io i WHERE s.sid = i.sid ORDER BY 5 desc, 1, 2, 3, 4;

 SID SERIAL# USERNAME      PROGRAM       BLOCK_CHANGES
---------- ---------- ------------ -----------------------
 158 6 SCOTT               sqlplus.exe       630295
 159 3 SYS                 sqlplus.exe       97
 161 1                     ORACLE.EXE (MMON) 58
 164 1 ORACLE.EXE          (SMON)            34
 148 5 ORACLE.EXE          (q001)            0
........ 
19 rows selected.


Run the query multiple times and examine the delta between each occurrence of BLOCK_CHANGES. Large deltas indicate high redo generation by the session. Like Scott user with Sid 158 is having high value for Block_changes and is the main session for generating more archive logs.


2) Query V$TRANSACTION. This view contains information about the amount of
undo blocks and undo records accessed by the transaction (as found in the USED_UBLK and USED_UREC columns).


SELECT s.sid, s.serial#, s.username, s.program,  t.used_ublk, t.used_urec FROM v$session s, v$transaction t WHERE s.taddr = t.addr ORDER BY 5 desc, 6 desc, 1, 2, 3, 4;

 SID SERIAL# USERNAME PROGRAM        USED_UBLK USED_UREC
---------- ---------- ------------ ---------------------------
 158 6       SCOTT    sqlplus.exe    4929      157526

SQL>


Run the query multiple times and examine the delta between each occurrence of USED_UBLK and USED_UREC. Large deltas indicate high redo generation by
the session.

You use the first query when you need to check for programs generating lots of redo when these programs activate more than one transaction. The latter query can be used to find out which particular transactions are generating redo.

From the above example we can see that the SCOTT user is generating the most archive logs. To find which SQL statement the SCOTT user executed:

-- SQL text + undo usage for the active transactions of a given user
-- (prompts for the username via the &USERNAME substitution variable).
-- NOTE(review): the KBytes formula assumes an 8 KB block size (8192) —
-- confirm against db_block_size.
select sql.sql_text sql_text, t.USED_UREC Records, t.USED_UBLK Blocks, (t.USED_UBLK*8192/1024) KBytes from v$transaction t, v$session s, v$sql sql where t.addr = s.taddr and s.sql_id = sql.sql_id and s.username ='&USERNAME';

Enter value for username: SCOTT
old 7: and s.username ='&USERNAME'
new 7: and s.username ='SCOTT'
SQL_TEXT                                 RECORDS BLOCKS KBYTES
---------- ---------- ----------
insert into emp_c select * from emp_c    157526   4929  39432
SQL>

MongoDB All in One

 Working on Installation, Configuration, Administration of MongoDB servers.

We have 3 Node Replicaset sharded cluster.

I used to sit with our developers writing CRUD operations, creating users, assigning privileges/roles,

checking slow-running queries, and creating indexes on document IDs.

Exporting and importing collections from the prod to the dev environment.

 

Basic Architecture:-

MongoDB is a document-oriented database; it stores data in BSON format (Binary JSON), i.e. key-value pairs.

In a database we have collections, and in collections we can have multiple documents.

The main daemon in MongoDB is mongod, which is the actual database server; it handles connection requests and manages the data.

The other process is the mongo shell; it provides access to the mongod server.

mongod listens on default port 27017, and the default dbpath is /data/db/.

Users are created at the database level in MongoDB, and privileges are also granted at the database level.

We can pass all the details like dbpath logpath port while starting mongoD but best is to use configuration file.

By default mongod starts in the foreground; to start it in the background use --fork.

A namespace is the concatenation of the database name and the collection name.

Default storage engine is WiredTiger.

Replica sets are groups of different MongoDs that contain the same data.

The purpose of a replica set cluster is to ensure high availability and automatic failover.

In its most basic form, you can think of replica sets as having one node that manages all reads and writes-- a primary node.

And some secondary nodes that constantly copy the information from the primary node.

The latest version of our protocol is based in the industry accepted Raft Protocol, which ensures that in case of failure, the system is able to automatically select a secondary node as the new primary and keep up with the service without requiring any manual intervention or downtime.

Replica set cluster nodes can have different roles, different hardware configuration, and different operation system.

Process of distributing data across multiple servers for storage,

Sharding reduces the number of operations each individual server has to handle.

Each shard serves as an independent DB; together the shards make up a single logical DB.

Shards themselves are replica sets-- highly availability in units.

Where we have other components as well, like MongoSs, which are our shard cluster routing components.

MongoDB shards data per collection basis.

When you connect to a particular shard you will be able to view only the fraction of the data contained in the cluster.

Data is not organized in any particular order.



use admin

// note: there is no db.startServer() shell helper — mongod must be started from the OS command line

db.shutdownServer()

db.currentOp()

db.killOp(pid)

mongotop

mongostat

db.getProfilingStatus()

db.getProfilingLevel()  -- returns the current profiling level (0-2)

db.setProfilingLevel(1)  ---default conf 100msec

db.setProfilingLevel(1, 5)  -- slowms threshold of 5 ms

db.system.profile.find() 

db.serverStatus() 

db.serverCmdLineOpts() --to check db path

mongod  --fork  --logPath  ---to start mongod in background

http://localhost:28017  -- can be opened in a browser if mongod was started with --httpinterface and --rest (HTTP interface removed in MongoDB 3.6+)

db = db.getSiblingDB('admin')  to create admin user

db.auth("AdminUser", "password")

mongos> testdb=db.getSisterDB("testdb")

testdb


*) How to monitor mongodb performance?

db.getLogComponents() -verbosity (default 0, increase 1-5 for more debugging, -1 means inherited)

db.adminCommand({"getLog" : "global"})

db.setLogLevel( 0, "Index")


================================================ Replica Set Configuration =================================


1. Create three folders

D:\mongodb

one 

two

three


2. mongod --dbpath "D:\mongodb\one" --port 45896 --replSet replicationtest  (starting server)

3.  mongo --port 45896


4.  Open New Terminal

5.  mongod --dbpath "D:\mongodb\two" --port 45897 --replSet replicationtest  (starting server)

6.  mongo --port 45897


7.  Open New Terminal

8.  mongod --dbpath "D:\mongodb\three" --port 45898 --replSet replicationtest  (starting server)

9.  mongo --port 45898


10.  Create configuration file --(In prim DB session mongo --port 45896)


config =  {

 _id : "testreplica" , members :[

{

"_id" : 0 ,

"host" :"localhost:45896"

}

]

}

11.  rs.initiate(config)

12.  rs.status()

13.  rs.add("localhost:45897")

14.  rs.add("localhost:45898")

done


Useful Queries and Commands

-- Database name / unique name / role (primary vs standby).
select name,db_unique_name,database_role from v$database;

-- Instance status on all RAC nodes.
-- Fix: view name was misspelled "gv$instacne".
select instance_name,status from gv$instance;

========================================   ASM ========================================  


-- ASM diskgroup usage in GB.
SELECT name, round(total_mb/1024,2) Total_GB,round((total_mb-free_mb)/1024,2) Used_GB, round(free_mb/1024,2)  Free_GB, round(free_mb/total_mb*100,2) "%free Space"  FROM v$asm_diskgroup;

-- ASM diskgroup usage in TB.
-- Fix: the FREE_TB expression read "round(free_mb/1024/1024.2)" — dividing by
-- 1024.2 instead of rounding to 2 decimals.
SELECT name, round(total_mb/1024/1024,2) Total_TB,round((total_mb-free_mb)/1024/1024,2) Used_TB, round(free_mb/1024/1024,2) FREE_TB, round(free_mb/total_mb*100,2) "%free Space"  FROM v$asm_diskgroup;




=========================================FRA (Flash recovery area)=========================================


SELECT 

  ROUND((A.SPACE_LIMIT / 1024 / 1024 / 1024), 2) AS FLASH_IN_GB, 

  ROUND((A.SPACE_USED / 1024 / 1024 / 1024), 2) AS FLASH_USED_IN_GB, 

  ROUND((A.SPACE_RECLAIMABLE / 1024 / 1024 / 1024), 2) AS FLASH_RECLAIMABLE_GB,

  SUM(B.PERCENT_SPACE_USED)  AS PERCENT_OF_SPACE_USED

FROM 

  V$RECOVERY_FILE_DEST A,

  V$FLASH_RECOVERY_AREA_USAGE B

GROUP BY

  SPACE_LIMIT, 

  SPACE_USED , 

  SPACE_RECLAIMABLE ;


========================================  Tablespaces ========================================  

col tsname  format         a24    justify c heading 'Tablespace'

col nfrags  format     9,999,990  justify c heading 'Free|Frags'

col mxfrag  format   999,990.999  justify c heading 'Largest|Free Frag'

col totsiz  format 9,999,999,999.999  justify c heading 'Total|Mbytes'

col avasiz  format 9,999,990.999  justify c heading 'Available|Mbytes'

col pctusd  format       990.99   justify c heading 'Percent|Used'



set pagesize 200

set linesize 120



select

total.tablespace_name                       tsname,

count(free.bytes)                           nfrags,

round(nvl(max(free.bytes)/1048576,0),2)              mxfrag,

total.bytes/1048576                         totsiz,

round(nvl(sum(free.bytes)/1048576,0),2)              avasiz,

round((1-nvl(sum(free.bytes),0)/total.bytes)*100,2)  pctusd

from

(select tablespace_name, sum(bytes) bytes

from dba_data_files

where tablespace_name = upper('&1')

group by tablespace_name)  total,

dba_free_space  free

where

--total.tablespace_name in ('TBS01S','TBS01M','TBS01L','TBS51S','TBS51M','TBS51L')

--and

total.tablespace_name = free.tablespace_name(+)

and total.tablespace_name like upper('%&1%')

group by

total.tablespace_name,

total.bytes

--order by 6 desc, 1

/


===============================================================

select    tsu.tablespace_name, ceil(tsu.maxmb) "MAXIMUM size MB", ceil(tsu.used_mb) "allocated MB",ceil(tsu.used_mb-nvl(tsf.free_mb,0)) "used MB"

,ceil(ceil(tsu.used_mb-nvl(tsf.free_mb,0))*100/tsu.maxmb) "% used"

from      (select tablespace_name, sum(bytes)/1024/1024 used_mb, sum(maxbytes)/1024/1024 maxmb

                from      dba_data_files group by tablespace_name) tsu

                ,               (select tablespace_name, sum(bytes)/1024/1024 free_mb

                from      dba_free_space group by tablespace_name) tsf

where   tsu.tablespace_name = tsf.tablespace_name (+) order by 5 desc; 



select a.tablespace_name TABLESPACE_NAME, c.status, a.df_count, decode(a.auto_extend,0,'NO','YES') as autoextend, a.value TOTAL_MB,

NVL(b.value,0) USED_MB, a.value-NVL(b.value,0) FREE_MB, 

round((NVL(b.value,0))*100/a.value,2) PCRT_USED FROM

(select tablespace_name, sum(decode(autoextensible,'YES',CASE when MAXBYTES > BYTES THEN MAXBYTES ELSE BYTES END, bytes))/1024/1024 value,

count(file_name) df_count, sum(decode(autoextensible, 'YES', 1, 0)) auto_extend from dba_data_files group by tablespace_name) a,

(select tablespace_name, sum(bytes)/1024/1024 value from dba_segments group by tablespace_name) b,

(select tablespace_name, status, CONTENTS  from dba_tablespaces) c

where a.tablespace_name=b.tablespace_name(+) and a.tablespace_name=c.tablespace_name and c.contents !='UNDO' and c.status  != 'READ ONLY' order by PCRT_USED desc;


set lines 200;

col tablespace_name for a25

select tsu.tablespace_name, ceil(tsu.maxmb) "MAXIMUM size MB", ceil(tsu.used_mb) "allocated MB",ceil(tsu.used_mb-nvl(tsf.free_mb,0)) "used MB", ceil(ceil(tsu.used_mb-nvl(tsf.free_mb,0))*100/tsu.maxmb) "% used" from (select tablespace_name, sum(bytes)/1024/1024 used_mb, sum(maxbytes)/1024/1024 maxmb from dba_data_files group by tablespace_name) tsu,(select tablespace_name, sum(bytes)/1024/1024 free_mb from dba_free_space group by tablespace_name) tsf where tsu.tablespace_name = tsf.tablespace_name (+) and  tsu.tablespace_name='COMPLIANCE_DATA';


========================================  Kill sessions


-- Generate RAC-aware kill statements for INACTIVE sessions.
-- Fix: cross-instance kill syntax is 'sid,serial#,@inst_id' (see the worked
-- example below); the original concatenation produced 'sid,serial#@inst_id'
-- with the comma missing before the @.
select machine,'alter system kill session '''||sid||','||serial#||',@'||inst_id||''' immediate;' from gv$session where status like '%INACTIVE%';




select 'alter system kill session '''||s.sid||','||s.serial#||''' immediate;'||chr(10)||

'!kill -9 '||p.spid

from v$session s, v$process p

where s.sid = &sid

and s.paddr=p.addr 

/



 alter system kill session '5935,40349,@1' immediate;

 

-- Generate kill statements for every session currently on a given sql_id
-- (gv$ variant for RAC, v$ variant for single instance).
-- Fix: added the missing space before "immediate" — the original generated
-- ...'immediate; which is invalid syntax.
select 'alter system kill session '''||sid||','||serial#||''' immediate;' from gv$session where  sql_id='1xdapbmgpdaqz' ;

select 'alter system kill session '''||sid||','||serial#||''' immediate;' from v$session where  sql_id='1xdapbmgpdaqz' ;



=============================================== Kill session on AWS ===============================================


-- Kill idle sessions on AWS RDS. ALTER SYSTEM is not granted on RDS, so the
-- rdsadmin.rdsadmin_util.kill procedure must be used instead.
select 'begin rdsadmin.rdsadmin_util.kill( sid => '||sid||', serial => '||serial#||'); end; ' from v$session where  last_call_et > 1800 and status='INACTIVE';

select 'exec rdsadmin.rdsadmin_util.kill( sid => '||sid||', serial => '||serial#||');  ' from v$session where  last_call_et > 180 and status='INACTIVE';

-- Template (substitute real sid and serial# values, then run):
-- begin
--   rdsadmin.rdsadmin_util.kill(sid => :sid, serial => :serial);
-- end;
-- /


https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.Oracle.CommonDBATasks.System.html#Appendix.Oracle.CommonDBATasks.KillingSession

 

========================================   ASH

select DISTINCT SQL_ID from SYS.DBA_HIST_ACTIVE_SESS_HISTORY where sample_time 

between to_date('28-JUN-2019 12:30:00','dd-mon-yyyy hh24:mi:ss') and to_date('28-JUN-2019 14:30:00','dd-mon-yyyy hh24:mi:ss')

 and event LIKE '%row lock contention%';


======================================== SQL Text of a Session

 

SELECT a.sid,a.username,s.sql_text FROM v$session a,v$sqltext s WHERE a.sql_address = s.address AND a.sql_hash_value = s.hash_value AND a.username LIKE 'HR%' ORDER BY a.username,a.sid,s.piece;



 ========================================  Locking and Blocking ================================


select l1.sid, ' IS BLOCKING ', l2.sid from gv$lock l1, gv$lock l2 where l1.block =1 and l2.request > 0 and l1.id1=l2.id1 and l1.id2=l2.id2;


select sid,serial#,module,machine,sql_id from gv$session where module like '%SQL%';



select count(1) from gv$session where blocking_session is not null;



select sid,serial#,sql_id,machine,status from gv$session where action like '%1765695600%';


select sid,serial#,sql_id,machine,status from gv$session where sid in (241,442,489,662,672,704,925);

========================================   PID to SID  =========================================


-- Map an OS process id (spid) to its database session, RAC-aware.
select a.sid,a.serial#,b.spid,a.OSUSER,a.MACHINE,a.sql_id from gv$session a,gv$process b where a.paddr=b.addr and b.spid=18219; -- 18219 is the OS process id (spid) being looked up



========================================  Redo Log Details ======================================== 


SELECT a.GROUP#, a.THREAD#, a.SEQUENCE#,

 a.ARCHIVED, a.STATUS, b.MEMBER AS REDOLOG_FILE_NAME,

 (a.BYTES/1024/1024) AS SIZE_MB FROM v$log a

JOIN v$logfile b ON a.Group#=b.Group#

ORDER BY a.GROUP#;


========================================   Idea Issue

SELECT 'alter system kill session '''

  ||a.sid

  ||','

  ||a.serial#

  || ''' immediate;' ,

  a.inst_id,

  a.sid,

  a.serial#,

  b.spid,

  a.OSUSER,

  a.MACHINE,

  a.sql_id,

  a.event

FROM gv$session a,

  gv$process b

WHERE a.paddr=b.addr

AND b.spid  in (  18885,29528);


=============================================  DR =====================================

-- Data Guard gap check per thread: last archived-log sequence received
-- (v$archived_log) vs last sequence in v$log_history, and the difference.
-- NOTE(review): on a physical standby, v$log_history reflects logs registered
-- to the instance; for the authoritative applied sequence, also check
-- v$archived_log.applied or v$dataguard_stats — confirm for your configuration.
SQL> SELECT ARCH.THREAD# "Thread", ARCH.SEQUENCE# "Last Sequence Received", APPL.SEQUENCE# "Last Sequence Applied", (ARCH.SEQUENCE# - APPL.SEQUENCE#) "Difference" FROM (SELECT THREAD# ,SEQUENCE# FROM V$ARCHIVED_LOG WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$ARCHIVED_LOG GROUP BY THREAD#)) ARCH,(SELECT THREAD# ,SEQUENCE# FROM V$LOG_HISTORY WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$LOG_HISTORY GROUP BY THREAD#)) APPL WHERE ARCH.THREAD# = APPL.THREAD# ORDER BY 1;


    Thread Last Sequence Received Last Sequence Applied Difference

---------- ---------------------- --------------------- ----------

         1                   1296                   841        455


SQL>


================================================   HWM    ===================

select 'alter database datafile '''||file_name||''' resize '||ceil( (nvl(hwm,1)*8192)/1024/1024 )||'m;' as cmd, bytes/1024/1024 "MB_WILL_BE_SAVED_BY_COMMAND" from dba_data_files a, ( select file_id, max(block_id+blocks-1) as hwm from dba_extents group by file_id ) b where a.file_id = b.file_id(+) and ceil(blocks*8192/1024/1024)- ceil((nvl(hwm,1)* 8192)/1024/1024 ) > 0 and a.tablespace_name NOT IN ('SYSAUX','SYSTEM','UNDOTB1','UNDOTB2','UNDOTB3','UNDOTB4');



================================================== GOt from Sagar====================

set linesize 1000 pagesize 0 feedback off trimspool on

with

hwm as (

  -- get highest block id from each datafiles ( from x$ktfbue as we don't need all joins from dba_extents )

  select /*+ materialize */ ktfbuesegtsn ts#,ktfbuefno relative_fno,max(ktfbuebno+ktfbueblks-1) hwm_blocks

  from sys.x$ktfbue group by ktfbuefno,ktfbuesegtsn

),

hwmts as (

  -- join ts# with tablespace_name

  select name tablespace_name,relative_fno,hwm_blocks

  from hwm join v$tablespace using(ts#)

),

hwmdf as (

  -- join with datafiles, put 5M minimum for datafiles with no extents

  select file_name,nvl(hwm_blocks*(bytes/blocks),5*1024*1024) hwm_bytes,bytes,autoextensible,maxbytes

  from hwmts right join dba_data_files using(tablespace_name,relative_fno)

)

select

case when autoextensible='YES' and maxbytes>=bytes

then -- we generate resize statements only if autoextensible can grow back to current size

  '/* reclaim '||to_char(ceil((bytes-hwm_bytes)/1024/1024),999999)

   ||'M from '||to_char(ceil(bytes/1024/1024),999999)||'M */ '

   ||'alter database datafile '''||file_name||''' resize '||ceil(hwm_bytes/1024/1024)||'M;'

else -- generate only a comment when autoextensible is off

  '/* reclaim '||to_char(ceil((bytes-hwm_bytes)/1024/1024),999999)

   ||'M from '||to_char(ceil(bytes/1024/1024),999999)

   ||'M after setting autoextensible maxsize higher than current size for file '

   || file_name||' */'

end SQL

from hwmdf

where

bytes-hwm_bytes>1024*1024*1024 -- resize only if at least 1GB can be reclaimed

and file_name not like '%undo%'

order by bytes-hwm_bytes desc

/


================================================== RMAN


select to_char(START_TIME,'DD-MM-YY:HH-MI-SS'),to_char(END_TIME,'DD-MM-YY:HH-MI-SS'),STATUS from v$rman_backup_job_details where start_time>sysdate-5;

select to_char(END_TIME,'DD-MM-YY:HH-MI-SS'),STATUS from v$rman_backup_job_details;


Backup Size:-


select ctime "Date", decode(backup_type, 'L', 'Archive Log', 'D', 'Full', 'Incremental') backup_type, bsize "Size MB"

from (select trunc(bp.completion_time) ctime, backup_type, round(sum(bp.bytes/1024/1024),2) bsize

from v$backup_set bs, v$backup_piece bp where bs.set_stamp = bp.set_stamp and bs.set_count  = bp.set_count and bp.status = 'A'

group by trunc(bp.completion_time), backup_type) order by 1, 2;



Backup Progress:

-- RMAN backup/restore progress from long-running operations.
-- Fix: "TOTALWORK! = 0" had a space inside the != operator, which is a
-- syntax error in Oracle.
SELECT SID, SERIAL#, CONTEXT, SOFAR, TOTALWORK,
ROUND (SOFAR/TOTALWORK*100, 2) "% COMPLETE"
FROM V$SESSION_LONGOPS
WHERE OPNAME LIKE 'RMAN%' AND OPNAME NOT LIKE '%aggregate%'
AND TOTALWORK != 0 AND SOFAR <> TOTALWORK;




STEP 1:  To find the status of the jobs:


set lines 300

 col STATUS format a22

 col hrs format 999.99

 select

 SESSION_KEY, SESSION_RECID, SESSION_STAMP,INPUT_TYPE, STATUS,

 to_char(START_TIME,'mm/dd/yy hh24:mi') start_time,

 to_char(END_TIME,'mm/dd/yy hh24:mi')   end_time,

 elapsed_seconds/3600                   hrs

 from V$RMAN_BACKUP_JOB_DETAILS

 order by session_key;

 

 

 STEP 2:  Check the % completed and more detailed information:


-- STEP 2: percentage complete for each running RMAN operation.
-- Fix: "TOTALWORK! = 0" had a space inside the != operator, which is a
-- syntax error in Oracle.
SELECT SID, SERIAL#, CONTEXT, SOFAR, TOTALWORK,
 ROUND (SOFAR/TOTALWORK*100, 2) "% COMPLETE"
 FROM V$SESSION_LONGOPS
 WHERE OPNAME LIKE 'RMAN%' AND OPNAME NOT LIKE '%aggregate%'
 AND TOTALWORK != 0 AND SOFAR <> TOTALWORK;

 

 

 

 STEP 3: Check the logs or output of the running RMAN jobs 

set lines 200

 set pages 1000

 select output from GV$RMAN_OUTPUT

 where session_recid = &SESSION_RECID

 and session_stamp = &SESSION_STAMP

 order by recid;


NOTE: Please enter  SESSION_RECID  and  SESSION_STAMP  from the 1st query mentioned in step 1.




================================================ Per Day Archives



-- Daily archive-log generation: log switches per day and the estimated daily
-- volume (switch count * average online redo log size).
-- rownum < 10 keeps the 9 most recent days: the inline view A is already
-- sorted DESC, so rownum takes the latest days before the final sort.
-- Fix: removed stray text ("per hour :") fused after the terminating semicolon
-- and the commented-out thread# fragments.
SELECT A.*, Round(A.Count#*B.AVG#/1024/1024) Daily_Avg_MB
FROM
(SELECT To_Char(First_Time,'YYYY-MM-DD') DAY, Count(1) Count#, Min(RECID) Min#, Max(RECID) Max#
 FROM v$log_history GROUP BY To_Char(First_Time,'YYYY-MM-DD') ORDER BY 1 DESC) A,
(SELECT Avg(BYTES) AVG#, Count(1) Count#, Max(BYTES) Max_Bytes, Min(BYTES) Min_Bytes FROM v$log) B
WHERE rownum < 10
ORDER BY 1 DESC;


================================================ archive log history per day of last 30 days


SELECT A.*,

Round(A.Count#*B.AVG#/1024/1024) Daily_Avg_Mb

 FROM

 (

 SELECT

 To_Char(First_Time,'YYYY-MM-DD') DAY,

 Count(1) Count#,

 Min(RECID) Min#,

 Max(RECID) Max#

 FROM

 gv$log_history

 GROUP BY

 To_Char(First_Time,'YYYY-MM-DD')

 ORDER

 BY 1 DESC

) A,

 (

 SELECT

 Avg(BYTES) AVG#,

 Count(1) Count#,

 Max(BYTES) Max_Bytes,

 Min(BYTES) Min_Bytes

 FROM

 gv$log

 ) B

 ;


=============================================== Check Hourly Archive log generation


-- Hourly archive-log generation matrix for one day: one column per hour (00-23)
-- counting log switches, plus the daily total.
-- Fix: trunc(first_time)='24-SEP-2019' compared a DATE to a string (implicit,
-- NLS_DATE_FORMAT-dependent conversion) and wrapped the column in trunc().
-- Replaced with an explicit, sargable half-open date range.
select 
to_char(first_time,'YY-MM-DD') day, 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'00',1,0)),'999') "00", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'01',1,0)),'999') "01", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'02',1,0)),'999') "02", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'03',1,0)),'999') "03", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'04',1,0)),'999') "04", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'05',1,0)),'999') "05", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'06',1,0)),'999') "06", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'07',1,0)),'999') "07", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'08',1,0)),'999') "08", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'09',1,0)),'999') "09", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'10',1,0)),'999') "10", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'11',1,0)),'999') "11", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'12',1,0)),'999') "12", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'13',1,0)),'999') "13", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'14',1,0)),'999') "14", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'15',1,0)),'999') "15", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'16',1,0)),'999') "16", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'17',1,0)),'999') "17", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'18',1,0)),'999') "18", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'19',1,0)),'999') "19", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'20',1,0)),'999') "20", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'21',1,0)),'999') "21", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'22',1,0)),'999') "22", 
to_char(sum(decode(substr(to_char(first_time,'HH24'),1,2),'23',1,0)),'999') "23", 
COUNT(*) TOT 
from v$log_history
where first_time >= to_date('24-SEP-2019','DD-MON-YYYY')
  and first_time <  to_date('24-SEP-2019','DD-MON-YYYY') + 1
group by to_char(first_time,'YY-MM-DD') 
order by day ;


============================================  Check current session generating Redo


-- Current sessions generating redo, busiest first.
-- Joins per-session statistics to their names and to session details;
-- background processes are filtered out.
select s.sid,
       n.name,
       s.value,
       sn.username,
       sn.program,
       sn.type,
       sn.module
from v$sesstat s
join v$statname n on n.statistic# = s.statistic#
join v$session sn on sn.sid = s.sid
where n.name like '%redo entries%'          -- redo-generation statistic
  and sn.type not like '%BACKGROUND%'       -- user sessions only
order by s.value desc;



======================================Huge archive log generation .. what to do? what is causing it?

Step 1: Find the session

-- Step 1: segments with the most block changes in an AWR window
-- (db_block_changes_delta tracks redo-generating activity).
-- Fixes: alias typo "maxchages" -> max_changes; implicit comma joins
-- rewritten as explicit ANSI joins.
SELECT to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI') snap_time,
       dhsso.object_name,
       SUM(dhss.db_block_changes_delta) AS max_changes
FROM dba_hist_seg_stat dhss
JOIN dba_hist_seg_stat_obj dhsso
  ON dhss.obj# = dhsso.obj#
 AND dhss.dataobj# = dhsso.dataobj#
JOIN dba_hist_snapshot dhs
  ON dhs.snap_id = dhss.snap_id
 AND dhs.instance_number = dhss.instance_number
WHERE dhs.begin_interval_time BETWEEN to_date('2013_05_22 17', 'YYYY_MM_DD HH24')
                                  AND to_date('2013_05_22 21', 'YYYY_MM_DD HH24')
GROUP BY to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI'),
         dhsso.object_name
ORDER BY max_changes ASC;

Step 2:

Once you know the objects, get the SQL information related to those objects:

The below are the SQLs causing more changes to MGMT_POLICY_ASSOC_CFG%.


-- Step 2: SQL statements in the AWR window whose text references
-- MGMT_POLICY_ASSOC_CFG% (the objects found in step 1).
-- Comma joins rewritten as explicit ANSI joins; ambiguous columns
-- qualified.
SELECT to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI'),
       dbms_lob.substr(dhst.sql_text, 4000, 1),
       dhss.instance_number,
       dhss.sql_id,
       dhss.executions_delta,
       dhss.rows_processed_delta
FROM dba_hist_sqlstat dhss
JOIN dba_hist_snapshot dhs
  ON dhss.snap_id = dhs.snap_id
 AND dhss.instance_number = dhs.instance_number
JOIN dba_hist_sqltext dhst
  ON dhss.sql_id = dhst.sql_id
WHERE upper(dhst.sql_text) LIKE '%MGMT_POLICY_ASSOC_CFG%'
  AND dhs.begin_interval_time BETWEEN to_date('2013_05_22 17', 'YYYY_MM_DD HH24')
                                  AND to_date('2013_05_22 21', 'YYYY_MM_DD HH24');


Step 3:

 Once you know the SQL ID, get the program and user ID that are running it, and inform the user to take care of this query/program. As a DBA you can increase the archive-log backup frequency of the database; once you get permission to kill the program/SQL, you can kill it as well.

Know which program is causing huge redo using the SQL IDs.

    

-- Step 3: map the heavy SQL_IDs to the user/program running them via
-- Active Session History.
-- Fixed: SQL*Plus line numbers ("2 3 4 5 6") had been pasted into the
-- statement text, making it invalid; they are removed.
SELECT instance_number,
       to_char(sample_time, 'yyyy_mm_dd hh24:mi:ss'),
       user_id,
       program
FROM dba_hist_active_sess_history
WHERE sql_id IN ('5xmfm787py0a3', 'bq28gpz5w5qqg', '71t5d196acgr9', 'a8d5br4z9m251')
  AND snap_id BETWEEN 31856 AND 31860;

============================================== Failed Jobs

-- Scheduler jobs that have at least one FAILED run (fixed stray ";;").
select distinct job_name from SYS.DBA_SCHEDULER_JOB_RUN_DETAILS where status='FAILED';


============================================ Table Fragmentation

-- Fragmentation estimate for one table from optimizer statistics
-- (assumes an 8 KB block size via "blocks*8"; gather stats first).
-- Fixes: removed the trailing "/" which re-executed the statement a
-- second time; ORDER BY uses the column alias instead of position 5.
select table_name,
       round(blocks*8/1024/1024,2) "Size Consumed GB",
       round(avg_row_len*num_rows/1024/1024/1024,2) "Actual Size GB",
       round((blocks*8/1024/1024) - (avg_row_len*num_rows/1024/1024/1024),2) "Space wasted GB",
       round((100 - ((avg_row_len*num_rows/1024/1024/1024)*100)/(blocks*8/1024/1024)),2) "% fragmented"
from dba_tables
where table_name = 'TBLCDRPREPAID'
  and round(blocks*8,2) > 0                      -- avoids divide-by-zero below
  and round(avg_row_len*num_rows/1024,2) > 0
order by "% fragmented" desc;


================================================== Object in a perticular DB block


-- List an Object and Object Type via a Given File and Block ID.

--

 

-- List the segment that owns a given file#/block# pair.
-- Fixed off-by-one: an extent of `blocks` blocks starting at block_id
-- spans block_id .. block_id + blocks - 1; the original upper bound
-- (block_id + blocks) also matched the first block of the NEXT extent.
SET PAUSE ON
SET PAUSE 'Press Return to Continue'
SET PAGESIZE 60
SET LINESIZE 300

COLUMN segment_name FORMAT A24
COLUMN segment_type FORMAT A24

SELECT segment_name, segment_type, block_id, blocks
FROM   dba_extents
WHERE  file_id = &file_no
AND    ( &block_value BETWEEN block_id AND ( block_id + blocks - 1 ) )
/

=================================================== DR ======================================


select process,status,client_process,sequence#,block#,active_agents,known_agents from gv$managed_standby;


-- Standby apply lag per thread: last sequence received (archived) vs
-- last sequence applied (log history).
-- Fixed: the quoted alias "Last ... Sequence Applied" was split across
-- two physical lines, embedding a newline inside the column heading.
SELECT arch.thread#                      AS "Thread",
       arch.sequence#                    AS "Last Sequence Received",
       appl.sequence#                    AS "Last Sequence Applied",
       (arch.sequence# - appl.sequence#) AS "Difference"
FROM (SELECT thread#, sequence#
      FROM sys.v_$archived_log
      WHERE (thread#, first_time) IN (SELECT thread#, MAX(first_time)
                                      FROM sys.v_$archived_log
                                      GROUP BY thread#)) arch,
     (SELECT thread#, sequence#
      FROM sys.v_$log_history
      WHERE (thread#, first_time) IN (SELECT thread#, MAX(first_time)
                                      FROM sys.v_$log_history
                                      GROUP BY thread#)) appl
WHERE arch.thread# = appl.thread#
ORDER BY 1;


SELECT THREAD#, LOW_SEQUENCE#, HIGH_SEQUENCE# FROM GV$ARCHIVE_GAP; 


-- Applied status per archived-log sequence.
-- Fixed: the statement carried TWO ORDER BY clauses ("ORDER BY
-- SEQUENCE# order by 1"), which is a syntax error.
SELECT SEQUENCE#, APPLIED FROM V$ARCHIVED_LOG ORDER BY SEQUENCE#;


SELECT record_id, message_text,ORIGINATING_TIMESTAMP

FROM X$DBGALERTEXT

WHERE originating_timestamp > systimestamp - 12 AND regexp_like(message_text, '(ORA-)');



dgmgrl /


show database standbydbname;

show database verbose 'standbydbname';

======================================================= user session


alter session set trace_identifier='user_session_trace';

exec dbms_session.session_trace_enable(waits=> TRUE, binds => TRUE);

exec dbms_session.session_trace_disable;

tkprof tracefilename

output=filename


========================================================= Object Growth

select * from table(dbms_space.object_growth_trend('JISPBILCORBILLINGART501','TBLMACCOUNT','TABLE')) order by timepoint;


========================================================== Mainetenance window check

--below query to check timings of window of auto tuning. 


SELECT window_name,TO_CHAR(window_next_time,'DD-MON-YY HH24:MI:SS'),sql_tune_advisor, optimizer_stats, segment_advisor FROM dba_autotask_window_clients; 


--below will disable window. You may opt for changing timing also. 

-- Disable the Saturday/Sunday maintenance windows.
-- Fixed: a bare DBMS_SCHEDULER.DISABLE(...) line is not executable in
-- SQL*Plus; PL/SQL calls must be wrapped in EXEC (or BEGIN..END).
EXEC DBMS_SCHEDULER.disable(name => 'SYS.SATURDAY_WINDOW', force => TRUE);
EXEC DBMS_SCHEDULER.disable(name => 'SYS.SUNDAY_WINDOW', force => TRUE);


=========================================================== Duplicate Records in Table

-- Show duplicate rows in Tblsystmadt_6mnth: row_number() numbers rows
-- inside each Systemauditid group; dup = 1 is the "keeper", dup > 1
-- marks the extra copies, which are fetched here by rowid.
select * from 
   JISPBILCORBILLINGART501.Tblsystmadt_6mnth
where rowid in
 (select rowid from 
   (select 
     rowid,
     row_number()
    over 
     (partition by Systemauditid order by Systemauditid) dup
    from JISPBILCORBILLINGART501.Tblsystmadt_6mnth)
  where dup > 1);

  

=========================================================== Relocate Service in RAC

srvctl relocate service -d db_unique_name -s service_name -i old_inst_name -t new_inst_name [-f] 


Note: -f [Disconnect all sessions during stop or relocate service operations]


srvctl relocate service -d icncl -s ocssec2 -i icncl3 -t icncl4 


srvctl relocate service -d racdb -s racdb2new -i RACDB1 -t RACDB2



Usage: srvctl relocate service -d <db_unique_name> -s <service_name> {-i <old_inst_name> -t <new_inst_name> | -c <current_node> -n <target_node>} [-f]

       Specify instances for an administrator-managed database, or nodes for a policy managed database

    -d <db_unique_name>      Unique name for the database

    -s <service>             Service name

    -i <old_inst>            Old instance name

    -t <new_inst>            New instance name

    -c <current_node>        Node name to relocate service from

    -n <target_node>         Node name to relocate service to

    -f                       Disconnect all sessions during stop or relocate service operations

    -h                       Print usage


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%    Add service %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

[oracle@myrac1 ~]$ srvctl add service -s myservice1 -d MYRAC -r MYRAC1 -a MYRAC2 -P BASIC

[oracle@myrac2 ~]$ srvctl add service -s myservice1 -d MYRAC -r MYRAC1 -a MYRAC1 -P BASIC


-r  --Preferred node

-a -- Available node

-p  --TAF_Options (NONE,BASIC,PRECONNECT)


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Enable the service %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

[oracle@myrac1 bin]$ srvctl enable service -d MYRAC -s myservice1

PRCC-1010 : myservice2 was already enabled

PRCR-1002 : Resource ora.myrac.myservice2.svc is already enabled


[oracle@myrac2 bin]$ srvctl enable service -d MYRAC -s myservice2

PRCC-1010 : myservice2 was already enabled

PRCR-1002 : Resource ora.myrac.myservice2.svc is already enabled



========================================================= Cluster Speed check

-bash-4.1$ /u01/11.2.0/grid/bin/oifcfg getif

-bash-4.1$ ethtool eth1


======================================================== Partition COmpression 

-- Generate per-partition compression DDL (fixed trailing garbage ";; ;").
select 'ALTER TABLE '||table_name|| ' MODIFY PARTITION '|| partition_name || ' COMPRESS FOR ALL OPERATIONS;' FROM dba_tab_partitions where table_name='TBLCDRPREPAID' and PARTITION_NAME like '%JAN19';


======================================================= Partition PCT Free and PCT Used Change

-- Fixed: the original line was a fragment pasted out of a PL/SQL string
-- concatenation ('|| I.partition_name ||') and was not executable SQL.
-- Generate the PCTFREE/PCTUSED DDL for every partition instead:
SELECT 'ALTER TABLE TBLTCUSTOMERRESERVATION MODIFY PARTITION '
       || partition_name
       || ' PCTFREE 20 PCTUSED 70;'
FROM dba_tab_partitions
WHERE table_name = 'TBLTCUSTOMERRESERVATION';


======================================================= Partition Datafile Drop after droping partitions


-- Generate DROP TABLESPACE for TS_TBLCDRPREPAID% tablespaces that no
-- longer hold any partition of TBLCDRPREPAID nor any other segment.
-- Fixes: SYS.User_Tab_Partitions only sees SYS-owned tables — the
-- application table was never actually checked; DBA_TAB_PARTITIONS is
-- the correct view.  A NULL guard is added because NOT IN returns no
-- rows if the subquery yields any NULL tablespace_name.
SELECT 'DROP TABLESPACE '
       || tablespace_name
       || ' INCLUDING CONTENTS AND DATAFILES;'
FROM dba_tablespaces
WHERE tablespace_name LIKE 'TS_TBLCDRPREPAID%'
AND tablespace_name NOT IN
  (SELECT tablespace_name
   FROM dba_tab_partitions
   WHERE table_name = 'TBLCDRPREPAID'
     AND tablespace_name IS NOT NULL)
AND tablespace_name NOT IN
  (SELECT DISTINCT tablespace_name FROM dba_segments);


========================================================= Database Growth yearly

-:Database Growth Yearly:-

-- Datafile space added per creation year/month — a rough proxy for
-- yearly database growth (based on v$datafile creation_time).
select to_char(creation_time, 'RRRR')          year,
       to_char(creation_time, 'MM')            month,
       round(sum(bytes) / 1024 / 1024 / 1024)  GB
from v$datafile
group by to_char(creation_time, 'RRRR'),
         to_char(creation_time, 'MM')
order by 1, 2;



-------------------------

-- Whole-database size / used / free plus average daily and weekly
-- growth since the oldest datafile's creation time.
-- NOTE(review): "used" here is total file bytes (data + temp + redo)
-- minus DBA_FREE_SPACE, so temp/redo files count as used space; the
-- growth figures are a linear average over the database's whole
-- lifetime, not recent growth.  GROUP BY FREE.P collapses the free-
-- space scalar so the aggregates can be mixed with it.
SELECT

(select min(creation_time) from v$datafile) "Create Time",

(select name from v$database) "Database Name",

ROUND((SUM(USED.BYTES) / 1024 / 1024 ),2) || ' MB' "Database Size",

ROUND((SUM(USED.BYTES) / 1024 / 1024 ) - ROUND(FREE.P / 1024 / 1024 ),2) || ' MB' "Used Space",

ROUND(((SUM(USED.BYTES) / 1024 / 1024 ) - (FREE.P / 1024 / 1024 )) / ROUND(SUM(USED.BYTES) / 1024 / 1024 ,2)*100,2) || '% MB' "Used in %",

ROUND((FREE.P / 1024 / 1024 ),2) || ' MB' "Free Space",

ROUND(((SUM(USED.BYTES) / 1024 / 1024 ) - ((SUM(USED.BYTES) / 1024 / 1024 ) - ROUND(FREE.P / 1024 / 1024 )))/ROUND(SUM(USED.BYTES) / 1024 / 1024,2 )*100,2) || '% MB' "Free in %",

ROUND(((SUM(USED.BYTES) / 1024 / 1024 ) - (FREE.P / 1024 / 1024 ))/(select sysdate-min(creation_time) from v$datafile),2) || ' MB' "Growth DAY",

ROUND(((SUM(USED.BYTES) / 1024 / 1024 ) - (FREE.P / 1024 / 1024 ))/(select sysdate-min(creation_time) from v$datafile)/ROUND((SUM(USED.BYTES) / 1024 / 1024 ),2)*100,3) || '% MB' "Growth DAY in %",

ROUND(((SUM(USED.BYTES) / 1024 / 1024 ) - (FREE.P / 1024 / 1024 ))/(select sysdate-min(creation_time) from v$datafile)*7,2) || ' MB' "Growth WEEK",

ROUND((((SUM(USED.BYTES) / 1024 / 1024 ) - (FREE.P / 1024 / 1024 ))/(select sysdate-min(creation_time) from v$datafile)/ROUND((SUM(USED.BYTES) / 1024 / 1024 ),2)*100)*7,3) || '% MB' "Growth WEEK in %"

FROM    (SELECT BYTES FROM V$DATAFILE

UNION ALL

SELECT BYTES FROM V$TEMPFILE

UNION ALL

SELECT BYTES FROM V$LOG) USED,

(SELECT SUM(BYTES) AS P FROM DBA_FREE_SPACE) FREE

GROUP BY FREE.P;


======================================================= Stats Transfer

exec dbms_stats.create_stat_table(ownname => 'CRESTELCAAMPRDSITI', stattab => 'stattable_1');


exec dbms_stats.export_schema_stats(ownname=>'CRESTELCAAMPRDSITI', stattab=>'stattable_1');


select count(*) from stattable_1;


expdp CRESTELCAAMPRDSITI/CRESTELCAAMPRDSITI directory=DB_BKP dumpfile=CAAMSCHEMASTAT_24may.dmp logfile=CAAMSCHEMASTAT_24may.log tables=stattable_1



impdp CRESTELCAAMPRDSITI/CRESTELCAAMPRDSITI directory=EXPORT_DIR dumpfile=CAAMSCHEMASTAT_24may.dmp logfile=IMPCAAMSCHEMASTAT_24may.log tables=stattable_1


==================================================== Alert Log check query

SELECT record_id, message_text,ORIGINATING_TIMESTAMP

FROM X$DBGALERTEXT

WHERE originating_timestamp > systimestamp - 10 AND regexp_like(message_text, '(ORA-)');



==================================================== SQL PLUS Backspace issue

SQL> !stty erase ^H


=================================================== OEM Status

[oracle@ORAPRI ~]$ export ORACLE_UNQNAME=orasiti

[oracle@ORAPRI ~]$ echo $ORACLE_UNQNAME


[oracle@ORAPRI ~]$ emctl status dbconsole

Oracle Enterprise Manager 11g Database Control Release 11.2.0.3.0 

Copyright (c) 1996, 2011 Oracle Corporation.  All rights reserved.

https://ORAPRI:1158/em/console/aboutApplication

Oracle Enterprise Manager 11g is running. 



======================================================== Delete Duplicate Records

select sr.* from 

tblmserialnumber sr,

(select serial_number,count(1) from tblmserialnumber where is_processed='N' group by serial_number having count(1)>1 order by 2 desc) sr1

where sr.serial_number=sr1.serial_number

and sr.is_processed='N'

order by sr.serial_number,sr.filename;



-- Remove duplicate unprocessed serial numbers, KEEPING one row per
-- serial_number (the one with the lowest rowid).
-- Fixed: the original IN (... having count(1)>1 ...) form deleted
-- EVERY copy of a duplicated serial number, losing the record
-- entirely; the ORDER BY inside the IN subquery was also invalid.
delete from tblmserialnumber t
where t.is_processed = 'N'
  and t.rowid not in (select min(rowid)
                      from tblmserialnumber
                      where is_processed = 'N'
                      group by serial_number);


commit;


================================================== Wait Events


-- Cumulative system-level waits per instance for common I/O events.
-- average_wait is reported in centiseconds; *10 converts it to ms.
select a.inst_id,
       a.event,
       a.total_waits,
       a.time_waited,
       a.average_wait * 10 avg_ms
from gv$system_event a
where a.event in (
        'cell single block physical read',
        'cell list of blocks physical read',
        'cell smart table scan',
        'db file parallel read',
        'db file sequential read',
        'db file scattered read',
        'db file single write',
        'db file parallel write',
        'direct path write',
        'direct path write temp'
      )
order by inst_id;




-- Per-snapshot history of 'db file sequential read' waits on
-- instance 1, with average latency in ms.
-- Fixed: the end of the BETWEEN range was '05/15/2015' — BEFORE the
-- start date '05/05/2019' — so the query could never return rows.
select sn.begin_interval_time,
       total_waits,
       time_waited_micro,
       (time_waited_micro / 1000) / total_waits "mavg ms"
from dba_hist_system_event st,
     dba_hist_snapshot sn
where st.instance_number = 1
  and st.event_name = 'db file sequential read'
  and st.snap_id = sn.snap_id
  and st.dbid = sn.dbid
  and st.instance_number = sn.instance_number
  and sn.begin_interval_time between to_date('05/05/2019 11:00:00', 'mm/dd/yyyy hh24:mi:ss')
                                 and to_date('05/15/2019 13:00:00', 'mm/dd/yyyy hh24:mi:ss');


=========================================== Table Privilage Details ====================================


-- Generate GRANT statements reproducing every table privilege held on
-- objects owned by CRESTELWOMPRD.
select 'grant ' || privilege || ' on ' || owner || '.' || table_name
       || ' to ' || grantee || ';'
from SYS.dba_tab_privs
where owner like 'CRESTELWOMPRD';


========================================== Alert Log query =============================================

SELECT record_id, message_text,ORIGINATING_TIMESTAMP

FROM X$DBGALERTEXT

WHERE originating_timestamp > systimestamp - 6 AND regexp_like(message_text, '(ORA-04031)');


================================================ 12 C

1. Error while user creation in container.


alter session set "_ORACLE_SCRIPT"=true;


2. Password is case Sensitive.


=========================================== ASM DIskgroup offline


crsctl stat res ora.CLSTR.dg -t

Diskgroup status was offline on one node(punrdrn3)


srvctl start diskgroup -diskgroup CLSTR -node punrdrn3


=========================================== Network Setting RHEL7


nmtui


==================================== connection string

# Connect as SYS via EZConnect.  Fixed: SYS can only log in with the
# SYSDBA (or SYSOPER) privilege, so "as sysdba" is required.
sqlplus sys/manager@hostname:1521/servicename as sysdba


=================================== Block COrruption

COLUMN owner FORMAT A20

COLUMN segment_name FORMAT A30

-- Identify the segment that owns block 437 of datafile 6
-- (e.g. a block reported corrupt by RMAN / the alert log).
select relative_fno,
       owner,
       segment_name,
       segment_type
from dba_extents
where file_id = 6
  and 437 between block_id and block_id + blocks - 1;

   

   

COLUMN owner FORMAT A20

COLUMN segment_name FORMAT A30

SELECT DISTINCT owner, segment_name

FROM   v$database_block_corruption dbc

       JOIN dba_extents e ON dbc.file# = e.file_id AND dbc.block# BETWEEN e.block_id and e.block_id+e.blocks-1

ORDER BY 1,2;



   

   select * from v$database_block_corruption;

   

   RMAN> blockrecover datafile 4 block 76;

   

   RMAN> blockrecover corruption list;

   

 


============================


// MongoDB replica-set helpers (mongo shell).
// Fixes: "--" is not a JavaScript comment (and there was a stray ")");
// bare reconfig(cfg) must be rs.reconfig(cfg); rs.add() takes the
// host:port as a string.
var cfg = rs.config()
cfg.members[0].priority = 10  // raise member 0's election priority
rs.reconfig(cfg)
rs.initiate()
rs.add("host:port")           // host:port must be quoted
rs.status()
db.getMongo()
db = connect('mylocalhost:32000')

rs.stepDown(3 * 60)           // step down primary for 3 minutes (argument is seconds)




====================== Table Fragmentaion Query  (Need to test below query)


-- Table fragmentation estimate from optimizer statistics.
-- NOTE(review): "blocks*16/1024" assumes a 16 KB block size — confirm
-- against the tablespace block size (8 KB is the common default).
-- Can raise ORA-01476 (divide by zero) when blocks = 0; relies on
-- current num_rows / avg_row_len / blocks statistics.
select table_name,avg_row_len,round(((blocks*16/1024)),2)||'MB' "TOTAL_SIZE",

round((num_rows*avg_row_len/1024/1024),2)||'Mb' "ACTUAL_SIZE",

round(((blocks*16/1024)-(num_rows*avg_row_len/1024/1024)),2) ||'MB' "FRAGMENTED_SPACE",

(round(((blocks*16/1024)-(num_rows*avg_row_len/1024/1024)),2)/round(((blocks*16/1024)),2))*100 "percentage"

from all_tables WHERE table_name='&TABLE_NAME';




-- Tables whose allocated space exceeds what their rows need according
-- to optimizer statistics, worst offenders first (8 KB block size).
select table_name,
       round(blocks * 8, 2)                          "size (kb)",
       round(num_rows * avg_row_len / 1024, 2)       "actual_data (kb)",
       (round(blocks * 8, 2)
         - round(num_rows * avg_row_len / 1024, 2))  "wasted_space (kb)"
from dba_tables
where (round(blocks * 8, 2) > round(num_rows * avg_row_len / 1024, 2))
order by 4 desc;



===================== System Jobs



Check the enabled job present in oracle database


 SQL> SELECT client_name, status FROM dba_autotask_client;


Disable the following jobs


SQL> EXEC DBMS_AUTO_TASK_ADMIN.DISABLE(client_name=>'auto optimizer stats collection', operation=>NULL, window_name=>NULL);


Enable the auto task jobs:


SQL> EXEC DBMS_AUTO_TASK_ADMIN.ENABLE(client_name=>'auto optimizer stats collection', operation=>NULL, window_name=>NULL);



=====================  SYSAUX Tablespace usage


select occupant_desc, space_usage_kbytes/1024 MB

from v$sysaux_occupants

where space_usage_kbytes > 0

order by space_usage_kbytes;





col MB for 999G990

col blocks for 9999999999

col segment_name for a30

col partition_name for a30

col segment_type for a20

col tablespace_name for a20

select * from (

               select bytes/1024/1024 MB, blocks, s.SEGMENT_NAME, s.partition_name, s.segment_type, s.tablespace_name

                 from dba_segments s

                where owner='SYS'

             order by bytes desc

)

where rownum <=10

;


-- Map LOB segments back to their owning table.
-- Fixed: the first segment name had a trailing space INSIDE the quotes
-- ('...C00038$$ '), so it could never match a real segment name.
select OWNER,TABLE_NAME from dba_lobs where SEGMENT_NAME IN ('SYS_LOB0001310633C00038$$','SYS_LOB0001310627C00004$$');

select max(snap_id) from dba_hist_snapshot where BEGIN_INTERVAL_TIME >= SYSDATE -35;



exec DBMS_WORKLOAD_REPOSITORY.DROP_SNAPSHOT_RANGE(low_snap_id =>xxxx,high_snap_id =>zzzz);



select min(snap_id),max(snap_id) from sys.WRM$_SNAPSHOT where begin_interval_time< to_date('xx.yy.201z','dd.mm.YYYY');



select min(snap_id),MAX(snap_id) from dba_hist_snapshot;

select min(snap_id),MAX(snap_id) from WRH$_ACTIVE_SESSION_HISTORY;

SQL> EXEC dbms_workload_repository.drop_snapshot_range(1,17753,1798927129);



======================================== ASM Details


set lines 300 pages 200

col PATH for a80

col DG_NAME for a10

col DG_STATE for a10

col dsk_no for 99999

col FAILGROUP for a20

select dg.name dg_name, dg.state dg_state, dg.type, d.disk_number dsk_no,

d.path, d.mount_status, d.FAILGROUP, d.state 

from v$asm_diskgroup dg, v$asm_disk d

where dg.group_number=d.group_number

order by dg_name, dsk_no;


========================================= Delete old file


find /oracle/PEC/saptrace/audit -mtime +2 -name '*.aud' -exec rm -rf {} \;

 sudo -u oracle find /goldengate/1220/ogg_home/dirdat/  -type f -name "e8*" -mtime +4 -exec  rm -rf  {} \;


 sudo -u oracle find /goldengate/1220/ogg_home/dirdat/  -type f -name "e8*" -mtime +4 -exec  ls -lrth  {} \;

 

========================================= Useful URL


http://www.br8dba.com/tag/archive-log-gap/


============================================ IMPDP/EXPDP ==================================


########################### IMPDP Job ###################################################


set linesize 200

set pagesize 200

col owner_name format a12

col job_name format a20

col operation format a12

col job_mode format a20

SELECT 

owner_name, 

job_name, 

operation, 

job_mode, 

state 

FROM 

dba_datapump_jobs

where 

state='EXECUTING';





####################### To get the detail information like SID, Serial#, and % of completion:

SELECT 

OPNAME, 

SID, 

SERIAL#, 

CONTEXT, 

SOFAR, 

TOTALWORK,

    ROUND(SOFAR/TOTALWORK*100,2) "%_COMPLETE"

FROM 

V$SESSION_LONGOPS

WHERE 

OPNAME in

(

select 

d.job_name

from 

v$session s, 

v$process p, 

dba_datapump_sessions d

where 

p.addr=s.paddr 

and 

s.saddr=d.saddr

)

AND 

OPNAME NOT LIKE '%aggregate%'

AND 

TOTALWORK != 0

AND 

SOFAR <> TOTALWORK;



############################# To check the waiting status and wait event of the job waiting for:


SELECT   w.sid, w.event, w.seconds_in_wait

   FROM   V$SESSION s, DBA_DATAPUMP_SESSIONS d, V$SESSION_WAIT w

    WHERE   s.saddr = d.saddr AND s.sid = w.sid;

############################# To monitor and perform various operations from the expdp/impdp prompt:


[oracle@orahow ~]$ expdp attach=Job_name

expdp / as sysdba attach=job_name

export>status

export>stop_job

export>start_job

export>kill_job


###################### To check the orphaned datapump jobs. For orphaned jobs the state will be NOT RUNNING.


SET lines 140

COL owner_name FORMAT a10;

COL job_name FORMAT a20

COL state FORMAT a12 

COL operation LIKE owner_name

COL job_mode LIKE owner_name

SELECT owner_name, job_name, operation, job_mode,

state, attached_sessions

FROM dba_datapump_jobs;



====================To check the alert log and query the DBA_RESUMABLE view.


select name, sql_text, error_msg from dba_resumable;


========================= EXPDP example

expdp  \"/ as sysdba\"  directory=DRMUA_OCI dumpfile=SEI_DRM_EXPORT_26Jul20.dmp logfile=SEI_DRM_EXPORT_26Jul20.log schemas=SEI_DRM_EXPORT

expdp  \"/ as sysdba\"  directory=DRMUA_OCI dumpfile=DRMUA_DB_26Jul20.dmp logfile=DRMUA_DB_26Jul20.log schemas=DRMUA_DB



======================== Datafile movment from local to Shared ASM diskgroup


-- Move a datafile from local storage into an ASM diskgroup via RMAN
-- image copy, then switch the database to the copy.
-- Fixed: the size expression divided by 2014 instead of 1024 (twice),
-- so size_tb was wrong.
SQL> select tablespace_name, file_id, file_name, bytes/1024/1024/1024/1024 as size_tb from dba_data_files where file_id=1250;
RMAN> backup as copy datafile 1250 format '+DG_DATA1_X4_4631';
RMAN> list copy of datafile 1250;
SQL> alter database datafile 1250 offline;
RMAN> switch datafile 1250 to copy;
RMAN> recover datafile 1250;
SQL> alter database datafile 1250 online;
SQL> select tablespace_name, file_id, file_name, bytes/1024/1024/1024/1024 as size_tb from dba_data_files where file_id=1250;

 



=================================== Big FIle tablespace


SQL> select tablespace_name,bigfile from   dba_tablespaces where  tablespace_name in ('DS_DATASTORE_DATA');


alter tablespace ODI_STAGE_DATA AUTOEXTEND ON MAXSIZE 2000G; (bigfile tablespace)



ALTER TABLESPACE LZ_DATA AUTOEXTEND ON MAXSIZE UNLIMITED;



-- Tablespace usage summary for INVS_DATA: total MB (counting
-- autoextend headroom when MAXBYTES > BYTES), used MB from
-- DBA_SEGMENTS, free MB and percent used.
-- Fixed: the statement was missing its terminating semicolon.
select a.tablespace_name TABLESPACE_NAME, c.status, a.df_count, decode(a.auto_extend,0,'NO','YES') as autoextend, a.value TOTAL_MB,
NVL(b.value,0) USED_MB, a.value-NVL(b.value,0) FREE_MB,
round((NVL(b.value,0))*100/a.value,2) PCRT_USED FROM
(select tablespace_name, sum(decode(autoextensible,'YES',CASE when MAXBYTES > BYTES THEN MAXBYTES ELSE BYTES END, bytes))/1024/1024 value,
count(file_name) df_count, sum(decode(autoextensible, 'YES', 1, 0)) auto_extend from dba_data_files group by tablespace_name) a,
(select tablespace_name, sum(bytes)/1024/1024 value from dba_segments group by tablespace_name) b,
(select tablespace_name, status, CONTENTS  from dba_tablespaces) c
where a.tablespace_name=b.tablespace_name(+) and a.tablespace_name=c.tablespace_name and c.contents !='UNDO' and c.status  != 'READ ONLY' and a.tablespace_name in ('INVS_DATA');




SELECT

  a.tablespace_name,

  ROUND((a.used_space * b.block_size) / 1048576/1024, 2) AS "Used space (GB)",

  ROUND((a.tablespace_size * b.block_size) / 1048576/1024, 2) AS "Tablespace size (GB)",

  ROUND(a.used_percent, 2) AS "Used %"

FROM dba_tablespace_usage_metrics a

  JOIN dba_tablespaces b ON a.tablespace_name = b.tablespace_name and a.tablespace_name='DS_DATASTORE_DATA';

  

  

  ALTER TABLESPACE TX_OUTC_CLM_DETL_DTA AUTOEXTEND ON MAXSIZE UNLIMITED;


========================================= Single Instance ASM And Database Stop =======================

[rizwanm@dbplgbrdb01 ~]$ ps -ef|grep pmon

oracle    41709      1  0 Jun14 ?        00:29:02 asm_pmon_+ASM

rizwanm   52230  52200  0 00:24 pts/1    00:00:00 grep pmon

oracle    84201      1  0 Jun28 ?        00:57:05 ora_pmon_gbrprod

[rizwanm@dbplgbrdb01 ~]$

[rizwanm@dbplgbrdb01 ~]$ . oraenv

ORACLE_SID = [rizwanm] ? gbrprod

The Oracle base has been set to /ora01/app/oracle

[rizwanm@dbplgbrdb01 ~]$ sudo -u oracle srvctl stop database -d gbrprod

[rizwanm@dbplgbrdb01 ~]$ ps -ef|grep pmon

oracle    41709      1  0 Jun14 ?        00:29:02 asm_pmon_+ASM

rizwanm   54644  52200  0 00:26 pts/1    00:00:00 grep pmon

[rizwanm@dbplgbrdb01 ~]$ . oraenv

ORACLE_SID = [gbrprod] ? +ASM

The Oracle base remains unchanged with value /ora01/app/oracle

[rizwanm@dbplgbrdb01 ~]$ cd $ORACLE_HOME

[rizwanm@dbplgbrdb01 oracle]$ cd bin

[rizwanm@dbplgbrdb01 bin]$ sudo ./crsctl check has

CRS-4638: Oracle High Availability Services is online

[rizwanm@dbplgbrdb01 bin]$ sudo ./crsctl stop has

[rizwanm@dbplgbrdb01 bin]$ sudo ./crsctl disable has




[10:54 AM] Bhavin Dave

    


--for standalone with ASM


. oraenv


+ASM




sudo ./crsctl check has


#crsctl stop has


sudo ./crsctl check has


****************************************************************


7.Disable CRS on each node


#sudo crsctl disable crs


​[10:54 AM] Bhavin Dave

    sent you this doc as well



============================================== ACFS Drivers ===============================


/sbin/lsmod | grep oracle

/grid/app/12.1.0/oracle/bin/acfsload stop

/sbin/lsmod | grep oracle

/grid/app/12.1.0/oracle/bin/acfsload start -s

/grid/app/12.1.0/oracle/bin/acfsroot enable

ls -ltr /dev/asm/



=========================================== Exadata ========================================


smart scan

hybrid columnar compression

cellcli

---disk scrubbing

writeback/writethrough

patching--


 


why exadata--perf

dcli -- 


 


vi dbs_group

db1

db2

db3


 


dcli -l oracle -g dbs_group -e 'ps -ef |grep pmon'


 


exacli

ilom shutdown/startup with gui/command line

exawatcher

 

 

 ======================= Temp tablespace check.

 

 set lines 180 pages 1000

col tablespace_name format a15

col name format a60

col GB format 999

select a.*,b.name,b.status,b.bytes/1024/1024/1024 GB

from v$temp_space_header a,v$tempfile b

where a.FILE_ID=b.file#

order by 5;



======================= AIX Commands=================

oracle@nis10prddwdb03:/home/oracle #uname -a

AIX nis10prddwdb01 1 6 00C2F2C74C00


MEMORY UTILIZATION check

oracle@nis10prddwdb03:/home/oracle #svmon -G

               size       inuse        free         pin     virtual   mmode

memory      9961456     9955804        5652     1876955     9394457     Ded

pg space   12582912     2866204


               work        pers        clnt       other

pin         1575122           0        4841      296992

in use      8173729           0     1782075


PageSize   PoolSize       inuse        pgsp         pin     virtual

s    4 KB         -     2691500      618876      471851     1411033

m   64 KB         -      454019      140458       87819      498964

oracle@nis10prddwdb03:/home/oracle #



============================ Total UNDO USAGE


select a.tablespace_name, SIZEMB, USAGEMB, (SIZEMB - USAGEMB) FREEMB

from (select sum(bytes) / 1024 / 1024 SIZEMB, b.tablespace_name

from dba_data_files a, dba_tablespaces b

where a.tablespace_name = b.tablespace_name

and b.contents = 'UNDO'

group by b.tablespace_name) a,

(select c.tablespace_name, sum(bytes) / 1024 / 1024 USAGEMB

from DBA_UNDO_EXTENTS c

where status <> 'EXPIRED'

group by c.tablespace_name) b

where a.tablespace_name = b.tablespace_name;


1. To check the current size of the Undo tablespace:


-- Current total size (GB) of all ONLINE UNDO tablespaces.
-- Fixed: the statement was missing its terminating semicolon.
select to_char(sum(a.bytes/1024/1024/1024)) as undo_size_in_GB
from v$datafile a, v$tablespace b, dba_tablespaces c
where c.contents = 'UNDO'
  and c.status = 'ONLINE'
  and b.name = c.tablespace_name
  and a.ts# = b.ts#;


2. To check the free space (unallocated) space within Undo tablespace:


select sum(bytes)/1024/1024/1024 "Gb" from dba_free_space where tablespace_name ='UNDOTBS1';



select tablespace_name, sum(blocks)*8/(1024) reusable_space from dba_undo_extents where status='EXPIRED' group by tablespace_name;



-- Undo tablespace breakdown in GB: free (unallocated) vs reusable
-- (EXPIRED extents) vs allocated (ACTIVE/UNEXPIRED extents).
-- Assumes 8 KB blocks (blocks*8/1048576 -> GB).
with free_sz as (
  -- unallocated space per tablespace
  select tablespace_name, sum(f.bytes)/1048576/1024 free_gb
  from dba_free_space f
  group by tablespace_name
),
a as (
  -- undo extents split by reusability
  select tablespace_name,
         sum(case when status = 'EXPIRED' then blocks end)*8/1048576 reusable_space_gb,
         sum(case when status in ('ACTIVE', 'UNEXPIRED') then blocks end)*8/1048576 allocated_gb
  from dba_undo_extents
  where status in ('ACTIVE', 'EXPIRED', 'UNEXPIRED')
  group by tablespace_name
),
undo_sz as (
  -- usable size of each online UNDO tablespace
  select tablespace_name, df.user_bytes/1048576/1024 user_sz_gb
  from dba_tablespaces ts
  join dba_data_files df using (tablespace_name)
  where ts.contents = 'UNDO'
    and ts.status = 'ONLINE'
)
select tablespace_name, user_sz_gb, free_gb, reusable_space_gb, allocated_gb,
       free_gb + reusable_space_gb + allocated_gb total
from undo_sz
join free_sz using (tablespace_name)
join a using (tablespace_name);




======================== User password


SQL> set lines 120

set pages 120

column username format a20

column account_status format a20

column profile format a15

-- Account status / profile / expiry for one user.
-- Fixed: the original used word-processor "smart quotes" around
-- TVIJAY, which are not valid SQL string delimiters.
select username,account_status,profile,EXPIRY_DATE from dba_users where username like 'TVIJAY' order by 1;


USERNAME ACCOUNT_STATUS PROFILE EXPIRY_DA

——————– ——————– ————— ———

TVIJAY EXPIRED(GRACE) DEFAULT 19-DEC-17


SQL>



-- Capture the stored password hash before resetting (smart quotes fixed).
SQL> select password from sys.user$ where name='TVIJAY';


PASSWORD

——————————

03BE7DDA8399B6A7


SQL>


Reset the password


-- Restore the previously captured hash with IDENTIFIED BY VALUES
-- (smart quotes around the hash fixed to straight single quotes).
alter user TVIJAY identified by values '03BE7DDA8399B6A7';


===================

Messages file locations:


Linux: /var/log/messages

Sun /var/adm/messages

HP-UX: /var/adm/syslog/syslog.log

IBM: /bin/errpt -a > messages.out



====================== NOHUP


nohup sqlplus "/ as sysdba" @testscript.sql > testscript.log &


$ nohup sqlplus username/password @script.sql > file.out 2>&1 & 


$ nohup sqlplus "/ as sysdba" @script.sql > file.out 2>&1 &