SQL Analytic Functions – I

--- SQL Analytic Functions
 REFERENCES:
 http://docs.oracle.com/database/121/SQLRF/functions004.htm#SQLRF06174
 http://orafaq.com/node/55

1.) Table with some data to test:

-- Demo table: one row per employee, used by all the analytic-function
-- examples below.
create table cmagno.tb_emp_salary
 (id_emp number,  -- employee id (1..9 in the sample data)
 id_dept number,  -- department id (10 or 20 in the sample data)
 employee varchar2(30),
 salary number);

-- Sample data: 4 employees in dept 10, 5 in dept 20 (with one salary tie
-- at 250 in dept 20, which the RANK examples rely on).
-- Explicit column lists keep these INSERTs valid if columns are ever
-- added to or reordered in the table.
insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (1, 10, 'EMP 1', 100);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (2, 10, 'EMP 2', 150);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (3, 10, 'EMP 3', 200);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (4, 10, 'EMP 4', 250);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (5, 20, 'EMP 5', 100);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (6, 20, 'EMP 6', 150);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (7, 20, 'EMP 7', 200);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (8, 20, 'EMP 8', 250);

insert into cmagno.tb_emp_salary (id_emp, id_dept, employee, salary)
 values (9, 20, 'EMP 9', 250);

SELECT * FROM CMAGNO.TB_EMP_SALARY;

ID_EMP ID_DEPT EMPLOYEE SALARY
 ------- -------------------- ------------- ----------
 1 10 EMP 1 100
 2 10 EMP 2 150
 3 10 EMP 3 200
 4 10 EMP 4 250
 5 20 EMP 5 100
 6 20 EMP 6 150
 7 20 EMP 7 200
 8 20 EMP 8 250
 9 20 EMP 9 250

-- So, let's play with some Analytic Functions

2.) Group By:

-- Plain GROUP BY aggregation: one summary row per department.
-- FIX: the column is ID_DEPT -- the original "group by id_depth"
-- raises ORA-00904: invalid identifier.
select count(*) Employees , sum(salary) Total_Salary, id_dept
 from cmagno.tb_emp_salary
 group by id_dept
 order by id_dept;  -- order by name, not position

EMPLOYEES TOTAL_SALARY ID_DEPT
 --------- -------------------- --------------------
 4 700 10
 5 950 20

-- Same aggregation restricted with HAVING (filters groups after
-- aggregation; WHERE would filter rows before it).
-- FIX: column name corrected from id_depth to id_dept (ORA-00904).
select count(*) Employees , sum(salary) Total_Salary, id_dept
 from cmagno.tb_emp_salary
 group by id_dept
 having count(*) > 4 -- Just dept with more than 4 employees
 order by id_dept;

EMPLOYEES TOTAL_SALARY ID_DEPT
 ----------- -------------------- --------------------
 5 950 20

3.) PARTITION BY
 -- Counting by partition: COUNT(*) OVER (PARTITION BY ...) repeats the
 -- per-department head count on every detail row, without collapsing
 -- rows the way GROUP BY would.
 select ID_EMP, ID_DEPT, count(*) over (partition by ID_DEPT) Total_per_Dept
 from cmagno.tb_emp_salary
 order by 1;

ID_EMP ID_DEPT TOTAL_PER_DEPT
 ------- ------------ --------------------
 1 10 4
 2 10 4
 3 10 4
 4 10 4
 5 20 5
 6 20 5
 7 20 5
 8 20 5
 9 20 5

-- Counting for all rows: an empty OVER () makes the window the whole
-- result set, so every row shows the grand total (9).
-- NOTE(review): the alias Total_per_Dept is misleading here -- this count
-- spans ALL departments; kept unchanged to match the output shown below.
 select ID_EMP, ID_DEPT, count(*) over () Total_per_Dept
 from cmagno.tb_emp_salary
 order by 1;

ID_EMP ID_DEPT TOTAL_PER_DEPT
 ------- ---------- ----------------
 1 10 9
 2 10 9
 3 10 9
 4 10 9
 5 20 9
 6 20 9
 7 20 9
 8 20 9
 9 20 9

-- Showing the Sum of the Salary for the DEPT: SUM(...) OVER (PARTITION BY)
-- attaches each department's total salary to every detail row.
 select ID_EMP, ID_DEPT, sum(salary) over (partition by id_dept) SALARY_dept
 from cmagno.tb_emp_salary
 order by 1;

ID_EMP ID_DEPT SALARY_DEPT
 ------- --------- -------------
 1 10 700
 2 10 700
 3 10 700
 4 10 700
 5 20 950
 6 20 950
 7 20 950
 8 20 950
 9 20 950

4.) RANK

-- RANK leaves gaps after ties (dept 20: two rows rank 1, next rank is 3),
-- while DENSE_RANK does not (next dense rank after the tie is 2).
-- NULLS LAST pushes NULL salaries to the bottom of each partition.
SELECT ID_EMP, ID_DEPT, SALARY,
 RANK() OVER (PARTITION BY ID_DEPT
 ORDER BY SALARY DESC NULLS LAST) RANK,
 DENSE_RANK() OVER (PARTITION BY
 ID_DEPT
 ORDER BY SALARY DESC NULLS LAST) DENSE_RANK
 FROM cmagno.tb_emp_salary
 ORDER BY 2, RANK;

ID_EMP ID_DEPT SALARY RANK DENSE_RANK
 ------- ----------- ----------- --------- -------------
 4 10 250 1 1
 3 10 200 2 2
 2 10 150 3 3
 1 10 100 4 4
 9 20 250 1 1
 8 20 250 1 1
 7 20 200 3 2
 6 20 150 4 3
 5 20 100 5 4

5.) LAG and LEAD

-- LEAD looks at the following row and LAG at the preceding row within
-- each department, ordered by salary descending.  The third argument (0)
-- is the default returned at the partition edges instead of NULL.
SELECT ID_DEPT, ID_EMP, SALARY,
 LEAD(SALARY, 1, 0) OVER (PARTITION BY ID_DEPT ORDER BY SALARY DESC NULLS LAST) NEXT_LOWER_SAL,
 LAG(SALARY, 1, 0) OVER (PARTITION BY ID_DEPT ORDER BY SALARY DESC NULLS LAST) PREV_HIGHER_SAL
 FROM CMAGNO.TB_EMP_SALARY
 ORDER BY ID_DEPT, SALARY DESC;

ID_DEPT ID_EMP SALARY NEXT_LOWER_SAL PREV_HIGHER_SAL
 -------- ---------- --------- ----------------- -----------------
 10 4 250 200 0
 10 3 200 150 250
 10 2 150 100 200
 10 1 100 0 150
 20 9 250 250 0
 20 8 250 200 250
 20 7 200 150 250
 20 6 150 100 200
 20 5 100 0 150

6.) STDDEV: Returns the sample standard deviation of an expression

-- Cumulative (running) sample standard deviation of SALARY: ORDER BY in
-- the OVER clause makes the window grow row by row (default frame
-- RANGE UNBOUNDED PRECEDING .. CURRENT ROW), which is why the first row
-- shows .000 and the value evolves down the result set.
-- FIX: added the missing statement terminator.
SELECT employee, salary,
 STDDEV(salary) OVER (ORDER BY ID_EMP) "StdDev"
 FROM CMAGNO.TB_EMP_SALARY;

EMPLOYEE SALARY StdDev
 ------------ ---------- ----------
 EMP 1 100 .000
 EMP 2 150 35.355
 EMP 3 200 50.000
 EMP 4 250 64.550
 EMP 5 100 65.192
 EMP 6 150 58.452
 EMP 7 200 55.635
 EMP 8 250 59.761
 EMP 9 250 61.237

7.) DENSE_RANK - Another

-- MIN/MAX KEEP (DENSE_RANK FIRST/LAST ORDER BY salary) return the salary
-- of the first- and last-ranked rows per department -- i.e. the lowest
-- and highest salary of each department, repeated on every detail row.
SELECT EMPLOYEE, ID_DEPT, SALARY,
 MIN(SALARY) KEEP (DENSE_RANK FIRST ORDER BY SALARY)
 OVER (PARTITION BY ID_DEPT) "Lower",
 MAX(SALARY) KEEP (DENSE_RANK LAST ORDER BY SALARY)
 OVER (PARTITION BY ID_DEPT) "Higher"
 FROM CMAGNO.TB_EMP_SALARY;

EMPLOYEE ID_DEPT SALARY Lower Higher
 ------------ ------------ ------------ ----------- ------------
 EMP 4 10 250 100 250
 EMP 3 10 200 100 250
 EMP 2 10 150 100 250
 EMP 1 10 100 100 250
 EMP 8 20 250 100 250
 EMP 7 20 200 100 250
 EMP 6 20 150 100 250
 EMP 9 20 250 100 250
 EMP 5 20 100 100 250

LogMiner – Pluggable Database

Log Miner on Multitenant 12c

The following steps will show you some of the differences when using LogMiner with
pluggable databases. This time I will use a flat file for the dictionary.

References:
http://docs.oracle.com/database
/121/SUTIL/GUID-3417B738-374C-4EE3-B15C-3A66E01AE2B5.htm#SUTIL019

--> PDBS:
select * from v$pdbs;

              CON_ID                 DBID NAME                      OPEN_MODE
-------------------- -------------------- ------------------------- ----------
                   2           1147477011 PDB$SEED                  READ ONLY
                   3           3291301321 PDB1                      READ WRITE
                   5           4142533224 PDB3                      READ WRITE

--> Archive Status
--> The time below will be used on DBMS_LOGMNR.start_logmnr

08:42:41 SQL> archive log list;

Database log mode          Archive Mode
Automatic archival             Enabled
Archive destination            /data01/CDB/archives
Oldest online log sequence     167
Next log sequence to archive   169
Current log sequence           169

--> Simulating some operations on PDBs

alter session set container=pdb1;

select name, open_mode from v$pdbs;

NAME                                     OPEN_MODE
---------------------------------------- ----------
PDB1                                     READ WRITE

-- Test table inside PDB1 plus some committed DML, to generate redo for
-- the LogMiner session later on.
create table test.test_pdb1
(num number,
 text varchar2(10))
tablespace TS_PDB1;

-- Explicit column lists keep the INSERTs valid if columns are added later.
insert into test.test_pdb1 (num, text) values (1,'test 1');
insert into test.test_pdb1 (num, text) values (2,'test 2');
insert into test.test_pdb1 (num, text) values (3,'test 3');
insert into test.test_pdb1 (num, text) values (4,'test 4');

commit;

delete test.test_pdb1 where num = 2;
commit;

update test.test_pdb1 set text='UPDATE 3' where num=3;
commit;

select * from test.test_pdb1;

                 NUM TEXT
-------------------- ----------
                   1 test 1
                   3 UPDATE 3
                   4 test 4


alter session set container=pdb3;
 
select name, open_mode from v$pdbs;

NAME                                     OPEN_MODE
---------------------------------------- ----------
PDB3                                     READ WRITE

-- Test table inside PDB3 plus some committed DML, to generate redo for
-- the LogMiner session later on.
create table test.test_pdb3
(num number,
text varchar2(10))
tablespace users;

insert into test.test_pdb3 (num, text) values (1,'test 1');
insert into test.test_pdb3 (num, text) values (2,'test 2');
insert into test.test_pdb3 (num, text) values (3,'test 3');
insert into test.test_pdb3 (num, text) values (4,'test 4');
commit;

-- Deliberate whole-table UPDATE (no WHERE clause): it touches all four
-- rows so LogMiner has several redo records to show for this PDB.
update test.test_pdb3 set text='ERROR';
commit;

select * from test.test_pdb3;

                 NUM TEXT
-------------------- ----------
                   1 ERROR
                   2 ERROR
                   3 ERROR
                   4 ERROR

-------------------------------- STARTING HERE ---------------------------------

alter session set container=cdb$root;

select name, open_mode from v$pdbs;

NAME                                     OPEN_MODE
---------------------------------------- ----------
PDB$SEED                                 READ ONLY
PDB1                                     READ WRITE
PDB3                                     READ WRITE

alter system switch logfile;
--> The time below will be used on DBMS_LOGMNR.start_logmnr

09:04:13 SQL> archive log list;

Database log mode              Archive Mode
Automatic archival             Enabled
Archive destination            /data01/CDB/archives
Oldest online log sequence     173
Next log sequence to archive   175
Current log sequence           175

--> At this time we will work with archived logs below:

/data01/CDB/archives/1_168_900491294.dbf
/data01/CDB/archives/1_169_900491294.dbf
/data01/CDB/archives/1_170_900491294.dbf
/data01/CDB/archives/1_171_900491294.dbf
/data01/CDB/archives/1_172_900491294.dbf
/data01/CDB/archives/1_173_900491294.dbf
/data01/CDB/archives/1_174_900491294.dbf

Note:

http://docs.oracle.com/database/121/SUTIL/GUID-EF8A932F-7E71-4995-893E-E583B90007C2.htm#SUTIL4227

Using a Flat File Dictionary in a CDB

"You cannot take a dictionary snapshot for an entire CDB in a single flat file. 
You must be connected to a distinct PDB, and can take a snapshot of only that 
PDB in a flat file. Thus, when using a flat file dictionary, you can only mine 
the redo logs for the changes associated with the PDB whose data dictionary is 
contained within the flat file."

1.) Creating the dictionary PDB1

-- Switch into PDB1 and dump its data dictionary to a flat file; LogMiner
-- will use it to resolve object names for this PDB's redo.  A flat-file
-- dictionary can only cover a single PDB, never the whole CDB.
alter session set container=pdb1;
create directory scripts as '/home/oracle/dba/scripts';
exec dbms_logmnr_d.build('minerPDB1.dic', 'SCRIPTS', dbms_logmnr_d.store_in_flat_file);

!ls -ltr /home/oracle/dba/scripts/minerPDB1.dic
-rw-r--r-- 1 oracle oinstall 44592353 May 17 10:14 /home/oracle/dba/scripts/minerPDB1.dic

2.) Creating the Dictionary PDB3

-- Switch into PDB3 and dump its data dictionary to its own flat file;
-- each PDB needs a separate dictionary when mining with flat files.
alter session set container=pdb3;
create directory scripts as '/home/oracle/dba/scripts';
exec dbms_logmnr_d.build('minerPDB3.dic', 'SCRIPTS', dbms_logmnr_d.store_in_flat_file);

!ls -ltr /home/oracle/dba/scripts/minerPDB3.dic
-rw-r--r-- 1 oracle oinstall 44638808 May 17 10:16 /home/oracle/dba/scripts/minerPDB3.dic

3.) Including Archived Logs to be mined for PDB1:

-- Register the archived logs to mine: the first one with dbms_logmnr.NEW
-- (starts a fresh log list), every subsequent one with ADDFILE.
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_168_900491294.dbf',Options=>dbms_logmnr.NEW);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_169_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_170_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_171_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_172_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_173_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_174_900491294.dbf',Options=>dbms_logmnr.ADDFILE);

4.) Starting the miner for PDB1:

BEGIN
  -- Start LogMiner with the PDB1 flat-file dictionary, restricted to the
  -- window between the two "archive log list" timestamps captured above.
  -- COMMITTED_DATA_ONLY: rows of rolled-back transactions are skipped.
  -- FIX: format mask changed from HH to HH24 -- HH is the 12-hour clock,
  -- so any wall-clock time past noon (e.g. 13:00:00) would raise ORA-01850.
  DBMS_LOGMNR.start_logmnr (
    dictfilename => '/home/oracle/dba/scripts/minerPDB1.dic',
    starttime    => TO_DATE('17-MAY-2016 08:42:41', 'DD-MON-YYYY HH24:MI:SS'),
    endtime      => TO_DATE('17-MAY-2016 09:04:13', 'DD-MON-YYYY HH24:MI:SS'),
    options      => DBMS_LOGMNR.COMMITTED_DATA_ONLY);
END;
/

5.) Collecting Information for PDB1

desc v$logmnr_contents

select count(*), TABLE_NAME
FROM v$logmnr_contents
GROUP BY TABLE_NAME;

select count(*) from v$logmnr_contents where table_name like '%TEST_PDB%';

            COUNT(*)
--------------------
                   7


col operation format a10
col sql_redo format a80
col sql_undo format a80
col username format a10

select username, operation, sql_redo, sql_undo 
from  v$logmnr_contents
where table_name ='TEST_PDB1';
                   
LogMiner1
                   
6.) Finishing for PDB1

exec DBMS_LOGMNR.END_LOGMNR;

7.) Including Archived Logs to be mined for PDB3:

-- Same archived logs, re-registered for the PDB3 session: NEW resets the
-- log list from the previous run, ADDFILE appends the rest.
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_168_900491294.dbf',Options=>dbms_logmnr.NEW);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_169_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_170_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_171_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_172_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_173_900491294.dbf',Options=>dbms_logmnr.ADDFILE);
exec dbms_logmnr.add_logfile(LogFileName=>'/data01/CDB/archives/1_174_900491294.dbf',Options=>dbms_logmnr.ADDFILE);

8.) Starting the miner for PDB3:

BEGIN
  -- Start LogMiner again, now with the PDB3 flat-file dictionary, over
  -- the same time window.
  -- FIX: format mask changed from HH to HH24 -- HH is the 12-hour clock,
  -- so any wall-clock time past noon (e.g. 13:00:00) would raise ORA-01850.
  DBMS_LOGMNR.start_logmnr (
    dictfilename => '/home/oracle/dba/scripts/minerPDB3.dic',
    starttime    => TO_DATE('17-MAY-2016 08:42:41', 'DD-MON-YYYY HH24:MI:SS'),
    endtime      => TO_DATE('17-MAY-2016 09:04:13', 'DD-MON-YYYY HH24:MI:SS'),
    options      => DBMS_LOGMNR.COMMITTED_DATA_ONLY);
END;
/

9.) Collecting Information for PDB3

desc v$logmnr_contents

select count(*), TABLE_NAME
FROM v$logmnr_contents
GROUP BY TABLE_NAME;

select count(*) from v$logmnr_contents where table_name like '%TEST_PDB%';

            COUNT(*)
--------------------
                   9


col operation format a10
col sql_redo format a80
col sql_undo format a80
col username format a10

select username, operation, sql_redo, sql_undo 
from  v$logmnr_contents
where table_name ='TEST_PDB3';

LogMiner2
                   
10.) Finishing for PDB3

exec DBMS_LOGMNR.END_LOGMNR;

Oracle DB Link to SQL Server

Sometimes we need to connect Oracle to third parties such as SQL Server.
Here I will show the easy steps to achieve this type of integration.

1.) Create an ODBC Connection
In my case my Oracle DB is on Windows 2008 Server,
but even if you are on Linux/Unix it is possible to create ODBC connections.
To create an ODBC connection on Windows:
Start->Programs->Administrative Tools-> ODBC Data Sources
SQLServer1

SQLServer2
Put your SQL Server IP address above:

Note: The field "Name" will be used on the configuration :

SQLServer3

2.) Heterogeneous Services (HS) Configuration:

We need to configure the init.ora for HS; inside the Oracle Home we have:

cd %ORACLE_HOME%
cd hs
cd admin

notepad initdg4odbc.ora

Set the parameters below:

HS_FDS_CONNECT_INFO = dg4odbc  <-- Here is the name of data source
HS_FDS_TRACE_LEVEL = off

Afterwards, we need to configure the TNSNAMES.ORA and LISTENER.ORA.

cd %ORACLE_HOME%/network/admin

--> tnsnames.ora

dg4odbc =
  (DESCRIPTION=
    (ADDRESS=(PROTOCOL=tcp)(HOST=localhost)(PORT=1521))
      (CONNECT_DATA=(SID=dg4odbc))
      (HS=OK)
    )

--> listener.ora

SID_LIST_LISTENER=
  (SID_LIST=
      (SID_DESC=
         (SID_NAME=dg4odbc)
         (ORACLE_HOME=C:\Oracle\product\11.2.0\dbhome_2)
         (PROGRAM=dg4odbc)
      )
  )
  
--> lsnrctl reload

3.) Database Link 
sqlplus / as sysdba
CREATE PUBLIC DATABASE LINK dg4odbc 
CONNECT TO "SA" IDENTIFIED BY "xxxx" USING 'dg4odbc';
Note: the username and password must be enclosed in double quotes.

Now you can use the database link:

select * from <table-name>@dg4odbc;

 

TFA(Trace File Analyzer) on OHAS

References:
Making Applications Highly Available Using 11gR2 Grid Infrastructure 
(Doc ID 1105489.1)

In this post I will show how to add TFACTL as a resource in CRS. In my case
it is a standalone installation, so the resource will be on
OHAS (Oracle High Availability Services).

1.) Changes as ROOT:

cd $TFA_HOME
chmod -R o+r *
chmod -R o+x *

Include oracle in /etc/sudoers so it can execute TFACTL as root. In my case oracle
has the right to execute anything as root; check with your sysadmin.

oracle  ALL=(ALL) NOPASSWD: ALL

On /etc/sudoers set the option below:
Defaults    !requiretty

2.) Script:
To automate the process, we need to create a script that follows the CRS application standard.
You can find it in (Doc ID 1105489.1).
This script should be created as the Grid owner.
-----------------------------------------------------------------------------
#!/bin/sh
# CRS/OHAS action script for the TFA (Trace File Analyzer) resource.
# Implements the standard CRS action-script interface:
#   start | stop | clean | check
# and exits 0 on success, 1 on failure, as CRS expects.
# Author: Carlos Magno
########################################
logfile=/tmp/tfacrs.out
lockfile=/oracle/tfa/lnx01/tfa_home/tfa.lock
CMD=/oracle/tfa/bin/tfactl
# PID of the TFAMain daemon; empty when TFA is not running.
PIDosw=`ps -ef | grep TFAMain | grep -v grep | awk '{print $2}'`
vPar=$1
echo "Option:" $vPar >> $logfile
case "$1" in
  'start')
    echo `/bin/date` " start " `/usr/bin/id` >> $logfile
    if [ -e "$lockfile" ]; then
      # Lock file present: TFA is already up, nothing to do.
      echo "TFA already started" >> $logfile
      RET=0
    else
      sudo $CMD "$vPar"
      # FIX: capture the tfactl exit status immediately; the original
      # read $? after the fi, which is indirect and fragile.
      RET=$?
    fi
    ;;
  'stop')
    echo `/bin/date` " stop " `/usr/bin/id` >> $logfile
    if [ -e "$lockfile" ]; then
      sudo $CMD "$vPar"
      RET=$?
    else
      echo "TFA not running" >> $logfile
      RET=0
    fi
    ;;
  'clean')
    echo `/bin/date` " clean " `/usr/bin/id` >> $logfile
    # FIX: guard against an empty PID list -- "kill -9" with no PID
    # argument fails when TFA is already down.
    if [ -n "$PIDosw" ]; then
      sudo kill -9 $PIDosw
    fi
    RET=0
    ;;
  'check')
    echo `/bin/date` " check " `/usr/bin/id` >> $logfile
    if [ -e "$lockfile" ]; then
      echo "CHECK - TFA is running" >> $logfile
      RET=0
    else
      echo "CHECK - TFA is not running" >> $logfile
      RET=1
    fi
    tail -1  $logfile
    ;;
  *)
    # Unknown action: log it but report success so CRS is not disturbed.
    echo `/bin/date` " unknown " `/usr/bin/id` >> $logfile
    RET=0
    ;;
esac
# Normalize to the 0/1 exit codes CRS expects.
if [ $RET -eq 0 ]; then
  exit 0
else
  exit 1
fi
-----------------------------------------------------------------------------
After creating the script, you need to execute:
chmod +x <script name>

Execute some tests with the script. In my case my script is:
/home/oracle/dba/scripts/oracle_exec_tfa.sh
./oracle_exec_tfa.sh (start|stop|clean|check)

To check: /tmp/tfacrs.out
3.) Adding on CRS
crsctl add resource TFACRS.ora -type ora.local_resource.type -attr 
"AUTO_START=always,ACTION_SCRIPT=/home/oracle/dba/scripts/oracle_exec_tfa.sh"

4.) Start Resource
crsctl start resource TFACRS.ora
crsctl stat res -t

CRSCTL_TFACTL

Log on CRS to check:
/<ORACLE BASE>/diag/crs/<HOSTNAME>/crs/trace/ohasd_scriptagent_oracle.trc