Wednesday 12 October 2016

Script to add DML audit for one user in Mysql Using Triggers.

-- Following scripts create audit tables and then also create audit triggers for full database or for only one table as per need.


 If the user provides only the schema name parameter, then the following process will create audit triggers for all tables in the schema.
And if user provides schema and table_name parameters then only for that table audit triggers is created.

Three procedures are provided.

1. First procedure can create tables to store audit data.
2. Second procedure generates scripts to add DML audit triggers for the above tables.
3. Third procedure calls above two procedures to create audit tables and generate triggers scripts.
4. Follow the next steps to execute script generated with above procedure to create triggers.

Third procedure can be called in two ways
   1. With schema name and table name parameter (For only one table)
   2. Only schema name (For all tables in schema)


Issues that can appear 

1. If your table contains a column with a size not supported by MySQL, the script below will not be able to create the audit table, but it will still provide a script that you can modify to create the audit table manually.

2. If table contains column with spaces in column name then again you need to modify script manually generated by following procedures.

Create a user to be audited or use existing user.

create new user by following command

GRANT USAGE ON *.* TO `myuser`@`%` IDENTIFIED BY 'mypassword' REQUIRE NONE;


1. Following Procedure create audit tables to store audit data.

use mydb;

DELIMITER $$
DROP PROCEDURE IF EXISTS create_aud_tabs$$
-- Creates p_db_name.aud_<tbl_name>: an audit table holding an old_/new_ copy
-- of every column of tbl_name plus audit_type/audit_user/audit_date metadata.
-- The generated DDL is always SELECTed so that, if creation fails (e.g. the
-- combined row size is too large), it can be fixed and run by hand.
CREATE PROCEDURE create_aud_tabs (IN p_db_name varchar(64),IN tbl_name varchar(64))
BEGIN
 DECLARE v_column_list varchar(10000) DEFAULT '';
 DECLARE v_query varchar(10000) DEFAULT '';
 DECLARE has_error int DEFAULT 0;
 -- Swallow SQL errors; the failing DDL is re-printed below instead.
 DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET has_error = 1;
 SET SESSION group_concat_max_len=15000;
 -- Build "old_<col> <type>,new_<col> <type>" pairs in table column order.
 -- FIX: filter on the p_db_name parameter (the original TABLE_SCHEMA=table_schema
 -- was a tautology that mixed in same-named tables from other schemas) and put
 -- ORDER BY inside GROUP_CONCAT, where it actually orders the concatenated list.
 SELECT GROUP_CONCAT('old_', column_name, ' ', column_type,
                     ',new_', column_name, ' ', column_type
                     ORDER BY ordinal_position)
 INTO v_column_list
 FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = p_db_name
 AND TABLE_NAME = tbl_name
 AND TABLE_NAME NOT LIKE 'aud\_%';  -- never build an audit table for an audit table
 -- NOTE(review): if tbl_name does not exist, v_column_list is NULL and the
 -- CONCAT below yields NULL; the handler then reports it as an error.
 SET v_query = CONCAT('create table ',p_db_name,'.aud_',tbl_name,'( audit_id int NOT NULL AUTO_INCREMENT,',v_column_list,',audit_type varchar(30),audit_user varchar(100),audit_date  TIMESTAMP DEFAULT CURRENT_TIMESTAMP ,primary key (audit_id))');
 SELECT v_query;  -- always show the DDL so it can be rerun manually if needed
 SET @sql = v_query;
 PREPARE stmt FROM @sql;
 EXECUTE stmt;
 DEALLOCATE PREPARE stmt;  -- FIX: release the prepared statement
 IF has_error = 1 THEN
  SELECT v_query;  -- re-print the failing DDL for manual correction
  SET has_error = 0;
 END IF;
END$$
DELIMITER ;


--  2. Following Procedure  Create Scripts to add audit for DML operations.

DELIMITER $$
 DROP PROCEDURE IF EXISTS add_aud_triggers $$
-- Generates AFTER INSERT / UPDATE / DELETE audit triggers for tbl_name.
-- Each trigger logs the row image into P_db_name.aud_<tbl_name> whenever the
-- DML is performed by p_user_to_audit, and each CREATE TRIGGER script is
-- written to <p_trigger_script_path>/{ins|upd|del}_trigger_<tbl_name>.sql.
-- Requires the FILE privilege; the target directory must already exist, be
-- writable by the mysqld process, and the target files must not exist yet
-- (SELECT ... INTO DUMPFILE never overwrites an existing file).
CREATE PROCEDURE add_aud_triggers (IN P_db_name varchar(64),IN tbl_name varchar(64),in p_user_to_audit varchar(64) ,in p_trigger_script_path varchar(1000))
BEGIN
 DECLARE v_column_list varchar(20000) DEFAULT '';   -- old_c1,new_c1,old_c2,...
 DECLARE v_del_val_list varchar(20000) DEFAULT '';  -- old.c1,null,old.c2,null,...
 DECLARE v_ins_val_list varchar(20000) DEFAULT '';  -- null,new.c1,null,new.c2,...
 DECLARE v_upd_val_list varchar(20000) DEFAULT '';  -- old.c1,new.c1,old.c2,...
 DECLARE u_trigger varchar(20000) DEFAULT '';
 DECLARE d_trigger varchar(20000) DEFAULT '';
 DECLARE i_trigger varchar(20000) DEFAULT '';
 DECLARE has_error int DEFAULT 0;
 DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET has_error = 1;
 SET SESSION group_concat_max_len=20000;

 -- The four lists below must line up column-for-column, so every GROUP_CONCAT
 -- carries its own ORDER BY (an outer ORDER BY does not order the aggregate).
 -- FIX: the schema filter must use the P_db_name parameter; the original
 -- TABLE_SCHEMA=table_schema was always true and mixed in same-named tables
 -- from other schemas.
 SELECT GROUP_CONCAT('old_', column_name, ',new_', column_name
                     ORDER BY ordinal_position)
 INTO v_column_list
 FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = P_db_name
 AND TABLE_NAME = tbl_name;

 -- UPDATE: both the OLD and the NEW row images are available.
 SELECT GROUP_CONCAT('old.', column_name, ',new.', column_name
                     ORDER BY ordinal_position)
 INTO v_upd_val_list
 FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = P_db_name
 AND TABLE_NAME = tbl_name;

 -- DELETE: only OLD values exist; the new_* columns are stored as NULL.
 SELECT GROUP_CONCAT('old.', column_name, ',null' ORDER BY ordinal_position)
 INTO v_del_val_list
 FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = P_db_name
 AND TABLE_NAME = tbl_name;

 -- INSERT: only NEW values exist; the old_* columns are stored as NULL.
 SELECT GROUP_CONCAT('null,', 'new.', column_name ORDER BY ordinal_position)
 INTO v_ins_val_list
 FROM information_schema.COLUMNS
 WHERE TABLE_SCHEMA = P_db_name
 AND TABLE_NAME = tbl_name;

 -- ---- AFTER UPDATE trigger -------------------------------------------------
 SET u_trigger = CONCAT('DELIMITER $$ \n DROP TRIGGER IF EXISTS tra_',tbl_name,'_upd_aud$$\n');
 SET u_trigger = CONCAT(u_trigger,'CREATE TRIGGER tra_',tbl_name,'_upd_aud AFTER update ON ',tbl_name,' FOR EACH ROW \n');
 SET u_trigger = CONCAT(u_trigger,'BEGIN \n');
 -- Only audit DML issued by the named user (login user with the host stripped).
 SET u_trigger = CONCAT(u_trigger,'if SUBSTRING_INDEX(SESSION_USER(),''@'',1)= ''',p_user_to_audit,''' then \n');
 SET u_trigger = CONCAT(u_trigger,'insert into ',P_db_name,'.aud_',tbl_name,'(',v_column_list,',audit_type ,audit_user ,audit_date) \nvalues (',v_upd_val_list,',''update'',SESSION_USER(),CURRENT_TIMESTAMP); \n');
 SET u_trigger = CONCAT(u_trigger,'END IF;\n');
 SET u_trigger = CONCAT(u_trigger,'END$$ \nDELIMITER ; \n');
 SET @triggertxt = u_trigger;
 SET @savestr = CONCAT('SELECT ', '"', @triggertxt, '"', ' INTO DUMPFILE ', '"', p_trigger_script_path ,'/upd_trigger_',tbl_name,'.sql', '"');
 PREPARE stmt5 FROM @savestr;
 EXECUTE stmt5;
 DEALLOCATE PREPARE stmt5;

 -- ---- AFTER DELETE trigger -------------------------------------------------
 SET d_trigger = CONCAT('DELIMITER $$ \n DROP TRIGGER IF EXISTS tra_',tbl_name,'_del_aud$$\n');
 SET d_trigger = CONCAT(d_trigger,'CREATE TRIGGER tra_',tbl_name,'_del_aud AFTER delete ON ',tbl_name,' FOR EACH ROW \n');
 SET d_trigger = CONCAT(d_trigger,'BEGIN \n');
 SET d_trigger = CONCAT(d_trigger,'if SUBSTRING_INDEX(SESSION_USER(),''@'',1)= ''',p_user_to_audit,''' then \n');
 SET d_trigger = CONCAT(d_trigger,'insert into ',P_db_name,'.aud_',tbl_name,'(',v_column_list,',audit_type ,audit_user ,audit_date) \nvalues (',v_del_val_list,',''delete'',SESSION_USER(),CURRENT_TIMESTAMP); \n');
 SET d_trigger = CONCAT(d_trigger,'END IF;\n');
 SET d_trigger = CONCAT(d_trigger,'END$$ \nDELIMITER ; \n');
 SET @triggertxt = d_trigger;
 SET @savestr = CONCAT('SELECT ', '"', @triggertxt, '"', ' INTO DUMPFILE ', '"', p_trigger_script_path ,'/del_trigger_',tbl_name,'.sql', '"');
 PREPARE stmt5 FROM @savestr;
 EXECUTE stmt5;
 DEALLOCATE PREPARE stmt5;

 -- ---- AFTER INSERT trigger -------------------------------------------------
 SET i_trigger = CONCAT('DELIMITER $$ \n DROP TRIGGER IF EXISTS tra_',tbl_name,'_ins_aud$$\n');
 SET i_trigger = CONCAT(i_trigger,'CREATE TRIGGER tra_',tbl_name,'_ins_aud AFTER insert ON ',tbl_name,' FOR EACH ROW \n');
 SET i_trigger = CONCAT(i_trigger,'BEGIN \n');
 SET i_trigger = CONCAT(i_trigger,'if SUBSTRING_INDEX(SESSION_USER(),''@'',1)= ''',p_user_to_audit,''' then \n');
 SET i_trigger = CONCAT(i_trigger,'insert into ',P_db_name,'.aud_',tbl_name,'(',v_column_list,',audit_type ,audit_user ,audit_date) \nvalues (',v_ins_val_list,',''insert'',SESSION_USER(),CURRENT_TIMESTAMP); \n');
 SET i_trigger = CONCAT(i_trigger,'END IF;\n');
 SET i_trigger = CONCAT(i_trigger,'END$$ \nDELIMITER ; \n');
 SET @triggertxt = i_trigger;
 SET @savestr = CONCAT('SELECT ', '"', @triggertxt, '"', ' INTO DUMPFILE ', '"', p_trigger_script_path ,'/ins_trigger_',tbl_name,'.sql', '"');
 PREPARE stmt5 FROM @savestr;
 EXECUTE stmt5;
 DEALLOCATE PREPARE stmt5;

 -- FIX: the original declared has_error and the handler but never checked it.
 -- On failure (bad path, missing FILE privilege, file already exists, ...)
 -- print the last statement so the script can be saved by hand.
 IF has_error = 1 THEN
  SELECT @savestr;
  SET has_error = 0;
 END IF;
END$$

DELIMITER ;


3. Following procedure calls the above procedures to create audit tables and then generate scripts to create audit triggers for insert, update and delete operations.

DELIMITER $$
DROP PROCEDURE IF EXISTS create_audit$$
-- Driver procedure: for every matching table in p_db_name (one named table
-- when p_table_name is given, otherwise every non-audit table in the schema)
-- create the audit table and generate the audit trigger scripts.
CREATE PROCEDURE create_audit (in p_db_name varchar(64),in p_table_name varchar(64),in p_user_to_audit varchar(64),in p_trigger_script_path varchar(1000))
BEGIN
 DECLARE v_done INTEGER DEFAULT 0;
 DECLARE v_tab varchar(64);

 -- Tables to audit: a single named table, or all of them when the name is
 -- NULL; audit tables themselves (aud_*) are always skipped.
 DECLARE cur_tabs CURSOR FOR
  SELECT table_name
  FROM information_schema.tables
  WHERE TABLE_SCHEMA = p_db_name
  AND (TABLE_NAME = p_table_name OR p_table_name IS NULL)
  AND table_name NOT LIKE 'aud\_%'
  ORDER BY table_name;

 -- Flip the flag once the cursor is exhausted.
 DECLARE CONTINUE HANDLER FOR NOT FOUND SET v_done = 1;

 OPEN cur_tabs;
 SET v_tab = '';
 tab_loop: LOOP
  FETCH cur_tabs INTO v_tab;
  IF v_done = 1 THEN
   LEAVE tab_loop;
  END IF;
  SELECT v_tab AS v_table_name;  -- progress output, one result set per table
  CALL create_aud_tabs(p_db_name, v_tab);
  CALL add_aud_triggers(p_db_name, v_tab, p_user_to_audit, p_trigger_script_path);
 END LOOP tab_loop;
 CLOSE cur_tabs;
END$$
DELIMITER ;


To create audit tables and trigger script for complete schema.

call create_audit('mydb',null,'myuser','/usr/local/mysql/data/triggers');

To create audit tables and trigger scripts for only one table in the schema.
call create_audit('mydb','table_name','myuser','/usr/local/mysql/data/triggers');

To create Audit table for one table use following Procedure.

call create_aud_tabs('mydb','mytable');

To get script for audit trigger of one table use following script.

call add_aud_triggers('mydb','mytable','myuser','/usr/local/mysql/data/triggers');

Note: path "/usr/local/mysql/data/triggers" must be created manually before executing the procedures 

-----------------------------------------------------------------------------------------------------

4. Follow the next steps to execute script generated with above procedure to create triggers.

now the audit script is created and we can proceed  with following commands.

-- Execute following query and and paste result in file called triggers.sql and place this file with other scripts generated at "/usr/local/mysql/data/mydb/triggers"  as given in the above procedures.

-- Emits one "source <file>" line per generated trigger script (del/upd/ins
-- for every non-audit table) so the output can be saved as triggers.sql and
-- replayed from the scripts directory with the mysql client.
select concat ('source ' ,filename )
from (
select  concat('del_trigger_',table_name,'.sql') filename,table_name
from information_schema.tables
where TABLE_SCHEMA='mydb'
and table_name not like 'aud\_%'
union all
select  concat('upd_trigger_',table_name,'.sql') filename,table_name
from information_schema.tables
where TABLE_SCHEMA='mydb'
and table_name not like 'aud\_%'
union all
select  concat('ins_trigger_',table_name,'.sql') filename,table_name
from information_schema.tables
where TABLE_SCHEMA='mydb'
and table_name not like 'aud\_%') files
order by table_name  ;


Now go to the location "/usr/local/mysql/data/mydb/triggers"  and execute following command to create triggers.

-- Command to execute create trigger scripts

 /usr/local/mysql/bin/mysql -u root -h hostname -p -f  mydb < triggers.sql

Helping Script with above procedures


-- Query to generate drop script for audit tables
-- (run the emitted "drop table" statements to remove every aud_* table)
select concat ('drop table ',table_schema,'.',table_name,';')
from information_schema.tables
where table_schema='mydb'
and table_name like 'aud\_%';


-- Query to count tables (base tables only, audit tables excluded)
select count(0)
from information_schema.tables t
where t.table_schema='mydb'
and t.table_type='BASE TABLE'
and t.table_name not like 'aud\_%';

-- Query to count Audit tables

select count(0)
from information_schema.tables t
where t.table_schema='mydb'
and t.table_type='BASE TABLE'
and t.table_name like 'aud\_%';


-- Query to find tables for which script is Unable to create audit table
-- (base tables with no aud_<table> partner; substr(...,5) strips the 'aud_' prefix)

select CONCAT ('SELECT * FROM ',t.table_name,';')
from information_schema.tables t  left outer join information_schema.tables a
on(t.TABLE_NAME=substr(a.table_name,5))
and a.table_schema='mydb'
and a.table_name like 'aud\_%'
where a.table_name is null
and t.table_schema='mydb'
and t.table_name not like 'aud\_%';


-- Query to find tables for which script is Unable to create audit triggers
-- NOTE(review): information_schema.TRIGGERS is not filtered by TRIGGER_SCHEMA
-- here, so a same-named table with triggers in another schema could hide a
-- missing trigger - confirm before relying on the result.

select CONCAT ('SELECT * FROM ',a.table_name,';')
from information_schema.tables a left outer join information_schema.TRIGGERS t
on (t.EVENT_OBJECT_TABLE=a.TABLE_NAME)
where t.EVENT_OBJECT_TABLE is null
and a.table_schema='mydb'
and a.table_name not like 'aud\_%';


-----------------------




-- Query to find tables that have FEWER than 3 triggers, i.e. at least one of
-- the ins/upd/del audit triggers is missing (tr holds tables with >= 3
-- triggers; the right join keeps only tables absent from that set)

select EVENT_OBJECT_TABLE,tb.TABLE_NAME
 from (
 select  t.EVENT_OBJECT_TABLE
 from information_schema.TRIGGERS  t
 group by t.EVENT_OBJECT_TABLE
 having count(0)>=3 ) tr right outer join information_schema.tables tb
 on (tr.EVENT_OBJECT_TABLE=tb.TABLE_NAME)
where tb.TABLE_SCHEMA='mydb'
 --  and tb.table_name='mytable'
  and tr.EVENT_OBJECT_TABLE is null
 and tb.TABLE_name not like 'aud\_%';


Following query will give you the script to grant privileges to the new user on the database.

-- Generates GRANT statements for the audited user on every non-audit table.
select  concat ('grant select ,insert ,update ,delete on mydb.',table_name ,' to ','`myuser`@`%`;')
from information_schema.tables
where TABLE_SCHEMA='mydb'
-- and (TABLE_NAME=p_table_name or p_table_name is null)
and table_name not like 'aud\_%';

MySql usefull queries

1. Query to kill process by a host 

Execute following query this will give you the list of processes executed by the host given in the where clause.

-- Emits one KILL statement per connection from the given host, alongside the
-- full processlist row so each kill can be reviewed before it is executed.
select concat('KILL ',id,';') ,p.*
from information_schema.processlist  p
where host like '192.168.10.11%';

Copy the results and execute on Mysql to kill all process by the host.



2. Query to find master and child table and column names with master table name

-- Lists every child table referencing the given parent table, with the
-- FK and referenced unique-key column lists in constraint order.
-- The two KEY_COLUMN_USAGE joins match on constraint identity only, so the
-- rows form an fk-columns x uk-columns cross product per constraint; the
-- DISTINCT inside each GROUP_CONCAT collapses it back to unique columns.
SELECT rc.`CONSTRAINT_CATALOG`   AS `ChildTable_Catalog`
, rc.`CONSTRAINT_SCHEMA`         AS `ChildTable_Schema`
, rc.`TABLE_NAME`                AS `ChildTable`
, rc.`CONSTRAINT_NAME`           AS `ChildTable_ForeignKey`
, GROUP_CONCAT(DISTINCT fk.`COLUMN_NAME` ORDER BY fk.`ORDINAL_POSITION` ASC) AS `ChildTable_ForeignKey_Columns`
, rc.`UNIQUE_CONSTRAINT_CATALOG` AS `ParentTable_Catalog`
, rc.`UNIQUE_CONSTRAINT_SCHEMA`  AS `ParentTable_Schema`
, rc.`REFERENCED_TABLE_NAME`     AS `Parent_Table`
, rc.`UNIQUE_CONSTRAINT_NAME`    AS `ParentTable_UniqueKey`
-- FIX: the parent key columns must be ordered by the parent key's own
-- ordinal position (uk), not the child FK's (fk).
, GROUP_CONCAT(DISTINCT uk.`COLUMN_NAME` ORDER BY uk.`ORDINAL_POSITION` ASC) AS `ParentTable_UniqueKey_Columns`
-- constraint relation
FROM INFORMATION_SCHEMA.`REFERENTIAL_CONSTRAINTS` AS rc
-- foreign key
INNER JOIN INFORMATION_SCHEMA.`KEY_COLUMN_USAGE` AS fk
    ON rc.`CONSTRAINT_CATALOG`        = fk.`CONSTRAINT_CATALOG`
   AND rc.`CONSTRAINT_SCHEMA`         = fk.`CONSTRAINT_SCHEMA`
   AND rc.`TABLE_NAME`                = fk.`TABLE_NAME`
   AND rc.`CONSTRAINT_NAME`           = fk.`CONSTRAINT_NAME`
-- unique key
INNER JOIN INFORMATION_SCHEMA.`KEY_COLUMN_USAGE` AS uk
    ON rc.`UNIQUE_CONSTRAINT_CATALOG` = uk.`CONSTRAINT_CATALOG`
   AND rc.`UNIQUE_CONSTRAINT_SCHEMA`  = uk.`CONSTRAINT_SCHEMA`
   AND rc.`REFERENCED_TABLE_NAME`     = uk.`TABLE_NAME`
   AND rc.`UNIQUE_CONSTRAINT_NAME`    = uk.`CONSTRAINT_NAME`
-- optional filter condition
WHERE rc.`UNIQUE_CONSTRAINT_SCHEMA` = 'database name'
  AND rc.`REFERENCED_TABLE_NAME`    = 'parent table name'
-- necessary grouping parameters
GROUP BY rc.`CONSTRAINT_CATALOG`
, rc.`CONSTRAINT_SCHEMA`
, rc.`TABLE_NAME`
, rc.`CONSTRAINT_NAME`
, rc.`UNIQUE_CONSTRAINT_CATALOG`
, rc.`UNIQUE_CONSTRAINT_SCHEMA`
, rc.`REFERENCED_TABLE_NAME`
, rc.`UNIQUE_CONSTRAINT_NAME`
-- optional ordering parameters
ORDER BY rc.`CONSTRAINT_CATALOG` ASC
, rc.`CONSTRAINT_SCHEMA` ASC
, rc.`REFERENCED_TABLE_NAME` ASC
, rc.`TABLE_NAME` ASC;


2. Query to find master and child table and column names with child table name in mysql


-- Same report as the parent-side query, but filtered by the child table name.
-- DISTINCT in each GROUP_CONCAT collapses the fk x uk cross product produced
-- by the two constraint-only KEY_COLUMN_USAGE joins.
SELECT rc.`CONSTRAINT_CATALOG`   AS `ChildTable_Catalog`
, rc.`CONSTRAINT_SCHEMA`         AS `ChildTable_Schema`
, rc.`TABLE_NAME`                AS `ChildTable`
, rc.`CONSTRAINT_NAME`           AS `ChildTable_ForeignKey`
, GROUP_CONCAT(DISTINCT fk.`COLUMN_NAME` ORDER BY fk.`ORDINAL_POSITION` ASC) AS `ChildTable_ForeignKey_Columns`
, rc.`UNIQUE_CONSTRAINT_CATALOG` AS `ParentTable_Catalog`
, rc.`UNIQUE_CONSTRAINT_SCHEMA`  AS `ParentTable_Schema`
, rc.`REFERENCED_TABLE_NAME`     AS `Parent_Table`
, rc.`UNIQUE_CONSTRAINT_NAME`    AS `ParentTable_UniqueKey`
-- FIX: order the parent key columns by uk.ORDINAL_POSITION, not fk's.
, GROUP_CONCAT(DISTINCT uk.`COLUMN_NAME` ORDER BY uk.`ORDINAL_POSITION` ASC) AS `ParentTable_UniqueKey_Columns`
-- constraint relation
FROM INFORMATION_SCHEMA.`REFERENTIAL_CONSTRAINTS` AS rc
-- foreign key
INNER JOIN INFORMATION_SCHEMA.`KEY_COLUMN_USAGE` AS fk
    ON rc.`CONSTRAINT_CATALOG`        = fk.`CONSTRAINT_CATALOG`
   AND rc.`CONSTRAINT_SCHEMA`         = fk.`CONSTRAINT_SCHEMA`
   AND rc.`TABLE_NAME`                = fk.`TABLE_NAME`
   AND rc.`CONSTRAINT_NAME`           = fk.`CONSTRAINT_NAME`
-- unique key
INNER JOIN INFORMATION_SCHEMA.`KEY_COLUMN_USAGE` AS uk
    ON rc.`UNIQUE_CONSTRAINT_CATALOG` = uk.`CONSTRAINT_CATALOG`
   AND rc.`UNIQUE_CONSTRAINT_SCHEMA`  = uk.`CONSTRAINT_SCHEMA`
   AND rc.`REFERENCED_TABLE_NAME`     = uk.`TABLE_NAME`
   AND rc.`UNIQUE_CONSTRAINT_NAME`    = uk.`CONSTRAINT_NAME`
-- optional filter condition
WHERE rc.`UNIQUE_CONSTRAINT_SCHEMA` = 'database name'
  and fk.`TABLE_NAME`='child table name'
-- necessary grouping parameters
GROUP BY rc.`CONSTRAINT_CATALOG`
, rc.`CONSTRAINT_SCHEMA`
, rc.`TABLE_NAME`
, rc.`CONSTRAINT_NAME`
, rc.`UNIQUE_CONSTRAINT_CATALOG`
, rc.`UNIQUE_CONSTRAINT_SCHEMA`
, rc.`REFERENCED_TABLE_NAME`
, rc.`UNIQUE_CONSTRAINT_NAME`
-- optional ordering parameters
ORDER BY rc.`CONSTRAINT_CATALOG` ASC
, rc.`CONSTRAINT_SCHEMA` ASC
, rc.`REFERENCED_TABLE_NAME` ASC
, rc.`TABLE_NAME` ASC;

Thursday 6 October 2016

Rebuild or compile Indexes on Mysql ndb cluster

To rebuild or verify indexes on MySQL we can use the alter table command.

this will tell you if any of index is having issues.
 i.e corrupt ,data issue related to index.

alter table table_name engine= engine_name;


1. Query to generate the script for those tables whose indexes need to be rebuilt or compiled.


-- Emits one "alter table ... engine=<current engine>" statement per table;
-- rebuilding a table into its own engine rebuilds its indexes.
select concat('alter table ', TABLE_SCHEMA,'.',table_name , ' engine=' , engine ,';' ) alters from information_schema.TABLES
where TABLE_SCHEMA='mydb';

2. Copy generated results and executed on either mysql command line or workbench GUI tool

i.e on mysql command line.

./mysql -u root -p -h hostname

alter table mydb.alteridentity engine=ndbcluster;
alter table mydb.audit_trail engine=ndbcluster;


Tuesday 27 September 2016

Setup Docker and then Postgres on the Docker Centos6.

Follow the steps one to six under heading Install with yum given in below link to setup and start docker.


Install with yum

  1. Log into your machine as a user with sudo or root privileges.
  2. Make sure your existing yum packages are up-to-date.
    $ sudo yum update
    
  3. Add the yum repo.
    $ sudo tee /etc/yum.repos.d/docker.repo <<-'EOF'
    [dockerrepo]
    name=Docker Repository
    baseurl=https://yum.dockerproject.org/repo/main/centos/6/
    enabled=1
    gpgcheck=1
    gpgkey=https://yum.dockerproject.org/gpg
    EOF
    
  4. Install the Docker package.
    $ sudo yum install docker-engine
    
  5. Start the Docker daemon.
    $ sudo service docker start
    
  6. Verify docker is installed correctly by running a test image in a container.
    $ sudo docker run hello-world
    Unable to find image 'hello-world:latest' locally
        latest: Pulling from hello-world
        a8219747be10: Pull complete
        91c95931e552: Already exists
        hello-world:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security.
        Digest: sha256:aa03e5d0d5553b4c3473e89c8619cf79df368babd1.7.1cf5daeb82aab55838d
        Status: Downloaded newer image for hello-world:latest
        Hello from Docker.
        This message shows that your installation appears to be working correctly.
    
        To generate this message, Docker took the following steps:
         1. The Docker client contacted the Docker daemon.
         2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
                (Assuming it was not already locally available.)
         3. The Docker daemon created a new container from that image which runs the
                executable that produces the output you are currently reading.
         4. The Docker daemon streamed that output to the Docker client, which sent it
                to your terminal.
    
        To try something more ambitious, you can run an Ubuntu container with:
         $ docker run -it ubuntu bash
    
        For more examples and ideas, visit:
         http://docs.docker.com/userguide/
  7. If you are behind proxy then follow the below link and restart the docker as given below.
  8. https://crondev.com/running-docker-behind-proxy/
For RedHat/CentOS version 6:



Now you are ready to setup PostgreSQL.


Execute following command and it will pull latest version of PostgreSQL and follow link below to start and connect PostgreSQL with docker.

Link is given below.



docker pull postgres




start a postgres instance


$ docker run --name some-postgres -e POSTGRES_PASSWORD=mysecretpassword -d postgres


docker run --name postgres9.5 -e POSTGRES_PASSWORD=postgres -d postgres




Start psql


$ docker run -it --rm --link some-postgres:postgres postgres psql -h postgres -U postgres
psql (9.5.0)
Type "help" for help.

postgres=# SELECT 1;
 ?column? 
----------
        1
(1 row)





docker run -it --rm --link postgres9.5:postgres postgres psql -h postgres -U postgres



Password for user postgres:
psql (9.5.4)
Type "help" for help.

postgres=# select 1;
 ?column?
----------
        1
(1 row)


postgres=# \q





First execute following command to list running docker.

[root@centos6_pri lib]# docker ps -a

It will show you the names of all the docker images available and their status; in my case the name is "postgres9.5"


To login to Docker

 docker exec -i -t postgres9.5 /bin/bash

Tuesday 6 September 2016

How to check if Oracle Database instance is up and running?


I. Log into infrastructure instance  & login as sysdba;


\$ sqlplus /nolog
SQL> conn / as sysdba;
SQL> select status from v$instance;

STATUS
------------------------------------
OPEN

II. Log into database server & issue the following command

\$ ps -ef | grep pmon
ocsinfra 11021     1   0 09:15:15 ?           0:09 ora_pmon_orcl
ocsinfra 17960 17592   0 10:47:15 pts/2       0:00 grep pmon

If DB instance is running, then the above command returns the pmon process running as above.

Monitoring Multiple Oracle database tablespaces on multiple machines.

This post help you to monitor your multiple database's tablespaces on multiple servers by collecting information into a table from each database using a job and then use that table to report the tablespace status.


-- Snapshot table for tablespace usage figures collected by p_log_tablespace.
-- All MEGS_* columns are sizes in megabytes; MAX is the autoextend limit (MB)
-- and USED_PCT_OF_MAX is usage expressed as a percentage of that limit.
CREATE TABLE scripts.VERIFY_TABLESPACE
(
  TABLESPACE_NAME  VARCHAR2(30 BYTE),
  MEGS_ALLOC       NUMBER,
  MEGS_FREE        NUMBER,
  MEGS_USED        NUMBER,
  PCT_FREE         NUMBER,
  PCT_USED         NUMBER,
  MAX              NUMBER,
  USED_PCT_OF_MAX  NUMBER
)
TABLESPACE USERS
PCTUSED    0
PCTFREE    10
INITRANS   1
MAXTRANS   255
STORAGE    (
            INITIAL          64K
            MINEXTENTS       1
            MAXEXTENTS       UNLIMITED
            PCTINCREASE      0
            BUFFER_POOL      DEFAULT
           )
LOGGING
NOCOMPRESS
NOCACHE
NOPARALLEL
MONITORING
/

create or replace public synonym VERIFY_TABLESPACE for scripts.VERIFY_TABLESPACE
/
GRANT INSERT, SELECT ON scripts.VERIFY_TABLESPACE TO PUBLIC
/

-- Database link to the central reporting database (tns alias BACKUPREP below).
-- FIX: the original ended this statement with both ';' and '/'; in SQL*Plus
-- the trailing '/' re-executes the statement, which then fails with
-- ORA-02011 (duplicate database link name).  One terminator only.
CREATE PUBLIC DATABASE LINK monitor_db
CONNECT TO smbackup
IDENTIFIED BY smbackup
USING 'backuprep'
/

grant select any dictionary to scripts
/
-- Tablespace usage view: first branch covers permanent tablespaces
-- (dba_data_files / dba_free_space), the UNION ALL branch covers temporary
-- tablespaces (v$temp_space_header / v$temp_extent_pool).  All sizes in MB.
create or replace force view scripts.v_verify_tablespace
as
select  a.tablespace_name,
       round(a.bytes_alloc / 1024 / 1024) megs_alloc,
       round(nvl(b.bytes_free, 0) / 1024 / 1024) megs_free,
       round((a.bytes_alloc - nvl(b.bytes_free, 0)) / 1024 / 1024) megs_used,
       round((nvl(b.bytes_free, 0) / a.bytes_alloc) * 100) Pct_Free,
       100 - round((nvl(b.bytes_free, 0) / a.bytes_alloc) * 100) Pct_used,
       round(maxbytes/1048576) Max,
       -- NOTE(review): divides by round(maxbytes/1048576); a tablespace whose
       -- maxbytes rounds to 0 MB would raise a divide-by-zero - confirm.
       round(round((a.bytes_alloc - nvl(b.bytes_free, 0)) / 1024 / 1024) / round(maxbytes/1048576)*100) used_pct_of_max
from  ( select  f.tablespace_name,
               sum(f.bytes) bytes_alloc,
               -- autoextensible files count up to their maxbytes; fixed files
               -- count their current size
               sum(decode(f.autoextensible, 'YES',f.maxbytes,'NO', f.bytes)) maxbytes
        from dba_data_files f
        group by tablespace_name) a,
      ( select  f.tablespace_name,
               sum(f.bytes)  bytes_free
        from dba_free_space f
        group by tablespace_name) b
-- old-style Oracle outer join: keep tablespaces with no free-space rows
where a.tablespace_name = b.tablespace_name (+)
union all
select h.tablespace_name,
       round(sum(h.bytes_free + h.bytes_used) / 1048576) megs_alloc,
       round(sum((h.bytes_free + h.bytes_used) - nvl(p.bytes_used, 0)) / 1048576) megs_free,
       round(sum(nvl(p.bytes_used, 0))/ 1048576) megs_used,
       round((sum((h.bytes_free + h.bytes_used) - nvl(p.bytes_used, 0)) / sum(h.bytes_used + h.bytes_free)) * 100) Pct_Free,
       100 - round((sum((h.bytes_free + h.bytes_used) - nvl(p.bytes_used, 0)) / sum(h.bytes_used + h.bytes_free)) * 100) pct_used,
       round(sum(f.maxbytes) / 1048576) max,
       round( round(sum(nvl(p.bytes_used, 0))/ 1048576)  / round(sum(f.maxbytes) / 1048576) *100) used_pct_of_max
from   sys.v_$TEMP_SPACE_HEADER h, sys.v_$Temp_extent_pool p, dba_temp_files f
where  p.file_id(+) = h.file_id
and    p.tablespace_name(+) = h.tablespace_name
and    f.file_id = h.file_id
and    f.tablespace_name = h.tablespace_name
group by h.tablespace_name
/

create or replace public synonym v_verify_tablespace for scripts.v_verify_tablespace
/


-- Collector procedure run by the scheduled job: snapshots v_verify_tablespace
-- into the local history table, then replaces this client's rows in the
-- central reporting table over the monitor_db link.
create or replace procedure scripts.p_log_tablespace
as
begin
-- local history snapshot
insert into verify_tablespace(
TABLESPACE_NAME, MEGS_ALLOC, MEGS_FREE, MEGS_USED, PCT_FREE, PCT_USED, MAX, USED_PCT_OF_MAX
)
select
TABLESPACE_NAME, MEGS_ALLOC, MEGS_FREE, MEGS_USED, PCT_FREE, PCT_USED, MAX, USED_PCT_OF_MAX
from v_verify_tablespace;
-- replace this client's rows on the central reporting database;
-- 'ASPEN' is the client name and must be changed per installation (Step 2)
delete from smbackup.verify_tablespace@monitor_db where client = 'ASPEN';
commit;
insert into smbackup.verify_tablespace@monitor_db(
CLIENT, TABLESPACE_NAME, MEGS_ALLOC, MEGS_FREE, MEGS_USED, PCT_FREE, PCT_USED, MAX, USED_PCT_OF_MAX
)
select
'ASPEN', TABLESPACE_NAME, MEGS_ALLOC, MEGS_FREE, MEGS_USED, PCT_FREE, PCT_USED, MAX, USED_PCT_OF_MAX
from verify_tablespace;
commit;
exception
-- NOTE(review): deliberately swallows every error so a broken db link never
-- makes the scheduled job fail, but it also hides real problems silently.
when others then null;
end;
/
create or replace public synonym  p_log_tablespace for scripts.p_log_tablespace
/


-- Schedules the collector via DBMS_JOB: first run at the given next_date,
-- then daily at 06:00 (trunc(SYSDATE+1)+6/24 = tomorrow at 6 AM, re-evaluated
-- after each run).  The COMMIT is required for the submitted job to become
-- active.  NOTE(review): DBMS_JOB is superseded by DBMS_SCHEDULER on newer
-- Oracle releases - consider migrating.
DECLARE
  X NUMBER;
BEGIN
  SYS.DBMS_JOB.SUBMIT
  ( job       => X
   ,what      => 'scripts.p_log_tablespace;'
   ,next_date => to_date('19/07/2011 7:00:00','dd/mm/yyyy hh24:mi:ss')
   ,interval  => 'trunc(SYSDATE+1)+6/24'
   ,no_parse  => FALSE
  );
  SYS.DBMS_OUTPUT.PUT_LINE('Job Number is: ' || to_char(x));
COMMIT;
END;
/

Reporting_server Tns

BACKUPREP =
 (DESCRIPTION = 
   (ADDRESS_LIST =
     (ADDRESS = (PROTOCOL = TCP)(HOST = 10.11.20.23)(PORT = 1521))
   )
 (CONNECT_DATA =
   (SERVICE_NAME = ORA12)
 ) 
) 

 Step – 1:

add your reporting server tns entry on client machine in                        tnsnames.ora to create db link.
 Step – 2:

Replace client name in last procedure in attached script file 
 Step – 3:

Execute scripts in attached file one-by-one carefully on client                database. And also create database job at suitable time as in my                case is 6:45 AM 
 Step – 4:

Test result data on BACKUPREP {tns given above}



Monday 13 June 2016

Create/Configuring the Recovery Catalog Database


Allocate space for the recovery catalog. Consider:
– Number of databases supported by the recovery catalog
– Number of archived redo log files and backups recorded
– Use of RMAN stored scripts





Connect to the database where recovery catalog needs to be created and execute following Commands.

sqlplus sys as sysdba


Create a tablespace for the recovery catalog, which becomes the default tablespace for the recovery catalog owner.

CREATE TABLESPACE rmanbkup datafile '/home/oracle/product/11.2.0/dbhome_1/oradata/ow12c/rmanbkup.dbf' SIZE 75M autoextend ON NEXT 2M MAXSIZE UNLIMITED;


Create the recovery catalog owner.


CREATE USER rmanbkup IDENTIFIED BY oracle
 TEMPORARY TABLESPACE temp
 DEFAULT TABLESPACE rmanbkup
 QUOTA UNLIMITED ON rmanbkup ;

Grant the RECOVERY_CATALOG_OWNER role

GRANT recovery_catalog_owner TO rmanbkup;

Connect to the recovery catalog database as the catalog owner and create catalog.

[oracle@rhel6 ~/Desktop]$ rman catalog rmanbkup/oracle@ow12c

Recovery Manager: Release 11.2.0.3.0 - Production on Fri May 27 16:17:18 2016

Copyright (c) 1982, 2011, Oracle and/or its affiliates.  All rights reserved.

connected to recovery catalog database

RMAN> CREATE CATALOG;

recovery catalog created

RMAN>  exit;


Now connect to catalog with target database to register database to recovery catalog 


RMAN performs the following actions:

• Creates rows in the recovery catalog tables for the target database
• Copies data from the target database control file to the recovery catalog tables
• Synchronizes the recovery catalog with the control file


[oracle@rhel6 oradata]$ rman target sys/oracle@ow11g catalog rmanbkup/oracle@ow12c

Recovery Manager: Release 11.2.0.3.0 - Production on Mon Jun 13 13:00:11 2016

Copyright (c) 1982, 2011, Oracle and/or its affiliates.  All rights reserved.

connected to target database: OW11G (DBID=1397584131)
connected to recovery catalog database

RMAN> base: OW11G (DBID=1397584131)

connected to recovery catalog database


RMAN> REGISTER DATABASE;

database registered in recovery catalog
starting full resync of recovery catalog
full resync complete


Manually Resynchronizing the Recovery Catalog

RMAN> resync catalog;




To Unregistering a Target Database from the Recovery Catalog

• This removes information about the target database from the recovery catalog.
• Use this when you no longer want the target database to be defined in the recovery catalog.


$ rman target sys/oracle@ow11g catalog rmanbkup/oracle@ow12c

RMAN> UNREGISTER DATABASE;

database name is "OW11G" and DBID is 1397584131

Do you really want to unregister the database (enter YES or NO)? yes
database unregistered from the recovery catalog



Cataloging Additional Backup Files

Examples of cataloging a control file, data file, archived redo log file, and backup piece follow:

RMAN> CATALOG CONTROLFILECOPY
'/disk1/controlfile_bkup/2009_01_01/control01.ctl';
RMAN> CATALOG DATAFILECOPY
'/disk1/datafile_bkup/2009_01_01/users01.dbf';
RMAN> CATALOG ARCHIVELOG '/disk1/arch_logs/archive1_731.log',
'/disk1/arch_logs/archive1_732.log';
RMAN> CATALOG BACKUPPIECE '/disk1/backups/backup_820.bkp';



You can catalog all files in the currently enabled Flash Recovery Area as follows:
RMAN> CATALOG RECOVERY AREA NOPROMPT;


START WITH Option
Use the START WITH option to catalog all files found in the directory tree specified. Provide a
prefix that indicates the directory and possibly a file prefix to look for. You cannot use wildcards;
this is only a prefix.


All types of backup files that are found in the specified directory and subdirectories are cataloged.
Suppose you have several backup files in the /tmp/arch_logs directory. The following
command catalogs all of them:

RMAN> CATALOG START WITH '/tmp/arch_logs/';

Suppose you want to be sure to catalog only those files in the /tmp directory whose file names start
with the string bset. The following accomplishes that:

RMAN> CATALOG START WITH '/tmp/bset';

This command also catalogs any backup files that are found in directory trees that begin with
/tmp/bset.

The CATALOG command can be used without being connected to a recovery catalog.



Sunday 12 June 2016

Oracle Dataguard Switch Over and Failover

Role Transitions/Switch-over  Involving Physical Standby Databases
------------------------------------------------------------

To check the switchover status of database issue the following commands on primary database

SQL>select switchover_status from v$database;

A value of TO STANDBY or SESSIONS ACTIVE indicates that the primary database can be
switched to the standby role.

SQL>alter database commit to switchover to physical standby with session shutdown;
then
SQL>shutdown immediate;
SQL>startup nomount;
SQL>alter database mount standby database;

The above commands will create a new marker archived log which will be applied on the standby database as a signal that the
standby is ready to work as the primary database. You can confirm it with the following commands on the standby database.

SQL>select switchover_status from v$database;

If the switchover status returned by the command is "TO_PRIMARY" then it means the marker archived log has been received and your database
is ready to change its status to primary.

Now issue the following command  on standby to change the status of standby database to primary.

SQL>ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY WITH SESSION SHUTDOWN;

SQL>shutdown immediate;
SQL>startup;

Now enjoy your new primary database.

following command on current standby DB .

 ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE disconnect;

select sequence#,applied from v$archived_log order by 1;


Performing a Failover to a Physical Standby Database
=====================================================
1-check V$ARCHIVE_GAP  view for any gap. If there is gap manually copy the archives and register in standby.

 select * from v$archive_gap;

2- ALTER DATABASE REGISTER PHYSICAL LOGFILE 'filespec1';
               repeat for all missing archived files until the gap is resolved.

3- Change the physical standby status step by step by following commands.

Stop Redo Apply
---------------
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;


SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE FINISH;

Incase of error execute below statement otherwise skip it
SQL> ALTER DATABASE ACTIVATE PHYSICAL STANDBY DATABASE;
------


-----------check status and switchover to primary
SQL> SELECT SWITCHOVER_STATUS FROM V$DATABASE ;


SQL> ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY WITH SESSION SHUTDOWN;
SQL> ALTER DATABASE OPEN;

Open Standby Database for reporting Purpose

In this post we will perform different operations on standby like  Switch over ,Fail Over  ,Open standby database for reporting purpose,check standby database status.

Step-I Create standby control file.
SQL>alter database create standby controlfile as 'D:\docs\data\control.ctl';

move primary db and redo file plus standby control.clt file, password  and spfile.

On standby

1. make copy of standby control file
2. modify pfile with stand by parameters from oranet2
3 startup mount database with this pfile.
create spfile from pfile
alter database open

For Real Time apply.
-------------------
sqlplus / as sysdba

SQL>Startup mount

SQL> ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE disconnect;


Step-II Change the status of standby database in recover mode in background mode by the following commands.

SQL>alter database recover managed standby database disconnect;



Step-III To check the status of standby database issue the following command for applied
             archived logs.

SQL>select max(sequence#) from v$archived_log where applied='YES';
SQL>select sequence#,applied from v$archived_log order by 1;

Step-IV Stop Managed recovery on standby / Open for reporting.

SQL>alter database recover managed standby database cancel;
SQL>alter database open;

===============To again start stanby process ===============
shutdown immediate;
startup mount
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE disconnect;