<!--{{{-->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0em 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0em 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0em 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em 0.2em 0.2em 0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0em 0em 0.5em;}
.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0em 1em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0em; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none ! important;}
#displayArea {margin: 1em 1em 0em 1em;}
/* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
noscript {display:none;}
}
/*}}}*/
<!--{{{-->
<div class='header' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank TiddlyWiki, you'll need to modify the following tiddlers:
* SiteTitle & SiteSubtitle: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* MainMenu: The menu (usually on the left)
* DefaultTiddlers: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
These InterfaceOptions for customising TiddlyWiki are saved in your browser

Your username for signing your edits. Write it as a WikiWord (eg JoeBloggs)

<<option txtUserName>>
<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations

----
Also see [[AdvancedOptions]]
<<importTiddlers>>
http://www.evernote.com/shard/s48/sh/780e7534-cee4-4278-87f7-449e81d2acfc/23124af97669edefaca70c4f6e719426
http://www.evernote.com/shard/s48/sh/c5653239-108b-4515-a19e-d69fa3ad92c1/2476cb2c13785e02a391d05a7daf7507
http://jacobian.org/writing/web-scale/
http://thebuild.com/blog/2010/10/27/things-i-do-not-understand-web-scale/
<<showtoc>>

In the notes below I've discussed "scale out vs scale up" and "speed vs bandwidth":
* T3 CPUs thread:core ratio http://bit.ly/2g5bdPA
* Mainframe (MIPS) to Sparc sizing http://bit.ly/2g5dwSB
* DB2 Mainframe to Oracle Sizing http://bit.ly/2g53JMm
You can read the URLs for more detail, but here are the essential parts of the discussions.

! The "scale out (x86) vs scale up (T5)" boils down to two factors
!! Factor 1) bandwidth vs speed

<<<
	[img[ http://i.imgur.com/oP70UP8.png ]]

	T5 offers more bandwidth capacity but slower CPU speed than the x86 CPUs of Exadata; with Exadata you need more compute nodes to match the bandwidth capacity of the T5.
	If you compare the LIO/sec performance of
	SPARC T5, 8 threads pinned to a core, with a SPECint_rate of 29
	vs
	Xeon E5 (X4-2 in this example), 2 threads pinned to a core, with a SPECint_rate of 39
	you'll see the following curve.. the 2 threads on Xeon will give you a higher LIOs/sec value than the first 2 threads of SPARC, just because of the speed differences
	
	[img[ http://i.imgur.com/MjMn10y.png ]]
	> Y-axis: ''Logical IOs/sec'', X-axis: ''CPU thread''
	
	But when you saturate the entire platform, the SPARC, given that it has a lot of "slower" threads, can in effect consolidate more LIO workload, at the price of LIO speed. Meaning: an OLTP SQL executing in .2ms per execute on Xeon will be much slower on SPARC. That's why I prefer to scale out using Xeon machines (with faster CPUs) rather than scale up with SPARC. But it depends on whether the application can take advantage of RAC (a RAC-aware app).
	
	[img[ http://i.imgur.com/wDoXUjp.png ]]
	> Y-axis: ''Logical IOs/sec'', X-axis: ''CPU thread''
	
	But then the X4-8 is pretty promising if they need a scale-up kind of solution; it is faster than the T5, M5, and M6, with a speed of 38 vs 30 (see the comparison here http://bit.ly/2fOzM06)
	
	and the X4-8 (https://twitter.com/karlarao/status/435882623500423168) is pretty much the same speed as the compute nodes of the X4-2, so you also get the linear scaling they claim for the T5, M5, and M6, but with a much faster CPU… 
<<<

!! Factor 2) the compatibility of the apps with either T5 (scale-up) or x86 (scale-out)
<<<
	If the application is an old-school, mainframe-specific program, then they might want to stick with the zEC12. 
<<<


! I like this reply by Alex Fatkulin about Xeon vs SPARC
https://twitter.com/alexfatkulin
<<<
I think Xeon is a lot more versatile platform when it comes to the types of workloads it can handle.

A very strong point about Xeon is that it is a lot more forgiving about the type of workload you want to run. It can be a race car or it can be a heavy-duty truck. A SPARC is a bulldozer; that's the only thing it is and the only thing it can be. You might find yourself in the wrong vehicle in the middle of the highway ;-)

SPARC is like a bulldozer: it can move at a constant speed no matter what's in front of it, but that doesn't change the fact that it moves slowly. 

Some years ago I was involved with a company doing aircraft maintenance software which bought a bunch of SPARC servers thinking that a lot of threads is cool -- otherwise why would SUN call these CoolThreads? The problem was that they had a lot of very complex logic sensitive to single-threaded performance which wasn't designed to run in parallel. The end result was that SPARC could not finish a maintenance cycle within its window. For those unfamiliar with the subject, the only thing worth mentioning is that it kept planes grounded. So it was a case where the software couldn't take any advantage of the high throughput offered by the SPARC platform, while SPARC couldn't offer high single-threaded performance. Guess what these SPARC servers got replaced with. Now granted, this was all before T5, but the fact of the matter is T5 continues to lag significantly behind the latest Intel generations in core IPC.
<<<



! Discussions on thread:core ratio with Frits Hoogland
https://twitter.com/fritshoogland

AFAIK, these are the heavily threaded ones (thread:core ratio of 8:1). When I calculate the CPU time per second from AWR, any CPU time above the number of threads roughly means queue time, right? 
<<<
<- Yes, and that should show as CPU Wait on the Oracle side of things
<<<
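To make that rule of thumb concrete, here's a minimal Python sketch (the host size and AWR numbers are made-up illustrations, not from any of the systems above):
{{{
# rough queue-time estimate from AWR-style numbers (illustrative values only):
# CPU seconds consumed per elapsed second beyond the hardware thread count
# is roughly time spent queued for CPU, not running
cpu_threads = 128            # hypothetical host: e.g. 16 cores x 8 threads
awr_cpu_per_sec = 150.0      # hypothetical "CPU used" seconds per second from AWR
queue_per_sec = max(0.0, awr_cpu_per_sec - cpu_threads)
print("approx queued CPU seconds per second:", queue_per_sec)   # 22.0
}}}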

With the calculated CPU time (i.e. queue time subtracted), I get the number of active threads. However, in my book only one of the eight threads can truly execute at a time; the others are visible as running on CPU but are in reality waiting for their turn to truly execute on the core. 
<<<
<- this will manifest as diminishing returns on workload-level LIO/sec performance (think of the LIO load profile section in AWR) as you saturate more threads.. imagine a line like this https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=diminishing%20returns%20curve
<<<

This should mean that when CPU threads are busier than the core can handle, you get increased time on CPU, which in reality is only waiting time: a CPU thread waiting/stalling to run. 
<<<
<- you'll first see the diminishing returns on LIO/sec performance, and then the CPU wait afterwards once the curve reaches its plateau and it gets worse and worse 
<<<

Can you confirm this is how that works? Or otherwise please correct me where I am making a mistake. 
<<<
<- From my tests investigating thread performance, I've noticed there are two types of LIO workload.. the short and the long ones. Kevin showed this at Oaktable World before, and even at RMOUG way back; I think he calls them big and small. The idea is, the short ones tend to share pretty well with other threads on, let's say, the same core. Meaning the core gets busy and the threads are time-slicing so quickly that the net effect is pretty good LIO/sec performance; this also assumes all threads are being utilized evenly, and still the diminishing returns apply as you saturate more and more threads. On the other hand, the long ones tend to hog the time slice, which results in overall lower LIO/sec performance.. this behavior is better explained in this wiki entry [[cpu centric benchmark comparisons]] or here http://bit.ly/1xOJrEu

And these two types can mix in a given workload.  
<<<

This also means that moving to a (recent) Xeon should boost performance considerably, because CPU time will decrease significantly given the much lower thread:core ratio (2:1). 
This means it's not only the SPECint ratio difference that makes the system faster, but also the removal of the excess stalling on CPU. 
<<<

<- Yes it's possible that it's a contribution of all those factors. But I think the boost is mainly driven by speed (a newer CPU); put another way, the lower thread:core ratio makes each Xeon thread faster than a SPARC thread. 
If you compare the LIO/sec performance of

SPARC T5, 8 threads pinned to a core, with a SPECint_rate of 29
vs
Xeon E5, 2 threads pinned to a core, with a SPECint_rate of 44

You'll see the following curve.. the 2 threads on Xeon give you a higher LIOs/sec value than the first 2 threads of SPARC, just because of the speed differences

[img[ http://i.imgur.com/MjMn10y.png ]]
Y-axis: ''Logical IOs/sec'', X-axis: ''CPU thread'' 

But when you saturate the entire platform, the SPARC, given that it has a lot of "slower" threads, can in effect consolidate more LIO workload, at the price of LIO speed. Meaning: an OLTP SQL executing in .2ms per execute on Xeon will be much slower on SPARC. That's why I prefer to scale out using Xeon machines (with faster CPUs) rather than scale up with SPARC. But it depends on whether the application can take advantage of RAC (a RAC-aware app)

[img[ http://i.imgur.com/wDoXUjp.png ]]
Y-axis: ''Logical IOs/sec'', X-axis: ''CPU thread''

But the X4-8 is pretty promising if they need a scale-up kind of solution; it is faster than the T5, M5, and M6 (see wiki [[M6, M5, T5]]), with a speed of 38 vs 30.

X4-8 https://twitter.com/karlarao/status/435882623500423168
and the X4-8 is pretty much the same speed as the compute nodes of the X4-2, so you also get the linear scaling they claim for the T5, M5, and M6, but with a much faster CPU…

 

SPECint_rate2006 reference

Below are the variable values (the raw column headers for the rows that follow):
Result/# Cores, # Cores, # Chips, # Cores Per Chip, # Threads Per Core, Baseline, Result, Hardware Vendor, System, Published
$ less spec.txt | sort -rnk1 | grep -i sparc | grep -i oracle
30.5625, 16, 1, 16, 8, 441, 489, Oracle Corporation, SPARC T5-1B, Oct-13
@@29.2969, 128, 8, 16, 8, 3490, 3750, Oracle Corporation, SPARC T5-8, Apr-13@@
29.1875, 16, 1, 16, 8, 436, 467, Oracle Corporation, SPARC T5-1B, Apr-13
18.6, 2, 1, 2, 2, 33.7, 37.2, Oracle Corporation, SPARC Enterprise M3000, Apr-11
14.05, 4, 1, 4, 2, 50.3, 56.2, Oracle Corporation, SPARC Enterprise M3000, Apr-11
13.7812, 64, 16, 4, 2, 806, 882, Oracle Corporation, SPARC Enterprise M8000, Dec-10
13.4375, 128, 32, 4, 2, 1570, 1720, Oracle Corporation, SPARC Enterprise M9000, Dec-10
12.3047, 256, 64, 4, 2, 2850, 3150, Oracle Corporation, SPARC Enterprise M9000, Dec-10
11.1875, 16, 4, 4, 2, 158, 179, Oracle Corporation, SPARC Enterprise M4000, Dec-10
11, 32, 8, 4, 2, 313, 352, Oracle Corporation, SPARC Enterprise M5000, Dec-10
@@10.4688, 32, 2, 16, 8, 309, 335, Oracle Corporation, SPARC T3-2, Feb-11
10.4062, 64, 4, 16, 8, 614, 666, Oracle Corporation, SPARC T3-4, Feb-11
10.375, 16, 1, 16, 8, 153, 166, Oracle Corporation, SPARC T3-1, Jan-11@@

x3-2 spec
$ cat spec.txt | grep -i intel | grep -i "E5-26" | grep -i sun | sort -rnk1
@@44.0625, 16, 2, 8, 2, 632, 705, Oracle Corporation, Sun Blade X6270 M3 (Intel Xeon E5-2690 2.9GHz)@@
44.0625, 16, 2, 8, 2, 632, 705, Oracle Corporation, Sun Blade X3-2B (Intel Xeon E5-2690 2.9GHz)
44.0625, 16, 2, 8, 2, 630, 705, Oracle Corporation, Sun Server X3-2L (Intel Xeon E5-2690 2.9GHz)
44.0625, 16, 2, 8, 2, 630, 705, Oracle Corporation, Sun Fire X4270 M3 (Intel Xeon E5-2690 2.9GHz)
43.875, 16, 2, 8, 2, 628, 702, Oracle Corporation, Sun Server X3-2 (Intel Xeon E5-2690 2.9GHz)
43.875, 16, 2, 8, 2, 628, 702, Oracle Corporation, Sun Fire X4170 M3 (Intel Xeon E5-2690 2.9GHz)
<<<
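As a sanity check, the first column of those rows is just Result divided by # Cores; a small Python sketch recomputing it from one of the quoted rows:
{{{
# recompute the first column (Result / # Cores) from a SPEC row quoted above
row = "29.2969, 128, 8, 16, 8, 3490, 3750, Oracle Corporation, SPARC T5-8, Apr-13"
fields = [f.strip() for f in row.split(",")]
result, cores = float(fields[6]), int(fields[1])
print(round(result / cores, 4))   # 29.2969 -- matches the first field
}}}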









[img(70%,70%)[https://i.imgur.com/XsBOAey.jpg]]

[img(70%,70%)[https://i.imgur.com/xMCK0Ug.png]]


<<<
''Introduction'' (06:20)
* Welcome! Thank you for learning the Data Warehouse concepts with me! (06:20)

''Brief about the Data warehouse'' (21:13)
* Is Data Warehouse still relevant in the age of Big Data? (04:25)
* Why do we need a Data Warehouse? (05:26)
* What is a Data Warehouse? (05:42)
* Characteristics of a Data Warehouse (05:40)

''Business Intelligence'' (23:37)
* What is Business Intelligence? (05:37)
* Business Intelligence - Extended Explanation (03:34)
* Uses of Business Intelligence (08:02)
* Tools used for (in) Business Intelligence (06:24)

''Data Warehouse Architectures'' (32:12)
* Enterprise Architecture or Centralized Architecture (04:46)
* Federated Architecture (03:05)
* Multi-Tiered Architecture (03:13)
* Components of a Data Warehouse (03:57)
* Purpose of a Staging Area in Data Warehouse Architecture - Part 1 (04:49)
* Purpose of a Staging Area in Data Warehouse Architecture - Part 2 (03:41)
* Advantages of Traditional warehouse (02:33)
* Limitations of Traditional Data Warehouses (06:08)

''ODS - Operational Data Store'' (14:13)
* What is ODS? (02:26)
* Define ODS (07:40)
* Differences between ODS, DWH, OLTP, OLAP, DSS (04:07)

''OLAP'' (28:15)
* OLAP Overview (05:17)
* OLTP Vs OLAP - Part 1 (04:05)
* OLTP Vs OLAP - Part 2 (05:31)
* OLAP Architecture - MOLAP (05:56)
* ROLAP (03:35)
* HOLAP (02:20)
* DOLAP (01:31)

''Data Mart'' (13:52)
* What is a Data Mart? (01:40)
* Fundamental Difference between DWH and DM (00:40)
* Advantages of a Data Mart (02:46)
* Characteristics of a Data Mart (03:37)
* Disadvantages of a Data Mart (03:01)
* Mistakes and Misconceptions of a Data Mart (02:08)

''Metadata'' (19:30)
* Overview of Metadata (01:50)
* Benefits of Metadata (01:47)
* Types of Metadata (05:38)
* Projects on Metadata (05:28)
* Best Practices for Metadata Setup (01:36)
* Summary (03:11)

''Data Modeling'' (05:53)
* What is Data Modeling? (02:11)
* Data Modeling Techniques (03:42)

''Entity Relational Data Model'' (35:46)
* ER - (Entity Relation) Data Model (03:37)
* ER Data Model - What is Entity? (02:01)
* ER Data Model - Types of Entities - Part 1 (03:57)
* ER Data Model - Types of Entities - Part 2 (01:49)
* ER Data Model - Attributes (01:54)
* ER Data Model - Types of Attributes (03:59)
* ER Data Model - Entity-Set and Keys (02:42)
* ER Data Model - Identifier (01:53)
* ER Data Model - Relationship (01:15)
* ER Data Model - Notation (02:34)
* ER Data Model - Logical Data Model (01:30)
* ER Data Model - Moving from Logical Data Model to Physical Data Model (02:14)
* ER Data Model - Differences between CDM, LDM and PDM (03:06)
* ER Data Model - Disadvantages (03:15)

''Dimensional Model'' (01:24:32)
* What is Dimension Modelling? (04:38)
* Benefits of Dimensional Modelling (01:52)
* What is a Dimension? (02:36)
* What is a Fact? (02:00)
* Additive Facts (01:45)
* Semi Additive Facts (02:23)
* Non-Additive Facts (01:26)
* Factless Facts (02:26)
* What is a Surrogate key? (03:45)
* Star Schema (04:54)
* Snowflake Schema (03:22)
* Galaxy Schema or Fact Constellation Schema (02:25)
* Differences between Star Schema and Snowflake Schema? (04:55)
* Conformed Dimension (06:17)
* Junk Dimension (03:12)
* Degenerate Dimension (03:36)
* Slowly Changing Dimensions - Intro and Example Creation (05:35)
* Slowly Changing Dimensions - Type 1, 2 and 3 (12:14)
* Slowly Changing Dimensions - Summary (03:05)
* Step by Step approach to set up the Dimensional Model using a retail case study (06:44)
* ER Model Vs Dimensional Model (05:22)

''DWH Indexes'' (10:59)
* What is an Index? (02:04)
* Bitmap Index (03:46)
* B-Tree Index (01:49)
* Bitmap Index Vs B-Tree Index (03:20)

''Data Integration and ETL'' (13:20)
* What is Data Integration? (06:49)
* What is ETL? (03:49)
* Common Questions and Summary (02:42)

''ETL Vs ELT'' (13:45)
* ETL - Explained (06:03)
* ELT - Explained (05:24)
* ETL Vs ELT (02:18)

''ETL - Extraction Transformation & Loading'' (12:48)
* Build Vs Buy (05:10)
* ETL Tools for Data Warehouses (01:56)
* Extraction Methods in Data Warehouses (05:42)

''Typical Roles In DWH Project'' (44:18)
* Project Sponsor (03:24)
* Project Manager (01:46)
* Functional Analyst or Business Analyst (02:53)
* SME - Subject Matter Expert (04:17)
* DW BI Architect (03:07)
* Data Modeler (08:59)
* DWH Tech Admin (01:20)
* ETL Developers (01:56)
* BI OLAP Developers (01:29)
* ETL Testers/QA Group (01:58)
* DB UNIX Network Admins (00:56)
* Data Architect, Data Warehouse Architect, BI Architect and Solution Architect (09:57)
* Final Note about the Roles (02:16)

''DW/BI/ETL Implementation Approach'' (39:48)
* Different phases in DW/BI/ETL Implementation Approach (01:51)
* Knowledge Capture Sessions (03:34)
* Requirements (07:21)
* Architecture phases (04:48)
* Data Model/Database (01:35)
* ETL Phase (02:43)
* Data Access Phase (02:10)
* Data Access Types - Selection (01:37)
* Data Access Types - Drilling Down (00:58)
* Data Access Types - Exception Reporting (00:36)
* Data Access Types - Calculations (01:26)
* Data Access Types - Graphics and Visualization (00:58)
* Data Access Types - Data Entry Options (02:04)
* Data Access Types - Customization (01:00)
* Data Access Types - Web-Based Reporting (00:56)
* Data Access Types - Broadcasting (01:04)
* Deploy (01:42)
* Iterative Approach (03:25)

''Retired Lectures'' (02:23)
* ETL Vs ELT (02:23)

''Bonus Section'' (01:37)
* Links to other courses (01:37)
<<<
good compilation of oracle hints http://www.hellodba.com/Download/OracleSQLHints.pdf


12c new SQL hints
http://www.hellodba.com/reader.php?ID=220&lang=EN

search for "hints"
http://www.hellodba.com/index.php?class=DOC&lang=EN


.

<<showtoc>> 


! Greg Wooledge wiki
http://mywiki.wooledge.org/FullBashGuide
http://mywiki.wooledge.org/BashGuide/Practices
https://mywiki.wooledge.org/BashPitfalls
http://mywiki.wooledge.org/BashWeaknesses
http://mywiki.wooledge.org/BashFAQ
http://www.tldp.org/LDP/abs/html/abs-guide.html	


! essential documentation 
http://www.tldp.org/LDP/abs/html/abs-guide.html
https://github.com/DingGuodong/bashstyle   <- some style guide
http://superuser.com/questions/414965/when-to-use-bash-and-when-to-use-perl-python-ruby/415134
https://www.shellcheck.net/
http://www.gnu.org/software/bash/manual/bash.html
https://wiki.bash-hackers.org/
https://www.in-ulm.de/~mascheck/
http://www.grymoire.com/Unix/Quote.html
http://www.shelldorado.com/


! video courses
https://www.pluralsight.com/courses/bash-shell-scripting
https://www.pluralsight.com/courses/red-hat-enterprise-linux-shell-scripting-fundamentals

https://www.safaribooksonline.com/library/view/bash-scripting-fundamentals/9780134541730/
https://www.safaribooksonline.com/library/view/advanced-bash-scripting/9780134586229/




! /usr/bin/env or /bin/env
<<<
it's better to use 
#!/usr/bin/env bash

In most cases, using /usr/bin/env bash is better than /bin/bash.
If you are running in a multi-user environment and security is a big concern, forget about /usr/bin/env (or anything that uses $PATH, actually).
If you need an extra argument to your interpreter and you care about portability, /usr/bin/env may also give you some headaches.
<<<
https://www.google.com/search?q=%2Fusr%2Fbin%2Fenv+or+%2Fbin%2Fenv&oq=usr%2Fbin+or+%2Fbin&aqs=chrome.4.69i57j69i58j0l4.11969j1j1&sourceid=chrome&ie=UTF-8
https://stackoverflow.com/questions/5549044/whats-the-difference-of-using-usr-bin-env-or-bin-env-in-shebang
https://unix.stackexchange.com/questions/29608/why-is-it-better-to-use-usr-bin-env-name-instead-of-path-to-name-as-my
https://www.brianstorti.com/rethinking-your-shebang/



! batch 
http://steve-jansen.github.io/guides/windows-batch-scripting/part-10-advanced-tricks.html
''batch file a-z'' http://ss64.com/nt/
''batch file categorized'' http://ss64.com/nt/commands.html

! loop
http://ss64.com/nt/for.html
http://ss64.com/nt/for_cmd.html
http://stackoverflow.com/questions/1355791/how-do-you-loop-in-a-windows-batch-file
http://stackoverflow.com/questions/1103994/how-to-run-multiple-bat-files-within-a-bat-file

.

<<showtoc>>


! info

!! getting started
!! sample code

! data types and variables

!! data type
!! specific data types/values
!! variable assignment/scope
!! comparison operators

! data structures

!! data containers/structures
!! vector
!! matrix
!! data frame or pandas
!! list
!! sets

! control structures

!! control workflow
!! if else
!! error handling
!! unit testing / TDD

! loops

!! loops workflow
!! for loop
!! while loop

! advanced concepts

!! functions
!! OOP

! other functions methods procedures
! language specific operations

!! data workflow
!! directory operations
!! package management
!! importing data
!! cleaning data
!! data manipulation
!! visualization

! scripting

!! scripting workflow
!! run a script
!! print multiple var
!! input data









! xxxxxxxxxxxxxxxxxxxxxxxx
! xxx Data Engineering
! xxxxxxxxxxxxxxxxxxxxxxxx



! workflow 
! installation  and upgrade
! commands
! performance and troubleshooting
!! sizing and capacity planning
!! benchmark
! high availability 
! security

! xxxxxxxxxxxxxxxxxxxxxxxx




.
! 2021

<<<
Kumaran's courses are the best out there to get you up to speed with design patterns and technology components for modern data architecture. Love the format: short, sweet, practical, and direct to the point.


https://www.linkedin.com/learning/architecting-big-data-applications-real-time-application-engineering/sm-analyze-the-problem
https://www.linkedin.com/learning/architecting-big-data-applications-batch-mode-application-engineering/welcome

https://www.linkedin.com/learning/stream-processing-design-patterns-with-kafka-streams/stream-processing-with-kafka
https://www.linkedin.com/learning/stream-processing-design-patterns-with-spark/streaming-with-spark

https://www.linkedin.com/learning/applied-ai-for-it-operations/artificial-intelligence-and-its-many-uses
https://www.linkedin.com/learning/applied-ai-for-human-resources/artificial-intelligence-and-human-resources


https://www.linkedin.com/in/kumaran-ponnambalam-961a344/?trk=lil_instructor
<<<


<<<
design and architecture

https://www.pluralsight.com/courses/google-dataflow-architecting-serverless-big-data-solutions
https://www.pluralsight.com/courses/google-cloud-platform-leveraging-architectural-design-patterns
https://www.pluralsight.com/courses/google-cloud-functions-architecting-event-driven-serverless-solutions
https://www.pluralsight.com/courses/google-dataproc-architecting-big-data-solutions
https://www.pluralsight.com/courses/google-machine-learning-apis-designing-implementing-solutions
https://www.pluralsight.com/courses/google-bigquery-architecting-data-warehousing-solutions
https://www.pluralsight.com/courses/google-cloud-automl-designing-implementing-solutions

https://www.linkedin.com/learning/search?keywords=apache%20beam
https://www.linkedin.com/learning/data-science-on-google-cloud-platform-building-data-pipelines/what-goes-into-a-data-pipeline <— good summary
https://www.linkedin.com/learning/google-cloud-platform-for-enterprise-essential-training/enterprise-ready-gcp

https://www.linkedin.com/learning/architecting-big-data-applications-batch-mode-application-engineering/dw-lay-out-the-architecture <— good 5 use cases
https://www.linkedin.com/learning/data-science-on-google-cloud-platform-architecting-solutions/architecting-data-science <— good 4 use cases
https://www.linkedin.com/learning/data-science-on-google-cloud-platform-designing-data-warehouses/why-data-warehouses-are-important
https://www.linkedin.com/learning/architecting-big-data-applications-real-time-application-engineering/sm-analyze-the-problem <— good 4 use cases 
<<<





!! Distributed systems in one lesson 
https://learning.oreilly.com/videos/distributed-systems-in/9781491924914?autoplay=false



!! ML system
ML system design https://us.teamblind.com/s/5HGmH4Wd
Machine Learning Systems: Designs that scale https://learning.oreilly.com/library/view/machine-learning-systems/9781617293337/kindle_split_024.html



!! kafka 
https://www.youtube.com/results?search_query=kafka+system+design



!! messaging service 
https://www.datanami.com/2019/05/28/assessing-your-options-for-real-time-message-buses/
https://www.udemy.com/courses/search/?src=ukw&q=message+queueing+
https://www.udemy.com/rabbitmq-messaging-with-java-spring-boot-and-spring-mvc/
https://bytes.com/topic/python/answers/437385-queueing-python-ala-jms


!! instagram
http://highscalability.com/blog/2011/12/6/instagram-architecture-14-million-users-terabytes-of-photos.html


https://github.com/CodeThat/Algorithm-Implementations



https://www.algoexpert.io/purchase    coupon "devon"
https://www.algoexpert.io/questions/Nth%20Fibonacci

    
<<showtoc>>

! practice courses and resources 
<<<

https://www.udemy.com/aws-emr-and-spark-2-using-scala/learn/v4/t/lecture/9366830?start=0
https://www.udemy.com/python-and-spark-setup-development-environment/
https://www.udemy.com/linux-fundamentals-for-it-professionals/learn/v4/overview
https://www.udemy.com/fundamentals-of-programming-using-python-3/learn/v4/overview
https://www.udemy.com/python-for-data-science-and-machine-learning-bootcamp/learn/v4/t/lecture/5774370?start=0
https://www.udemy.com/data-science-and-machine-learning-bootcamp-with-r/learn/v4/content
https://www.udemy.com/machinelearning/learn/v4/t/lecture/5935024?start=0
https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/learn/v4/t/lecture/4020676?start=0
Installing TensorFlow and H2O in R https://learning.oreilly.com/learning-paths/learning-path-r/9781789340839/9781788838771-video1_4
Using the H2O Deep Learning Framework https://learning.oreilly.com/videos/learning-path-r/9781788298742/9781788298742-video1_29
Interpretable AI - Not just for regulators - Patrick Hall (H2O.ai | George Washington University), Sri Satish (H2O.ai) h2o.ai https://learning.oreilly.com/videos/strata-data-conference/9781491976326/9781491976326-video316338
https://weidongzhou.wordpress.com/tag/big-data/


Practical Machine Learning with H2O https://learning.oreilly.com/library/view/practical-machine-learning/9781491964590/
https://www.udemy.com/complete-deep-learning-in-r-with-keras-others/

<<<
<<showtoc>>



! info

!! getting started
!! sample code

! data types and variables

!! data type
!! specific data types/values
!! variable assignment/scope

!!! delete variables 
https://stackoverflow.com/questions/26545051/is-there-a-way-to-delete-created-variables-functions-etc-from-the-memory-of-th?rq=1
https://stackoverflow.com/questions/3543833/how-do-i-clear-all-variables-in-the-middle-of-a-python-script


!! comparison operators

! data structures


!! data containers/structures
!! R vector / python tuple or list
<<<
https://stackoverflow.com/questions/252703/difference-between-append-vs-extend-list-methods-in-python/28119966
<<<
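In short, per that thread (minimal sketch):
{{{
xs = [1, 2]
xs.append([3, 4])    # [1, 2, [3, 4]] -- the list goes in as a single element
ys = [1, 2]
ys.extend([3, 4])    # [1, 2, 3, 4]   -- the items are unpacked into the list
print(xs, ys)
}}}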

!! R matrix / python numpy
!! R data frame / python pandas


!!! save dataframe to parquet file 
https://stackoverflow.com/questions/41066582/python-save-pandas-data-frame-to-parquet-file
{{{
# shell: install the parquet engine
pip install fastparquet

# python: write the dataframe out (df below is a placeholder example frame)
import pandas as pd
df = pd.DataFrame({'enc': [b'a', b'b']})
df.to_parquet('myfile.parquet', engine='fastparquet', compression='UNCOMPRESSED')

# shell: load into BigQuery -- note this created the table columns as BYTES
bq load --location=US --source_format=PARQUET tink.enc_parquet2 myfile.parquet
}}}





!! R list / python dictionary
<<<
https://stackoverflow.com/questions/1024847/add-new-keys-to-a-dictionary

https://stackoverflow.com/questions/1867861/dictionaries-how-to-keep-keys-values-in-same-order-as-declared
<<<
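A quick sketch of both answers (adding keys, and key ordering):
{{{
d = {}
d["new_key"] = "value"          # add a single key
d.update(b=2, c=3)              # or several at once
print(list(d))                  # insertion order is preserved (Python 3.7+)
}}}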


!! python sets
!! python list comprehension, nested list/dictionary



! control structures

!! control workflow
!! if else
<<<
https://stackoverflow.com/questions/2493404/complex-if-statement-in-python
<<<
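A minimal illustration of the usual advice in that thread: name the sub-conditions.
{{{
# name the pieces of a complex condition instead of one long expression
x, y = 5, 10
in_range = 0 <= x < 100
bigger = y > x
if in_range and bigger:
    print("ok")
}}}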


!! error handling
!! unit testing / TDD

! loops

!! loops workflow
!! for loop
!! while loop

! advanced concepts

!! functions

!!! functional programming
Reactive Programming in Python https://learning.oreilly.com/videos/reactive-programming-in/9781786460332/9781786460332-video1_3?autoplay=false


!! OOP

!!! static vs class method 
https://stackoverflow.com/questions/136097/what-is-the-difference-between-staticmethod-and-classmethod
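A minimal sketch of the difference:
{{{
# classmethod receives the class; staticmethod receives nothing implicit
class Counter:
    count = 0

    @classmethod
    def make(cls):       # cls is Counter (or a subclass) -- usable as a factory
        cls.count += 1
        return cls()

    @staticmethod
    def describe():      # just a function namespaced under the class
        return "counts things"

Counter.make()
print(Counter.count, Counter.describe())   # 1 counts things
}}}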
!!! what is pass 
https://stackoverflow.com/questions/13886168/how-to-use-the-pass-statement
!!! iterating through instance object attributes 
https://www.saltycrane.com/blog/2008/09/how-iterate-over-instance-objects-data-attributes-python/
https://stackoverflow.com/questions/739882/iterating-over-object-instances-of-a-given-class-in-python
https://stackoverflow.com/questions/44196243/iterate-over-list-of-class-objects-pythonic-way
https://stackoverflow.com/questions/42581286/iterate-over-an-instance-objects-attributes-in-python
https://stackoverflow.com/questions/21598872/how-to-create-multiple-class-objects-with-a-loop-in-python
https://stackoverflow.com/questions/25150955/python-iterating-through-object-attributes
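A minimal sketch of the vars()/__dict__ approach those links describe:
{{{
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Point(1, 2)
for name, value in vars(p).items():   # instance data attributes only
    print(name, value)
}}}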

!! args kwargs 
<<<
https://stackoverflow.com/questions/8977594/in-python-what-determines-the-order-while-iterating-through-kwargs/41634018
https://stackoverflow.com/questions/26748097/using-an-ordereddict-in-kwargs

<<<
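Quick sketch; note that kwargs preserves the caller's order on modern Python (PEP 468, CPython 3.6+):
{{{
def show(*args, **kwargs):
    print(args, list(kwargs.items()))

show(1, 2, a=3, b=4)   # (1, 2) [('a', 3), ('b', 4)]
}}}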



! other functions methods procedures
! language specific operations

!! data workflow
!! directory operations
!! package management
!!! use-import-module-or-from-module-import
https://stackoverflow.com/questions/710551/use-import-module-or-from-module-import
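The two styles from that question, side by side:
{{{
import os.path             # explicit namespace at the call site: os.path.join(...)
from os.path import join   # shorter call sites: join(...)

print(os.path.join("a", "b") == join("a", "b"))   # True -- same function
}}}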

!! importing data
!! cleaning data
!! data manipulation
!! visualization

!!! python and highcharts
<<<
https://www.highcharts.com/blog/products/highmaps/226-get-your-data-ready-for-charts-with-python/
https://github.com/kyper-data/python-highcharts
Flask Web Development in Python - 6 - js Plugin - Highcharts example https://www.youtube.com/watch?v=9Ic79kOBj_M


<<<



! scripting

!! scripting workflow
!! run a script
!! print multiple var
!! input data

!! check if list exist 
<<<
https://stackoverflow.com/questions/11556234/how-to-check-if-a-list-exists-in-python
<<<
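The try/except NameError idiom from that answer:
{{{
# test whether the name is bound at all
try:
    my_list
except NameError:
    my_list = []           # define it if it didn't exist yet
print(my_list)
}}}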


!! command line args parser 
<<<
Building cmd line using click https://www.youtube.com/watch?v=6OY1xFYJVxQ
https://medium.com/@collectiveacuity/argparse-vs-click-227f53f023dc
https://realpython.com/comparing-python-command-line-parsing-libraries-argparse-docopt-click/
https://stackoverflow.com/questions/3217673/why-use-argparse-rather-than-optparse
https://ttboj.wordpress.com/2010/02/03/getopt-vs-optparse-vs-argparse/
https://pymotw.com/2/optparse/
https://docs.python.org/2/howto/argparse.html
https://leancrew.com/all-this/2015/06/better-option-parsing-in-python-maybe/
https://www.quora.com/What-are-the-advantages-of-using-argparse-over-optparse-or-vice-versa
<<<

<<<
http://www.annasyme.com/docs/python_structure.html   <- GOOD
good basics https://medium.com/code-85/how-to-pass-command-line-values-to-a-python-script-1e3e7b244c89 <- GOOD
https://towardsdatascience.com/a-simple-guide-to-command-line-arguments-with-argparse-6824c30ab1c3
https://martin-thoma.com/how-to-parse-command-line-arguments-in-python/


logging https://gist.github.com/olooney/8155400
https://pymotw.com/2/argparse/
https://gist.github.com/BurkovBA/947ae7406a3b22b32c81904da9d9797e
https://zetcode.com/python/argparse/
https://gist.github.com/abalter/605773b34a68bb370bf84007ee55a130
https://github.com/nhoffman/argparse-bash
https://python.plainenglish.io/parse-args-in-bash-scripts-d50669be6a61
https://stackoverflow.com/questions/14340822/pass-bash-argument-to-python-script
{{{
#!/bin/sh
# pass all shell arguments through to the python script unchanged
python script.py "$@"

}}}
https://stackoverflow.com/questions/4256107/running-bash-commands-in-python
{{{
# note: Popen(cmd.split()) does not understand shell redirection (">"),
# so capture stdout and write the output file explicitly
import subprocess

bashCommand = "cwm --rdf test.rdf --ntriples"
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
with open("test.nt", "wb") as f:
    f.write(output)

}}}
https://stackabuse.com/executing-shell-commands-with-python/
https://stackoverflow.com/questions/34836382/python-3-subprocessing-a-python-script-that-uses-argparse
https://medium.com/code-85/how-to-pass-command-line-values-to-a-python-script-1e3e7b244c89


<<<

{{{
Every option has some values like:

    dest: You will access the value of option with this variable
    help: This text gets displayed when someone uses --help.
    default: If the command line argument was not specified, it will get this default value.
    action: Actions tell optparse what to do when it encounters an option on the command line. action defaults to store. These actions are available:
        store: take the next argument (or the remainder of the current argument), ensure that it is of the correct type, and store it to your chosen destination dest.
        store_true: store True in dest if this flag was set.
        store_false: store False in dest if this flag was set.
        store_const: store a constant value
        append: append this option’s argument to a list
        count: increment a counter by one
        callback: call a specified function
    nargs: ArgumentParser objects usually associate a single command-line argument with a single action to be taken. The nargs keyword argument associates a different number of command-line arguments with a single action.
    required: Mark a command line argument as non-optional (required).
    choices: Some command-line arguments should be selected from a restricted set of values. These can be handled by passing a container object as the choices keyword argument to add_argument(). When the command line is parsed, argument values will be checked, and an error message will be displayed if the argument was not one of the acceptable values.
    type: Use this command, if the argument is of another type (e.g. int or float).

argparse automatically generates a help text. So if you call python myScript.py --help you will get something like this:

usage: ikjMultiplication.py [-h] [-i FILE]

ikjMatrix multiplication

optional arguments:
  -h, --help  show this help message and exit
  -i FILE     input file with two matrices

}}}
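A minimal runnable argparse sketch exercising the keywords described above (the option names are illustrative, echoing the matrix example in the help text):
{{{
import argparse

# dest, help, default, action, type, choices all hang off add_argument()
parser = argparse.ArgumentParser(description="ikjMatrix multiplication")
parser.add_argument("-i", dest="filename", metavar="FILE", default=None,
                    help="input file with two matrices")
parser.add_argument("-v", "--verbose", action="store_true",
                    help="store True in 'verbose' when the flag is present")
parser.add_argument("-n", type=int, default=1, choices=[1, 2, 3],
                    help="number of runs (validated against choices)")
args = parser.parse_args()
print(args.filename, args.verbose, args.n)
}}}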





! xx


! forecasting
!! times series 
timeseries techniques https://www.safaribooksonline.com/library/view/practical-data-analysis/9781783551668/ch07.html
http://www.johnwittenauer.net/a-simple-time-series-analysis-of-the-sp-500-index/
time series python statsmodels http://conference.scipy.org/scipy2011/slides/mckinney_time_series.pdf
Do not smooth times series, you hockey puck http://wmbriggs.com/post/195/
practical Data Analysis Cookbook https://github.com/drabastomek/practicalDataAnalysisCookbook

! underscore in python
watch the two videos below:
{{{
What's the meaning of underscores (_ & __) in Python variable names
Python Tutorial: if __name__ == '__main__' 
}}}
https://www.youtube.com/watch?v=ALZmCy2u0jQ
https://www.youtube.com/watch?v=sugvnHA7ElY
{{{
Difference between _, __ and __xx__ in Python
http://igorsobreira.com/2010/09/16/difference-between-one-underline-and-two-underlines-in-python.html
http://stackoverflow.com/questions/8689964/why-do-some-functions-have-underscores-before-and-after-the-function-name
http://programmers.stackexchange.com/questions/229804/usage-of-while-declaring-any-variables-or-class-member-in-python
}}}
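A minimal sketch of the conventions those links cover:
{{{
class Demo:
    def __init__(self):
        self._internal = 1    # single leading underscore: "internal use" hint
        self.__mangled = 2    # double leading underscore: name-mangled to _Demo__mangled

    def __repr__(self):       # double-sided underscores: protocol ("dunder") methods
        return "Demo()"

if __name__ == '__main__':    # True only when run as a script, not when imported
    d = Demo()
    print(d._internal, d._Demo__mangled, repr(d))
}}}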


! sqldf / pandasql 
http://blog.yhat.com/posts/pandasql-intro.html
pandasql: Make python speak SQL https://community.alteryx.com/t5/Data-Science-Blog/pandasql-Make-python-speak-SQL/ba-p/138435
https://statcompute.wordpress.com/2016/10/17/flavors-of-sql-on-pandas-dataframe/
https://www.r-bloggers.com/turning-data-into-awesome-with-sqldf-and-pandasql/
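A minimal pandasql sketch (assumes pip install pandasql; the frame and query are illustrative):
{{{
import pandas as pd
from pandasql import sqldf

df = pd.DataFrame({"name": ["a", "b", "c"], "qty": [10, 20, 30]})
print(sqldf("SELECT name, qty FROM df WHERE qty > 15", locals()))
}}}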


! gui ide 
https://www.yhat.com/products/rodeo



! for loops 
https://data36.com/python-for-loops-explained-data-science-basics-5/


! PYTHONPATH
https://stackoverflow.com/questions/19917492/how-to-use-pythonpath
<<<
You're confusing PATH and PYTHONPATH. You need to do this:

export PATH=$PATH:/home/randy/lib/python 
PYTHONPATH is used by the python interpreter to determine which modules to load.

PATH is used by the shell to determine which executables to run.
<<<
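Quick sanity-check sketch: entries from PYTHONPATH end up in sys.path, which is what the interpreter actually searches for modules:
{{{
import os, sys

print(os.environ.get("PYTHONPATH", "(not set)"))
print(sys.path[:3])   # first entries of the module search path
}}}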


! python compatibility 

!! pycon talk - start here
Brett Cannon - How to make your code Python 2/3 compatible - PyCon 2015 https://www.youtube.com/watch?v=KPzDX5TX5HE
https://www.youtube.com/results?search_query=python-modernize


!! performance between 2 and 3 
https://chairnerd.seatgeek.com/migrating-to-python-3/

!! coding differences between 2 and 3 
[[..python 2 to 3]]
https://wiki.python.org/moin/Python2orPython3

!! 2to3 - tool to automatically convert code 
https://docs.python.org/2/library/2to3.html
Python 2to3 - Convert your Python 2 to Python 3 automatically https://www.youtube.com/watch?v=8qxKYnAsNuU
Make Python 2 Programs Compatible with Python 3 Automatically https://www.youtube.com/watch?v=M6wkCIdfI8U
https://stackoverflow.com/questions/40020178/what-python-linter-can-i-use-to-spot-python-2-3-compatibility-issues

!! futurize and modernize 
{{{
# this will work in python 2
from __future__ import print_function
print('hello world')	
}}}
https://python-future.org/faq.html
https://www.youtube.com/results?search_query=python+futurize
python-future vs 2to3 https://www.google.com/search?q=python-future+vs+2to3&oq=python-future+vs+2to3&aqs=chrome..69i57.3384j0j4&sourceid=chrome&ie=UTF-8
Moving from Python 2 to Python 3 http://ptgmedia.pearsoncmg.com/imprint_downloads/informit/promotions/python/python2python3.pdf    <-- good stuff
Python How to use from __future__ import print_function https://www.youtube.com/watch?v=lLpp2cbUWX0  <-- good stuff
http://python-future.org/quickstart.html#to-convert-existing-python-2-code   <- futurize
https://www.youtube.com/results?search_query=future__+import
http://python3porting.com/noconv.html
https://www.reddit.com/r/Python/comments/45vok2/why_did_python_3_change_the_print_syntax/


!! six 
https://pypi.org/project/six/
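A minimal six sketch (assumes pip install six):
{{{
# one code path that works on both Python 2 and 3
import six

def to_text(value):
    # six.binary_type is bytes on both majors; decode only when needed
    return value.decode("utf-8") if isinstance(value, six.binary_type) else value

print(to_text(b"hello"))
}}}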


! tricks 

!! count frequency of words
http://stackoverflow.com/questions/30202011/how-can-i-count-comma-separated-values-in-one-column-of-my-panda-table
https://www.google.com/search?q=R+word+count&oq=R+word+count&aqs=chrome..69i57j0l5.2673j0j1&sourceid=chrome&ie=UTF-8#q=r+count+frequency+of+numbers&*
http://stackoverflow.com/questions/8920145/count-the-number-of-words-in-a-string-in-r
http://r.789695.n4.nabble.com/How-to-count-the-number-of-occurence-td1661733.html
http://stackoverflow.com/questions/1923273/counting-the-number-of-elements-with-the-values-of-x-in-a-vector
https://www.quora.com/How-do-I-generate-frequency-counts-of-categorical-variables-eg-total-number-of-0s-and-total-number-of-1s-from-each-column-within-a-dataset-in-RStudio
http://stackoverflow.com/questions/1296646/how-to-sort-a-dataframe-by-columns
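A pandas version of the counting tricks above (illustrative series):
{{{
import pandas as pd

s = pd.Series(["a,b", "a", "b,c,b"])
print(s.str.split(",").str.len())                  # comma-separated values per row
print(s.str.split(",").explode().value_counts())   # frequency of each value
}}}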



! data structures / containers 

!! pickle 
https://stackoverflow.com/questions/11641493/how-to-cpickle-dump-and-load-separate-dictionaries-to-the-same-file
Serializing Data Using the pickle and cPickle Modules https://learning.oreilly.com/library/view/python-cookbook/0596001673/ch08s03.html
Reading a pickle file (PANDAS Python Data Frame) in R https://stackoverflow.com/questions/35121192/reading-a-pickle-file-pandas-python-data-frame-in-r
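Minimal sketch of the "two dicts, one file" pattern from those links:
{{{
# dump two separate dicts into one file, then load them back in order
import pickle

with open("state.pkl", "wb") as f:
    pickle.dump({"a": 1}, f)
    pickle.dump({"b": 2}, f)

with open("state.pkl", "rb") as f:
    first = pickle.load(f)
    second = pickle.load(f)
print(first, second)
}}}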


! scheduler - celery 
https://www.youtube.com/results?search_query=python+scheduler+async+every+minute+background
https://www.udemy.com/using-python-with-oracle-db/learn/lecture/5330818#overview
https://stackoverflow.com/questions/22715086/scheduling-python-script-to-run-every-hour-accurately
https://stackoverflow.com/questions/2223157/how-to-execute-a-function-asynchronously-every-60-seconds-in-python
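A minimal stdlib-only sketch of the "every 60 seconds, asynchronously" pattern from the links above (no celery; the task body is a placeholder):
{{{
import threading
import time

def task():
    print('tick', time.strftime('%H:%M:%S'))
    # re-arm: schedule the next run in 60s (intervals drift slightly
    # because the timer restarts only after the work finishes)
    threading.Timer(60, task).start()

task()  # kick off the first run; the timer threads keep the process alive
}}}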






! learning materials 
https://linuxacademy.com/linux/training/learningpath/name/scripting-automation-for-sysadmins
https://acloud.guru/learn/automating-aws-with-python






<<showtoc>>

! Upgrading R
* first, fix the permissions of the R folder by making it "full control" http://stackoverflow.com/questions/5059692/unable-to-update-r-packages-in-default-library-on-windows-7
* download the new rstudio https://www.rstudio.com/products/rstudio/download/
* follow the steps mentioned here http://stackoverflow.com/questions/13656699/update-r-using-rstudio and here http://www.r-statistics.com/2013/03/updating-r-from-r-on-windows-using-the-installr-package/ basically you'll have to execute the following:
{{{
# installing/loading the package:
if(!require(installr)) {
  install.packages("installr")
  require(installr)
} # load / install+load installr
 
# using the package:
updateR() # this will start the updating process of your R installation.  It will check for newer versions, and if one is available, will guide you through the decisions you'd need to make.
}}}

! Clone R 
https://github.com/MangoTheCat/pkgsnap

! rstudio 
preview version https://www.rstudio.com/products/rstudio/download/preview/

! documentation 
{{{
> library(RDocumentation)
Do you want to automatically load RDocumentation when you start R? [y|n] y
Congratulations!
R will now use RDocumentation to display your help files.
If you're offline, R will just display your local documentation.
To avoid automatically loading the RDocumentation package, use disable_autoload().
If you don't want the ? and help functionality to show RDocumentation pages, use disable_override().

Attaching package: ‘RDocumentation’

The following objects are masked from ‘package:utils’:

    ?, help, help.search
}}}

! favorite packages
!! summary 

!!! visualization
* ggfortify (autoplot) - easy plotting of data; just execute autoplot()
<<<
http://www.sthda.com/english/wiki/ggfortify-extension-to-ggplot2-to-handle-some-popular-packages-r-software-and-data-visualization
http://rpubs.com/sinhrks/basics
http://rpubs.com/sinhrks/plot_lm
<<<

!!! time series 
* quantstart time series - https://www.quantstart.com/articles#time-series-analysis
* xts - convert to time series object
** as.xts()

!!! quant 

* quantmod http://www.quantmod.com/gallery/
* quantstrat and blotter
http://masterr.org/r/how-to-install-quantstrat/
http://www.r-bloggers.com/nuts-and-bolts-of-quantstrat-part-i/
http://www.programmingr.com/content/installing-quantstrat-r-forge-and-source/
using quantstrat to evaluate intraday trading strategies http://www.rinfinance.com/agenda/2013/workshop/Humme+Peterson.pdf
* highfrequency package 
https://cran.r-project.org/web/packages/highfrequency/highfrequency.pdf , http://feb.kuleuven.be/public/n09022/research.htm
http://highfrequency.herokuapp.com/
* quantlib http://quantlib.org/index.shtml

!!!! quant topics

* quant data
https://www.onetick.com/
interactivebrokers api http://www.r-bloggers.com/how-to-save-high-frequency-data-in-mongodb/ , http://www.r-bloggers.com/i-see-high-frequency-data/ 

* quant portals 
quantstart - learning materials (books, scripts) https://www.quantstart.com/faq
http://www.rfortraders.com/
http://www.quantlego.com/welcome/
https://www.quantstart.com/articles/Quantitative-Finance-Reading-List
http://datalab.lu/
http://carlofan.wix.com/data-science-chews

* quant books 
https://www.amazon.com/Quantitative-Trading-Understanding-Mathematical-Computational/dp/1137354070?ie=UTF8&camp=1789&creative=9325&creativeASIN=1137354070&linkCode=as2&linkId=KJAPF3TMVPQHWD4H&redirect=true&ref_=as_li_qf_sp_asin_il_tl&tag=boucom-20
https://www.quantstart.com/successful-algorithmic-trading-ebook
https://www.quantstart.com/advanced-algorithmic-trading-ebook
https://www.quantstart.com/cpp-for-quantitative-finance-ebook

* quant career 
https://www.quantstart.com/articles/Can-You-Still-Become-a-Quant-in-Your-Thirties
http://www.dlsu.edu.ph/academics/graduate-studies/cob/master-sci-fin-eng.asp

* quant strategies 
trend following strategy http://www.followingthetrend.com/2014/03/improving-the-free-trend-following-trading-rules/
connorsRSI http://www.qmatix.com/ConnorsRSI-Pullbacks-Guidebook.pdf

* quant options trade
http://www.businessinsider.com/the-story-of-the-first-ever-options-trade-in-recorded-history-2012-3

* quant portfolio optimization 
http://www.rinfinance.com/RinFinance2009/presentations/yollin_slides.pdf
http://zoonek.free.fr/blosxom/R/2012-06-01_Optimization.html

* quant time series databases
https://kx.com/benchmarks.php
http://www.paradigm4.com/

* PerformanceAnalytics-package 
http://braverock.com/brian/R/PerformanceAnalytics/html/PerformanceAnalytics-package.html

!!! TDD, testing	
http://www.agiledata.org/essays/tdd.html
http://r-pkgs.had.co.nz/tests.html
* testthat

!!! performance
* rtools 
https://github.com/stan-dev/rstan/wiki/Install-Rtools-for-Windows
* Rcpp
* RInside 

* speed up loop in R 
http://stackoverflow.com/questions/2908822/speed-up-the-loop-operation-in-r
http://www.r-bloggers.com/faster-for-loops-in-r/
http://biostat.mc.vanderbilt.edu/wiki/pub/Main/SvetlanaEdenRFiles/handouts.pdf
http://www.r-bloggers.com/faster-higher-stonger-a-guide-to-speeding-up-r-code-for-busy-people/

!!! reporting 
* knitr

!!! database programming
http://blog.aguskurniawan.net/



! favorite functions 
* cut 
** turns continuous variables into factors http://www.r-bloggers.com/r-function-of-the-day-cut/


! .Rprofile
http://www.r-bloggers.com/fun-with-rprofile-and-customizing-r-startup/
http://stackoverflow.com/questions/13633876/getting-rprofile-to-load-at-startup
http://www.dummies.com/how-to/content/how-to-install-and-configure-rstudio.html


! require vs library
http://stackoverflow.com/questions/5595512/what-is-the-difference-between-require-and-library
http://yihui.name/en/2014/07/library-vs-require/
https://github.com/rstudio/shiny#installation

! R java issue fix 
{{{

 check the environment 
> Sys.getenv()
ALLUSERSPROFILE          C:\ProgramData
APPDATA                  C:\Users\karl\AppData\Roaming
CommonProgramFiles       C:\Program Files\Common Files
CommonProgramFiles(x86)
                         C:\Program Files (x86)\Common Files
CommonProgramW6432       C:\Program Files\Common Files
COMPUTERNAME             KARL-REMOTE
ComSpec                  C:\Windows\system32\cmd.exe
DISPLAY                  :0
FP_NO_HOST_CHECK         NO
GFORTRAN_STDERR_UNIT     -1
GFORTRAN_STDOUT_UNIT     -1
HADOOP_HOME              C:\tmp\hadoop
HOME                     C:/Users/karl/Documents
HOMEDRIVE                C:
HOMEPATH                 \Users\karl
JAVA_HOME                C:/Program Files/Java/jdk1.8.0_25/bin
LOCALAPPDATA             C:\Users\karl\AppData\Local
LOGONSERVER              \\KARL-REMOTE
NUMBER_OF_PROCESSORS     4
OS                       Windows_NT
PATH                     C:\Program
                         Files\R\R-3.3.1\bin\x64;C:\ProgramData\Oracle\Java\javapath;C:\oracle\product\11.1.0\db_1\bin;C:\Windows\system32;C:\Windows;C:\Windows\System32\Wbem;C:\Windows\System32\WindowsPowerShell\v1.0\;C:\Program
                         Files (x86)\Common
                         Files\SYSTEM\MSMAPI\1033;C:\Python33;C:\Python33\Scripts;C:\Program
                         Files (x86)\QuickTime\QTSystem\;C:\Program Files
                         (x86)\nodejs\;C:\Users\karl\AppData\Roaming\npm;C:\Users\karl\AppData\Local\atom\bin;C:\Users\karl\AppData\Local\Pandoc\;C:\Program
                         Files\Java\jdk1.8.0_25\jre\bin\server;C:\Program
                         Files\Java\jdk1.8.0_25\bin
PATHEXT                  .COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC
PROCESSOR_ARCHITECTURE   AMD64
PROCESSOR_IDENTIFIER     Intel64 Family 6 Model 58 Stepping 9, GenuineIntel
PROCESSOR_LEVEL          6
PROCESSOR_REVISION       3a09
ProgramData              C:\ProgramData
ProgramFiles             C:\Program Files
ProgramFiles(x86)        C:\Program Files (x86)
ProgramW6432             C:\Program Files
PSModulePath             C:\Windows\system32\WindowsPowerShell\v1.0\Modules\
PUBLIC                   C:\Users\Public
R_ARCH                   /x64
R_COMPILED_BY            gcc 4.9.3
R_DOC_DIR                C:/PROGRA~1/R/R-33~1.1/doc
R_HOME                   C:/PROGRA~1/R/R-33~1.1
R_LIBS_USER              C:/Users/karl/Documents/R/win-library/3.3
R_USER                   C:/Users/karl/Documents
RMARKDOWN_MATHJAX_PATH   C:/Program Files/RStudio/resources/mathjax-23
RS_LOCAL_PEER            \\.\pipe\33860-rsession
RS_RPOSTBACK_PATH        C:/Program Files/RStudio/bin/rpostback
RS_SHARED_SECRET         63341846741
RSTUDIO                  1
RSTUDIO_MSYS_SSH         C:/Program Files/RStudio/bin/msys-ssh-1000-18
RSTUDIO_PANDOC           C:/Program Files/RStudio/bin/pandoc
RSTUDIO_SESSION_PORT     33860
RSTUDIO_USER_IDENTITY    karl
RSTUDIO_WINUTILS         C:/Program Files/RStudio/bin/winutils
SESSIONNAME              Console
SystemDrive              C:
SystemRoot               C:\Windows
TEMP                     C:\Users\karl\AppData\Local\Temp
TMP                      C:\Users\karl\AppData\Local\Temp
USERDOMAIN               karl-remote
USERNAME                 karl
USERPROFILE              C:\Users\karl
windir                   C:\Windows

 check java version 
> system("java -version")
java version "1.8.0_91"
Java(TM) SE Runtime Environment (build 1.8.0_91-b14)
Java HotSpot(TM) Client VM (build 25.91-b14, mixed mode)


 add the Java directories to PATH; critical here is the directory containing jvm.dll
C:\Program Files\Java\jdk1.8.0_25\jre\bin\server;C:\Program Files\Java\jdk1.8.0_25\bin

 set JAVA_HOME
Sys.setenv(JAVA_HOME="C:/Program Files/Java/jdk1.8.0_25/bin")
library(rJava)
library(XLConnect)

}}}

! remove duplicate records 
http://www.cookbook-r.com/Manipulating_data/Finding_and_removing_duplicate_records/
http://www.dummies.com/how-to/content/how-to-remove-duplicate-data-in-r.html

! get R memory usage 
http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session
{{{
# improved list of objects
.ls.objects <- function (pos = 1, pattern, order.by,
                        decreasing=FALSE, head=FALSE, n=5) {
    napply <- function(names, fn) sapply(names, function(x)
                                         fn(get(x, pos = pos)))
    names <- ls(pos = pos, pattern = pattern)
    obj.class <- napply(names, function(x) as.character(class(x))[1])
    obj.mode <- napply(names, mode)
    obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
    obj.prettysize <- napply(names, function(x) {
                           capture.output(format(utils::object.size(x), units = "auto")) })
    obj.size <- napply(names, object.size)
    obj.dim <- t(napply(names, function(x)
                        as.numeric(dim(x))[1:2]))
    vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
    obj.dim[vec, 1] <- napply(names, length)[vec]
    out <- data.frame(obj.type, obj.size, obj.prettysize, obj.dim)
    names(out) <- c("Type", "Size", "PrettySize", "Rows", "Columns")
    if (!missing(order.by))
        out <- out[order(out[[order.by]], decreasing=decreasing), ]
    if (head)
        out <- head(out, n)
    out
}

# shorthand
lsos <- function(..., n=10) {
    .ls.objects(..., order.by="Size", decreasing=TRUE, head=TRUE, n=n)
}

lsos()

}}}

! dplyr join functions cheat sheet
https://stat545-ubc.github.io/bit001_dplyr-cheatsheet.html


! loess
http://flowingdata.com/2010/03/29/how-to-make-a-scatterplot-with-a-smooth-fitted-line/


! gather vs melt
http://stackoverflow.com/questions/26536251/comparing-gather-tidyr-to-melt-reshape2

! tidyr vs reshape2
http://rpubs.com/paul4forest/reshape2tidyrdplyr


! bootstrapping 
https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=bootstrapping%20in%20r

! forecasting 
Forecasting time series using R by Prof Rob J Hyndman at Melbourne R Users https://www.youtube.com/watch?v=1Lh1HlBUf8k
forecasting principles and practice http://robjhyndman.com/uwafiles/fpp-notes.pdf
http://www.statistics.com/forecasting-analytics#fees
!! melbourne talk
http://robjhyndman.com/seminars/melbournerug/
http://robjhyndman.com/talks/MelbourneRUG.pdf
http://robjhyndman.com/talks/MelbourneRUGexamples.R
!! time series data
https://forecasters.org/resources/time-series-data/m3-competition/
https://forecasters.org/resources/time-series-data/
http://www.forecastingprinciples.com/index.php?option=com_content&view=article&id=8&Itemid=18
https://datamarket.com/data/list/?q=provider%3atsdl
!! prediction competitions
http://robjhyndman.com/hyndsight/prediction-competitions/
!! forecasting books
Forecasting: principles and practice https://www.otexts.org/book/fpp
!! automated forecasting examples
http://www.dxbydt.com/munge-automate-forecast/ , https://github.com/djshahbydt/Munge-Automate-Forecast.../blob/master/Munge%2C%20Automate%20%26%20Forecast...
http://www.dxbydt.com/wp-content/uploads/2015/11/data.csv
https://github.com/pmaier1971/AutomatedForecastingWithShiny/blob/master/server.R
!! forecasting UI examples
https://pmaier1971.shinyapps.io/AutomatedForecastingWithShiny/  <- check the overview and economic forecasting tabs
http://www.ae.be/blog-en/combining-the-power-of-r-and-d3-js/ , http://vanhumbeecka.github.io/R-and-D3/plotly.html  R and D3 binding
https://nxsheet.com/sheets/56d0a87264e47ee60a95f652

!! forecasting and shiny 
https://aneesha.shinyapps.io/ShinyTimeseriesForecasting/
https://medium.com/@aneesha/timeseries-forecasting-with-the-forecast-r-package-and-shiny-6fa04c64196#.r9nllan82
http://www.datasciencecentral.com/profiles/blogs/time-series-forecasting-and-internet-of-things-iot-in-grain

!! forecasting time series reading materials
http://a-little-book-of-r-for-time-series.readthedocs.io/en/latest/index.html
http://a-little-book-of-r-for-time-series.readthedocs.io/en/latest/src/timeseries.html
understanding time series data https://www.safaribooksonline.com/library/view/practical-data-analysis/9781783551668/ch07s03.html
https://www.quantstart.com/articles#time-series-analysis

!! acf pacf, arima arma
http://www.forecastingbook.com/resources/online-tutorials/acf-and-random-walk-in-xlminer
autocorrelation in bearing performance https://www.youtube.com/watch?v=oVQCS9Om_w4
autocorrelation function in time series analysis https://www.youtube.com/watch?v=pax02Q0aJO8
Detecting AR & MA using ACF and PACF plots https://www.youtube.com/watch?v=-vSzKfqcTDg
time series theory https://www.youtube.com/playlist?list=PLUgZaFoyJafhfcggaNzmZt_OdJq32-iFW
R Programming LiveLessons (Video Training): Fundamentals to Advanced https://www.safaribooksonline.com/library/view/r-programming-livelessons/9780133578867/
understanding time series data https://www.safaribooksonline.com/library/view/practical-data-analysis/9781783551668/ch07s03.html

ARMA (no differencing), ARIMA (with differencing) https://www.quora.com/Whats-the-difference-between-ARMA-ARIMA-and-ARIMAX-in-laymans-terms
https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average
https://en.wikipedia.org/wiki/Autoregressive%E2%80%93moving-average_model
ARIMA models https://www.otexts.org/fpp/8
Stationarity and differencing https://www.otexts.org/fpp/8/1
https://www.quora.com/What-are-the-differences-between-econometrics-quantitative-finance-mathematical-finance-computational-finance-and-financial-engineering

!! time series forecasting model compare 
http://stats.stackexchange.com/questions/140163/timeseries-analysis-procedure-and-methods-using-r

!! cross validation 
http://stats.stackexchange.com/questions/140163/timeseries-analysis-procedure-and-methods-using-r
http://robjhyndman.com/hyndsight/crossvalidation/
http://robjhyndman.com/hyndsight/tscvexample/
Evaluating forecast accuracy https://www.otexts.org/fpp/2/5/
http://moderntoolmaking.blogspot.com/2011/11/functional-and-parallel-time-series.html
http://moderntoolmaking.blogspot.com/search/label/cross-validation
http://moderntoolmaking.blogspot.com/search/label/forecasting
cross validation and train/test split - Selecting the best model in scikit-learn using cross-validation https://www.youtube.com/watch?v=6dbrR-WymjI



! dplyr vs data.table
http://stackoverflow.com/questions/21435339/data-table-vs-dplyr-can-one-do-something-well-the-other-cant-or-does-poorly/27840349#27840349
http://www.r-bloggers.com/working-with-large-datasets-with-dplyr-and-data-table/
http://www.r-statistics.com/2013/09/a-speed-test-comparison-of-plyr-data-table-and-dplyr/


! shiny 
reproducible research with R and shiny https://www.safaribooksonline.com/library/view/strata-hadoop/9781491927960/part24.html
http://rmarkdown.rstudio.com/
https://rstudio.github.io/packrat/
https://www.shinyapps.io
https://gist.github.com/SachaEpskamp/5796467 A general shiny app to import and export data to R. Note that this can be used as a starting point for any app that requires data to be loaded into Shiny.
https://www.youtube.com/watch?v=HPZSunrSo5M R Shiny app tutorial # 15 - how to use fileInput to upload CSV or Text file

!! shiny time series 
http://markedmondson.me/my-google-analytics-time-series-shiny-app-alpha
https://gist.github.com/MarkEdmondson1234/3190fb967f3cbc2eeae2
http://blog.rstudio.org/2015/04/14/interactive-time-series-with-dygraphs/
http://stackoverflow.com/questions/28049248/create-time-series-graph-in-shiny-from-user-inputs


!! courses/tutorials
http://shiny.rstudio.com/
http://shiny.rstudio.com/tutorial/
http://shiny.rstudio.com/articles/
http://shiny.rstudio.com/gallery/
http://shiny.rstudio.com/articles/shinyapps.html
http://shiny.rstudio.com/reference/shiny/latest/ <- function references
https://www.safaribooksonline.com/library/view/introduction-to-shiny/9781491959558/
https://www.safaribooksonline.com/library/view/web-application-development/9781782174349/
http://deanattali.com/blog/building-shiny-apps-tutorial/
https://github.com/rstudio/IntroToShiny


!! showcase/gallery/examples
https://www.rstudio.com/products/shiny/shiny-user-showcase/
https://github.com/rstudio/shiny-examples


!! persistent data/storage in shiny
http://deanattali.com/blog/shiny-persistent-data-storage/  
http://daattali.com/shiny/persistent-data-storage/
https://github.com/daattali/shiny-server/tree/master/persistent-data-storage


!! google form with shiny app
http://deanattali.com/2015/06/14/mimicking-google-form-shiny/


!! real time monitoring of R package downloads
https://gallery.shinyapps.io/087-crandash/
https://github.com/Athospd/semantix_closeness_centrality


!! R pivot table
http://www.magesblog.com/2015/03/pivot-tables-with-r.html
http://www.joyofdata.de/blog/pivoting-data-r-excel-style/
http://stackoverflow.com/questions/33214397/download-rpivottable-ouput-in-shiny
https://www.rforexcelusers.com/make-pivottable-in-r/
https://github.com/smartinsightsfromdata/rpivotTable/blob/master/R/rpivotTable.R
https://github.com/joyofdata/r-big-pivot


!! setup shiny server 
https://www.digitalocean.com/community/tutorials/how-to-set-up-shiny-server-on-ubuntu-14-04
http://deanattali.com/2015/05/09/setup-rstudio-shiny-server-digital-ocean/
http://www.r-bloggers.com/how-to-get-your-very-own-rstudio-server-and-shiny-server-with-digitalocean/
http://johndharrison.blogspot.com/2014/03/rstudioshiny-server-on-digital-ocean.html
http://www.r-bloggers.com/deploying-your-very-own-shiny-server/
http://matthewlincoln.net/2015/08/31/setup-rstudio-and-shiny-servers-on-digital-ocean.html


!! nearPoints, brushedPoints
http://shiny.rstudio.com/articles/selecting-rows-of-data.html
http://shiny.rstudio.com/reference/shiny/latest/brushedPoints.html
http://stackoverflow.com/questions/31445367/r-shiny-datatableoutput-not-displaying-brushed-points
http://stackoverflow.com/questions/34642851/shiny-ggplot-with-interactive-x-and-y-does-not-pass-information-to-brush
http://stackoverflow.com/questions/29965979/data-object-not-found-when-deploying-shiny-app
https://github.com/BillPetti/Scheduling-Shiny-App


!! deploy app 
{{{
library(rsconnect)
rsconnect::deployApp('E:/GitHub/code_ninja/r/shiny/karlshiny')
}}}


!! shiny and d3
http://stackoverflow.com/questions/26650561/binding-javascript-d3-js-to-shiny
http://www.r-bloggers.com/d3-and-r-interacting-through-shiny/
https://github.com/timelyportfolio/shiny-d3-plot
https://github.com/vega/vega/wiki/Vega-and-D3
http://vega.github.io/


! data frame vs data table 
http://stackoverflow.com/questions/13618488/what-you-can-do-with-data-frame-that-you-cant-in-data-table
http://stackoverflow.com/questions/18001120/what-is-the-practical-difference-between-data-frame-and-data-table-in-r

! stat functions
stat_summary dot plot - ggplot2 dot plot : Quick start guide - R software and data visualization http://www.sthda.com/english/wiki/print.php?id=180

! ggplot2
ggplot2 essentials http://www.sthda.com/english/wiki/ggplot2-essentials
Be Awesome in ggplot2: A Practical Guide to be Highly Effective - R software and data visualization http://www.sthda.com/english/wiki/be-awesome-in-ggplot2-a-practical-guide-to-be-highly-effective-r-software-and-data-visualization
Beautiful plotting in R: A ggplot2 cheatsheet http://zevross.com/blog/2014/08/04/beautiful-plotting-in-r-a-ggplot2-cheatsheet-3/

!! real time viz
http://stackoverflow.com/questions/11365857/real-time-auto-updating-incremental-plot-in-r
http://stackoverflow.com/questions/27205610/real-time-auto-incrementing-ggplot-in-r


! ggvis
ggvis vs ggplot2 http://ggvis.rstudio.com/ggplot2.html
ggvis basics http://ggvis.rstudio.com/ggvis-basics.html#layers
Properties and scales http://ggvis.rstudio.com/properties-scales.html
ggvis cookbook http://ggvis.rstudio.com/cookbook.html
https://www.cheatography.com/shanly3011/cheat-sheets/data-visualization-in-r-ggvis-continued/
http://stats.stackexchange.com/questions/117078/for-plotting-with-r-should-i-learn-ggplot2-or-ggvis

! Execute R inside Oracle 
https://blogs.oracle.com/R/entry/invoking_r_scripts_via_oracle
https://blogs.oracle.com/R/entry/oraah_enabling_high_performance_r
https://blogs.oracle.com/R/entry/analyzing_big_data_using_the1
http://sheepsqueezers.com/media/documentation/oracle/ore-trng4-embeddedrscripts-1501638.pdf
Oracle R Enterprise Hands-on Lab http://static1.1.sqspcdn.com/static/f/552253/24257177/1390505576063/BIWA_14_Presentation_3.pdf?token=LqmhB3tJhuDeN0eYOXaGlm04BlI%3D
http://www.peakindicators.com/blog/the-advantages-of-ore-over-traditional-r
COUPLING DATABASES AND ADVANCED ANALYTICAL TOOLS (R) http://it4bi.univ-tours.fr/it4bi/medias/pdfs/2014_Master_Thesis/IT4BI_2014_submission_4.pdf
R Interface for Embedded R Execution http://docs.oracle.com/cd/E67822_01/OREUG/GUID-3227A0D4-C5FE-49C9-A28C-8448705ADBCF.htm#OREUG495
automated trading strategies with R http://www.oracle.com/assets/media/automatedtradingstrategies-2188856.pdf?ssSourceSiteId=otnen
Is it possible to run a SAS or R script from PL/SQL? http://stackoverflow.com/questions/4043629/is-it-possible-to-run-a-sas-or-r-script-from-pl-sql
statistical analysis with oracle http://www.sdn.sap.com/irj/scn/go/portal/prtroot/docs/library/uuid/40cfa537-62b8-2f10-a78d-d320a2ab7205?overridelayout=true

! Turn your R code into a web API 
https://github.com/trestletech/plumber


! errors

!! dplyr “Select” - Error: found duplicated column name
http://stackoverflow.com/questions/28549045/dplyr-select-error-found-duplicated-column-name

! spark 
[[sparklyr]]

! R cookbook - Winston C. 
http://www.cookbook-r.com/

! references 
http://www.amazon.com/The-Art-Programming-Statistical-Software/dp/1593273843/ref=tmm_pap_title_0?ie=UTF8&qid=1392504776&sr=8-1
http://www.amazon.com/R-Graphics-Cookbook-Winston-Chang/dp/1449316956/ref=tmm_pap_title_0?ie=UTF8&qid=1392504949&sr=8-2
http://had.co.nz/
http://adv-r.had.co.nz/ <- advanced guide
http://adv-r.had.co.nz/Style.html <- style guide 

https://www.youtube.com/user/rdpeng/playlists

https://cran.r-project.org/doc/contrib/Short-refcard.pdf
discovering statistics using R http://library.mpib-berlin.mpg.de/toc/z2012_1351.pdf

! rpubs favorites
http://rpubs.com/karlarao
Interpreting coefficients from interaction (Part 1) http://rpubs.com/hughes/15353


! tricks 


!! count frequency of words
{{{

numbers <- c(33, 30, 14, 1 , 6, 19, 34, 17, 14, 15, 24 , 21, 24, 34, 6, 24, 34, 6, 29, 5, 19 , 4, 3, 19, 4, 14, 20, 34)

library(dplyr); arrange(as.data.frame(table(numbers)), Freq)

numbers Freq
      1    1
      3    1
      5    1
     15    1
     17    1
     20    1
     21    1
     29    1
     30    1
     33    1
      4    2
      6    3
     14    3
     19    3
     24    3
     34    4
}}}

!! How to print text and variables in a single line in r 
https://stackoverflow.com/questions/32241806/how-to-print-text-and-variables-in-a-single-line-in-r/32242334


! data structures 

!! see R DATA FORMAT 
[[R data format]]



! XLConnect

!! XLConnect strftime
https://stackoverflow.com/questions/21312173/how-can-i-retrive-the-time-only-with-xlconnect

!! 1899-Dec-31
http://www.cpearson.com/excel/datetime.htm

!! R import big xlsx 
https://stackoverflow.com/questions/19147884/importing-a-big-xlsx-file-into-r/31029292#31029292

!! R write CSV 
http://rprogramming.net/write-csv-in-r/

!! R java memory 
https://stackoverflow.com/questions/34624002/r-error-java-lang-outofmemoryerror-java-heap-space
https://stackoverflow.com/questions/11766981/xlconnect-r-use-of-jvm-memory

!! R commandargs 
https://www.rdocumentation.org/packages/R.utils/versions/2.8.0/topics/commandArgs



! scraping HTML (XML package)
http://bradleyboehmke.github.io/2015/12/scraping-html-tables.html









.






<<<
Before h2o there’s a GUI data mining tool called rattle
 
quick intro http://r4stats.com/articles/software-reviews/rattle/
detailed course https://www.udemy.com/data-mining-with-rattle/
https://www.kdnuggets.com/2017/02/top-r-packages-machine-learning.html
 
I’d definitely try h2o with the same data set https://www.h2o.ai/try-driverless-ai/
 
it also helps having Tableau for easy validation of the raw data, and sqldf https://www.r-bloggers.com/make-r-speak-sql-with-sqldf/ (also available in python - from pandasql import sqldf; see the sketch after this quote)
and of course R studio, pycharm, and sql developer
 
another GUI tool is exploratory made by an ex oracle guy (from oracle visual analyzer team)
https://exploratory.io/features
<<<
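A minimal sketch of the pandasql one-liner mentioned in the quote above (the DataFrame and query are illustrative):
{{{
import pandas as pd
from pandasql import sqldf

df = pd.DataFrame({'grp': ['a', 'a', 'b'], 'val': [1, 2, 3]})

# query the DataFrame with plain SQL; locals() exposes df to sqldf
print(sqldf("select grp, sum(val) as total from df group by grp", locals()))
}}}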
<<showtoc>>

http://insightdataengineering.com/blog/The-Data-Engineering-Ecosystem-An-Interactive-Map.html
https://blog.insightdatascience.com/the-new-data-engineering-ecosystem-trends-and-rising-stars-414a1609d4a0#.c03g5b1nc
https://github.com/InsightDataScience/data-engineering-ecosystem/wiki/Data-Engineering-Ecosystem
https://github.com/InsightDataScience/data-engineering-ecosystem


! the ecosystem

!! v3
http://xyz.insightdataengineering.com/blog/pipeline_map/
[img(50%,50%)[ https://i.imgur.com/xeo0SP4.png ]]

!! v2
http://xyz.insightdataengineering.com/blog/pipeline_map_v2.html
[img(90%,90%)[ http://i.imgur.com/gn9E7Jf.png ]]

!! v1
http://xyz.insightdataengineering.com/blog/pipeline_map_v1.html
[img(90%,90%)[ https://lh3.googleusercontent.com/-iD9v8Iho_7g/VZVvf0mK1PI/AAAAAAAACmU/VlovJ-JP2cI/s2048/20150702_DataEngineeringEcosystem.png ]]


! hadoop architecture use case
[img[ https://lh3.googleusercontent.com/-QsRM3czDMkg/Vhfg7pTmFrI/AAAAAAAACzU/4BEa8SfK_KU/s800-Ic42/IMG_8542.JPG ]]



! others
https://trello.com/b/rbpEfMld/data-science





! also check 
!! microservices patterns 
[img(100%,100%)[ https://i.imgur.com/7p8kBwI.png]]
https://microservices.io/patterns/index.html






<<showtoc>> 

! The players: 

!! Cloud computing 
!!! AWS
!!! Azure
!!! Google Cloud 
!!! Digital Ocean

!! Infrastructure as code 
!!! Chef http://www.getchef.com/chef/
!!! Puppet http://puppetlabs.com/puppet/puppet-enterprise
!!! Ansible
!!! Saltstack
!!! terraform
!!! cfengine

!! Build and Test using continuous integration 
!!!  jenkins https://jenkins-ci.org/

!! Containerization 
!!!  docker, kubernetes


! ''reviews'' 
http://www.infoworld.com/d/data-center/puppet-or-chef-the-configuration-management-dilemma-215279

! Comparison of open-source configuration management software 
http://en.wikipedia.org/wiki/Comparison_of_open_source_configuration_management_software

! List of build automation software 
http://en.wikipedia.org/wiki/List_of_build_automation_software



! nice viz from heroku website
[img[ http://i.imgur.com/4kTs7TE.png  ]]
[img[ http://i.imgur.com/PHCK74x.png  ]]

! from hashicorp 
[img(70%,70%)[ http://i.imgur.com/yj0bKNF.png ]]
[img(70%,70%)[ http://i.imgur.com/zrSw8ge.png ]]

http://thenewstack.io/devops-landscape-2015-the-race-to-the-management-layer/
https://gist.github.com/diegopacheco/8f3a03a0869578221ecf






https://becominghuman.ai/cheat-sheets-for-ai-neural-networks-machine-learning-deep-learning-big-data-678c51b4b463
https://medium.com/machine-learning-in-practice/cheat-sheet-of-machine-learning-and-python-and-math-cheat-sheets-a4afe4e791b6
https://ml-cheatsheet.readthedocs.io/en/latest/
https://technology.amis.nl/2017/05/06/the-hello-world-of-machine-learning-with-python-pandas-jupyter-doing-iris-classification-based-on-quintessential-set-of-flower-data/
<<showtoc>>


! DVC - data version control 


MLOps Data Versioning and DataOps with Dmitry Petrov of DVC.org
https://www.meetup.com/pl-PL/bristech/events/271251921/

https://www.eventbrite.com/e/dc-thurs-dvc-w-dmitry-petrov-tickets-120036389071?ref=enivtefor001&invite=MjAwNjgyNjMva2FybGFyYW9AZ21haWwuY29tLzA%3D%0A&utm_source=eb_email&utm_medium=email&utm_campaign=inviteformalv2&utm_term=eventpage
Using Python With Oracle Database 11g
http://www.oracle.com/technetwork/articles/dsl/python-091105.html

http://www.oracle.com/webfolder/technetwork/tutorials/obe/db/OOW11/python_db/python_db.htm
http://www.oracle.com/webfolder/technetwork/tutorials/obe/db/oow10/python_db/python_db.htm
http://cx-oracle.sourceforge.net/
http://www.python.org/dev/peps/pep-0249/

http://www.amazon.com/gp/product/1887902996
http://wiki.python.org/moin/BeginnersGuide
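A minimal cx_Oracle sketch (credentials/DSN below are placeholders):
{{{
import cx_Oracle

# EZConnect-style connect string: user/password@host:port/service
conn = cx_Oracle.connect('scott/tiger@localhost:1521/orclpdb1')
cur = conn.cursor()

# bind variables instead of string concatenation
cur.execute("select table_name from user_tables where rownum <= :n", n=5)
for (table_name,) in cur:
    print(table_name)

conn.close()
}}}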

''python for ipad'' http://www.tuaw.com/2012/11/19/python-3-2-lets-you-write-python-on-the-iphone/

''python environment''
http://blog.andrewhays.net/love-your-terminal
http://ozkatz.github.com/improving-your-python-productivity.html

http://showmedo.com/videotutorials/python
The Ultimate Python Programming Course http://goo.gl/vvpWE, https://www.udemy.com/the-ultimate-python-programming-course/
Python 3 Essential Training http://www.lynda.com/Python-3-tutorials/essential-training/62226-2.html






{{{
parameters: 
    p_owner       
    p_tabname     
    p_partname    
    p_granularity 
    p_est_percent 
    p_method_opt
    p_degree
}}}

{{{
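-- gathers table (or single-partition) stats via dynamic SQL;
-- p_est_percent and p_degree are concatenated unquoted into the statement,
-- so both literals ('1', '100') and expressions (DBMS_STATS.AUTO_SAMPLE_SIZE) work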

CREATE OR REPLACE PROCEDURE alloc_app_perf.table_stats 
( 
    p_owner IN varchar2,
    p_tabname IN varchar2, 
    p_partname IN varchar2 default NULL,  
    p_granularity IN varchar2 default 'GLOBAL AND PARTITION',
    p_est_percent IN varchar2 default 'DBMS_STATS.AUTO_SAMPLE_SIZE',
    p_method_opt IN varchar2 default 'FOR ALL COLUMNS SIZE AUTO',
    p_degree IN varchar2 default 8
) 
IS
    action varchar2(128);
    v_mode varchar2(30);
    cmd varchar2(2000);
BEGIN
    action := 'Analyzing the table ' || p_tabname; 
    IF p_partname IS NOT NULL THEN
        action := action||', partition '||p_partname;
        v_mode := p_granularity;

        cmd := '
        BEGIN 
            DBMS_STATS.GATHER_TABLE_STATS('||
            'ownname=>'''||p_owner||''',tabname=>'''||p_tabname||
            ''',partname=>'''||p_partname||''',granularity=>'''||v_mode||
            ''',estimate_percent=>'||p_est_percent||',method_opt=>'''||p_method_opt||''',cascade=>TRUE,degree=>'||p_degree||');
        END;';

        execute immediate cmd;
    ELSE 
        v_mode := 'DEFAULT';

        cmd := '
        BEGIN 
            DBMS_STATS.GATHER_TABLE_STATS('||
            'ownname=>'''||p_owner||''',tabname=>'''||p_tabname||
            ''',estimate_percent=>'||p_est_percent||',method_opt=>'''||p_method_opt||''',cascade=>TRUE,degree=>'||p_degree||');
        END;';

        execute immediate cmd;
    END IF; 
END;
/

}}}


{{{

grant analyze any to alloc_app_perf;
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'CLASS_SALES')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'CLASS_SALES',p_est_percent=>'dbms_stats.auto_sample_size')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'CLASS_SALES',p_est_percent=>'1')

exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DBA_OBJECTS')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DBA_OBJECTS',p_est_percent=>'1')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DBA_OBJECTS',p_est_percent=>'100')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DBA_OBJECTS',p_est_percent=>'dbms_stats.auto_sample_size')

exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DEMO_SKEW')
exec alloc_app_perf.table_stats(p_owner=>'BAS',p_tabname=>'DEMO_SKEW',p_method_opt=>'for all columns size skewonly')


}}}


! generate stats commands 
{{{

set lines 500
set pages 0
select 'exec alloc_app_perf.table_stats(p_owner=>'''||owner||''',p_tabname=>'''||table_name||''',p_degree=>''16'');'
from dba_tables where table_name in 
('ALGO_INPUT_FOR_REVIEW'        
,'ALLOCATED_NOTSHIPPED_INVS'     
,'ALLOC_BATCH_LINE_ITEMS'        
,'ALLOC_BATCH_VOLUMEGRADE_CHANGE'
,'ALLOC_SKU0_MASTER'             
,'ALLOC_SKU_MASTER'              
,'ALLOC_STORES'                  
,'EOM_NEED_UNITS'                
,'EOM_UNIT_TARGETS'              
,'INVENTORY_CLASSES'             
,'SIM_CLASSES'                   
,'STAGING_STORES'                
,'STORE_ON_ORDERS'               
,'STORE_WAREHOUSE_DETAILS'       
,'VOLUME_GRADE_CONSTRAINTS')
/


}}}


https://github.com/DingGuodong/LinuxBashShellScriptForOps
http://www.bashoneliners.com/
https://github.com/learnbyexample/scripting_course
https://github.com/learnbyexample/Linux_command_line/blob/master/Shell_Scripting.md
https://github.com/learnbyexample/Linux_command_line/blob/master/Text_Processing.md
https://medium.com/capital-one-developers/bashing-the-bash-replacing-shell-scripts-with-python-d8d201bc0989
https://www.linuxjournal.com/content/python-scripts-replacement-bash-utility-scripts
https://www.educba.com/bash-shell-programming-with-python/
https://www.linuxquestions.org/questions/linux-software-2/need-help-converting-bash-script-to-python-4175605267/
https://stackoverflow.com/questions/2839810/converting-a-bash-script-to-python-small-script
https://tails.boum.org/blueprint/Port_shell_scripts_to_Python/
https://www.dreamincode.net/forums/topic/399713-convert-a-shell-script-to-python/
https://grasswiki.osgeo.org/wiki/Converting_Bash_scripts_to_Python

https://medium.com/capital-one-tech/bashing-the-bash-replacing-shell-scripts-with-python-d8d201bc0989
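A minimal sketch of the kind of one-liner translation these links walk through (the log path is illustrative):
{{{
# bash: grep -c ERROR /var/log/app.log
from pathlib import Path

log = Path('/var/log/app.log')
count = sum(1 for line in log.open() if 'ERROR' in line)
print(count)
}}}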



! tools 
https://zwischenzugs.com/2016/08/29/bash-to-python-converter/
https://github.com/tomerfiliba/plumbum
https://hub.docker.com/r/imiell/bash2py/
https://github.com/ianmiell/bash2py
http://www.swag.uwaterloo.ca/bash2py/index.html  , https://ieeexplore.ieee.org/document/7081866/?reload=true

<<showtoc>>


! gcp security 
* https://www.udemy.com/course/introduction-to-google-cloud-security-features/learn/lecture/14562410#overview


! bigquery 
* Practical Google BigQuery for those who already know SQL https://www.udemy.com/course/practical-google-bigquery-for-those-who-already-know-sql/
* https://www.udemy.com/course/google-bigquery-for-marketers-and-agencies/


! cloud composer (airflow)
* playlist - Apache Airflow Tutorials - https://www.youtube.com/watch?v=AHMm1wfGuHE&list=PLYizQ5FvN6pvIOcOd6dFZu3lQqc6zBGp2
* Apache Airflow using Google Cloud Composer https://www.udemy.com/course/apache-airflow-using-google-cloud-composer-introduction/


! dataflow (apache beam)
* https://www.udemy.com/course/streaming-analytics-on-google-cloud-platform/learn/lecture/7996614#announcements
* https://www.udemy.com/course/apache-beam-a-hands-on-course-to-build-big-data-pipelines/learn/lecture/16220774#announcements


! end to end 
* https://www.udemy.com/course/data-engineering-on-google-cloud-platform/
* https://www.udemy.com/course/talend-open-studio-for-big-data-using-gcp-bigquery/


! SQL 
https://www.udemy.com/course/oracle-analytic-functions-in-depth/
https://www.udemy.com/course/oracle-plsql-is-my-game-exam-1z0-144/


! python 
https://www.udemy.com/course/python-oops-beginners/learn/lecture/7359360#overview
https://www.udemy.com/course/python-object-oriented-programming-oop/learn/lecture/16917860#overview
https://www.udemy.com/course/python-sql-tableau-integrating-python-sql-and-tableau/learn/lecture/13205790#overview
https://www.youtube.com/c/Coreyms/videos
https://www.youtube.com/c/realpython/videos



! java 
https://www.udemy.com/course/java-for-absolute-beginners/learn/lecture/14217184#overview
<<showtoc>>

! PL/SQL User's Guide and Reference Release - Sample PL/SQL Programs
https://docs.oracle.com/cd/A97630_01/appdev.920/a96624/a_samps.htm

! steve's videos - the plsql channel
http://tutorials.plsqlchannel.com/public/index.php 
https://learning.oreilly.com/search/?query=Steven%20Feuerstein&extended_publisher_data=true&highlight=true&include_assessments=false&include_case_studies=true&include_courses=true&include_orioles=true&include_playlists=true&is_academic_institution_account=false&sort=relevance&page=0
https://www.youtube.com/channel/UCpJpLMRm452kVcie3RpINPw/playlists
http://stevenfeuersteinonplsql.blogspot.com/2015/03/27-hours-of-free-plsql-video-training.html
practically perfect plsql playlist https://apexapps.oracle.com/pls/apex/f?p=44785:141:0::NO::P141_PAGE_ID,P141_SECTION_ID:168,1208
https://www.oracle.com/database/technologies/appdev/plsql.html


! style guide 
http://oracle.readthedocs.org/en/latest/sql/basics/style-guide.html
http://www.williamrobertson.net/documents/plsqlcodingstandards.html



! plsql the good parts 
https://github.com/mortenbra/plsql-the-good-parts
http://mortenbra.github.io/plsql-the-good-parts/



! bulk collect and forall 
https://venzi.wordpress.com/2007/09/27/bulk-collect-forall-vs-cursor-for-loop/


! mvc pl/sql
http://www.dba-oracle.com/oracle_news/2004_10_27_MVC_development_using_plsql.htm
https://github.com/osalvador/dbax
http://it.toolbox.com/blogs/jjflash-oracle-journal/mvc-for-plsql-and-the-apex-listener-42688
http://jj-blogger.blogspot.com/2006/05/plsql-and-faces.html
https://www.rittmanmead.com/blog/2004/09/john-flack-on-mvc-development-using-plsql/
http://www.liberidu.com/blog/2016/11/02/how-you-should-or-shouldnt-design-program-for-a-performing-database-environment/



! references/books
http://stevenfeuersteinonplsql.blogspot.com/2014/05/resources-for-new-plsql-developers.html
https://www.safaribooksonline.com/library/view/beginning-plsql-from/9781590598825/
https://www.safaribooksonline.com/library/view/beginning-oracle-plsql/9781484207376/
https://www.safaribooksonline.com/library/view/oracle-and-plsql/9781430232070/
https://www.safaribooksonline.com/library/view/oracle-plsql-for/9780764599576/
https://www.safaribooksonline.com/library/view/oracle-plsql-for/0596005873/

! wiki
http://www.java2s.com/Tutorials/Database/Oracle_PL_SQL_Tutorial/index.htm
https://gerardnico.com/wiki/plsql/plsql


! implicit cursor attribute	
https://www.ibm.com/support/knowledgecenter/en/SS6NHC/com.ibm.swg.im.dashdb.apdv.plsql.doc/doc/c0053881.html
    https://www.ibm.com/support/knowledgecenter/SS6NHC/com.ibm.swg.im.dashdb.apdv.plsql.doc/doc/c0053879.html
    https://www.ibm.com/support/knowledgecenter/SS6NHC/com.ibm.swg.im.dashdb.apdv.plsql.doc/doc/c0053878.html
    https://www.ibm.com/support/knowledgecenter/SS6NHC/com.ibm.swg.im.dashdb.apdv.plsql.doc/doc/c0053607.html
PL/SQL Language Elements https://docs.oracle.com/cd/B28359_01/appdev.111/b28370/langelems.htm#LNPLS013 
Cursor Attribute https://docs.oracle.com/cd/B28359_01/appdev.111/b28370/cursor_attribute.htm#LNPLS01311
https://markhoxey.wordpress.com/2012/12/11/referencing-implicit-cursor-attributes-sql/


! plsql profiling 
https://www.thatjeffsmith.com/archive/2019/02/sql-developer-the-pl-sql-hierarchical-profiler/

{{{
Script to produce HTML report with top consumers out of PL/SQL Profiler DBMS_PROFILER data (Doc ID 243755.1)
PURPOSE
To use the PL/SQL Profiler please refer to DBMS_PROFILER documentation as per Oracle® Database PL/SQL Packages and Types Reference for your specific release and platform.

Once you have executed the PL/SQL Profiler for a piece of your application, you can use script profiler.sql provided in this document. This profiler.sql script produces a nice HTML report with the top time consumers as per your execution of the PL/SQL Profiler.

TROUBLESHOOTING STEPS
Familiarize yourself with the PL/SQL Profiler documented in the "Oracle® Database PL/SQL Packages and Types Reference" under DBMS_PROFILER.
If needed, create the PL/SQL Profiler Tables under your application schema: @?/rdbms/admin/proftab.sql
If needed, install the DBMS_PROFILER API, connected as SYS: @?/rdbms/admin/profload.sql
Start PL/SQL Profiler in your application: EXEC DBMS_PROFILER.START_PROFILER('optional comment');
Execute your transaction to be profiled. Calls to PL/SQL Libraries are expected.
Stop PL/SQL Profiler: EXEC DBMS_PROFILER.STOP_PROFILER;
Connect as your application user, execute script profiler.sql provided in this document: @profiler.sql
Provide to profiler.sql the "runid" out of a displayed list.
Review HTML report generated by profiler.sql.
}}}




! plsql collections 
Collections in Oracle PLSQL https://www.youtube.com/watch?v=DvA-amyao7s

!! accessing varray 
Accessing elements in a VARRAY column which is in a type https://community.oracle.com/thread/3961996
https://docs.oracle.com/cd/E11882_01/server.112/e41084/statements_10002.htm#i2071643





! pl/sql design patterns 
<<<
https://technology.amis.nl/2006/03/10/design-patterns-in-plsql-the-template-pattern/
https://technology.amis.nl/2006/03/11/design-patterns-in-plsql-interface-injection-for-even-looser-coupling/
https://technology.amis.nl/2006/04/02/design-patterns-in-plsql-implementing-the-observer-pattern/

https://blog.serpland.com/tag/design-patterns
https://blog.serpland.com/oracle/design-patterns-in-plsql-oracle


https://peterhrasko.wordpress.com/2017/09/16/oop-design-patterns-in-plsql/
<<<




! plsql dynamic sql 
!! EXECUTE IMMEDIATE with multiple lines of columns to insert 
https://stackoverflow.com/questions/14401631/execute-immediate-with-multiple-lines-of-columns-to-insert
https://stackoverflow.com/questions/9090072/insert-a-multiline-string-in-oracle-with-sqlplus


! plsql cursor within the cursor 
https://www.techonthenet.com/oracle/questions/cursor2.php



! end










https://startupsventurecapital.com/essential-cheat-sheets-for-machine-learning-and-deep-learning-researchers-efb6a8ebd2e5
Data Mining from a process perspective 
(from the book Data Mining for Business Analytics - Concepts, Techniques, and Applications)
[img(80%,80%)[http://i.imgur.com/tJ4TVCX.png]]
[img(80%,80%)[http://i.imgur.com/rmLhSnV.png]]

Machine Learning Summarized in One Picture
http://www.datasciencecentral.com/profiles/blogs/machine-learning-summarized-in-one-picture
[img(80%,80%)[http://i.imgur.com/oA0LjyF.png]]

Data Science Summarized in One Picture
http://www.datasciencecentral.com/profiles/blogs/data-science-summarized-in-one-picture
https://www.linkedin.com/pulse/business-intelligence-data-science-fuzzy-borders-rubens-zimbres
[img(80%,80%)[http://i.imgur.com/1SnVfqV.png]]


Python for Big Data in One Picture
http://www.datasciencecentral.com/profiles/blogs/python-for-big-data-in-one-picture
https://www.r-bloggers.com/python-r-vs-spss-sas/
[img(80%,80%)[http://i.imgur.com/5kPV76P.jpg]]

R for Big Data in One Picture
http://www.datasciencecentral.com/profiles/blogs/r-for-big-data-in-one-picture
[img(80%,80%)[http://i.imgur.com/abDq0ow.jpg]]


! top data science packages
[img(100%,100%)[ https://i.imgur.com/tj3ryoK.png]]
https://www.coriers.com/comparison-of-top-data-science-libraries-for-python-r-and-scala-infographic/






! ML modelling in R cheat sheet
[img(100%,100%)[ https://i.imgur.com/GPiepGw.jpg]]
https://github.com/rstudio/cheatsheets/raw/master/Machine%20Learning%20Modelling%20in%20R.pdf
https://www.r-bloggers.com/machine-learning-modelling-in-r-cheat-sheet/


! ML workflow 
[img(100%,100%)[ https://i.imgur.com/TuhIB7T.png ]]







.


also see [[database/data movement methods]]


<<showtoc>>

! RMAN 

[img(50%,50%)[ http://i.imgur.com/eLK7RRk.png ]]

!! backup and restore 
        backup and restore from physical standby http://gavinsoorma.com/2012/04/performing-a-database-clone-using-a-data-guard-physical-standby-database/
	Using RMAN Incremental Backups to Refresh Standby Database http://oracleinaction.com/using-rman-incremental-backups-refresh-standby-database/
	https://jarneil.wordpress.com/2008/06/03/applying-an-incremental-backup-to-a-physical-standby/

!! active duplication 
        create standby database using rman active duplicate https://www.pythian.com/blog/creating-a-physical-standby/
	https://oracle-base.com/articles/11g/duplicate-database-using-rman-11gr2#active_database_duplication
	https://oracle-base.com/articles/12c/recovery-manager-rman-database-duplication-enhancements-12cr1
!! backup-based duplication
        duplicate database without connecting to target http://oracleinaction.com/duplicate-db-no-db-conn/
        https://www.safaribooksonline.com/library/view/rman-recipes-for/9781430248361/9781430248361_Ch15.xhtml
        https://www.safaribooksonline.com/library/view/oracle-database-12c/9780071847445/ch10.html#ch10lev15
        http://oracledbasagar.blogspot.com/2011/11/cloning-on-different-server-using-rman.html
!! restartable duplicate 
        11gr2 DataGuard: Restarting DUPLICATE After a Failure https://blogs.oracle.com/XPSONHA/entry/11gr2_dataguard_restarting_dup

! dNFS + CloneDB
    uses the backup piece as the backing storage,
    1210656.1: “Clone your dNFS Production Database for Testing.”
    How to Accelerate Test and Development Through Rapid Cloning of Production Databases and Operating Environments http://www.oracle.com/technetwork/server-storage/hardware-solutions/o13-022-rapid-cloning-db-1919816.pdf
    https://oracle-base.com/articles/11g/clonedb-11gr2
    http://datavirtualizer.com/database-thin-cloning-clonedb-oracle/
Clonedb: The quick and easy cloning solution you never knew you had https://www.youtube.com/watch?v=YBVj1DkUG54

! oem12c snapclone , snap clone
    http://datavirtualizer.com/em-12c-snap-clone/
    snap clone https://www.safaribooksonline.com/library/view/building-database-clouds/9780134309781/ch08.html#ch08
https://dbakevlar.com/2013/09/em-12c-snap-clone/
DB Snap Clone on Exadata 	https://www.youtube.com/watch?v=nvEmP6Z65Bg



! Thin provisioning of PDBs using “Snapshot Copy” (using ACFS snapshot or ZFS)

!! ACFS snapshot
https://www.youtube.com/watch?v=jwgD2sg8cyM
https://www.youtube.com/results?search_query=acfs+snapshot
How To Manually Create An ACFS Snapshot (Doc ID 1347365.1)
12.2 Oracle ACFS Snapshot Enhancements (Doc ID 2200299.1)



! exadata sparse clones 
https://www.doag.org/formes/pubfiles/10819226/2018-Infra-Peter_Brink-Exadata_Snapshot_Clones-Praesentation.pdf
https://docs.oracle.com/en/engineered-systems/exadata-database-machine/sagug/exadata-storage-server-snapshots.html#GUID-78F67DD0-93C8-4944-A8F0-900D910A06A0
https://learning.oreilly.com/library/view/Oracle+Database+Exadata+Cloud+Service:+A+Beginner's+Guide/9781260120882/ch3.xhtml#page_83
How to Calculate the Physical Size and Virtual Size for Sparse GridDisks in Exadata Sparse Diskgroups (ORA-15041) (Doc ID 2473412.1)


! summary matrix 
https://www.oracle.com/technetwork/database/exadata/learnmore/exadata-database-copy-twp-2543083.pdf
https://blogs.oracle.com/exadata/exadata-snapshots-part1

[img(100%,100%)[ https://i.imgur.com/C9dQNwr.png]]




! flexclone (netapp)
    snap best practices http://www.netapp.com/us/media/tr-3761.pdf

! delphix 
    Instant Cloning: Boosting Application Development http://www.nocoug.org/download/2014-02/NoCOUG_201402_delphix.pdf






! references
https://www.safaribooksonline.com/search/?query=RMAN%20duplicate&highlight=true&is_academic_institution_account=false&extended_publisher_data=true&include_orioles=true&source=user&include_courses=true&sort=relevance&page=2
Oracle Database 12c Oracle RMAN Backup & Recovery https://www.safaribooksonline.com/library/view/oracle-database-12c/9780071847445/
{{{
10 Duplication: Cloning the Target Database
RMAN Duplication: A Primer
Why Use RMAN Duplication?
Different Types of RMAN Duplication
The Duplication Architecture
Duplication: Location Considerations
Duplication to the Same Server: An Overview
Duplication to the Same Server, Different ORACLE_HOME
Duplication to a Remote Server: An Overview
Duplication and the Network
RMAN Workshop: Build a Password File
Duplication to the Same Server
RMAN Workshop: Duplication to the Same Server Using Disk Backups
Using Tape Backups
Duplication to a Remote Server
RMAN Workshop: Duplication to a Remote Server Using Disk Backups
Using Tape Backups for Remote Server Duplication
Targetless Duplication in 12c
Incomplete Duplication: Using the DBNEWID Utility
New RMAN Cloning Features for 12c
Using Compression
Duplicating Large Tablespaces
Summary

Duplication to a Single-Node System
RMAN Workshop: Duplicating a RAC Database to a Single-Node Database

Case #9: Completing a Failed Duplication Manually
Case #10: Using RMAN Duplication to Create a Historical Subset of the Target Database
}}}

Building Database Clouds in Oracle 12c https://www.safaribooksonline.com/library/view/building-database-clouds/9780134309781/ch08.html#ch08
{{{
Chapter 8. Cloning Databases in Enterprise Manager 12c
Full Clones
Snap Clones
Summary
}}}

Oracle Database 11g—Underground Advice for Database Administrators https://www.safaribooksonline.com/library/view/oracle-database-11gunderground/9781849680004/ch06s09.html
{{{
RMAN cloning and standbys—physical, snapshot, or logical
}}}

Oracle Database Problem Solving and Troubleshooting Handbook https://www.safaribooksonline.com/library/view/oracle-database-problem/9780134429267/ch14.html
{{{
14. Strategies for Migrating Data Quickly between Databases
}}}

Oracle RMAN Database Duplication https://www.safaribooksonline.com/library/view/oracle-rman-database/9781484211120/9781484211137_Ch01.xhtml
{{{
CHAPTER 1 Introduction
}}}

RMAN Recipes for Oracle Database 12c: A Problem-Solution Approach, Second Edition https://www.safaribooksonline.com/library/view/rman-recipes-for/9781430248361/9781430248361_Ch15.xhtml
{{{
15-1. Renaming Database Files in a Duplicate Database
15-2. Specifying Alternative Names for OMF or ASM File Systems
15-3. Creating a Duplicate Database from RMAN Backups
15-4. Duplicating a Database Without Using RMAN Backups
15-5. Specifying Options for Network-based Active Database Duplication
15-6. Duplicating a Database with Several Directories
15-7. Duplicating a Database to a Past Point in Time
15-8. Skipping Tablespaces During Database Duplication
15-9. Duplicating a Database with a Specific Backup Tag
15-10. Resynchronizing a Duplicate Database
15-11. Duplicating Pluggable Databases and Container Databases
15-12. Transporting Tablespaces on the Same Operating System Platform
15-13. Performing a Cross-Platform Tablespace Transport by Converting Files on the Source Host
15-14. Performing a Cross-Platform Tablespace Transport by Converting Files on the Destination Host
15-15. Transporting a Database by Converting Files on the Source Database Platform
15-16. Transporting Tablespaces to a Different Platform Using RMAN Backup Sets
15-17. Transporting a Database to a Different Platform Using RMAN Backup Sets

}}}








https://15445.courses.cs.cmu.edu/fall2019/
also see [[database cloning methods , rman duplicate]]


<<showtoc>>

! Transactional Migration Methods
[img(30%,30%)[http://i.imgur.com/IXfxJlZ.png]]


! Nontransactional Migration Methods
[img(30%,30%)[http://i.imgur.com/cB7is6q.png]]
[img(30%,30%)[http://i.imgur.com/09X0J6j.png]]


! Piecemeal Migration Methods / Manual Migration Methods
[img(30%,30%)[http://i.imgur.com/zPZlA2V.png]]
[img(30%,30%)[http://i.imgur.com/cnyrkW4.png]]


! Replication techniques
[img(30%,30%)[http://i.imgur.com/oi93qRg.png]]
[img(30%,30%)[http://i.imgur.com/ka9Tm52.png]]


! references 
Oracle Database Problem Solving and Troubleshooting Handbook https://www.safaribooksonline.com/library/view/oracle-database-problem/9780134429267/ch14.html
{{{
14. Strategies for Migrating Data Quickly between Databases
}}}
Oracle RMAN Database Duplication https://www.safaribooksonline.com/library/view/oracle-rman-database/9781484211120/9781484211137_Ch01.xhtml
{{{
CHAPTER 1 Introduction
}}}




! yahoo oath 
https://www.google.com/search?q=oath+hadoop+platform&oq=oath+hadoop+platform&aqs=chrome..69i57.5613j1j1&sourceid=chrome&ie=UTF-8
https://www.google.com/search?biw=1194&bih=747&ei=4ux9W7m3Nurs_QbNupXQCg&q=yahoo+hadoop+platform+oath&oq=yahoo+hadoop+platform+oath&gs_l=psy-ab.3...12449.17634.0.20887.26.26.0.0.0.0.108.2024.24j2.26.0..2..0...1.1.64.psy-ab..0.23.1791...0j0i131k1j0i67k1j0i131i67k1j0i3k1j0i22i30k1j0i22i10i30k1j33i21k1j33i160k1j33i22i29i30k1.0.rnoY_HHMiAw
also see [[JL five-hints]] for examples on using hints to manipulate the priority of query block/table












.
based on http://ptgmedia.pearsoncmg.com/imprint_downloads/informit/promotions/python/python2python3.pdf

! .
[img(80%,80%)[https://i.imgur.com/VFFUDki.png]]
! .
[img(80%,80%)[https://i.imgur.com/aMNeCYv.png]]
! .
[img(80%,80%)[https://i.imgur.com/21KlHuY.png]]
! .
[img(80%,80%)[https://i.imgur.com/11GXxTf.png]]
https://community.oracle.com/docs/DOC-1005069  <- arup, good stuff
https://blogs.oracle.com/developers/updates-to-python-php-and-c-drivers-for-oracle-database


https://blog.dbi-services.com/oracle-locks-identifiying-blocking-sessions/

{{{
when w.wait_event_text like 'enq: TM%' then
    ' mode '||decode(w.p1 ,1414332418,'Row-S' ,1414332419,'Row-X' ,1414332420,'Share' ,1414332421,'Share RX' ,1414332422,'eXclusive')
     ||( select ' on '||object_type||' "'||owner||'"."'||object_name||'" ' from all_objects where object_id=w.p2 )
}}}


https://jonathanlewis.wordpress.com/2010/06/21/locks/
{{{
This list is specifically about the lock modes for a TM lock:

Value   Name(s)                    Table method (TM lock)
    0   No lock                    n/a
 
    1   Null lock (NL)             Used during some parallel DML operations (e.g. update) by
                                   the pX slaves while the QC is holding an exclusive lock.
 
    2   Sub-share (SS)             Until 9.2.0.5/6 "select for update"
        Row-share (RS)             Since 9.2.0.1/2 used at opposite end of RI during DML until 11.1
                                   Lock table in row share mode
                                   Lock table in share update mode
 
    3   Sub-exclusive(SX)          Update (also "select for update" from 9.2.0.5/6)
        Row-exclusive(RX)          Lock table in row exclusive mode
                                   Since 11.1 used at opposite end of RI during DML
 
    4   Share (S)                  Lock table in share mode
                                   Can appear during parallel DML with id2 = 1, in the PX slave sessions
                                   Common symptom of "foreign key locking" (missing index) problem
                                   Note that bitmap indexes on the child DON'T address the locking problem
 
    5   share sub exclusive (SSX)  Lock table in share row exclusive mode
        share row exclusive (SRX)  Less common symptom of "foreign key locking" but likely to be more
                                   frequent if the FK constraint is defined with "on delete cascade."
 
    6   Exclusive (X)              Lock table in exclusive mode
                                   create index    -- duration and timing depend on options used
                                   insert /*+ append */ 
}}}
{{{
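-- check last_analyzed, then regather schema stats;
-- no_invalidate=>FALSE invalidates dependent cursors immediately
-- so new plans are picked up on the next parse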

-- check when the schema's tables were last analyzed
select * from dba_tables where owner = 'KARLARAO' order by last_analyzed desc;

-- gather schema stats using the auto sample size and auto degree
BEGIN
  DBMS_STATS.GATHER_SCHEMA_STATS('KARLARAO', 
  options=>'GATHER',
  estimate_percent=>dbms_stats.auto_sample_size,
  degree=>dbms_stats.auto_degree,
  cascade=>TRUE,
  no_invalidate=> FALSE);
END;
/


-- show the current DBMS_STATS preferences
SELECT DBMS_STATS.GET_PREFS('AUTOSTATS_TARGET') AS autostats_target,
       DBMS_STATS.GET_PREFS('CASCADE') AS cascade,
       DBMS_STATS.GET_PREFS('DEGREE') AS degree,
       DBMS_STATS.GET_PREFS('ESTIMATE_PERCENT') AS estimate_percent,
       DBMS_STATS.GET_PREFS('METHOD_OPT') AS method_opt,
       DBMS_STATS.GET_PREFS('NO_INVALIDATE') AS no_invalidate,
       DBMS_STATS.GET_PREFS('GRANULARITY') AS granularity,
       DBMS_STATS.GET_PREFS('PUBLISH') AS publish,
       DBMS_STATS.GET_PREFS('INCREMENTAL') AS incremental,
       DBMS_STATS.GET_PREFS('STALE_PERCENT') AS stale_percent
FROM   dual;
}}}
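To override one of those preferences at the table level, something like this sketch works (MY_TABLE is a placeholder name):
{{{
-- e.g. enable incremental statistics for a single partitioned table
exec DBMS_STATS.SET_TABLE_PREFS('KARLARAO', 'MY_TABLE', 'INCREMENTAL', 'TRUE');
}}}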
<<showtoc>>


Here are some SQL profile and baseline steps that can be done.
 
! To create a profile from a good plan you can do either of the two below:

{{{
Take note that if the predicate has literals, you need to specify force_matching=TRUE so that the literals will be treated as binds
 
 
Create a profile by copying the plan_hash_value from a different SQL_ID (let’s say you rewrote the SQL and you want to inject that new plan into the old SQL_ID) 
https://raw.githubusercontent.com/karlarao/scripts/master/performance/create_sql_profile-goodbad.sql
 
dwbs001s1(sys): @create_sql_profile-goodbad.sql
Enter value for goodsql_id: 22s34g2djar10
Enter value for goodchild_no (0):  <HIT ENTER>
Enter value for badsql_id: 00fnpu38hz98x
Enter value for badchild_no (0): <HIT ENTER>
Enter value for profile_name (PROF_sqlid_planhash):  <HIT ENTER>
Enter value for category (DEFAULT):  <HIT ENTER>
Enter value for force_matching (FALSE): <HIT ENTER>
Enter value for plan_hash_value: <HIT ENTER>
SQL Profile PROF_00fnpu38hz98x_ created.
 
 
Create a profile by copying the plan_hash_value from the same SQL (let’s say the previous good plan_hash_value exists, and you want the SQL_ID to use that)
https://raw.githubusercontent.com/karlarao/scripts/master/performance/copy_plan_hash_value.sql
 
HCMPRD1> @copy_plan_hash_value.sql
Enter value for plan_hash_value to generate profile from (X0X0X0X0): 3609883731  <-- this is the good plan
Enter value for sql_id to attach profile to (X0X0X0X0): c7tadymffd34z
Enter value for child_no to attach profile to (0):
Enter value for category (DEFAULT):
Enter value for force_matching (false):
 
PL/SQL procedure successfully completed.
}}}
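Either way, you can confirm what got created (and whether FORCE_MATCHING took effect) with a quick check like this sketch:
{{{
select name, category, status, force_matching, created
  from dba_sql_profiles
 order by created desc;
}}}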



! After stabilizing the SQL to an acceptable response time, you can create a SQL baseline on the SQL_ID with one or more good plan_hash_values
Example below

{{{
SQL with multiple Execution Plans
 
The following SQLs, especially SQL_ID 93c0q2r788x6c (bad PHV 369685592) and 8txzdvns1jzxm (bad PHV 866924405), would benefit from using a SQL Plan Baseline to exclude the bad PHVs from executing and just use the good ones
 
3d.297. SQL with multiple Execution Plans (DBA_HIST_SQLSTAT)
(screenshot of the report section)
 
This can be done by following the example below:
 
In this example the SQL_ID 93c0q2r788x6c adds Plan Hash Values 1948592153 and 2849155601 to its SQL Plan Baseline so that the optimizer would just choose between the two plans
 
 
-- create the baseline
DECLARE
my_plans pls_integer;
BEGIN
my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id => '93c0q2r788x6c',plan_hash_value=>'1948592153', fixed =>'YES', enabled=>'YES');
END;
/
      
-- add the other plan
DECLARE
my_plans pls_integer;
BEGIN
my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id => '93c0q2r788x6c',plan_hash_value=>'2849155601', fixed =>'YES', enabled=>'YES');
END;
/
-- verify
set lines 200
set verify off
SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE(sql_handle=>'&sql_handle', format=>'basic'));
 
 
 
--######################################################################
-- if PHV 2849155601 is not in the cursor cache then DBMS_SPM.LOAD_PLANS_FROM_SQLSET has to be used
--######################################################################
 
exec dbms_sqltune.create_sqlset(sqlset_name => '93c0q2r788x6c_sqlset_test',description => 'sqlset descriptions');
 
declare
baseline_ref_cur DBMS_SQLTUNE.SQLSET_CURSOR;
begin
open baseline_ref_cur for
select VALUE(p) from table(
DBMS_SQLTUNE.SELECT_WORKLOAD_REPOSITORY(&begin_snap_id, &end_snap_id,'sql_id='||CHR(39)||'&sql_id'||CHR(39)||' and plan_hash_value=2849155601',NULL,NULL,NULL,NULL,NULL,NULL,'ALL')) p;
DBMS_SQLTUNE.LOAD_SQLSET('93c0q2r788x6c_sqlset_test', baseline_ref_cur);
end;
/
 
SELECT NAME,OWNER,CREATED,STATEMENT_COUNT FROM DBA_SQLSET where name='93c0q2r788x6c_sqlset_test';
 
select * from table(dbms_xplan.display_sqlset('93c0q2r788x6c_sqlset_test','&sql_id'));
 
select sql_handle, plan_name, origin, enabled, accepted, fixed, module from dba_sql_plan_baselines;
 
set serveroutput on
declare
my_int pls_integer;
begin
my_int := dbms_spm.load_plans_from_sqlset (
sqlset_name => '93c0q2r788x6c_sqlset_test',
basic_filter => 'sql_id=''93c0q2r788x6c''',
sqlset_owner => 'SYS',
fixed => 'YES',
enabled => 'YES');
DBMS_OUTPUT.PUT_line(my_int);
end;
/
 
select sql_handle, plan_name, origin, enabled, accepted, fixed, module from dba_sql_plan_baselines;
 
 
-- make sure the additional PHV is ACCEPTED and FIXED
 
SET SERVEROUTPUT ON
DECLARE
  l_plans_altered  PLS_INTEGER;
BEGIN
  l_plans_altered := DBMS_SPM.alter_sql_plan_baseline(
    sql_handle      => 'SQL_c244ec33ef56024a',
    plan_name       => 'SQL_PLAN_c4j7c6grpc0kaf8003e90',
    attribute_name  => 'ACCEPTED',
    attribute_value => 'YES');
  DBMS_OUTPUT.put_line('Plans Altered: ' || l_plans_altered);
END;
/
 
set serveroutput on                                          
DECLARE                                                      
  l_plans_altered  PLS_INTEGER;                              
BEGIN                                                         
  l_plans_altered := DBMS_SPM.alter_sql_plan_baseline(       
    sql_handle      => 'SQL_c244ec33ef56024a',           
    plan_name       => 'SQL_PLAN_c4j7c6grpc0kaf8003e90',      
    attribute_name  => 'FIXED',                              
    attribute_value => 'YES');                                                                               
  DBMS_OUTPUT.put_line('Plans Altered: ' || l_plans_altered);
END;                                                          
/  
}}}
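A quick runtime check (sketch) that new executions actually pick up the baseline, via the SQL_PLAN_BASELINE column of V$SQL:
{{{
select sql_id, child_number, plan_hash_value, sql_plan_baseline
  from v$sql
 where sql_id = '93c0q2r788x6c';
}}}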



! You can verify that the SQL_ID is picking up the good plan by using dplan or dplanx
<<<
https://raw.githubusercontent.com/karlarao/scripts/master/performance/dplan.sql
rac-aware https://raw.githubusercontent.com/karlarao/scripts/master/performance/dplanx.sql
<<<



! Also read up on how to migrate SQL baselines across databases, because you need those baselines propagated to all your environments.
<<<
There’s also a tool inside SQLTXPLAIN (search for it in MOS) called coe_xfr_sql_profile https://raw.githubusercontent.com/karlarao/scripts/master/performance/coe_xfr_sql_profile_12c.sql
You run it against a SQL_ID and PLAN_HASH_VALUE and it generates a sql file; when you run that file on another environment it creates a sql profile for that SQL_ID and PLAN_HASH_VALUE combination (see the usage sketch below).
So it becomes a backup of that SQL's performance, or another way of migrating/backing up profiles across environments.
 
In summary, if you have full control over the code, I recommend rewriting the SQL or putting in hints (but not too many) so it behaves optimally. This way the fix gets pushed to your code base, tracked in your git/version control repo, and propagated across environments.
You can also create a baseline on top of the rewrite or hints, but make sure it is maintained across environments.
<<<
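Hypothetical usage sketch of the coe script (the sql_id and plan_hash_value here are illustrative):
{{{
-- on the source database: generates coe_xfr_sql_profile_<sql_id>_<plan_hash_value>.sql
SQL> @coe_xfr_sql_profile.sql 0vzyv2wsr2apz 3609883731

-- on the target database: creates the profile for that sql_id/plan combination
SQL> @coe_xfr_sql_profile_0vzyv2wsr2apz_3609883731.sql
}}}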
















.
http://kerryosborne.oracle-guy.com/2009/07/how-to-attach-a-sql-profile-to-a-different-statement/

HOWTO: bad plan to good plan switch http://www.evernote.com/shard/s48/sh/308af73e-47bc-4598-ab31-77ab74cbbed9/7acc32b91ebb64639116d3931a4e9935

Database Development guide -> 2 Connection Strategies for Database Applications
https://docs.oracle.com/en/database/oracle/oracle-database/19/adfns/connection_strategies.html#GUID-90D1249D-38B8-47BF-9829-BA0146BD814A


https://docs.oracle.com/database/122/ADFNS/connection_strategies.htm#ADFNS-GUID-90D1249D-38B8-47BF-9829-BA0146BD814A
<<showtoc>>


! redo apply

!! Without Real Time Apply (RTA) on standby database
{{{
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION;
}}}

!! With Real Time Apply (RTA)
If you configured your standby redo logs, you can start real-time apply using the following command:
{{{
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;
}}}

!! Stopping Redo Apply on standby database
To stop Redo Apply, issue the following SQL statement:
{{{
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;
}}}



! monitor redo apply 

!! Last sequence received and applied
You can use this (important) SQL to check whether your physical standby is in Sync with the Primary:
{{{
SELECT ARCH.THREAD# "Thread", ARCH.SEQUENCE# "Last Sequence Received", APPL.SEQUENCE# "Last Sequence Applied", (ARCH.SEQUENCE# - APPL.SEQUENCE#) "Difference"
FROM
(SELECT THREAD# ,SEQUENCE# FROM V$ARCHIVED_LOG WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$ARCHIVED_LOG GROUP BY THREAD#)) ARCH,
(SELECT THREAD# ,SEQUENCE# FROM V$LOG_HISTORY WHERE (THREAD#,FIRST_TIME ) IN (SELECT THREAD#,MAX(FIRST_TIME) FROM V$LOG_HISTORY GROUP BY THREAD#)) APPL
WHERE
ARCH.THREAD# = APPL.THREAD#;
}}}
{{{
-- redo transport services 
ALTER SESSION SET NLS_DATE_FORMAT ='DD-MON-RR HH24:MI:SS';
SELECT INST_ID, SEQUENCE#, APPLIED, FIRST_TIME, NEXT_TIME FROM GV$ARCHIVED_LOG ORDER BY 2,1,4;

ALTER SYSTEM SWITCH LOGFILE;
 
SELECT INST_ID, SEQUENCE#, APPLIED, FIRST_TIME, NEXT_TIME FROM GV$ARCHIVED_LOG ORDER BY 2,1,4;
}}}

!! on standby, get data guard stats 
{{{
set linesize 120
col START_TIME format a20
col ITEM format a20
SELECT TO_CHAR(START_TIME, 'DD-MON-RR HH24:MI:SS') START_TIME, ITEM , SOFAR, UNITS
FROM V$RECOVERY_PROGRESS
WHERE ITEM IN ('Active Apply Rate', 'Average Apply Rate', 'Redo Applied');
}}}


!! on standby, retrieve the transport lag and the apply lag
{{{
-- Transport lag represents the data that will be lost in case of disaster
col NAME for a13
col VALUE for a13
col UNIT for a30
set LINES 132
SELECT NAME, VALUE, UNIT, TIME_COMPUTED
FROM V$DATAGUARD_STATS WHERE NAME IN ('transport lag', 'apply lag');
}}}


!! Standby database process status
{{{
select distinct process, status, thread#, sequence#, block#, blocks from v$managed_standby ;
}}}


If using real-time apply:
{{{
select TYPE, ITEM, to_char(TIMESTAMP, 'DD-MON-YYYY HH24:MI:SS') from v$recovery_progress where ITEM='Last Applied Redo';
or 
select recovery_mode from v$archive_dest_status where dest_id=1;

}}}



! others 
{{{
select * from v$managed_standby;
select * from v$log;
select * from v$standby_log;
select * from DBA_REGISTERED_ARCHIVED_LOG;
select * from V$ARCHIVE;
select * from V$PROXY_ARCHIVEDLOG;
select * from V$ARCHIVED_LOG;
select * from V$ARCHIVE_GAP;
select * from V$ARCHIVE_PROCESSES;
select * from V$ARCHIVE_DEST;
select * from V$ARCHIVE_DEST_STATUS;
select * from V$PROXY_ARCHIVELOG_DETAILS;
select * from V$BACKUP_ARCHIVELOG_DETAILS;
select * from V$BACKUP_ARCHIVELOG_SUMMARY;
select * from V$PROXY_ARCHIVELOG_SUMMARY;

----------------------------
-- MONITOR RECOVERY
----------------------------

-- Monitoring the Process Activities
     -- The V$MANAGED_STANDBY view on the standby database site shows you the activities performed by both redo transport and Redo Apply processes in a Data Guard environment. The CLIENT_P column in the output of the following query identifies the corresponding primary database process.
SELECT PROCESS, CLIENT_PROCESS, SEQUENCE#, STATUS FROM V$MANAGED_STANDBY;

-- Determining the Progress of Redo Apply
     -- The V$ARCHIVE_DEST_STATUS view on either a primary or standby database site provides you information such as the online redo log files that were archived, the archived redo log files that are applied, and the log sequence numbers of each. The following query output shows the standby database is two archived redo log files behind in applying the redo data received from the primary database. To determine if real-time apply is enabled, query the RECOVERY_MODE column of the V$ARCHIVE_DEST_STATUS view. It will contain the value MANAGED REAL TIME APPLY when real-time apply is enabled
SELECT ARCHIVED_THREAD#, ARCHIVED_SEQ#, APPLIED_THREAD#, APPLIED_SEQ#, RECOVERY_MODE FROM V$ARCHIVE_DEST_STATUS;

-- Determining the Location and Creator of the Archived Redo Log Files
     -- the location of the archived redo log, which process created the archived redo log, redo log sequence number of each archived redo log file, when each log file was archived, and whether or not the archived redo log file was applied
set lines 300
col name format a80
alter session set NLS_DATE_FORMAT='DD-MON-YYYY HH24:MI:SS';
SELECT NAME, CREATOR, SEQUENCE#, APPLIED, COMPLETION_TIME FROM V$ARCHIVED_LOG where applied = 'NO' order by 3;

-- Viewing Database Incarnations Before and After OPEN RESETLOGS
SELECT RESETLOGS_ID,THREAD#,SEQUENCE#,STATUS,ARCHIVED FROM V$ARCHIVED_LOG ORDER BY RESETLOGS_ID,SEQUENCE# ;
SELECT INCARNATION#, RESETLOGS_ID, STATUS FROM V$DATABASE_INCARNATION;

-- Viewing the Archived Redo Log History
     -- The V$LOG_HISTORY on the standby site shows you a complete history of the archived redo log, including information such as the time of the first entry, the lowest SCN in the log, the highest SCN in the log, and the sequence numbers for the archived redo log files.
SELECT FIRST_TIME, FIRST_CHANGE#, NEXT_CHANGE#, SEQUENCE# FROM V$LOG_HISTORY;

-- Determining Which Log Files Were Applied to the Standby Database
select max(sequence#), applied, thread# from v$archived_log group by applied, thread# order by 1;

-- Determining Which Log Files Were Not Received by the Standby Site
--SELECT LOCAL.THREAD#, LOCAL.SEQUENCE#, local.applied FROM 
--(SELECT THREAD#, SEQUENCE#, applied FROM V$ARCHIVED_LOG WHERE DEST_ID=1) LOCAL 
 --WHERE LOCAL.SEQUENCE# NOT IN 
--(SELECT SEQUENCE# FROM V$ARCHIVED_LOG WHERE DEST_ID=2 AND 
--THREAD# = LOCAL.THREAD#); 


------------------------------------------------------------------------------------
-- Monitoring Log Apply Services on Physical Standby Databases
------------------------------------------------------------------------------------

-- Accessing the V$DATABASE View
     -- Issue the following query to show information about the protection mode, the protection level, the role of the database, and switchover status:
SELECT DATABASE_ROLE, DB_UNIQUE_NAME INSTANCE, OPEN_MODE, PROTECTION_MODE, PROTECTION_LEVEL, SWITCHOVER_STATUS FROM V$DATABASE;
     -- Issue the following query to show information about fast-start failover:
SELECT FS_FAILOVER_STATUS FSFO_STATUS, FS_FAILOVER_CURRENT_TARGET TARGET_STANDBY, FS_FAILOVER_THRESHOLD THRESHOLD, FS_FAILOVER_OBSERVER_PRESENT OBS_PRES FROM V$DATABASE;

-- Accessing the V$MANAGED_STANDBY Fixed View
     -- Query the physical standby database to monitor Redo Apply and redo transport services activity at the standby site. In the example output from the docs, an RFS process completed archiving a redo log file with sequence number 947, Redo Apply is actively applying the archived redo log file with sequence number 946, and the recovery operation is currently recovering block number 10 of that 72-block archived redo log file.
SELECT PROCESS, STATUS, THREAD#, SEQUENCE#, BLOCK#, BLOCKS FROM V$MANAGED_STANDBY;

-- Accessing the V$ARCHIVE_DEST_STATUS Fixed View
     -- To determine if real-time apply is enabled, query the RECOVERY_MODE column of the V$ARCHIVE_DEST_STATUS view. It will contain the value MANAGED REAL TIME APPLY when real-time apply is enabled
SELECT ARCHIVED_THREAD#, ARCHIVED_SEQ#, APPLIED_THREAD#, APPLIED_SEQ#, RECOVERY_MODE FROM V$ARCHIVE_DEST_STATUS;

-- Accessing the V$ARCHIVED_LOG Fixed View
     -- The V$ARCHIVED_LOG fixed view on the physical standby database shows all the archived redo log files received from the primary database. This view is only useful after the standby site starts receiving redo data; before that time, the view is populated by old archived redo log records generated from the primary control file.
SELECT REGISTRAR, CREATOR, THREAD#, SEQUENCE#, FIRST_CHANGE#, NEXT_CHANGE# FROM V$ARCHIVED_LOG;

-- Accessing the V$LOG_HISTORY Fixed View
     -- Query the V$LOG_HISTORY fixed view on the physical standby database to show all the archived redo log files that were applied
SELECT THREAD#, SEQUENCE#, FIRST_CHANGE#, NEXT_CHANGE# FROM V$LOG_HISTORY;

-- Accessing the V$DATAGUARD_STATUS Fixed View
     -- The V$DATAGUARD_STATUS fixed view displays events that would typically be triggered by any message to the alert log or server process trace files.
SELECT MESSAGE FROM V$DATAGUARD_STATUS;


}}}








! references
https://www.oracle-scripts.net/dataguard-management/
[[Coderepo]]

[[Awk]], [[grep]], [[sed]], [[sort, uniq]]
[[BashShell]]
[[PowerShell]]
[[Perl]]
[[Python]]

[[PL/SQL]]
[[x R - Datacamp]] [[R maxym]]

[[HTML5]]
[[Javascript]] [[node.js]]
[[GoLang]]

[[Java]]
[[Machine Learning]]

viz and reporting in [[Tableau]]

[[noSQL]]




<<showtoc>> 

! learning path 
https://training.looker.com/looker-development-foundations  enroll here first ("Getting Started with LookML"); you will be redirected to -> https://learn.looker.com/projects/learn_intro/documents/home.md
https://training.looker.com/looker-development-foundations/334816


! watch videos
!! Lynda
https://www.linkedin.com/learning/looker-first-look/welcome

!! Business User Video Tutorials
https://docs.looker.com/video-library/exploring-data

!! Developer Video Tutorials
https://docs.looker.com/video-library/data-modeling


! official doc 
https://docs.looker.com/

!! release notes 
https://docs.looker.com/relnotes/intro

!! development 
!!! what is LookML 
https://docs.looker.com/data-modeling/learning-lookml/what-is-lookml
!!! Steps to Learning LookML
https://docs.looker.com/data-modeling/learning-lookml

!!! Retrieve and Chart Data
https://docs.looker.com/exploring-data/retrieve-chart-intro


!! admin
!!! Clustering 
https://docs.looker.com/setup-and-management/tutorials/clustering




! comparison vs Tableau 
https://looker.com/compare/looker-vs-tableau   "a trusted data model" 
https://webanalyticshub.com/tableau-looker-domo/
[img(80%,80%)[ https://user-images.githubusercontent.com/3683046/90553425-5bb73780-e162-11ea-9b74-038d47f3341f.png]]
https://www.itbusinessedge.com/business-intelligence/looker-vs.-tableau.html
https://www.quora.com/To-anyone-that-has-used-Looker-how-would-you-compare-it-to-Tableau-in-terms-of-price-capabilities



! x



! LOD vs Subtotals, row totals, and Table Calculations
https://discourse.looker.com/t/tableau-lod-equivalent-custom-dimension/18641
https://help.looker.com/hc/en-us/articles/360023635234-Subtotals-with-Table-Calculations
https://docs.looker.com/exploring-data/visualizing-query-results/table-next-options
https://help.tableau.com/current/pro/desktop/en-us/calculations_calculatedfields_lod_fixed.htm







<<showtoc>>

! h2o 
https://www.h2o.ai/products/h2o-driverless-ai/

! mateverse 
http://www.matelabs.in/#home
http://docs.mateverse.com/user-guide/getting-started/

HOWTO: create a manual SQL Profile https://www.evernote.com/shard/s48/sh/f1bda7e9-2ced-4794-8c5e-32b1beac567b/96cd95cebb8f3cad0329833d7aa4a328


http://kerryosborne.oracle-guy.com/2010/07/sqlt-coe_xfr_sql_profilesql/
http://kerryosborne.oracle-guy.com/2010/11/how-to-lock-sql-profiles-generated-by-sql-tuning-advisor/
http://bryangrenn.blogspot.com/2010/12/sql-profiles.html
Oracle Analytics Cloud: Augmented Analytics at Scale https://www.oracle.com/business-analytics/comparison-chart.html
https://docs.oracle.com/en/middleware/bi/analytics-server/whats-different-oas/index.html#OASWD-GUID-C907A4B0-FAFD-4F54-905C-D6FCA519C262
https://www.linkedin.com/pulse/should-i-run-performance-test-part-ci-pipeline-aniket-gadre/  <- ANSWER IS YES!
<<<
I like Wilson's answer 

Aniket, the assumptions underlying your statements makes sense ... if this was still 1995. This is exactly what I cover in my PerfGuild talk April 8th https://automationguild.com/performance 1) Perf tests just to determine "Pass or Fail" is out of fashion now because performance testers should be more valuable than "traffic cops writing tickets", and provide tools and advice to developers. Why? So that performance issues are identified early, while that code is still fresh in the mind of developers rather than in production when changes are too expensive to change. 2) monitoring systems can be brought up automatically in the pipeline. Ask your APM vendor to show you how. 3) in my experience, many performance issues are evident within 10 minutes if you can ramp up quickly enough. 4) same. 5) the point of CI/CD is to provide coverage of potential risks. Companies pay us the big bucks for us to predict issues, not to be reactive chumps. 6) Please get back in your time machine and join us in the 21st century. There are cloud environments now which spin up servers for a short time. 6 again) memory leaks are not the only reason for perf tests. Perf tests are now done to tune configurations since companies are now paying for every cycle used rather than having a fixed number of machines. It's time to upgrade your assumptions.
<<<

references 
https://alexanderpodelko.com/docs/Continuous_Performance_Testing_CMG17.pdf
https://www.qlik.com/us/products/qlikview/personal-edition
https://community.qlik.com/thread/36516
https://www.qlik.com/us/solutions/developers
http://branch.qlik.com/#!/project

https://app.pluralsight.com/library/courses/qlikview-analyzing-data/table-of-contents
{{{
    forecast step by step: 
        eyeball the data
            raw data    
            data exploration
            periodicity
            ndiff (how much we should difference)
            decomposition - determine the series components (trend, seasonality etc.)
                x = decompose(AirPassengers, "additive")
                mymodel = x$trend + x$seasonal; plot(mymodel)           # just the trend and seasonal data
                mymodel2 = AirPassengers - x$seasonal ; plot(mymodel2)  # orig data minus the seasonal data
            seasonplot 
        process data
            create xts object
            create a ts object from xts (coredata, index, frequency/periodicity)
            partition data train,validation sets        
        graph it 
            tsoutliers (outlier detection) , anomaly detection (AnomalyDetection package)
            log scale data
            add trend line (moving average (centered - ma and trailing - rollmean) and simple exponential smoothing (ets))
        performance evaluation
            Type of seasonality assessed graphically (decompose - additive,etc.)
            detrend and seasonal adjustment (smoothing/deseasonalizing)
            lag-1 diff graph
            forecast residual graph
            forecast error graph
            acf/pacf (Acf, tsdisplay)
                raw data
                forecast residual
                lag-1 diff
            autocorrelation 
                fUnitRoots::adfTest() - time series data is non-stationary (p value above 0.05)
                tsdisplay(diff(data_ts, lag=1)) - ACF displays there's no autocorrelation going on (no significant lags out of the 95% confidence interval, the blue line) 
            accuracy
            cross validation https://github.com/karlarao/forecast_examples/tree/master/cross_validation/cvts_tscvexample_investigation
            forecast of training
            forecast of training + validation + future (steps ahead)       
        forecast result
            display prediction intervals (forecast quantile)
            display the actual and forecasted series
            displaying the forecast errors
            distribution of forecast errors
}}}
https://saplumira.com/
https://www.quora.com/Which-data-visualization-tool-is-better-SAP-Lumira-or-Tableau
https://www.prokarma.com/blog/2014/08/20/look-sap-lumira-and-lumira-cloud
https://blogs.sap.com/2014/09/12/a-lumira-extension-to-acquire-twitter-data/
https://www.sap.com/developer/tutorials/lumira-initial-data-acquisition.html
http://visualbi.com/blogs/sap-lumira-discovery/connect-sap-hana-bw-universe-sap-lumira-discovery/
<<showtoc>>

! standards for business communications
[img(40%,40%)[ https://i.imgur.com/Y6Ekegn.png]]

! download and documentation
Alternate download site across versions https://licensing.tableausoftware.com/esdalt/
Release notes across versions http://www.tableausoftware.com/support/releases?signin=650fb8c2841d145bc3236999b96fd7ab
Official doc http://www.tableausoftware.com/community/support/documentation-old
knowledgebase http://kb.tableausoftware.com/
manuals http://www.tableausoftware.com/support/manuals
http://www.tableausoftware.com/new-features/6.0
http://www.tableausoftware.com/new-features/7.0
http://www.tableausoftware.com/new-features/8.0
http://www.tableausoftware.com/fast-pace-innovation  <-- timeline across versions

''Tableau - Think Data Thursday Video Library'' http://community.tableausoftware.com/community/groups/tdt-video-library
''Tableau Style Guide'' https://github.com/davidski/dataviz/blob/master/Tableau%20Style%20Guide.md
''Software Development Lifecycle With Tableau'' https://github.com/russch/tableau-sdlc-sample
''How to share data with a statistician'' https://github.com/davidski/datasharing


! license 
https://customer-portal.tableau.com/s/
upgrading tableau desktop http://kb.tableausoftware.com/articles/knowledgebase/upgrading-tableau-desktop
offline activation http://kb.tableausoftware.com/articles/knowledgebase/offline-activation
renewal cost for desktop and personal http://www.triadtechpartners.com/wp-content/uploads/Tableau-GSA-Price-List-April-2013.pdf
renewal FAQ http://www.tableausoftware.com/support/customer-success
eula http://mkt.tableausoftware.com/files/eula.pdf


! viz types
* treemap http://www.tableausoftware.com/new-features/new-view-types
* bubble chart
* word cloud


! connectors
''Oracle Driver''
there’s an Oracle Driver so you can connect directly to a database http://downloads.tableausoftware.com/drivers/oracle/desktop/tableau7.0-oracle-driver.msi
http://www.tableausoftware.com/support/drivers
http://kb.tableausoftware.com/articles/knowledgebase/oracle-connection-errors


! HOWTOs
http://www.tableausoftware.com/learn/training  <-- LOTS OF GOOD STUFF!!!
http://community.tableausoftware.com/message/242749#242749 <-- Johan's Ideas Collections

''parameters'' http://www.youtube.com/watch?v=wvF7gAV82_c

''calculated fields'' http://www.youtube.com/watch?v=FpppiLBdtGc, http://www.tableausoftware.com/table-calculations. http://kb.tableausoftware.com/articles/knowledgebase/combining-date-and-time-single-field

''scatter plots'' http://www.youtube.com/watch?v=RYMlIY4nT9k, http://downloads.tableausoftware.com/quickstart/feature-guides/trend_lines.pdf

''getting the r2'',''trendlines'' http://kb.tableausoftware.com/articles/knowledgebase/statistics-finding-correlation, http://onlinehelp.tableausoftware.com/v7.0/pro/online/en-us/trendlines_model.html

''forecasting'' http://tombrownonbi.blogspot.com/2010/07/simple-forecasting-using-tableau.html, resolving forecast errors http://onlinehelp.tableausoftware.com/current/pro/online/en-us/forecast_resolve_errors.html

tableau forecast model - Holt-Winters exponential smoothing
http://onlinehelp.tableausoftware.com/v8.1/pro/online/en-us/help.html#forecast_describe.html

Method for Creating Multipass Aggregations Using Tableau Server  <-- doing various statistical methods in tableau
http://community.tableausoftware.com/message/181143#181143

Monte Carlo in Tableau
http://drawingwithnumbers.artisart.org/basic-monte-carlo-simulations-in-tableau/

''dashboards'' http://community.tableausoftware.com/thread/109753?start=0&tstart=0, http://tableaulove.tumblr.com/post/27627548817/another-method-to-update-data-from-inside-tableau, http://ryrobes.com/tableau/tableau-phpgrid-an-almost-instant-gratification-data-entry-tool/

''dashboard size'' http://kb.tableausoftware.com/articles/knowledgebase/fixed-size-dashboard

''dashboard multiple sources'' http://kb.tableausoftware.com/articles/knowledgebase/multiple-sources-one-worksheet

''reference line weekend highlight , reference line weekend in tableau'' https://community.tableau.com/thread/123456 (shading in weekends), http://www.evolytics.com/blog/tableau-hack-how-to-highlight-a-dimension/ , https://discussions.apple.com/thread/1919024?tstart=0 , https://3danim8.wordpress.com/2013/11/18/using-tableau-buckets-to-compare-weekday-to-weekend-data/ , 	http://onlinehelp.tableau.com/current/pro/desktop/en-us/actions_highlight_advanced.html , https://community.tableau.com/thread/120260 (How can I add weekend reference lines) 	

''reference line'', ''reference band'' http://onlinehelp.tableausoftware.com/v7.0/pro/online/en-us/reflines_addlines.html, http://vizwiz.blogspot.com/2012/09/tableau-tip-adding-moving-reference.html, http://onlinehelp.tableausoftware.com/v6.1/public/online/en-us/i1000860.html, http://kb.tableausoftware.com/articles/knowledgebase/independent-field-reference-line,  http://community.tableausoftware.com/thread/127009?start=0&tstart=0, http://community.tableausoftware.com/thread/121369

''custom reference line - based on a measure field''
http://community.tableausoftware.com/message/275150 <-- drag calculated field on the marks area

''dynamic reference line''
http://community.tableausoftware.com/thread/124998, http://community.tableausoftware.com/thread/105433, http://www.interworks.com/blogs/iwbiteam/2012/04/09/adding-different-reference-lines-tableau

''percentile on reference line''
https://community.tableau.com/thread/108974

''dynamic parameter''
http://drawingwithnumbers.artisart.org/creating-a-dynamic-parameter-with-a-tableau-data-blend/

''thresholds'' Multiple thresholds for different cells on one worksheet http://community.tableausoftware.com/thread/122285

''email and alerting'' http://www.metricinsights.com/data-driven-alerting-and-email-notifications-for-tableau/, http://community.tableausoftware.com/thread/124411

''templates'' http://kb.tableausoftware.com/articles/knowledgebase/replacing-data-source, http://www.tableausoftware.com/public/templates/schools, http://wannabedatarockstar.blogspot.com/2013/06/create-default-tableau-template.html, http://wannabedatarockstar.blogspot.co.uk/2013/04/colour-me-right.html

''click to filter'' http://kb.tableausoftware.com/articles/knowledgebase/combining-sheet-links-and-dashboards

''tableau worksheet actions'' http://community.tableausoftware.com/thread/138785

''date functions and calculations'' http://onlinehelp.tableausoftware.com/current/pro/online/en-us/functions_functions_date.html, http://pharma-bi.com/2011/04/fiscal-period-calculations-in-tableau-2/

''date dimension'' http://blog.inspari.dk/2013/08/27/making-the-date-dimension-ready-for-tableau/

''Date Range filter and Default date filter''
google search https://www.google.com/search?q=tableau+date+range+filter&oq=tableau+date+range+&aqs=chrome.2.69i57j0l5.9028j0j7&sourceid=chrome&es_sm=119&ie=UTF-8
Creating a Filter for Start and End Dates Using Parameters http://kb.tableausoftware.com/articles/howto/creating-a-filter-for-start-and-end-dates-parameters
Tableau Tip: Showing all dates on a date filter after a Server refresh http://vizwiz.blogspot.com/2014/01/tableau-tip-showing-all-dates-on-date.html
Tableau Tip: Default a date filter to the last N days http://vizwiz.blogspot.com/2013/09/tableau-tip-default-date-filter-to-last.html

''hide NULL values'' http://reports4u.co.uk/tableau-hide-null-values/, http://reports4u.co.uk/tableau-hide-values-quick-filter/, http://kb.tableausoftware.com/articles/knowledgebase/replacing-null-literalsclass, http://kb.tableausoftware.com/articles/knowledgebase/null-values <-- good stuff

''logical functions - if then else, case when then'' http://onlinehelp.tableausoftware.com/v7.0/pro/online/en-us/functions_functions_logical.html, http://kb.tableausoftware.com/articles/knowledgebase/understanding-logical-calculations, http://onlinehelp.tableausoftware.com/v6.1/public/online/en-us/id2611b7e2-acb6-467e-9f69-402bba5f9617.html

''tableau working with sets''
https://www.tableausoftware.com/public/blog/2013/03/powerful-new-tools
http://onlinehelp.tableausoftware.com/v6.1/public/online/en-us/i1201140.html
http://community.tableausoftware.com/thread/136845 <-- good example on filters
https://www.tableausoftware.com/learn/tutorials/on-demand/sets?signin=a8f73d84a4b046aec26bc955854a381b <-- GOOD STUFF video tutorial 
IOPS SIORS - Combining several measures in one dimension - http://tableau-ext.hosted.jivesoftware.com/thread/137680

''tableau groups''
http://vizwiz.blogspot.com/2013/05/tableau-tip-creating-primary-group-from.html
http://www.tableausoftware.com/learn/tutorials/on-demand/grouping?signin=f98f9fd64dcac0e7f2dc574bca03b68c  <-- VIDEO tutorial 

''Random Number generation in tableau'' 
http://community.tableausoftware.com/docs/DOC-1474

''Calendar view viz''
http://thevizioneer.blogspot.com/2014/04/day-1-how-to-make-calendar-in-tableau.html
http://vizwiz.blogspot.com/2012/05/creating-interactive-monthly-calendar.html
http://vizwiz.blogspot.com/2012/05/how-common-is-your-birthday-find-out.html

''Custom SQL''
http://kb.tableausoftware.com/articles/knowledgebase/customizing-odbc-connections
http://tableaulove.tumblr.com/post/20781994395/tableau-performance-multiple-tables-or-custom-sql
http://bensullins.com/leveraging-your-tableau-server-to-create-large-data-extracts/
http://tableaulove.tumblr.com/post/18945358848/how-to-publish-an-unpopulated-tableau-extract
http://onlinehelp.tableausoftware.com/v8.1/pro/online/en-us/customsql.html
http://onlinehelp.tableausoftware.com/v7.0/pro/online/en-us/customsql.html
Using Raw SQL Functions http://kb.tableausoftware.com/articles/knowledgebase/raw-sql
http://community.tableausoftware.com/thread/131017

''Geolocation'' 
http://tableaulove.tumblr.com/post/82299898419/ip-based-geo-location-in-tableau-new-now-with-more
http://dataremixed.com/2014/08/from-gps-to-viz-hiking-washingtons-trails/
https://public.tableausoftware.com/profile/timothyvermeiren#!/vizhome/TimothyAllRuns/Dashboard

''tableau - import custom geocoding data - world map''
https://community.tableau.com/thread/200454
https://www.youtube.com/watch?v=nVrCH-PWM10
https://www.youtube.com/watch?v=IDyMMPiNVGw
https://onlinehelp.tableau.com/current/pro/online/mac/en-us/custom_geocoding.html
https://onlinehelp.tableau.com/current/pro/online/mac/en-us/maps_customgeocode_importing.html

''tableau perf analyzer''
http://www.interworks.com/services/business-intelligence/tableau-performance-analyzer

''tableau and python''
http://bensullins.com/bit-ly-data-to-csv-for-import-to-tableau/

''Visualize and Understand Tableau Functions''
https://public.tableausoftware.com/profile/tyler3281#!/vizhome/EVERYONEWILLUSEME/MainScreen

''tableau workbook on github''
http://blog.pluralsight.com/how-to-store-your-tableau-server-workbooks-on-github

''tableau radar chart / spider graph''
https://wikis.utexas.edu/display/tableau/How+to+create+a+Radar+Chart

''maps animation''
http://www.tableausoftware.com/public/blog/2014/08/capturing-animation-tableau-maps-2574?elq=d12cbf266b1342e68ea20105369371cf


''if in list'' http://community.tableausoftware.com/ideas/1870, http://community.tableausoftware.com/ideas/1500
<<<
{{{
IF 
trim([ENV])='x07d' OR 
trim([ENV])='x07p'  
THEN 'AML' 
ELSE 'OTHER' END


IF 
TRIM([ENV]) = 'x07d' THEN 'AML' ELSEIF 
TRIM([ENV]) = 'x07p' THEN 'AML' 
ELSE 'OTHER' END


IF [Processor AMD] THEN 'AMD'
ELSEIF [Processor Intel] THEN 'INTEL'
ELSEIF [Processor IBM Power] THEN 'IBM Power'
ELSEIF [Processor SPARC] THEN 'SPARC'
ELSE 'Other' END


IF contains('x11p,x08p,x28p',trim([ENV]))=true THEN 'PROD' 
ELSEIF contains('x29u,x10u,x01u',trim([ENV]))=true THEN 'UAT' 
ELSEIF contains('x06d,x07d,x12d',trim([ENV]))=true THEN 'DEV' 
ELSEIF contains('x06t,x14t,x19t',trim([ENV]))=true THEN 'TEST' 
ELSE 'OTHER' END

[Snap Id] = (150106) or
[Snap Id] = (150107) or
[Snap Id] = (150440) or
[Snap Id] = (150441)
}}}
<<<

''calculated field filter'' http://stackoverflow.com/questions/30753330/tableau-using-calculated-fields-for-filtering-dimensions, http://breaking-bi.blogspot.com/2013/03/creating-table-calculations-on-values.html
<<<
{{{
DRW
SUM(IF contains('CD_IO_RQ_R_LG_SEC-CD,CD_IO_RQ_R_SM_SEC-CD,CD_IO_RQ_W_LG_SEC-CD,CD_IO_RQ_W_SM_SEC-CD',trim([Metric]))=true THEN 1 END) > 0

CD_IO_RQ_R_LG_SEC-CD,0.21
CD_IO_RQ_R_SM_SEC-CD,0.62
CD_IO_RQ_W_LG_SEC-CD,2.14
CD_IO_RQ_W_SM_SEC-CD,5.69
}}}
<<<

''What is the difference between Tableau Server and Tableau Server Worker?'' http://community.tableausoftware.com/thread/109121

''tableau vs spotfire vs qlikview'' http://community.tableausoftware.com/thread/116055, https://apandre.wordpress.com/2013/09/13/tableau-8-1-vs-qlikview-11-2-vs-spotfire-5-5/ , http://butleranalytics.com/spotfire-tableau-and-qlikview-in-a-nutshell/ , https://www.trustradius.com/compare-products/tableau-desktop-vs-tibco-spotfire

''twbx for sending workbooks'' http://kb.tableausoftware.com/articles/knowledgebase/sending-packaged-workbook

''YOY moving average'' http://daveandrade.com/2015/01/25/tableau-table-calcs-how-to-calculate-a-year-over-year-4-week-moving-average/

''json'' http://community.tableau.com/ideas/1276

''tableau reverse engineering'' http://www.theinformationlab.co.uk/2015/01/22/learning-tableau-reverse-engineering/

''filter partial highlight'' https://community.tableau.com/thread/143761 , http://breaking-bi.blogspot.com/2014/03/partial-highlighting-on-charts-in.html

''Window functions'' https://community.tableau.com/thread/144402, http://kb.tableau.com/articles/knowledgebase/functional-differences-olap-relational, http://www.lunametrics.com/blog/2015/09/17/yoy-bar-charts-in-tableau/, http://breaking-bi.blogspot.com/2013/03/working-with-window-calculations-and.html, https://www.interworks.com/blog/tmccullough/2014/09/29/5-tableau-table-calculation-functions-you-need-know, http://breaking-bi.blogspot.com/2013/04/using-lookup-function-in-tableau.html
{{{
LOOKUP(sum([Net]),-1)
}}}

''Count only the numbers that are positive, and get the percentage'' 
{{{
(COUNT(IF [Diff] >= 0 THEN [Diff] END) / COUNT([Diff]))*100
}}}

''Add category for stock Position Type'' 
{{{
IF contains('Buy,Sell',trim([Type 1]))=true THEN 'Long' 
ELSEIF contains('Buy to Cover,Sell Short',trim([Type 1]))=true THEN 'Short' 
ELSE 'OTHER' END
}}}

''updated processor group filter''
{{{
IF contains(lower(trim([Processor])),'amd')=true THEN 'AMD' 
ELSEIF contains(lower(trim([Processor])),'intel')=true THEN 'INTEL' 
ELSEIF contains(lower(trim([Processor])),'power')=true THEN 'IBM' 
ELSEIF contains(lower(trim([Processor])),'sparc')=true THEN 'SPARC' 
ELSE 'OTHER' END
}}}

''storage cell dimension for x2 and x3 cells on the same diskgroup - useful for destage IOs''
{{{
IF 
trim([Cellname])='192.168.10.9' OR 
trim([Cellname])='192.168.10.10' OR
trim([Cellname])='192.168.10.11' OR
trim([Cellname])='192.168.10.12' OR
trim([Cellname])='192.168.10.13' OR
trim([Cellname])='192.168.10.14' OR
trim([Cellname])='192.168.10.15' OR
trim([Cellname])='192.168.10.16' OR
trim([Cellname])='192.168.10.17' OR
trim([Cellname])='192.168.10.18' OR
trim([Cellname])='192.168.10.19' OR
trim([Cellname])='192.168.10.20' OR
trim([Cellname])='192.168.10.21' OR
trim([Cellname])='192.168.10.22' 
THEN 'x2' 
elseif 
trim([Cellname])='192.168.10.38' OR 
trim([Cellname])='192.168.10.39' OR
trim([Cellname])='192.168.10.40' OR
trim([Cellname])='192.168.10.41' OR
trim([Cellname])='192.168.10.42' OR
trim([Cellname])='192.168.10.43' OR
trim([Cellname])='192.168.10.44' OR
trim([Cellname])='192.168.10.45' OR
trim([Cellname])='192.168.10.46' OR
trim([Cellname])='192.168.10.47' OR
trim([Cellname])='192.168.10.48' OR
trim([Cellname])='192.168.10.49' OR
trim([Cellname])='192.168.10.50' OR
trim([Cellname])='192.168.10.51' 
THEN 'x3'
else 
'other'
end
}}}

! highlight SQL 

{{{
IF contains(lower(trim([Sql Id])),'069k4ppu1n1nc')=true THEN [Sql Id]
ELSE 'OTHER' END
}}}

{{{
IF contains(lower(trim([Sql Id])),'069k4ppu1n1nc')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'0vzyv2wsr2apz')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'0xsz99mn2nuvc')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'0zwcr39tvssxj')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'1d9qrkfvh78bt')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'1zbk54du40dnu')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'2xjwy1jvu31xu')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'2ywv61bm22pw7')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'3fn33utt2ptns')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'3knptw3bxf1c9')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'3qvc497pz6hvp')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'3tpznswf2f7ak')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'4v775zu1p3b3f')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'51u31qah6z8d9')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'59wrat188thgf')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'5f2t4rq7xkfav')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'61r81qmqpt1bs')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'6cwh5bz0d0jkv')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'6fyy4v8c85cmk')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'6k6g6725pwjpw')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'7x0psn00ac54g')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'7xmjvrazhyntv')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'82psz0nhm68wf')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'885mt394synz4')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'af4vzj7jyv5mz')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'azkbmbyxahmh2')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'cws1kfprz7u8f')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'d6h43fh3d9p7g')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'dwgtzzmc509zf')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'f3frkf6tvkjwn')=true THEN [Sql Id]
ELSEIF contains(lower(trim([Sql Id])),'gd55mjru6w8x')=true THEN [Sql Id]
ELSE 'OTHER' END
}}}



!! complex calculated field logic 
IF statement with multiple value condition https://community.tableau.com/thread/119254
Specific rows to column using calculated fields https://community.tableau.com/thread/266616
Tableau Tip: Return the value of a single dimension member https://www.thedataschool.co.uk/naledi-hollbruegge/tableau-tip-tuesday-getting-just-one-variable-field/
https://www.google.com/search?q=tableau+calculated+field+nested+if&oq=tableau+calculated+field+nested+&aqs=chrome.0.0j69i57j0.6974j0j1&sourceid=chrome&ie=UTF-8
Nesting IF/CASE question https://community.tableau.com/thread/254997 , https://community.tableau.com/thread/254997?start=15&tstart=0

{{{

// nested IF: only returns a (scaled-down) value for one metric when it spikes past 100
IF [Metric Name] = 'RSRC_MGR_CPU_WAIT_TIME_PCT' THEN 
    IF [Value] > 100 THEN [Value]*.2 END
END


// nested IF/ELSEIF: cap the wait-time metric at 120 and pass host CPU through
// (the inner ELSE branch is unreachable because the outer condition already requires [Value] > 100)
IF [Metric Name] = 'RSRC_MGR_CPU_WAIT_TIME_PCT' AND [Value] > 100 THEN 
    IF [Value] > 100 THEN 120
    ELSE [Value] END
ELSEIF [Metric Name] = 'Host CPU Utilization (%)' THEN [Value]
END


// single-level equivalent of the first calc, using AND instead of nesting
IF [Metric Name] = 'RSRC_MGR_CPU_WAIT_TIME_PCT' AND [Value] > 100 THEN [Value]*.2 
END

}}}



! ASH elapsed time , DATEDIFF
{{{
DATEDIFF('second',MIN([TMS]),max([TM]))
}}}
https://www.google.com/search?ei=Yr8LXdCgGu2N_Qau-6_QCQ&q=tableau+lod+time+calculation+max+first+column+minus+value+second+column&oq=tableau+lod+time+calculation+max+first+column+minus+value+second+column&gs_l=psy-ab.3...40963.51043..51405...0.0..0.311.4393.27j1j7j1......0....1..gws-wiz.......0i71j35i304i39j33i10j33i160j33i299.cvq9wrnC6uY
LOD expression for 'difference from overall average' https://community.tableau.com/thread/227662
https://www.theinformationlab.co.uk/2017/01/27/calculate-datediff-one-column-tableau/
https://onlinehelp.tableau.com/current/pro/desktop/en-us/functions_functions_date.htm


!! DATEADD , add hours 
{{{
DATEADD('hour', 3, #2004-04-15#)  
}}}
https://community.tableau.com/thread/157714



! LOD expression 
LOD expression to calculate average of a sum of values per user https://community.tableau.com/thread/224293



! OBIEE workload separation 
{{{

IF contains(lower(trim([Module])),'bip')=true THEN 'BIP'
ELSEIF contains(lower(trim([Module])),'odi')=true THEN 'ODI'
ELSEIF contains(lower(trim([Module])),'nqs')=true THEN 'nqsserver'
ELSE 'OTHER' END

}}}






! get underlying SQL 
https://community.tableau.com/thread/170370
http://kb.tableau.com/articles/howto/viewing-underlying-sql-queries-desktop
{{{
C:\Users\karl\Documents\My Tableau Repository\Logs
}}}

! automating tableau reports 
Tableau-generated PDF imports to Inkscape with text missing https://community.tableau.com/thread/118822
Automate pdf generation using Tableau Desktop https://community.tableau.com/thread/137724
Tableau Scripting Engine https://community.tableau.com/ideas/1694
tableau community https://community.tableau.com/message/199249#199249
http://powertoolsfortableau.com/tools/portals-for-tableau
https://dwuconsulting.com/tools/software-videos/automating-tableau-pdf <- GOOD STUFF
https://www.autoitscript.com/site/autoit/
http://stackoverflow.com/questions/17212676/vba-automation-of-tableau-workbooks-using-cmd
http://www.graphgiraffe.net/blog/tableau-tutorial-automated-pdf-report-creation-with-tableau-desktop


! remove last 2 characters
https://community.tableau.com/docs/DOC-1391
https://community.tableau.com/message/325328

! measure names, measure values 
Combining several measures in one dimension https://community.tableau.com/thread/137680
Tableau Tips and Tricks: Measure Names and Measure Values https://www.youtube.com/watch?v=m0DGW_WYKtA
http://kb.tableau.com/articles/knowledgebase/measure-names-and-measure-values-explained



! initial SQL filter
When the user opens the workbook there will be parameter1 and parameter2 prompts for the date range, and that date range will be in effect for all the sheets in that workbook.
https://onlinehelp.tableau.com/current/pro/desktop/en-us/connect_basic_initialsql.html
https://www.tableau.com/about/blog/2016/2/introducing-initial-sql-parameters-tableau-93-50213
https://tableauandbehold.com/2016/03/09/using-initial-sql-for/



! add total to stacked bar chart 
https://www.credera.com/blog/business-intelligence/tableau-workaround-part-3-add-total-labels-to-stacked-bar-chart/


! tableau roles 

tableau roles 
https://onlinehelp.tableau.com/current/server/en-us/users_site_roles.htm
https://www.google.com/search?q=tableau+publisher+vs+interactor&oq=tableau+publisher+vs+inter&aqs=chrome.0.0j69i57.5424j0j1&sourceid=chrome&ie=UTF-8



! How to verify the version of Tableau workbook
https://community.tableau.com/thread/257592
https://community.powertoolsfortableau.com/t/how-to-find-the-tableau-version-of-a-workbook/176
<<<
Add “.zip” to the end of the TWBX file. For example, “my workbook.twbx” would become “my workbook.twbx.zip”
<<<


! Embedded database credentials in Tableau

https://www.google.com/search?q=tableau+data+source+embed+credentials&oq=tableau+data+source+embed+credentials&aqs=chrome..69i57.5805j0j1&sourceid=chrome&ie=UTF-8
https://onlinehelp.tableau.com/current/pro/desktop/en-us/publishing_sharing_authentication.htm
https://onlinehelp.tableau.com/current/server/en-us/impers_SQL.htm
http://help.metricinsights.com/m/61790/l/278359-embed-database-credentials-in-tableau
https://howto.mt.gov/Portals/19/Documents/Publishing%20in%20Tableau.pdf
https://howto.mt.gov/Portals/19/Documents/Tableau%20Server%20Authentication.pdf
https://howto.mt.gov/tableau#610017167-tableau-desktop




! tableau dynamic reference line 
https://kb.tableau.com/articles/howto/adding-separate-dynamic-reference-lines-for-each-dimension-member
Add a reference line based on a calculated field https://community.tableau.com/thread/216051


! Add Separate Dynamic Reference Lines For Each Dimension Member in Tableau
How to Add Separate Dynamic Reference Lines For Each Dimension Member in Tableau https://www.youtube.com/watch?v=_3ASdjKFsAM



! dynamic axis range 
Dynamic Axis Range - Fixing One End (or both, or have it dynamic) https://community.tableau.com/docs/DOC-6215
http://drawingwithnumbers.artisart.org/creating-a-dynamic-range-parameter-in-tableau/
https://www.reddit.com/r/tableau/comments/77dx2v/hello_heroes_of_tableau_is_it_possible_to/
Set Axis Range Based On Calculated Field https://community.tableau.com/thread/243009
https://community.tableau.com/docs/DOC-6215  <- good stuff
https://public.tableau.com/profile/simon.r5129#!/vizhome/DynamicAxisRange-onlyPlotwithin1SD/Usethistorestrictoutliers


! window_stdev
https://playfairdata.com/how-to-do-anomaly-detection-in-tableau/
https://www.linkedin.com/pulse/standard-deviation-tableau-sumeet-bedekar/


! visualizing survey data 
https://www.datarevelations.com/visualizing-survey-data




! export tableau to powerpoint 
https://onlinehelp.tableau.com/current/pro/desktop/en-us/save_export_image.htm
https://www.clearlyandsimply.com/clearly_and_simply/2012/05/embed-tableau-visualizations-in-powerpoint.html
https://www.google.com/search?q=tableau+on+powerpoint&oq=tableau+on+powe&aqs=chrome.0.0j69i57j0l4.3757j0j1&sourceid=chrome&ie=UTF-8


! T test 
https://dabblingwithdata.wordpress.com/2015/09/18/kruskal-wallis-significance-testing-with-tableau-and-r/
http://breaking-bi.blogspot.com/2013/03/conducting-2-sample-z-test-in-tableau.html
T-test using Tableau for proportion & means https://community.tableau.com/thread/258064
Calculating T-Test (or any other statistical tests) parameters in Tableau https://community.tableau.com/thread/251371
T test https://www.google.com/search?q=t+test+columns+in+tableau&oq=t+test+columns+in+tableau&aqs=chrome..69i57j69i64l3.7841j0j1&sourceid=chrome&ie=UTF-8
t-test of two independent means https://community.tableau.com/docs/DOC-1428
https://www.google.com/search?q=t+test+in+tableau&oq=t+test+in+tableau&aqs=chrome..69i57j0l3j69i64l2.2239j0j1&sourceid=chrome&ie=UTF-8



! pivot data source 
Tableau in Two Minutes - How to Pivot Data in the Data Source https://www.youtube.com/watch?v=fvRVJ7d7NFI
Combine 3 Date Fields in the same Axis https://community.tableau.com/thread/206580
tableau combine 3 dates in one axis https://www.google.com/search?q=tableau+combine+3+dates+in+one+axis&oq=tableau+combine+3+dates+in+one+axis&aqs=chrome..69i57.22106j0j4&sourceid=chrome&ie=UTF-8


! people 
http://www.penguinanalytics.co , http://www.penguinanalytics.co/Datasets/ , https://public.tableau.com/profile/john.alexander.cook#!/



! Pareto Chart Reference line 20pct
Pareto Chart Reference line 20pct https://community.tableau.com/thread/228448
add y axis reference line pareto chart tableau
https://www.google.com/search?q=add+y+axis+reference+line+pareto+chart+tableau&oq=add+y+axis+reference+line+pareto+chart+tableau&aqs=chrome..69i57.6862j0j1&sourceid=chrome&ie=UTF-8
create pareto chart https://www.youtube.com/watch?v=pptICtCPSVg



! math based bin 
Count Number of Occurances of a Value https://community.tableau.com/message/303878#303878   <- good stuff 
http://vizdiff.blogspot.com/2015/07/create-bins-via-math-formula.html
To create custom bins or buckets for Sales https://community.tableau.com/thread/229116
tableau manual create bucket of 1 https://www.google.com/search?q=tableau+manual+create+bucket+of+1&oq=tableau+manual+create+bucket+of+1&aqs=chrome..69i57.7065j1j1&sourceid=chrome&ie=UTF-8
Calculated Field - Number Generator https://community.tableau.com/thread/205361
tableau create a sequence of numbers calculater field https://www.google.com/search?q=tableau+create+a+sequence+of+numbers+calculater+field&oq=tableau+create+a+sequence+of+numbers+calculater+field&aqs=chrome..69i57.11948j0j1&sourceid=chrome&ie=UTF-8
tableau calculated field create sequence https://www.google.com/search?q=tableau+calculated+field+create+sequence&oq=tableau+calculated+field+create+sequence&aqs=chrome..69i57.7054j0j1&sourceid=chrome&ie=UTF-8
Grouping bins greater than 'x' https://community.tableau.com/thread/220806
creating bins https://www.youtube.com/watch?v=ZFdqXVNST24
creating bins2 https://www.youtube.com/watch?v=VwDPBWuHu3Q



! constant reference line on continuous data tableau
constant reference line on continuous data tableau https://www.google.com/search?ei=6rSmXPysDaWt_Qb5nrlI&q=constant+reference+line+on+continuous+data+tableau&oq=constant+reference+line+on+continuous+data+tableau&gs_l=psy-ab.3...14576.15058..15169...0.0..0.83.382.5......0....1..gws-wiz.......0i71j35i304i39.3OhD28SQVL0 



! Add reference line in an axis made by Dimension
Add reference line in an axis made by Dimension https://community.tableau.com/thread/223274
Placing the Reference Line https://community.tableau.com/thread/260253
Highlight bin in Histogram https://community.tableau.com/thread/287638
tableau highlight bin https://www.google.com/search?q=tableau+highligh+bin&oq=tableau+highligh+bin&aqs=chrome..69i57.3251j0j1&sourceid=chrome&ie=UTF-8


! slope graph
https://www.tableau.com/about/blog/2016/9/how-add-vertical-lines-slope-graphs-multiple-measures-59632



! Reference Bands based on calculation
Reference Bands based on calculation https://community.tableau.com/thread/258490


! coding CASE statement easily 
http://vizdiff.blogspot.com/2015/07/coding-case-statement-made-easy.html



! Floor and Ceiling Functions
Floor and Ceiling Functions https://community.tableau.com/docs/DOC-1354
https://www.google.com/search?q=tableau+floor+function&oq=tableau+floor+f&aqs=chrome.0.0j69i57.4668j0j1&sourceid=chrome&ie=UTF-8



! reference band based on calculation 
reference band based on calculation https://community.tableau.com/thread/258490
tableau reference band on dimension in title https://www.google.com/search?ei=1rWmXInnLe61ggeNzI_wBg&q=tableau+reference+band+on+dimension+in+title&oq=tableau+reference+band+on+dimension&gs_l=psy-ab.1.0.33i22i29i30.16534.18647..20459...0.0..0.154.931.6j3......0....1..gws-wiz.......0i71j0i22i30j0i22i10i30.BFQ4RmLGzpA


! reference band, highlight weekends (check screenshot)
{{{
IF DATEPART('weekday',[Date])=6 or DATEPART('weekday',[Date])=7 THEN 0 END
}}}
https://www.evolytics.com/blog/tableau-hack-how-to-highlight-a-dimension/
adding reference line to discrete variable https://community.tableau.com/thread/193986
tableau Reference Line Discrete Headers https://www.google.com/search?q=tableau+Reference+Line+Discrete+Headers&oq=tableau+Reference+Line+Discrete+Headers&aqs=chrome..69i57j69i60.7445j0j1&sourceid=chrome&ie=UTF-8
https://kb.tableau.com/articles/issue/add-reference-line-to-discrete-field
Shading in weekends https://community.tableau.com/thread/123456



! Dashboard actions - Highlight bin from different source (see screenshot)
Highlight bin from different source https://community.tableau.com/thread/157710


! conditional format individual rows 
Tableau: Advanced Conditional Formatting - format text columns differently https://www.youtube.com/watch?v=w2nlT_TBUzU , https://www.youtube.com/watch?v=7H7Dy0G0y04
https://www.evolytics.com/blog/tableau-hack-conditionally-format-individual-rows-columns/
http://www.vizwiz.com/2016/06/tableau-tip-tuesday-how-to.html
is there a way to highlight or bold certain rows ( the entire row) in tableau the way you can in excel https://community.tableau.com/thread/122382
Color Coding by column instead of the entire row https://community.tableau.com/thread/115822


! tableau can't compare numerical bin and integer values
https://www.google.com/search?ei=8p-mXJuVFNCs5wKU0KDgAg&q=tableau+can%27t+compare+numerical+bin+and+integer+values&oq=tableau+can%27t+compare+integer+and+numeric+values&gs_l=psy-ab.1.0.0i8i7i30.12190.18434..20595...0.0..0.110.1319.13j2......0....1..gws-wiz.......0i71j33i10.xCVj6fYB9lU



! A secondary axis chart: How to add a secondary axis in Tableau
A secondary axis chart: How to add a secondary axis in Tableau https://www.youtube.com/watch?v=8yNPCgL7OtI




! tableau server 
https://www.udemy.com/administering-tableau-server-10-with-real-time-scenarios/ 


! tableau performance tuning 
Enhancing Tableau Data Queries https://www.youtube.com/watch?v=STfTQ55QE9s&index=19&list=LLmp7QJNLQvBQcvdltLTkiYQ&t=0s 


! perf tool - tableau log viewer
https://github.com/tableau/tableau-log-viewer https://github.com/tableau/tableau-log-viewer/releases



! tableau dashboard actions
Tableau Actions Give Your Dashboards Superpowers https://www.youtube.com/watch?v=r8SNKmzsW6c 


! tabpy/R 
Data science applications with TabPy/R https://www.youtube.com/watch?v=nRtOMTnBz_Y&feature=youtu.be




! time dimension example 
(with example workbook) US Holiday Date Flags 2010-2020, to share https://community.tableau.com/thread/246992
http://radacad.com/do-you-need-a-date-dimension
http://radacad.com/custom-functions-made-easy-in-power-bi-desktop
oracle generate date dimension https://sonra.io/2009/02/24/lets-create-a-date-dimension-with-pure-oracle-sql/


! prophet forecasting 
(with example workbook) using prophet to forecast https://community.tableau.com/thread/285800
example usage https://community.tableau.com/servlet/JiveServlet/download/855640-292880/Data%20Science%20Applications.twbx


! tableau database writeback 
Writeback to reporting database in Tableau 8 - hack or feature https://community.tableau.com/thread/122102
https://www.tableau.com/ja-jp/about/blog/2016/10/tableau-getdata-api-60539
Tableau writeback https://www.reddit.com/r/tableau/comments/6quhg4/tableau_writeback/
Updating Data in Your Database with Tableau https://www.youtube.com/watch?v=UWI_ub1Xuwg
K4 Analytics: How to extend Tableau with write-back and leverage your Excel models https://www.youtube.com/watch?v=5PlzdA19TUw
https://www.clearlyandsimply.com/clearly_and_simply/2016/06/writing-and-reading-tableau-views-to-and-from-databases-and-text-files-part-2.html
Tableau Write Back to Database https://community.tableau.com/thread/284428
Can we write-back to the database https://community.tableau.com/thread/279806
https://www.computerweekly.com/blog/CW-Developer-Network/Tableau-widens-developer-play-what-is-a-writeback-extension
Tableau's Extension API - Write Back https://www.youtube.com/watch?v=Jiazp_zQ0jY	
https://tableaufans.com/extension-api/tableau-extension-api-write-back-updated-source-code-for-tableau-2018-2/
Another method to update data from inside tableau http://tableaulove.tumblr.com/post/27627548817/another-method-to-update-data-from-inside-tableau
https://biztory.com/2017/10/09/interactive-commenting-solution-tableau-server/
adding information to charts for data which is not in the data source file https://community.tableau.com/thread/139439


! Commenting On Data Points In Dashboard
https://interworks.com/blog/jlyons/2018/10/01/portals-for-tableau-101-inline-commenting-on-dashboards/
https://www.theinformationlab.co.uk/2016/04/13/dashboards-reports-dynamic-comments-tableau/
Commenting On Data Points In Dashboard https://community.tableau.com/docs/DOC-8867
https://community.tableau.com/thread/157149?start=15&tstart=0


! tableau display database data as annotation 
Populate Annotation with Calculated Field https://community.tableau.com/thread/156259
Annotations / Comments / data writeback https://community.tableau.com/ideas/1261
https://stackoverflow.com/questions/40533735/generating-dynamic-displayed-annotations-in-tableau-dashboard



! order of operations 
https://www.theinformationlab.co.uk/2013/01/28/5-things-i-wish-i-knew-about-tableau-when-i-started/


! adding timestamp on charts 
https://www.thedataschool.co.uk/robbin-vernooij/time-stamping-your-data-in-tableau-and-tableau-prep/


! tableau threshold data alerts 
data driven alerts https://www.youtube.com/watch?v=vp3u4D7ao8w
https://www.google.com/search?q=tableau+threshold+alerts&oq=tableau+threshold+alerts&aqs=chrome..69i57.5575j0j1&sourceid=chrome&ie=UTF-8

! tableau pdf automation
<<<
Automation of creating PDF workbooks and delivery via email https://community.tableau.com/thread/120031
Tabcmd and Batch Scripts to automate PDF generation https://www.youtube.com/watch?v=ajB7CDcoyDU
https://www.thedataschool.co.uk/philip-mannering/idiots-guide-controlling-tableau-command-line-using-tabcmd/
Print to PDF using pages shelf in Tableau Desktop https://community.tableau.com/thread/238654
https://www.quora.com/How-do-I-automate-reports-using-the-Tableau-software
https://onlinehelp.tableau.com/current/pro/desktop/en-us/printing.htm

Can TabCMD be used to automatically schedule reports to a file share https://community.tableau.com/thread/176497
how to use tabcmd in tableau desktop,and what are the commands for downloading pdf file and txbx file https://community.tableau.com/thread/154051

We can achieve the batch export to PDF using tabcmd, and can also pass input parameters: https://www.thedataschool.co.uk/philip-mannering/idiots-guide-controlling-tableau-command-line-using-tabcmd/ . A batch file can be scheduled on your laptop or on the server itself; the PDF files related to EXP1 (cpu, io, mem, etc.) will then be spooled to a directory and can be merged into one file using another tool, all handled in the batch script (see the sketch after this quote).
example workflow of automating pdf https://www.youtube.com/watch?v=ajB7CDcoyDU
<<<
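
A minimal sketch of that batch flow in Python, assuming tabcmd is on the PATH; the server URL, credentials, and EXP1 view paths are made-up placeholders, and the merge step is left to a separate tool (e.g. pypdf):
{{{
# hedged sketch: export each EXP1 view to PDF via tabcmd; placeholders throughout
import subprocess

views = ["EXP1/cpu", "EXP1/io", "EXP1/mem"]   # hypothetical workbook/view paths

subprocess.run(["tabcmd", "login", "-s", "https://tableau.example.com",
                "-u", "admin", "-p", "secret"], check=True)
for v in views:
    out = v.replace("/", "_") + ".pdf"
    # --pdf exports the view as a PDF; -f names the output file
    subprocess.run(["tabcmd", "export", v, "--pdf", "-f", out], check=True)
subprocess.run(["tabcmd", "logout"], check=True)
# the spooled per-view PDFs can then be merged into one file by another tool
}}}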


! tableau open source 
https://tableau.github.io/


! tableau javascript api (embedded analytics - js api, REST API, SSO, and mobile)
Tableau JavaScript API | The most delicious ingredient for your custom applications https://www.youtube.com/watch?v=Oda_T5PMwt0
official doc https://onlinehelp.tableau.com/current/api/js_api/en-us/JavaScriptAPI/js_api.htm
Tableau JavaScript API: Getting Started https://www.youtube.com/watch?v=pCstUYalMEU


! tableau hyper api (hyper files as data frame - enables CRUD on files)
Hyper API: Automating Data Connectivity to Solve Real Business Problems https://www.youtube.com/watch?v=-FrMCmknI0Y
https://help.tableau.com/current/api/hyper_api/en-us/docs/hyper_api_whatsnew.html

* https://github.com/tableau/hyper-api-samples
* https://github.com/Bartman0/tableau-incremental-refresh/blob/main/tableau-incremental-refresh.py
* https://github.com/manish-aspiring-DS/Tableau-Hyper-Files
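
A minimal sketch of the CRUD idea using the tableauhyperapi Python package; the demo.hyper file name and the Extract.sales table are made up for illustration:
{{{
# hedged sketch: create a .hyper file, insert rows, then query it back with SQL
from tableauhyperapi import (HyperProcess, Telemetry, Connection, CreateMode,
                             TableDefinition, TableName, SchemaName, SqlType,
                             Inserter)

with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU) as hyper:
    with Connection(endpoint=hyper.endpoint,
                    database="demo.hyper",             # made-up file name
                    create_mode=CreateMode.CREATE_AND_REPLACE) as connection:
        table = TableDefinition(
            table_name=TableName("Extract", "sales"),  # made-up table
            columns=[TableDefinition.Column("region", SqlType.text()),
                     TableDefinition.Column("amount", SqlType.double())])
        connection.catalog.create_schema(SchemaName("Extract"))
        connection.catalog.create_table(table)
        with Inserter(connection, table) as inserter:  # the C in CRUD
            inserter.add_rows([["east", 10.0], ["west", 20.0]])
            inserter.execute()
        # reads/updates/deletes are plain SQL against the .hyper file
        print(connection.execute_scalar_query(
            f"SELECT COUNT(*) FROM {table.table_name}"))
}}}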


! tableau other developer tools 
https://www.tableau.com/developer/tools
https://www.tableau.com/support/help
<<<

    Tableau Connector SDK
    Tableau Embedded Analytics Playbook
    Tableau Extensions API
    Tableau Hyper API
    Tableau JavaScript API
    Tableau Metadata API
    Tableau Python Server (TabPY)
    Tableau REST API
    Tableau Webhooks
    Web Data Connector SDK

<<<



! alexa tableau integration 
https://github.com/jinuik?tab=repositories
http://bihappyblog.com/2016/06/11/voice-controlled-tableau-dashboard/
https://www.talater.com/annyang/
Tableau and Google Assistant / Siri https://community.tableau.com/thread/267634
Tableau Assistant - Alexa https://www.youtube.com/watch?v=V8TJBj0msIQ
https://www.tableau.com/about/blog/2017/3/hacking-alexa-and-other-tableau-api-tricks-67108
Alexa as a Tableau Assistant https://www.youtube.com/watch?v=zqGK2LYtx-U
Tableau 16 Hackathon - Voice Assisted Analytics https://www.youtube.com/watch?v=5Uul3Qy8YVE
alexa with tableau https://community.tableau.com/thread/256681
Integrating Tableau with Alexa https://community.tableau.com/thread/264965
https://twitter.com/tableau/status/967885701164527621



! tableau data source refresh schedule 
https://www.youtube.com/results?search_query=tableau+data+source
https://www.youtube.com/results?search_query=tableau+data+source+refresh
https://www.youtube.com/watch?v=FuDX1u9QSb8 Tableau - Do it Yourself Tutorial - Refresh Extracts using Command line - DIY -33-of-50 

!! tableau sync client, tableau bridge
Tableau - Do it Yourself Tutorial - Refresh Extracts using Command line - DIY -33-of-50 https://www.youtube.com/watch?v=FuDX1u9QSb8&list=PLklSCDzsQHdkjiTHqqCaU8tdA70AlSnPs&index=24&t=0s
https://onlinehelp.tableau.com/current/pro/desktop/en-gb/extracting_push.htm
https://www.google.com/search?q=tableay+sync+client&oq=tableay+sync+client&aqs=chrome..69i57j0l5.2789j0j0&sourceid=chrome&ie=UTF-8
https://www.tableau.com/about/blog/2015/5/online-sync-client-38549
https://onlinehelp.tableau.com/current/online/en-us/qs_refresh_local_data.htm
https://kb.tableau.com/articles/issue/error-this-file-was-created-by-a-newer-version-of-tableau-using-online-sync-client



! tableau outlier detection, standard deviation 
https://public.tableau.com/views/HandlingDataOutliers/OutlierHandling?%3Aembed=y&%3AshowVizHome=no&%3Adisplay_count=y&%3Adisplay_static_image=y
Outliers based on Standard Deviation https://community.tableau.com/thread/195904



! tableau awk split delimiter 
{{{
SPLIT([Name],':',2 )
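// returns the 2nd token, e.g. SPLIT('a:b:c', ':', 2) = 'b'; a negative token number counts from the right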
}}}
https://community.tableau.com/thread/177520



! custom color palette 
How to use same color palette for different visualizations https://community.tableau.com/thread/248482
https://onlinehelp.tableau.com/current/pro/desktop/en-us/formatting_worksheet.htm
https://www.tableauexpert.co.in/2015/11/how-to-create-custom-color-palette-in.html



! Labels overlapping
Labels overlapping https://community.tableau.com/thread/208870
mark labels https://community.tableau.com/thread/212775
How to avoid overlapping of labels in dual axis charts https://community.tableau.com/thread/236099


! real time graphs, kafka streaming
streaming data https://community.tableau.com/thread/125081
https://rockset.com/blog/using-tableau-for-live-dashboards-on-event-data/
https://rockset.com/blog/tableau-kafka-real-time-sql-dashboard-on-streaming-data/
Real Time streaming data from Kafka https://community.tableau.com/ideas/8913
<<<
Since Kafka is a streaming data source, it would not make sense to connect Kafka directly to Tableau. But you can use the Rockset JDBC connector for Tableau to build live dashboards on streaming event data with:

1. Low data latency (new data shows up in seconds)
2. Fast SQL queries (including JOINs with other data sources)
3. Support for high QPS, interactive queries & drill downs
<<<


! tableau data source - custom SQL pivot 
https://help.tableau.com/current/pro/desktop/en-us/pivot.htm
Combine multiple dimensions / pivot multiple columns https://community.tableau.com/thread/189601
https://www.google.com/search?q=tableau+pivot+dimension+columns&oq=tableau+pivot+dimension+columns&aqs=chrome..69i57j33.11202j0j1&sourceid=chrome&ie=UTF-8
https://www.google.com/search?q=tableau+pivot+column+not+working&oq=tableau+pivot+column+not+&aqs=chrome.2.69i57j33l6.7094j1j1&sourceid=chrome&ie=UTF-8


! circle graph jitter - spacing scatter plot, overlapping circles
Overlapping marks on scatter plot https://community.tableau.com/thread/283671
https://www.google.com/search?q=tableau+jitter+on+circle+chart&oq=tableau+jitter+on+circle+chart&aqs=chrome..69i57j33.6029j0j1&sourceid=chrome&ie=UTF-8


! tableau timeline graph 
https://playfairdata.com/how-to-make-a-tableau-timeline-when-events-overlap/
https://playfairdata.com/how-to-make-a-timeline-in-tableau/
https://www.google.com/search?q=tableau+visualize+start+end+times+time+series&oq=tableau+visualize+start+end+times+time+series&aqs=chrome..69i57.8797j1j1&sourceid=chrome&ie=UTF-8
https://www.google.com/search?q=tableau+time+series+start+and+end+timestamp&oq=tableau+time+series+start+and+end+timestamp&aqs=chrome..69i57.12272j0j1&sourceid=chrome&ie=UTF-8


! Changing the File Path for Extracts
https://kb.tableau.com/articles/howto/changing-the-file-path-for-extracts
{{{
Answer:
If you have a .twbx file, first convert it to a .twb file using one of the following methods:
* In Tableau Desktop, open the packaged workbook (.twbx file), then select File > Save As. Under "Save as type", select Tableau Workbook (*.twb).
* In Windows Explorer, right-click the .twbx file, and then select Unpackage.
Then re-create the extract in the new location:
1. In Tableau Desktop, open the .twb file. Click the sheet tab and then select Data > <data source name> > Extract > Remove.
2. Select "Just remove the extract", and then click OK.
3. Select Data > <data source name> > Extract Data, and then click Extract.
4. Select the desired location, and then click Save.
}}}


! Difference Between Extract Filters and Data Source Filters
https://community.tableau.com/docs/DOC-8721
{{{
Extract Filters:

As the name implies, extract filters are used to filter out the data while creating the extract.

Example: Let's say we have a database with data for different countries, as shown below:

USA       - 5000 rows
Canada    - 2000 rows
India     - 10000 rows
Australia - 1500 rows

If we apply an extract filter to bring in the data only for USA (Country=USA), Tableau creates the extract (.tde) just for the country USA and ignores the data for all other countries.

The size of the extract is always proportionate to the extract filters.

# of rows in the extract: 5000 (country USA only)

Data Source Filters:

In Tableau 9.0.4, applying data source filters won't change the volume of data or the size of the extract. Instead, data source filters are applied to the background query when we use any of the dimensions or measures in the visualizations.

Example:

If we apply a data source filter to bring in the data only for USA (Country=USA), Tableau creates the extract (.tde) with the full volume of data for all countries (not only USA), and there won't be any relationship between the data source filters and the size of the extract.

# of rows in the extract: 18,500 (for all countries)

However, there won't be any difference in the way we use the dimensions and measures with either extract in the visualization. Both should work as expected and will show the data only for USA.
}}}


! Trim a string up to a special character
https://community.tableau.com/thread/134857
{{{
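// everything after the first ':' in [String], e.g. 'abc:def' -> 'def'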
RIGHT([String], LEN([String]) - FIND([String], ":"))
}}}


! startswith 
Creating two filters using the first letter (Starts with) https://community.tableau.com/thread/235823


! tableau topn within category - INDEX

How to find the top N within a category in Tableau
https://www.youtube.com/watch?v=z0R9OsDl-10

https://kb.tableau.com/articles/howto/finding-the-top-n-within-a-category








! Videos
Tableau TCC12 Session: Facebook http://www.ustream.tv/recorded/26807227
''Tableau Server/Desktop videos''
http://www.lynda.com/Tableau-tutorials/Up-Running-Tableau/165439-2.html
http://beta.pluralsight.com/search/?searchTerm=tableau
http://pluralsight.com/training/Authors/Details/ben-sullins
http://www.livefyre.com/profile/21361843/ ben sullins comments/questions



! people 
tableauing dangerously https://cmtoomey.github.io/blog/page5/


! TC conference papers and materials (2016 to 2018)
https://www.dropbox.com/sh/lztdogubf20498e/AAAPptLIxaAPLdBGmwUtMVJba?dl=0


.








! fork this
https://github.com/karlarao/karlaraowiki


! how to run two versions of mozilla (need to create a new profile)
{{{
"C:\Program Files (x86)\MozillaFirefox4RC2\firefox.exe" -P "karlarao" -no-remote
}}}
https://blogs.oracle.com/datawarehousing/getting-started-with-autonomous-data-warehouse-part-1-oracle-moviestream

DML without limits, now in BigQuery https://cloud.google.com/blog/products/data-analytics/dml-without-limits-now-in-bigquery
Dremel: Interactive Analysis of Web-Scale Datasets (with cost based optimizer)
https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/36632.pdf
[img(50%,50%)[https://user-images.githubusercontent.com/3683046/91346837-69e30480-e7af-11ea-9e18-452a3c1c8a28.png]]
[img(50%,50%)[https://user-images.githubusercontent.com/3683046/91346835-68b1d780-e7af-11ea-9186-7a12eb70ddf1.png]]
https://github.com/googleapis/python-bigquery/tree/master/benchmark
https://github.com/googleapis/python-bigquery/tree/master/samples

https://cloud.google.com/bigquery/docs/release-notes
<<showtoc>>

! one SQL 

vi test.py 
{{{
from google.cloud import bigquery

# Construct a BigQuery client object.
client = bigquery.Client()

query = """
with cte as (
SELECT /* cte_query */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1
)
SELECT /* now3 main_query */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1
inner join cte
on a.col1 = cte.col1
inner join (SELECT /* subquery */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1) sq
on a.col1 = sq.col1
"""
query_job = client.query(query)  # Make an API request.
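# client.query() returns immediately; call query_job.result() to block until the job finishes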

print("done")
#for row in query_job:
#    print(','.join(row))

}}}


! two SQLs - SELECT and DDL
{{{
cat test3-createtbl.py 

from google.cloud import bigquery

# Construct a BigQuery client object.
client = bigquery.Client()

query = """
with cte as (
SELECT /* cte_query */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1
)
SELECT /* now3 main_query */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1
inner join cte
on a.col1 = cte.col1
inner join (SELECT /* subquery */ b.* 
FROM `example-prod-284123.dataset01.table01` a
inner join `example-dev-284123.dataset01.table01` b
on a.col1 = b.col1) sq
on a.col1 = sq.col1
;

create table `example-dev-284123.dataset01.table03`
as select * from `example-dev-284123.dataset01.table01`;
"""
query_job = client.query(query)  # Make an API request.
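# the ;-separated SELECT and CREATE TABLE AS run as one multi-statement (scripting) job in BigQuery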

}}}





.



https://cloud.google.com/products/calculator/
https://cloudpricingcalculator.appspot.com/static/data/pricelist.json
https://cloud.google.com/storage/pricing
http://calculator.s3.amazonaws.com/index.html


<<showtoc>>

! node and pl/sql
http://www.slideshare.net/lucasjellema/oracle-databasecentric-apis-on-the-cloud-using-plsql-and-nodejs <-- GOOD STUFF
https://technology.amis.nl/2016/04/01/running-node-oracledb-the-oracle-database-driver-for-node-js-in-the-pre-built-vm-for-database-development/ <- GOOD STUFF
https://github.com/lucasjellema/sig-nodejs-amis-2016  <- GOOD STUFF
https://www.npmjs.com/search?q=plsql
https://www.npmjs.com/package/node_plsql
https://www.npmjs.com/package/oracledb
http://www.slideshare.net/lucenerevolution/sir-en-final
https://blogs.oracle.com/opal/entry/introducing_node_oracledb_a_node
https://github.com/oracle/node-oracledb
https://github.com/oracle/node-oracledb/blob/master/doc/api.md#plsqlexecution
http://stackoverflow.com/questions/36009085/how-to-execute-stored-procedure-query-in-node-oracledb-if-we-are-not-aware-of
http://www.slideshare.net/lucasjellema/oracle-databasecentric-apis-on-the-cloud-using-plsql-and-nodejs
http://dgielis.blogspot.com/2015/01/setting-up-node-and-oracle-database.html
http://pythonhackers.com/p/doberkofler/node_plsql
http://lauren.pomogi-mne.com/how-to-run-oracle-user-defined-functions-using-nodejs-stack-overflow-1061567616/



! node websocket , socket.io
websocket https://www.youtube.com/watch?v=9L-_cNQaizM
Real-time Data with Node.js, Socket.IO, and Oracle Database https://www.youtube.com/watch?v=-mMIxikhi6M
http://krisrice.blogspot.com/2014/06/publish-data-over-rest-with-nodejs.html
An Intro to JavaScript Web Apps on Oracle Database http://nyoug.org/wp-content/uploads/2015/04/McGhan_JavaScript.pdf

! dan mcghan's relational to json
I'm looking up books/references for "end to end app from data model to nodejs" and I came across that video - VTS: Relational to JSON with Node.js https://www.youtube.com/watch?v=hFoeVZ4UpBs
https://blogs.oracle.com/newgendbaccess/entry/in_praise_of_dan_mcghan
https://jsao.io/2015/07/relational-to-json-in-oracle-database/ , http://www.slideshare.net/CJavaPeru/relational-to-json-with-node-dan-mc-ghanls
http://www.garysguide.com/events/lxnry6t/NodeJS-Microservices-NW-js-Mapping-Relational-to-JSON	
https://blog.risingstack.com/nodejs-at-scale-npm-best-practices/
An Intro to JavaScript Web Apps on Oracle Database http://nyoug.org/wp-content/uploads/2015/04/McGhan_JavaScript.pdf


! path to nodejs 
https://github.com/gilcrest/OracleMacOSXElCapitanSetup4Node	
http://drumtechy.blogspot.com/2015/03/my-path-to-nodejs-and-oracle-glory.html
http://drumtechy.blogspot.com/2015/03/my-path-to-nodejs-and-oracle-glory_14.html
http://drumtechy.blogspot.com/2015/03/my-path-to-nodejs-and-oracle-glory_16.html
<<showtoc>>

! ksun-oracle
!! book - oracle database performance tuning - studies practices research 
https://drive.google.com/file/d/1VijpHBG1I7Wi2mMPj91kSmHH_J-CZKr_/edit

!! Cache Buffer Chains Latch Contention Case Study-2: Reverse Primary Key Index
http://ksun-oracle.blogspot.com/2020/02/cache-buffer-chains-latch-contention_25.html
{{{
wget https://raw.githubusercontent.com/karlarao/scripts/master/performance/create_hint_sqlprofile.sql

profile fix offload initial max SQL from hours to 3secs
We have the following SQL that ran long in DWTST, which we fixed through a SQL profile (from 50mins to 3secs). We are expecting this to run longer in PROD due to the larger table size.

SELECT MIN("LOAD_DATE") FROM "DIM"."ENS_CSM_SUMMARY_DT_GLT" 

I've attached the script to implement the fix; please follow the steps below:

1)	On prod host 
            cd /db_backup_denx3/p1/gluent/karl
2)	Connect / as sysdba 
3)	Execute the script as follows 

@create_hint_sqlprofile
Enter value for sql_id: dg7zj0q9qa2gf
Enter value for profile_name (PROFILE_sqlid_MANUAL): <just hit ENTER here>
Enter value for category (DEFAULT): <just hit ENTER here>
Enter value for force_matching (false): <just hit ENTER here>
Enter value for hint_text: NO_PARALLEL
Profile PROFILE_dg7zj0q9qa2gf_MANUAL created.

This will make the query run in serial through a profile hint which is the fix for this issue. 


After this profile creation. Please cancel the job and restart it. 

}}}
Field Guide to Hadoop https://www.safaribooksonline.com/library/view/field-guide-to/9781491947920/
<<<
* scd
* cdc 
* streaming
<<<



Data lake ingestion strategies - Practical Enterprise Data Lake Insights: Handle Data-Driven Challenges in an Enterprise Big Data Lake https://learning.oreilly.com/library/view/practical-enterprise-data/9781484235225/html/454145_1_En_2_Chapter.xhtml

Information Integration and Exchange - Enterprise Information Management in Practice: Managing Data and Leveraging Profits in Today’s Complex Business Environment https://learning.oreilly.com/library/view/enterprise-information-management/9781484212189/9781484212196_Ch05.xhtml

Data Warehouse Patterns - SQL Server Integration Services Design Patterns, Second Edition https://learning.oreilly.com/library/view/sql-server-integration/9781484200827/9781484200834_Ch11.xhtml

Change Data Capture techniques - SAP Data Services 4.x Cookbook https://learning.oreilly.com/library/view/sap-data-services/9781782176565/ch09s02.html

https://www.youtube.com/results?search_query=scd+vs+cdc
https://communities.sas.com/t5/SAS-Data-Management/SCD-Type-2-Loader-vs-Change-Data-Capture/td-p/136421
https://www.google.com/search?q=why+CDC+vs+SCD&ei=0C9wXNDrM-Wmgge64pbYCg&start=10&sa=N&ved=0ahUKEwjQk_Ks7c_gAhVlk-AKHTqxBasQ8tMDCLQB&biw=1439&bih=798
https://network.informatica.com/message/75171#75171
https://it.toolbox.com/question/slowly-changing-dimension-vs-change-data-capture-053110
https://network.informatica.com/thread/40299
https://archive.sap.com/discussions/thread/2140880
<<<
CDC is Change Data Capture.

CDC methods enable you to extract and load only the new or changed records from the source, rather than loading the entire record set every time. This is also called a delta or incremental load.

SCD Type 2 (Slowly Changing Dimension Type 2)

This lets you store/preserve the history of changed records for selected dimensions, as per your choice. The transaction/source table will mostly hold only the current value; SCD Type 2 is used where the history of a certain dimension is required for analysis purposes.
<<<
https://books.google.com/books?id=83pgjociDWsC&pg=RA5-PT9&lpg=RA5-PT9&dq=scd+and+cdc&source=bl&ots=Ipp7HAYCFX&sig=ACfU3U2C8CiaSxa_urF19q5IhQ8DOXLbIQ&hl=en&sa=X&ved=2ahUKEwiXt9WC7M_gAhXqRt8KHdWuBnsQ6AEwCXoECAoQAQ#v=onepage&q=scd%20and%20cdc&f=false
https://community.talend.com/t5/Design-and-Development/Difference-between-CDC-and-SCD/td-p/111312


! Ideas for Event Sourcing in Oracle 
https://medium.com/@FranckPachot/ideas-for-event-sourcing-in-oracle-d4e016e90af6



<<<
With more and more organizations moving to the cloud, there is a growing demand to feed data from on-premise Oracle/DB2/SQL Server databases to various platforms on the cloud. CDC can capture changes as they happen, in real time, and push them to target platforms such as Kafka, Event Hub, and data lakes. There are many ways to perform CDC, and many CDC products are available in the market. In this session, we will discuss what CDC options are available and introduce a few key CDC products, such as Oracle GoldenGate, Attunity, and Striim.
<<<







..
<<<
QUESTION:
is there a way to ignore hints AND profiles through 1 single parameter?
like **** you all hints and profiles i hate you!
or is the only way to do this to set _optimizer_ignore_hints and disable/drop all profiles?

ANSWER: 
For profiles: ALTER SESSION SET SQLTUNE_CATEGORY = 'IGNOREMENOW';
For baselines: ALTER SESSION SET OPTIMIZER_USE_SQL_PLAN_BASELINES=false

Just turn everything off; these are the knobs :) Gluent doesn't like having a USE_NL hint on offloaded tables (it errors with KUP-04108: unable to reread file).
In case the developers have to deal with 1000+ SQLs, we know how to attack this with these knobs.




OTHER WAYS OF DISABLING: 
IGNORE_OPTIM_EMBEDDED_HINTS <- a hint that makes the optimizer ignore the statement's other embedded hints
{{{
select /*+ index(DEPT) ignore_optim_embedded_hints */ * from SCOTT.DEPT;
}}}
optimizer_ignore_hints <- database wide, or at session level (e.g. through a logon trigger)
{{{
alter session set optimizer_ignore_hints=true;
alter session set optimizer_ignore_parallel_hints=true;
}}}
<<<
https://onlinexperiences.com/scripts/Server.nxp?LASCmd=AI:4;F:QS!10100&ShowUUID=958AB2AD-BBE8-4F30-82C9-338C87B7D6C6&ShowKey=73520&AffiliateData=DSCGR#xsid=a62e_5IW
https://www.youtube.com/results?search_query=How+to+Use+Time+Series+Data+to+Forecast+at+Scale
Mahan Hosseinzadeh- Prophet at scale to tune & forecast time series at Spotify  https://www.youtube.com/watch?v=fegS34ItKcI
Joe Jevnik - A Worked Example of Using Neural Networks for Time Series Prediction https://www.youtube.com/watch?v=hAlGqT3Xpus
Real-time anomaly detection system for time series at scale https://www.youtube.com/watch?v=oVXySPH7MjQ
Two Effective Algorithms for Time Series Forecasting https://www.youtube.com/watch?v=VYpAodcdFfA
Nathaniel Cook - Forecasting Time Series Data at scale with the TICK stack https://www.youtube.com/watch?v=raEyZEryC0k 
How to Use Time Series Data to Forecast at Scale| DZone.com Webinar https://www.youtube.com/watch?v=KoLR7baZYec
Forecasting at Scale: How and Why We Developed Prophet for Forecasting at Facebook https://www.youtube.com/watch?v=pOYAXv15r3A
https://cloud.google.com/blog/products/databases/alloydb-for-postgresql-columnar-engine





.
https://community.hortonworks.com/articles/58458/installing-docker-version-of-sandbox-on-mac.html  <-- follow this @@docker@@ howto!
https://hortonworks.com/tutorial/learning-the-ropes-of-the-hortonworks-sandbox/
HORTONWORKS SANDBOX SANDBOX DEPLOYMENT AND INSTALL GUIDE Deploying Hortonworks Sandbox on @@Docker@@ https://hortonworks.com/tutorial/sandbox-deployment-and-install-guide/section/3/#for-mac
https://community.hortonworks.com/questions/57757/hdp-25-sandbox-not-starting.html <-- issue on sandbox not starting

https://www.quora.com/To-start-learning-and-playing-with-Hadoop-which-one-should-I-prefer-Cloudera-QuickStart-VM-Hortonworks-Sandbox-or-MapR-Sandbox









.





<<showtoc>>


! cat files 
https://stackoverflow.com/questions/19778137/why-is-there-no-hadoop-fs-head-shell-command
{{{
hadoop fs -cat /path/to/file | head
hadoop fs -cat /path/to/file | tail
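# note: hadoop fs -tail also exists as a built-in; older releases have no -head, hence the pipe above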
}}}


! create home directory 
{{{
[root@node1 ~]# su - hdfs
[hdfs@node1 ~]$ hadoop fs -mkdir /user/vagrant

[hdfs@node1 ~]$ hadoop fs -chown vagrant:vagrant /user/vagrant

[hdfs@node1 ~]$ hadoop fs -ls /user
Found 5 items
drwxr-xr-x   - admin     hdfs             0 2019-01-06 02:11 /user/admin
drwxrwx---   - ambari-qa hdfs             0 2019-01-06 01:31 /user/ambari-qa
drwxr-xr-x   - hcat      hdfs             0 2019-01-06 01:44 /user/hcat
drwxr-xr-x   - hive      hdfs             0 2019-01-06 02:06 /user/hive
drwxr-xr-x   - vagrant   vagrant          0 2019-01-08 06:26 /user/vagrant
}}}


! copy file 
{{{
[vagrant@node1 data]$ du -sm salaries.csv 
16	salaries.csv

[vagrant@node1 data]$ hadoop fs -put salaries.csv 

[vagrant@node1 data]$ hadoop fs -ls
Found 1 items
-rw-r--r--   3 vagrant vagrant   16257213 2019-01-08 06:27 salaries.csv

}}}


! copy file with different block size
* this splits the 16MB file into 1MB blocks spread across the data nodes, each requested at the default replication factor of 3 (the fsck output below shows only 2 live replicas since this cluster has just 2 data nodes)
{{{
[vagrant@node1 data]$ hadoop fs -D dfs.blocksize=1m -put salaries.csv salaries2.csv 
}}}


! check file status 
{{{
[vagrant@node1 data]$ hdfs fsck /user/vagrant/salaries.csv
Connecting to namenode via http://node1.example.com:50070/fsck?ugi=vagrant&path=%2Fuser%2Fvagrant%2Fsalaries.csv
FSCK started by vagrant (auth:SIMPLE) from /192.168.199.2 for path /user/vagrant/salaries.csv at Tue Jan 08 06:29:06 UTC 2019
.
/user/vagrant/salaries.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741876_1056. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
Status: HEALTHY
 Total size:	16257213 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	1 (avg. block size 16257213 B)
 Minimally replicated blocks:	1 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	1 (100.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	2.0
 Corrupt blocks:		0
 Missing replicas:		1 (33.333332 %)
 Number of data-nodes:		2
 Number of racks:		1
FSCK ended at Tue Jan 08 06:29:06 UTC 2019 in 4 milliseconds


The filesystem under path '/user/vagrant/salaries.csv' is HEALTHY



-- FILE WITH DIFFERENT BLOCK SIZE 
[vagrant@node1 data]$ hdfs fsck /user/vagrant/salaries2.csv
Connecting to namenode via http://node1.example.com:50070/fsck?ugi=vagrant&path=%2Fuser%2Fvagrant%2Fsalaries2.csv
FSCK started by vagrant (auth:SIMPLE) from /192.168.199.2 for path /user/vagrant/salaries2.csv at Tue Jan 08 06:31:11 UTC 2019
.
/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741877_1057. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741878_1058. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741879_1059. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741880_1060. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741881_1061. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741882_1062. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741883_1063. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741884_1064. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741885_1065. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741886_1066. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741887_1067. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741888_1068. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741889_1069. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741890_1070. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741891_1071. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).

/user/vagrant/salaries2.csv:  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741892_1072. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
Status: HEALTHY
 Total size:	16257213 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	16 (avg. block size 1016075 B)
 Minimally replicated blocks:	16 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	16 (100.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	2.0
 Corrupt blocks:		0
 Missing replicas:		16 (33.333332 %)
 Number of data-nodes:		2
 Number of racks:		1
FSCK ended at Tue Jan 08 06:31:11 UTC 2019 in 1 milliseconds


The filesystem under path '/user/vagrant/salaries2.csv' is HEALTHY

}}}



! get file locations and blocks
{{{
[vagrant@node1 data]$ hdfs fsck /user/vagrant/salaries.csv -files -locations -blocks
Connecting to namenode via http://node1.example.com:50070/fsck?ugi=vagrant&files=1&locations=1&blocks=1&path=%2Fuser%2Fvagrant%2Fsalaries.csv
FSCK started by vagrant (auth:SIMPLE) from /192.168.199.2 for path /user/vagrant/salaries.csv at Tue Jan 08 06:33:18 UTC 2019
/user/vagrant/salaries.csv 16257213 bytes, 1 block(s):  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741876_1056. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
0. BP-534825236-192.168.199.2-1546738263299:blk_1073741876_1056 len=16257213 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]

Status: HEALTHY
 Total size:	16257213 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	1 (avg. block size 16257213 B)
 Minimally replicated blocks:	1 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	1 (100.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	2.0
 Corrupt blocks:		0
 Missing replicas:		1 (33.333332 %)
 Number of data-nodes:		2
 Number of racks:		1
FSCK ended at Tue Jan 08 06:33:18 UTC 2019 in 1 milliseconds


The filesystem under path '/user/vagrant/salaries.csv' is HEALTHY
[vagrant@node1 data]$ 
[vagrant@node1 data]$ 
[vagrant@node1 data]$ 
[vagrant@node1 data]$ 
[vagrant@node1 data]$ 
[vagrant@node1 data]$ 
[vagrant@node1 data]$ hdfs fsck /user/vagrant/salaries2.csv -files -locations -blocks
Connecting to namenode via http://node1.example.com:50070/fsck?ugi=vagrant&files=1&locations=1&blocks=1&path=%2Fuser%2Fvagrant%2Fsalaries2.csv
FSCK started by vagrant (auth:SIMPLE) from /192.168.199.2 for path /user/vagrant/salaries2.csv at Tue Jan 08 06:36:04 UTC 2019
/user/vagrant/salaries2.csv 16257213 bytes, 16 block(s):  Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741877_1057. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741878_1058. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741879_1059. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741880_1060. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741881_1061. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741882_1062. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741883_1063. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741884_1064. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741885_1065. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741886_1066. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741887_1067. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741888_1068. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741889_1069. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741890_1070. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741891_1071. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
 Under replicated BP-534825236-192.168.199.2-1546738263299:blk_1073741892_1072. Target Replicas is 3 but found 2 live replica(s), 0 decommissioned replica(s) and 0 decommissioning replica(s).
0. BP-534825236-192.168.199.2-1546738263299:blk_1073741877_1057 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
1. BP-534825236-192.168.199.2-1546738263299:blk_1073741878_1058 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
2. BP-534825236-192.168.199.2-1546738263299:blk_1073741879_1059 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
3. BP-534825236-192.168.199.2-1546738263299:blk_1073741880_1060 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK], DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK]]
4. BP-534825236-192.168.199.2-1546738263299:blk_1073741881_1061 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
5. BP-534825236-192.168.199.2-1546738263299:blk_1073741882_1062 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK], DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK]]
6. BP-534825236-192.168.199.2-1546738263299:blk_1073741883_1063 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
7. BP-534825236-192.168.199.2-1546738263299:blk_1073741884_1064 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
8. BP-534825236-192.168.199.2-1546738263299:blk_1073741885_1065 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
9. BP-534825236-192.168.199.2-1546738263299:blk_1073741886_1066 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
10. BP-534825236-192.168.199.2-1546738263299:blk_1073741887_1067 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
11. BP-534825236-192.168.199.2-1546738263299:blk_1073741888_1068 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
12. BP-534825236-192.168.199.2-1546738263299:blk_1073741889_1069 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
13. BP-534825236-192.168.199.2-1546738263299:blk_1073741890_1070 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
14. BP-534825236-192.168.199.2-1546738263299:blk_1073741891_1071 len=1048576 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]
15. BP-534825236-192.168.199.2-1546738263299:blk_1073741892_1072 len=528573 repl=2 [DatanodeInfoWithStorage[192.168.199.2:50010,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.3:50010,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK]]

Status: HEALTHY
 Total size:	16257213 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	16 (avg. block size 1016075 B)
 Minimally replicated blocks:	16 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	16 (100.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	2.0
 Corrupt blocks:		0
 Missing replicas:		16 (33.333332 %)
 Number of data-nodes:		2
 Number of racks:		1
FSCK ended at Tue Jan 08 06:36:04 UTC 2019 in 1 milliseconds


The filesystem under path '/user/vagrant/salaries2.csv' is HEALTHY
}}}


! read raw file in data node filesystem 
* check for the blk_<id>
{{{
[root@node1 ~]# find /hadoop/hdfs/ -name "blk_1073741876" -print
/hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741876

[root@node1 ~]# find /hadoop/hdfs/ -name "blk_1073741878" -print
/hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741878

[root@node1 ~]# less /hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741878

}}}


! explore files using ambari files view and NameNode UI
Ambari files view http://127.0.0.1:8080/#/main/view/FILES/auto_files_instance
Quicklinks NameNode UI http://192.168.199.2:50070/explorer.html#/























.
http://hortonworks.com/wp-content/uploads/2016/05/Hortonworks.CheatSheet.SQLtoHive.pdf

! show all config parameters
{{{
set;
}}}

! connect on beeline
{{{
beeline> !connect jdbc:hive2://sandbox.hortonworks.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
Connecting to jdbc:hive2://sandbox.hortonworks.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
Enter username for jdbc:hive2://sandbox.hortonworks.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2:
Enter password for jdbc:hive2://sandbox.hortonworks.com:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2:
Connected to: Apache Hive (version 1.2.1000.2.5.0.0-1245)
Driver: Hive JDBC (version 1.2.1000.2.5.0.0-1245)
Transaction isolation: TRANSACTION_REPEATABLE_READ
0: jdbc:hive2://sandbox.hortonworks.com:2181/>
0: jdbc:hive2://sandbox.hortonworks.com:2181/> show databases;
+----------------+--+
| database_name  |
+----------------+--+
| default        |
| foodmart       |
| hr             |
| xademo         |
+----------------+--+
4 rows selected (0.14 seconds)


}}}
https://cwiki.apache.org/confluence/display/Hive/LanguageManual
https://cwiki.apache.org/confluence/display/Hive/Home#Home-HiveDocumentation
http://hive.apache.org/

<<showtoc>>


! WITH AS 
https://cwiki.apache.org/confluence/display/Hive/Common+Table+Expression

! UNION ALL 
https://stackoverflow.com/questions/16181684/combine-many-tables-in-hive-using-union-all

! CASE function
 http://www.folkstalk.com/2011/11/conditional-functions-in-hive.html , https://stackoverflow.com/questions/41023835/case-statements-in-hive , https://community.modeanalytics.com/sql/tutorial/sql-case/

! hive JOINS
 https://www.tutorialspoint.com/hive/hiveql_joins.htm

! DDL 
{{{
show create table
}}}

! spool to CSV
{{{
hive -e 'set hive.cli.print.header=true; select * from table' > file.csv

hive -e "use default;set hive.cli.print.header=true;select * from test1;" | sed 's/[\t]/,/g' >/temp/test.csv
INSERT OVERWRITE LOCAL DIRECTORY '/path/to/hive/csv' ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' SELECT * FROM hivetablename;	
}}}


! spool to pipe delimited 
https://stackoverflow.com/questions/44333450/hive-e-with-delimiter?rq=1
https://stackoverflow.com/questions/30224875/exporting-hive-table-to-csv-in-hdfs

{{{
[raj_ops@sandbox ~]$ hive -e "use default;set hive.cli.print.header=true;select * from hr.departments;" | sed 's/[\t]/|/g' > testdata.csv

Logging initialized using configuration in file:/etc/hive/2.5.0.0-1245/0/hive-log4j.properties
OK
Time taken: 3.496 seconds
OK
Time taken: 1.311 seconds, Fetched: 30 row(s)
[raj_ops@sandbox ~]$
[raj_ops@sandbox ~]$
[raj_ops@sandbox ~]$ cat testdata.csv
departments.department_id|departments.department_name|departments.manager_id|departments.location_id
10|Administration|200|1700
20|Marketing|201|1800
110|Accounting|205|1700
120|Treasury|NULL|1700
130|Corporate Tax|NULL|1700
140|Control And Credit|NULL|1700
150|Shareholder Services|NULL|1700
160|Benefits|NULL|1700
170|Manufacturing|NULL|1700
180|Construction|NULL|1700
190|Contracting|NULL|1700
200|Operations|NULL|1700
30|Purchasing|114|1700
210|IT Support|NULL|1700
220|NOC|NULL|1700
230|IT Helpdesk|NULL|1700
240|Government Sales|NULL|1700
250|Retail Sales|NULL|1700
260|Recruiting|NULL|1700
270|Payroll|NULL|1700
10|Administration|200|1700
50|Shipping|121|1500
50|Shipping|121|1500
40|Human Resources|203|2400
50|Shipping|121|1500
60|IT|103|1400
70|Public Relations|204|2700
80|Sales|145|2500
90|Executive|100|1700
100|Finance|108|1700

}}}


! alter table CSV header property skip header
{{{
hadoop fs -copyFromLocal mts_main_v1.csv /sdxx/derived/restou/dc_master_target_summary_v1

alter table dc_master_target_summary_v1 set TBLPROPERTIES ("skip.header.line.count"="1");
}}}












! run query
{{{
[raj_ops@sandbox ~]$ hive -e 'select * from foodmart.customer limit 2'
}}}

! run script
{{{
[raj_ops@sandbox ~]$ hive -f test.sql 
[raj_ops@sandbox ~]$ cat test.sql 
select * from foodmart.customer limit 2;
}}}
<<showtoc>>



! certification matrix
https://supportmatrix.hortonworks.com/

! ambari and hdp versions 
ambari_and_hdp_versions.md https://gist.github.com/karlarao/ba6bbc1c0049de1fc1404b5d8dc56c4d
<<<
* ambari 2.4.3.0
* hdp 2.2 to 2.5.3.0 

----------

* ambari 2.5.2.0
* hdp 2.3 to 2.6.3.0 

----------

* ambari 2.6.2.2
* hdp 2.4 to 2.6.5 
<<<


! ways to install 
!! own machine 
!!! manual install 
* using apt-get and yum for ambari-server/ambari-agent
* and then manual provisioning of the cluster through ambari UI
!!! unattended install using vagrant 
* using automation tools to install ambari-server/ambari-agent
* using blueprints to push a setup and configuration to the cluster
!! on cloud 
!!! manual or unattended install 
!!! using hortonworks cloudbreak (uses docker to provision to cloud) 
https://hortonworks.com/open-source/cloudbreak/#section_1
https://cwiki.apache.org/confluence/display/AMBARI/Blueprints


! installation docs 
https://docs.hortonworks.com/
https://hortonworks.com/products/data-platforms/hdp/

!! ambari doc 
https://docs.hortonworks.com/HDPDocuments/Ambari/Ambari-2.7.3.0/index.html

!! cluster planning 
https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.1.0/cluster-planning/content/partitioning.html


! cluster resources 
!!! ambari-server
!!! ambari-agent (node 2-3)
!!! NameNode
!!! ResourceManager 
!!! Zookeeper (node 2-3)
!!! DataNode (node 2-3)
!!! NodeManager (node 2-3)




! gluent installation 
https://docs.gluent.com/goe/install_and_upgrade.html













.
http://hadooptutorial.info/impala-commands-cheat-sheet/


<<showtoc>>

! ember.js 
!! list of apps written in ember
http://iheanyi.com/journal/2015/03/24/a-list-of-open-source-emberjs-projects/
http://stackoverflow.com/questions/10830072/recommended-example-applications-written-in-ember-js

!! a very cool restaurant app 
http://www.pluralsight.com/courses/fire-up-emberjs

!! analytics tuts app 
https://code.tutsplus.com/courses/end-to-end-analytics/lessons/getting-started

<<<
very nice app that shows the restaurant tables and the items ordered on each table
shows the table, items, and item details w/ total 
nice howto of ember in action 
<<<

! backbone.js
!! simple app backbone rectangle app
http://www.pluralsight.com/courses/backbone-fundamentals
<<<
very nice explanation so far! 
<<<

!! blogroll app (MBEN stack)
https://www.youtube.com/watch?v=a-ijUKVIJSw&list=PLX2HoWE32I8OCnumQmc9lcjnHIjAamIy6&index=4
another server example https://www.youtube.com/watch?v=uykzCfu1RiQ
https://www.youtube.com/watch?v=kHV7gOHvNdk&list=PLX2HoWE32I8Nkzw2TqcifObuhgJZz8a0U
!!! git repo 
https://github.com/michaelcheng429/backbone_tutorial_blogroll_app/tree/part1-clientside-code
https://github.com/michaelcheng429/backbone_tutorial_blogroll_app


!! db administration app
Application Building Patterns with Backbone.js - http://www.pluralsight.com/courses/playing-with-backbonejs
<<<
a full db administration app
uses node!
<<<

!! backbone todo
https://app.pluralsight.com/library/courses/choosing-javascript-framework/exercise-files
<<<
frontend masters - a todo mvc example
<<<

!! another app todo list 
Backbone.JS In-Depth and Intro to Testing with Mocha and Sinon - https://app.pluralsight.com/library/courses/backbone-js-in-depth-testing-mocha-sinon/table-of-contents
<<<
frontend masters class 
another app todo list 
<<<

!! music player
http://www.pluralsight.com/courses/backbonejs



! angular 
!! video publishing site w/ login (MEAN stack)
http://www.pluralsight.com/courses/building-angularjs-nodejs-apps-mean  
<<<
video publishing site built on mean stack 
great example of authentication and authorization 
<<<




! handlebars.js
!! just a simple demo page about employee address book details
http://www.lynda.com/sdk/Web-Interaction-Design-tutorials/JavaScript-Templating/156166-2.html
<<<
clear explanations maybe because it's a simple app, I like this one! 
very nice simple howto on different templating engines  
the guy used vivaldi browser and aptana for minimal setup 
<<<
<<<
jquery, mustache.js, handlebars.js, dust 
<<<
!! dog or not app 
http://www.pluralsight.com/courses/handlebars-javascript-templating
<<<
this is a handlebars centric course 
cute webapp that shows a photo where you would identify if it is a dog or not
this app shows how filtering, pagination, and scoring is done 
<<<
<<<
bower, handlebars.js, gulp
<<<

! nodejs 
!! oracle to_json and node oracle driver voting on hr schema
http://www.slideshare.net/lucasjellema/oracle-databasecentric-apis-on-the-cloud-using-plsql-and-nodejs
https://github.com/pavadeli/oowsession2016-app
https://github.com/pavadeli/oowsession2016
!! node-oracledb at amis lucas
https://github.com/lucasjellema/sig-nodejs-amis-2016
!! dino-date - showcase Oracle DB features on multiple programming languages (node, python, ruby, etc.)
<<<
DinoDate is "a fun site for finding your perfect Dino partner". It is a learning platform to showcase Oracle Database features using examples in multiple programming languages. https://community.oracle.com/docs/DOC-998357
Blaine Carter https://www.youtube.com/channel/UCnyo1hKeJ4GOsppGVRX6Y4A
http://learncodeshare.net/2016/04/08/dinodate-a-demonstration-platform-for-oracle-database/
way back 2009 http://feuerstein28.rssing.com/browser.php?indx=30498827&item=34
<<<










https://sakthismysqlblog.wordpress.com/2019/08/02/mysql-8-internal-architecture/



.
{{{
There are MySQL functions you can use, like this one that resolves the user:

SELECT USER();
This will return something like root@localhost so you get the host and the user.

To get the current database run this statement:

SELECT DATABASE();
}}}



! create new superuser 
{{{
create USER 'karlarao'@'%' IDENTIFIED BY 'karlarao';
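-- '%' lets the user connect from any host; WITH GRANT OPTION (next line) lets it pass privileges on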
GRANT ALL PRIVILEGES ON *.* TO 'karlarao'@'%' WITH GRANT OPTION;

SELECT CURRENT_USER();
SELECT DATABASE();
status;
}}}


https://community.oracle.com/tech/apps-infra/categories/database-ideas-ideas

<<<
0)	It would be ideal if you can create a separate database for your tests

1) If you really want a quick IO test, then run calibrate_io, then iperf2/netperf (see the calibrate_io sketch after this quote)
http://docs.oracle.com/cd/E11882_01/appdev.112/e40758/d_resmgr.htm#ARPLS67598
                                           
2)	If you have a bit of time, then do Orion
You can quickly do a -run dss or -run oltp
But you can also explore the attached oriontoolkit.zip

3) If you have a lot of time, then do SLOB.
For an OLTP test:
http://kevinclosson.net/2012/02/06/introducing-slob-the-silly-little-oracle-benchmark/
If it's DW, then you need to test the large IOs. See the attached IOsaturationtoolkit-v2.tar.bz2 and
http://karlarao.tiddlyspot.com/#[[cpu%20-%20SillyLittleBenchmark%20-%20SLOB]]

Kyle also has some good reference on making use of FIO https://github.com/khailey/fio_scripts/blob/master/README.md

<<<
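
A minimal sketch of kicking off the calibrate_io quick test from python-oracledb; the credentials, DSN, and the 12-disk / 10ms inputs are placeholder assumptions:
{{{
# hedged sketch: run DBMS_RESOURCE_MANAGER.CALIBRATE_IO (it can run for minutes);
# connection details and the disk/latency inputs are placeholders
import oracledb

conn = oracledb.connect(user="sys", password="secret", dsn="db1:1521/orcl",
                        mode=oracledb.AUTH_MODE_SYSDBA)
cur = conn.cursor()
max_iops = cur.var(int)          # OUT: max sustainable small-IO IOPS
max_mbps = cur.var(int)          # OUT: max sustainable large-IO MB/s
actual_latency = cur.var(int)    # OUT: measured latency in ms
# IN args: number of physical disks, max tolerated latency in ms
cur.callproc("DBMS_RESOURCE_MANAGER.CALIBRATE_IO",
             [12, 10, max_iops, max_mbps, actual_latency])
print("max_iops:", max_iops.getvalue(),
      "max_mbps:", max_mbps.getvalue(),
      "latency_ms:", actual_latency.getvalue())
}}}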
https://wikibon.com/oracle-mysql-database-service-heatwave-vaporizes-aws-redshift-aqua-snowflake-azure-synapse-gcp-bq/
https://www.oracle.com/mysql/heatwave/



! competitors	
a similar product is TiDB 
https://pingcap.com/blog/how-we-build-an-htap-database-that-simplifies-your-data-platform
https://medium.com/swlh/making-an-htap-database-a-reality-what-i-learned-from-pingcaps-vldb-paper-6d249c930a11
! name of current db 
https://dba.stackexchange.com/questions/58312/how-to-get-the-name-of-the-current-database-from-within-postgresql
{{{
SELECT current_database();
}}}


! list current user
https://www.postgresql.org/message-id/52C315B8.2040006@gmail.com
{{{
select current_user;
}}}















.
<<showtoc>>



! 202008
INTRO 
A Tour of PostgreSQL https://www.pluralsight.com/courses/tekpub-postgres 
PostgreSQL Playbook for Developer DBAs https://www.pluralsight.com/courses/postgresql-playbook
https://www.linkedin.com/learning/postgresql-essential-training/using-the-exercise-files
https://app.pluralsight.com/library/courses/meet-postgresql/table-of-contents


PERFORMANCE
Play by Play: Database Tuning https://www.pluralsight.com/courses/play-by-play-rob-sullivan

PL/pgSQL
https://www.pluralsight.com/courses/postgresql-advanced-server-programming
https://www.pluralsight.com/courses/posgresql-functions-playbook
https://www.pluralsight.com/courses/capturing-logic-custom-functions-postgresql
https://www.pluralsight.com/courses/programming-postgresql

JSON 
https://www.pluralsight.com/courses/postgresql-document-database



! courses 

https://www.pluralsight.com/courses/tekpub-postgres
https://www.udemy.com/beginners-guide-to-postgresql/learn/lecture/82719#overview
https://www.udemy.com/learn-database-design-using-postgresql/learn/lecture/1594438#overview
https://www.udemy.com/learn-partitioning-in-postgresql-from-scratch/learn/lecture/5639644#overview


pl/pgsql
https://app.pluralsight.com/profile/author/pinal-dave
https://app.pluralsight.com/library/courses/postgresql-advanced-server-programming/table-of-contents





https://www.udemy.com/course/learn-partitioning-in-postgresql-from-scratch/
https://www.udemy.com/course/the-complete-python-postgresql-developer-course/
https://www.udemy.com/course/learn-database-design-using-postgresql/
https://www.udemy.com/course/postgresql-permissionsprivilegesadvanced-review/
https://www.udemy.com/course/ordbms-with-postgresql-essential-administration-training/
https://www.udemy.com/course/ultimate-expert-guide-mastering-postgresql-administration/
https://www.udemy.com/course/beginners-guide-to-postgresql/
https://www.udemy.com/course/postgresql-encryptiondata-at-rest-ssl-security/
https://www.udemy.com/course/postgresql-backupreplication-restore/


https://www.youtube.com/results?search_query=postgresql+replication+step+by+step
https://www.youtube.com/results?search_query=postgresql+performance+tuning


! books
https://learning.oreilly.com/library/view/postgresql-up-and/9781491963401/
https://learning.oreilly.com/library/view/postgresql-for-data/9781783288601/    <- for data architects

https://learning.oreilly.com/library/view/postgresql-high-availability/9781787125537/cover.xhtml
https://learning.oreilly.com/library/view/postgresql-replication-/9781783550609/
https://learning.oreilly.com/library/view/postgresql-10-high/9781788474481/
https://learning.oreilly.com/library/view/postgresql-high-performance/9781785284335/
https://learning.oreilly.com/library/view/postgresql-96-high/9781784392970/
https://learning.oreilly.com/library/view/postgresql-90-high/9781849510301/
https://learning.oreilly.com/library/view/postgresql-high-availability/9781787125537/
https://learning.oreilly.com/library/view/postgresql-administration-cookbook/9781785883187/
https://learning.oreilly.com/library/view/mastering-postgresql-96/9781783555352/
https://learning.oreilly.com/library/view/postgresql-11-server/9781789342222/
https://learning.oreilly.com/library/view/postgresql-development-essentials/9781783989003/
https://learning.oreilly.com/library/view/beginning-postgresql-on/9781484234471/
https://learning.oreilly.com/library/view/postgresql-9-administration/9781849519069/
https://learning.oreilly.com/library/view/troubleshooting-postgresql/9781783555314/
https://learning.oreilly.com/library/view/postgresql-server-programming/9781783980581/
https://learning.oreilly.com/library/view/postgresql-developers-guide/9781783989027/
https://learning.oreilly.com/library/view/practical-postgresql/9781449309770/
https://learning.oreilly.com/library/view/professional-website-performance/9781118551721/





https://learning.oreilly.com/search/?query=postgresql%20performance&extended_publisher_data=true&highlight=true&include_assessments=false&include_case_studies=true&include_courses=true&include_orioles=true&include_playlists=true&include_collections=true&include_notebooks=true&is_academic_institution_account=false&source=user&sort=relevance&facet_json=true&page=10
















.

<<showtoc>> 

! download 
https://postgresapp.com/



! configure 
{{{

sudo mkdir -p /etc/paths.d &&
echo /Applications/Postgres.app/Contents/Versions/latest/bin | sudo tee /etc/paths.d/postgresapp


$ pwd
/Applications/Postgres.app/Contents/Versions/latest/bin

$ ls
clusterdb		gdalbuildvrt		invgeod			pg_dump			pg_waldump
createdb		gdaldem			invproj			pg_dumpall		pgbench
createuser		gdalenhance		nad2bin			pg_isready		pgsql2shp
cs2cs			gdalinfo		nearblack		pg_receivewal		postgres
dropdb			gdallocationinfo	ogr2ogr			pg_recvlogical		postmaster
dropuser		gdalmanage		ogrinfo			pg_resetwal		proj
ecpg			gdalserver		ogrtindex		pg_restore		psql
gdal-config		gdalsrsinfo		oid2name		pg_rewind		raster2pgsql
gdal_contour		gdaltindex		pg_archivecleanup	pg_standby		reindexdb
gdal_grid		gdaltransform		pg_basebackup		pg_test_fsync		shp2pgsql
gdal_rasterize		gdalwarp		pg_config		pg_test_timing		testepsg
gdal_translate		geod			pg_controldata		pg_upgrade		vacuumdb
gdaladdo		initdb			pg_ctl			pg_verify_checksums	vacuumlo


# data directory
/Users/kristofferson.a.arao/Library/Application Support/Postgres/var-11

# postgresql.conf
find . | grep postgresql.conf
./Library/Application Support/Postgres/var-11/postgresql.conf
}}}
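
Once psql is on the PATH, the data directory and config file locations can also be pulled from inside the database (documented server settings) instead of hunting through the filesystem:
{{{
-- run from psql
SHOW data_directory;
SHOW config_file;
SELECT version();
}}}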

[img(80%,80%)[https://i.imgur.com/q4haN6t.png]]

it's also possible to create databases under other PostgreSQL versions
[img(80%,80%)[https://i.imgur.com/IA6y1mh.png]]

also the same as [[get tuning advisor hints]]

https://blog.dbi-services.com/oracle-sql-profiles-check-what-they-do-before-accepting-them-blindly/
{{{
set serveroutput on echo off
declare
  -- input variables
  input_task_owner dba_advisor_tasks.owner%type:='SYS';
  input_task_name dba_advisor_tasks.task_name%type:='dbiInSite';
  input_show_outline boolean:=false;
  -- local variables
  task_id  dba_advisor_tasks.task_id%type;
  outline_data xmltype;
  benefit number;
begin
  for o in ( select * from dba_advisor_objects where owner=input_task_owner and task_name=input_task_name and type='SQL')
  loop
          -- get the profile hints (opt_estimate)
          dbms_output.put_line('--- PROFILE HINTS from '||o.task_name||' ('||o.object_id||') statement '||o.attr1||':');
          dbms_output.put_line('/*+');
          for r in (
            select hint,benefit from (
             select case when attr5 like 'OPT_ESTIMATE%' then cast(attr5 as varchar2(4000)) when attr1 like 'OPT_ESTIMATE%' then attr1 end hint,benefit
             from dba_advisor_recommendations t join dba_advisor_rationale r using (task_id,rec_id)
             where t.owner=o.owner and t.task_name = o.task_name and r.object_id=o.object_id and t.type='SQL PROFILE'
             --and r.message='This attribute adjusts optimizer estimates.'
            ) order by to_number(regexp_replace(hint,'^.*=([0-9.]+)[^0-9].*$','\1'))
          ) loop
           dbms_output.put_line('   '||r.hint); benefit:=to_number(r.benefit)/100;
          end loop;
          dbms_output.put_line('*/');
          -- get the outline hints
          begin
          select outline_data into outline_data from (
              select case when other_xml is not null then extract(xmltype(other_xml),'/*/outline_data/hint') end outline_data
              from dba_advisor_tasks t join dba_sqltune_plans p using (task_id)
              where t.owner=o.owner and t.task_name = o.task_name and p.object_id=o.object_id  and t.advisor_name='SQL Tuning Advisor' --11gonly-- and execution_type='TUNE SQL'
              and p.attribute='Using SQL profile'
          ) where outline_data is not null;
          exception when no_data_found then null;
          end;
          exit when not input_show_outline;
          dbms_output.put_line('--- OUTLINE HINTS from '||o.task_name||' ('||o.object_id||') statement '||o.attr1||':');
          dbms_output.put_line('/*+');
          for r in (
              select (extractvalue(value(d), '/hint')) hint from table(xmlsequence(extract( outline_data , '/'))) d
          ) loop
           dbms_output.put_line('   '||r.hint);
          end loop;
          dbms_output.put_line('*/');
          dbms_output.put_line('--- Benefit: '||to_char(to_number(benefit),'FM99.99')||'%');
  end loop;
  dbms_output.put_line('');
end;
/
}}}
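
After reviewing the hints, the profile from the same tuning task can be accepted; a minimal sketch using the documented DBMS_SQLTUNE.ACCEPT_SQL_PROFILE function (task name and owner match the inputs above; force_match => TRUE is optional and makes the profile apply to literal variations of the statement):
{{{
DECLARE
  l_name VARCHAR2(128);
BEGIN
  l_name := DBMS_SQLTUNE.ACCEPT_SQL_PROFILE(
              task_name   => 'dbiInSite',
              task_owner  => 'SYS',
              replace     => TRUE,
              force_match => TRUE);
  DBMS_OUTPUT.PUT_LINE('profile created: ' || l_name);
END;
/
}}}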
What is the benefit of using google cloud pub/sub service in a streaming pipeline https://stackoverflow.com/questions/60919717/what-is-the-benefit-of-using-google-cloud-pub-sub-service-in-a-streaming-pipelin/60920217#60920217
<<<


Dataflow will need a source to get the data from. If you are using a streaming pipeline you can use different options as a source and each of them will have its own characteristics that may fit your scenario.

With Pub/Sub you can easily publish events to a topic using a client library or the API directly, and it guarantees at-least-once delivery of each message.

When you connect it with a Dataflow streaming pipeline, you get a resilient architecture (Pub/Sub will keep redelivering a message until Dataflow acknowledges that it has processed it) and near-real-time processing. In addition, Dataflow can use Pub/Sub metrics to scale up or down depending on the number of messages in the backlog.

Finally, the Dataflow runner uses an optimized version of the PubSubIO connector, which provides additional features. I suggest checking the documentation that describes some of these features.
<<<



* https://raw.githubusercontent.com/karlarao/scripts/master/security/sechealthcheck.sql
* esec360
* DBSAT  https://blogs.oracle.com/cloudsecurity/announcing-oracle-database-security-assessment-tool-dbsat-22
** https://www.oracle.com/a/ocom/docs/corporate/cyber-resilience-ds.pdf
** https://go.oracle.com/LP=38340
** concepts https://docs.oracle.com/en/database/oracle/security-assessment-tool/2.2/satug/index.html#UGSAT-GUID-C7E917BB-EDAC-4123-900A-D4F2E561BFE9
** https://www.oracle.com/technetwork/database/security/dbsat/dbsat-ds-jan2018-4219315.pdf
** https://www.oracle.com/technetwork/database/security/dbsat/dbsat-public-faq-4219329.pdf
** https://www.oracle.com/technetwork/database/security/dbsat/dbsec-dbsat-public-4219331.pdf




https://status.snowflake.com/


https://community.snowflake.com/s/topic/0TO0Z000000Unu5WAC/releases


https://docs.snowflake.com/en/release-notes/2021-01.html?_ga=2.227732125.1483243957.1613593318-1423095178.1586365212
https://www.snowflake.com/blog/new-snowflake-features-released-in-january-2021/

https://spark.apache.org/news/index.html
https://spark.apache.org/releases/spark-release-3-0-0.html
https://spark.apache.org/releases/spark-release-3-0-2.html


.
https://www.udemy.com/course/oracle-12c-sql-tuning/
https://www.udemy.com/course/sql-performance-tuning-masterclass/
https://www.udemy.com/course/sql-tuning/
<<showtoc>>

! manual way (this is recommended)

{{{

11:11:09 KARLARAO@cdb1> @spm_demo_query.sql

ALL_DISTINCT       SKEW
------------ ----------
           3          3


P_SQLID
-------------
a5jq5khm9w64n

Enter value for p_sqlid: a5jq5khm9w64n

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 246648590

--------------------------------------------------------------------------
| Id  | Operation         | Name | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |       |       |     8 (100)|          |
|*  1 |  TABLE ACCESS FULL| SKEW |   909 |  6363 |     8  (13)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("SKEW"=3)


18 rows selected.






11:11:43 KARLARAO@cdb1> @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

no rows selected






-- create the baseline
DECLARE
my_plans pls_integer;
BEGIN
my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id => 'a5jq5khm9w64n',plan_hash_value=>'246648590', fixed =>'YES', enabled=>'YES');
END;
/





11:12:52 KARLARAO@cdb1> DECLARE
my_plans pls_integer;
BEGIN
my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id => 'a5jq5khm9w64n',plan_hash_value=>'246648590', fixed =>'YES', enabled=>'YES');
END;
/11:18:16   2  11:18:16   3  11:18:16   4  11:18:16   5  11:18:16   6

PL/SQL procedure successfully completed.



11:18:20 KARLARAO@cdb1> @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD






--##############################################################################################################################
created an index but it doesn't get used by the SQL_ID
what needs to be done: run a new SQL_ID that produces the new plan hash value, then add that PHV to the old SQL_ID's baseline
--##############################################################################################################################


11:20:38 KARLARAO@cdb1> @spm_demo_createindex.sql

Index created.


PL/SQL procedure successfully completed.


PL/SQL procedure successfully completed.

11:21:27 KARLARAO@cdb1> @spm_demo_fudgestats.sql

PL/SQL procedure successfully completed.

11:21:36 KARLARAO@cdb1> @spm_demo_query.sql

ALL_DISTINCT       SKEW
------------ ----------
           3          3


P_SQLID
-------------
a5jq5khm9w64n

Enter value for p_sqlid: a5jq5khm9w64n

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 246648590

--------------------------------------------------------------------------
| Id  | Operation         | Name | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |       |       |     2 (100)|          |
|*  1 |  TABLE ACCESS FULL| SKEW |     1 |     1 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm950a48a8 used for this statement


22 rows selected.

11:21:46 KARLARAO@cdb1> @spm_demo_query.sql

ALL_DISTINCT       SKEW
------------ ----------
           3          3


P_SQLID
-------------
a5jq5khm9w64n

Enter value for p_sqlid: a5jq5khm9w64n

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 246648590

--------------------------------------------------------------------------
| Id  | Operation         | Name | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |       |       |     2 (100)|          |
|*  1 |  TABLE ACCESS FULL| SKEW |     1 |     1 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm950a48a8 used for this statement


22 rows selected.

11:21:59 KARLARAO@cdb1> @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD








--## regather stats 
exec dbms_stats.gather_index_stats(user,'SKEW_IDX', no_invalidate => false); 
exec dbms_stats.gather_table_stats(user,'SKEW', no_invalidate => false);





--## index was picked up 

11:28:22 KARLARAO@cdb1> @spm_demo_query2.sql

ALL_DISTINCT       SKEW
------------ ----------
           3          3


P_SQLID
-------------
693ccxff9a8ku

Enter value for p_sqlid: 693ccxff9a8ku

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  693ccxff9a8ku, child number 0
-------------------------------------
select /* new */ * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)


19 rows selected.










--## use coe.sql to force the index plan onto the OLD SQL_ID
-- edit the output sql file to match the text of the OLD SQL_ID

SQL>set lines 300
SQL>set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);SQL>
ALL_DISTINCT       SKEW
------------ ----------
           3          3

SQL>

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)

Note
-----
   - SQL profile coe_693ccxff9a8ku_1949605896 used for this statement

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


23 rows selected.





SQL>@spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD







      
-- add the other plan
-- you can even use a different SQL_ID; what matters is that the text matches the EXACT_MATCHING_SIGNATURE, so it gets tied to the same SQL_HANDLE as a new PLAN_NAME
DECLARE
my_plans pls_integer;
BEGIN
my_plans := DBMS_SPM.LOAD_PLANS_FROM_CURSOR_CACHE(sql_id => 'a5jq5khm9w64n',plan_hash_value=>'1949605896', fixed =>'YES', enabled=>'YES');
END;
/





-- SQL HANDLE is the same, there's a new PLAN NAME


SQL>@spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD

KARLARAO 03/23/20 11:41:32    SQL_PLAN_fahs3brrwbxcm08e93fe4           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD







-- verify
set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);

 
 



Connected.
11:42:26 KARLARAO@cdb1>
set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);11:42:27 KARLARAO@cdb1> 11:42:27 KARLARAO@cdb1> 11:42:27 KARLARAO@cdb1>
ALL_DISTINCT       SKEW
------------ ----------
           3          3

11:42:27 KARLARAO@cdb1>

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)

Note
-----
   - SQL profile coe_693ccxff9a8ku_1949605896 used for this statement

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm08e93fe4 used for this statement


24 rows selected.







--## drop sql profile and verify baseline 
exec dbms_sqltune.drop_sql_profile(name => '&profile_name');


11:46:10 KARLARAO@cdb1> set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);11:46:11 KARLARAO@cdb1> 11:46:11 KARLARAO@cdb1>
ALL_DISTINCT       SKEW
------------ ----------
           3          3

11:46:11 KARLARAO@cdb1>

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm08e93fe4 used for this statement

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


23 rows selected.






--## here you'll see two plans 


11:46:34 KARLARAO@cdb1> @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD

KARLARAO 03/23/20 11:41:32    SQL_PLAN_fahs3brrwbxcm08e93fe4           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD


11:46:38 KARLARAO@cdb1>
11:46:38 KARLARAO@cdb1> @spm_plans
Enter value for sql_handle: SQL_e543035defc5f593

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

--------------------------------------------------------------------------------
SQL handle: SQL_e543035defc5f593
SQL text: select * from skew where skew=3
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm08e93fe4         Plan id: 149503972
Enabled: YES     Fixed: YES     Accepted: YES     Origin: MANUAL-LOAD
Plan rows: From dictionary
--------------------------------------------------------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Plan hash value: 1949605896

--------------------------------------------------------
| Id  | Operation                           | Name     |
--------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |
|   2 |   INDEX RANGE SCAN                  | SKEW_IDX |
--------------------------------------------------------


PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm950a48a8         Plan id: 2500479144
Enabled: YES     Fixed: YES     Accepted: YES     Origin: MANUAL-LOAD
Plan rows: From dictionary
--------------------------------------------------------------------------------

Plan hash value: 246648590

----------------------------------
| Id  | Operation         | Name |
----------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |
|   1 |  TABLE ACCESS FULL| SKEW |
----------------------------------

36 rows selected.






--## let's try to disable that baseline 

set verify off
declare
myplan pls_integer;
begin
myplan:=DBMS_SPM.ALTER_SQL_PLAN_BASELINE (sql_handle => '&sql_handle',plan_name  => '&plan_name',attribute_name => 'ENABLED',   attribute_value => '&YES_OR_NO');
end;
/



set verify off
declare
myplan pls_integer;
begin
myplan:=DBMS_SPM.ALTER_SQL_PLAN_BASELINE (sql_handle => '&sql_handle',plan_name  => '&plan_name',attribute_name => 'ENABLED',   attribute_value => '&YES_OR_NO');
end;
/11:49:35 KARLARAO@cdb1> 11:49:35 KARLARAO@cdb1> 11:49:35   2  11:49:35   3  11:49:35   4  11:49:35   5  11:49:35   6
Enter value for sql_handle: SQL_e543035defc5f593
Enter value for plan_name: SQL_PLAN_fahs3brrwbxcm08e93fe4
Enter value for yes_or_no: no

PL/SQL procedure successfully completed.


 @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------
KARLARAO 03/23/20 11:18:18    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  8 YES YES YES YES MANUAL-L
                                                                                                                                                                    OAD

KARLARAO 03/23/20 11:41:32    SQL_PLAN_fahs3brrwbxcm08e93fe4           SQL_e543035defc5f593      select * from skew where skew=3                  2 NO  YES YES YES MANUAL-L
                                                                                                                                                                    OAD





--## after disabling it, the full scan baseline was used



11:50:04 KARLARAO@cdb1> set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);11:50:31 KARLARAO@cdb1> 11:50:31 KARLARAO@cdb1>
ALL_DISTINCT       SKEW
------------ ----------
           3          3

11:50:31 KARLARAO@cdb1>

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 1
-------------------------------------
select * from skew where skew=3

Plan hash value: 246648590

--------------------------------------------------------------------------
| Id  | Operation         | Name | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |       |       |     8 (100)|          |
|*  1 |  TABLE ACCESS FULL| SKEW |     1 |     7 |     8  (13)| 00:00:01 |

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm950a48a8 used for this statement


22 rows selected.






--## let's disable the remaining baseline 
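-- (run the same ALTER_SQL_PLAN_BASELINE block as above, this time answering
--  plan_name SQL_PLAN_fahs3brrwbxcm950a48a8 and yes_or_no = no)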



--## here the optimizer picked up the index 

set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);



11:52:49 KARLARAO@cdb1> set lines 300
set serveroutput off
select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);11:53:17 KARLARAO@cdb1> 11:53:17 KARLARAO@cdb1>
ALL_DISTINCT       SKEW
------------ ----------
           3          3

11:53:17 KARLARAO@cdb1>

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)


19 rows selected.








-- code to drop the individual baselines 


set verify off
DECLARE
  plans_dropped    PLS_INTEGER;
BEGIN
  plans_dropped := DBMS_SPM.drop_sql_plan_baseline (
sql_handle => '&sql_handle',
plan_name  => '&plan_name');
DBMS_OUTPUT.put_line(plans_dropped);
END;
 /




}}}
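
The @spm_baselines and @spm_plans scripts used throughout aren't reproduced here; rough equivalents (assumed, not the actual scripts) against the documented dictionary views:
{{{
-- roughly what @spm_baselines reports
SELECT parsing_schema_name, created, plan_name, sql_handle, sql_text,
       optimizer_cost, enabled, accepted, fixed, reproduced, origin
  FROM dba_sql_plan_baselines
 WHERE sql_text LIKE '%&sql_text%'
 ORDER BY created;

-- roughly what @spm_plans shows
SELECT * FROM TABLE(
  DBMS_XPLAN.DISPLAY_SQL_PLAN_BASELINE(sql_handle => '&sql_handle',
                                       format     => 'BASIC'));
}}}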




! automatic pickup of plans using evolve 
{{{

20:02:22 KARLARAO@cdb1> @spm_demo_query.sql

ALL_DISTINCT       SKEW
------------ ----------
           3          3


PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 1
-------------------------------------
select * from skew where skew=3

Plan hash value: 246648590

--------------------------------------------------------------------------
| Id  | Operation         | Name | Rows  | Bytes | Cost (%CPU)| Time     |
--------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |       |       |     8 (100)|          |
|*  1 |  TABLE ACCESS FULL| SKEW |     1 |     7 |     8  (13)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm950a48a8 used for this statement


22 rows selected.

20:02:31 KARLARAO@cdb1> @spm_baselines.sql
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------

KARLARAO 03/22/20 19:58:58    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES YES NO  YES AUTO-CAP
                                                                                                                                                                    TURE

KARLARAO 03/22/20 20:01:58    SQL_PLAN_fahs3brrwbxcm08e93fe4           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES NO  NO  YES AUTO-CAP
                                                                                                                                                                    TURE



20:02:42 KARLARAO@cdb1> @spm_plans
Enter value for sql_handle: SQL_e543035defc5f593

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

--------------------------------------------------------------------------------
SQL handle: SQL_e543035defc5f593
SQL text: select * from skew where skew=3
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm08e93fe4         Plan id: 149503972
Enabled: YES     Fixed: NO      Accepted: NO      Origin: AUTO-CAPTURE
Plan rows: From dictionary
--------------------------------------------------------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Plan hash value: 1949605896

--------------------------------------------------------
| Id  | Operation                           | Name     |
--------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |
|   2 |   INDEX RANGE SCAN                  | SKEW_IDX |
--------------------------------------------------------


PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm950a48a8         Plan id: 2500479144
Enabled: YES     Fixed: NO      Accepted: YES     Origin: AUTO-CAPTURE
Plan rows: From dictionary
--------------------------------------------------------------------------------

Plan hash value: 246648590

----------------------------------
| Id  | Operation         | Name |
----------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |
|   1 |  TABLE ACCESS FULL| SKEW |
----------------------------------

36 rows selected.









10:48:47 KARLARAO@cdb1>  @spm_evolve.sql
Enter value for sql_handle: SQL_e543035defc5f593
Enter value for verify: yes
Enter value for commit: yes
GENERAL INFORMATION SECTION
---------------------------------------------------------------------------------------------

 Task Information:
 ---------------------------------------------
 Task Name            : TASK_33661
 Task Owner           : KARLARAO
 Execution Name       : EXEC_36257
 Execution Type       : SPM EVOLVE
 Scope                : COMPREHENSIVE
 Status               : COMPLETED
 Started              : 03/23/2020 10:48:59
 Finished             : 03/23/2020 10:48:59
 Last Updated         : 03/23/2020 10:48:59
 Global Time Limit    : 2147483646
 Per-Plan Time Limit  : UNUSED
 Number of Errors     : 0
---------------------------------------------------------------------------------------------

SUMMARY
SECTION
---------------------------------------------------------------------------------------------
  Number of plans processed  : 1
  Number of findings         : 2
  Number of recommendations  : 1
  Number of errors           : 0
---------------------------------------------------------------------------------------------

DETAILS SECTION
---------------------------------------------------------------------------------------------
 Object ID          : 2
 Test Plan Name     : SQL_PLAN_fahs3brrwbxcm08e93fe4
 Base Plan Name     : SQL_PLAN_fahs3brrwbxcm950a48a8
 SQL Handle         : SQL_e543035defc5f593
 Parsing Schema     : KARLARAO
 Test Plan Creator  : KARLARAO

 SQL Text           : select * from skew where skew=3

Execution Statistics:
-----------------------------
                    Base Plan                     Test Plan
                    ----------------------------  ----------------------------
 Elapsed Time (s):  .000044                       .000003
 CPU Time (s):      .000019                       0
 Buffer Gets:       2                             0
 Optimizer Cost:    8                             2
 Disk Reads:        0                             0
 Direct Writes:     0                             0
 Rows Processed:    0                             0
 Executions:        10                            10


FINDINGS SECTION
---------------------------------------------------------------------------------------------

Findings (2):
-----------------------------
 1. The plan was verified in 0.11000 seconds. It passed the benefit criterion
    because its verified performance was 6.67303 times better than that of the
    baseline plan.
 2. The plan was automatically accepted.

Recommendation:
-----------------------------
 Consider accepting the plan.


EXPLAIN PLANS SECTION
---------------------------------------------------------------------------------------------

Baseline Plan
-----------------------------
 Plan Id          : 42237
 Plan Hash Value  : 2500479144


---------------------------------------------------------------------
| Id  | Operation           | Name | Rows | Bytes | Cost | Time     |
---------------------------------------------------------------------
|   0 | SELECT STATEMENT    |      |    1 |     7 |    8 | 00:00:01 |
| * 1 |   TABLE ACCESS FULL | SKEW |    1 |     7 |    8 | 00:00:01 |
---------------------------------------------------------------------

Predicate Information (identified by operation id):
------------------------------------------
* 1 - filter("SKEW"=3)


Test Plan
-----------------------------
 Plan Id          : 42238
 Plan Hash Value  : 149503972

-------------------------------------------------------------------------------------------
| Id  | Operation                             | Name     | Rows | Bytes | Cost | Time     |
-------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                      |          |    1 |     7 |    2 | 00:00:01 |
|   1 |   TABLE ACCESS BY INDEX ROWID BATCHED | SKEW     |    1 |     7 |    2 | 00:00:01 |
| * 2 |    INDEX RANGE SCAN                   | SKEW_IDX |    1 |       |    1 | 00:00:01 |
-------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
------------------------------------------
* 2 - access("SKEW"=3)

---------------------------------------------------------------------------------------------

PL/SQL procedure successfully completed.











10:52:32 KARLARAO@cdb1> @spm_plans
Enter value for sql_handle: SQL_e543035defc5f593

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

--------------------------------------------------------------------------------
SQL handle: SQL_e543035defc5f593
SQL text: select * from skew where skew=3
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm08e93fe4         Plan id: 149503972
Enabled: YES     Fixed: NO      Accepted: YES     Origin: AUTO-CAPTURE
Plan rows: From dictionary
--------------------------------------------------------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------

Plan hash value: 1949605896

--------------------------------------------------------
| Id  | Operation                           | Name     |
--------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |
|   2 |   INDEX RANGE SCAN                  | SKEW_IDX |
--------------------------------------------------------


PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------
Plan name: SQL_PLAN_fahs3brrwbxcm950a48a8         Plan id: 2500479144
Enabled: YES     Fixed: NO      Accepted: YES     Origin: AUTO-CAPTURE
Plan rows: From dictionary
--------------------------------------------------------------------------------

Plan hash value: 246648590

----------------------------------
| Id  | Operation         | Name |
----------------------------------

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT  |      |
|   1 |  TABLE ACCESS FULL| SKEW |
----------------------------------

36 rows selected.











10:53:09 KARLARAO@cdb1> @spm_baselines
Enter value for sql_text:
Enter value for exact_matching_signature:

PARSING_ CREATED              PLAN_NAME                                SQL_HANDLE                SQL_TEXT                            OPTIMIZER_COST ENA ACC FIX REP ORIGIN
-------- -------------------- ---------------------------------------- ------------------------- ----------------------------------- -------------- --- --- --- --- --------

KARLARAO 03/22/20 19:58:58    SQL_PLAN_fahs3brrwbxcm950a48a8           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES YES NO  YES AUTO-CAP
                                                                                                                                                                    TURE


KARLARAO 03/22/20 20:01:58    SQL_PLAN_fahs3brrwbxcm08e93fe4           SQL_e543035defc5f593      select * from skew where skew=3                  2 YES YES NO  YES AUTO-CAP
                                                                                                                                                                    TURE








10:56:04 KARLARAO@cdb1> set serveroutput off
10:56:14 KARLARAO@cdb1> select * from skew where skew=3;
select * from table(dbms_xplan.display_cursor);
ALL_DISTINCT       SKEW
------------ ----------
           3          3

10:56:16 KARLARAO@cdb1>

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
SQL_ID  a5jq5khm9w64n, child number 0
-------------------------------------
select * from skew where skew=3

Plan hash value: 1949605896

------------------------------------------------------------------------------------------------
| Id  | Operation                           | Name     | Rows  | Bytes | Cost (%CPU)| Time     |
------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                    |          |       |       |     2 (100)|          |
|   1 |  TABLE ACCESS BY INDEX ROWID BATCHED| SKEW     |     1 |     7 |     2   (0)| 00:00:01 |

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|*  2 |   INDEX RANGE SCAN                  | SKEW_IDX |     1 |       |     1   (0)| 00:00:01 |
------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   2 - access("SKEW"=3)

Note
-----
   - SQL plan baseline SQL_PLAN_fahs3brrwbxcm08e93fe4 used for this statement

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------


23 rows selected.






}}}
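
The @spm_evolve.sql output above matches the 12c task-based evolve API; a sketch of the underlying calls (assumed, not the actual script):
{{{
SET SERVEROUTPUT ON
DECLARE
  l_task   VARCHAR2(128);
  l_exec   VARCHAR2(128);
  l_report CLOB;
BEGIN
  l_task := DBMS_SPM.CREATE_EVOLVE_TASK(sql_handle => 'SQL_e543035defc5f593');
  l_exec := DBMS_SPM.EXECUTE_EVOLVE_TASK(task_name => l_task);
  l_report := DBMS_SPM.REPORT_EVOLVE_TASK(task_name      => l_task,
                                          execution_name => l_exec);
  DBMS_OUTPUT.PUT_LINE(DBMS_LOB.SUBSTR(l_report, 32000, 1));
  -- DBMS_SPM.IMPLEMENT_EVOLVE_TASK(task_name => l_task) would then accept
  -- the verified plans (it returns the number of plans accepted)
END;
/
}}}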

<<showtoc>>

! from this guy https://medium.com/@benstr/meteorjs-vs-angularjs-aint-a-thing-3559b74d52cc
<<<
So bro, answer me… what should I learn?
First - Javascript, jQuery & maybe Node.

Second, It depends on your end goals…

… Wanna work for Facebook? Learn React, Flux, PHP, etc.
… Wanna work for Google? Learn Angular, Dart, Polymer, Python, etc.
… Wanna work for a 2 to 4 year old startup? Learn M-E-A-N
… Wanna work for a 5 to 10 year old startup? Learn Angular & Ruby on Rails
… Wanna create a new startup and impress everyone with how fast you add new features? Learn Meteor (& add whatever UI framework you want)

One last note for beginners. When building a web app you are going to deal with a lot of components (servers, databases, frameworks, pre-processors, packages, testing, …). To manage all this we created automated builders like Grunt & Gulp. After all, making web apps is serious and complicated business… or did we just make it complicated so it seems serious??

If you'd rather not bother with all that complicated build stuff then choose Meteor; it does it all auto-magically.
<<<

! angular-meteor 
http://angular-meteor.com/manifest


! bootstrap
http://stackoverflow.com/questions/14546709/what-is-bootstrap
http://getbootstrap.com/getting-started/
a good example is this http://joshcanfindit.com/



! discussion forums
http://www.quora.com/JavaScript-Frameworks/AngularJS-Meteor-Backbone-Express-or-plain-NodeJs-When-to-use-each-one
http://www.quora.com/Should-I-learn-Angular-js-or-Meteor



! ember.js 
!! https://www.emberscreencasts.com/
!! http://www.letscodejavascript.com/

!! vs backbone.js 
http://smus.com/backbone-and-ember/
backbone-ember-back-and-forth-transcript.txt https://gist.github.com/jashkenas/1732351

!! vs rails
https://www.airpair.com/ember.js/posts/top-mistakes-ember-rails
http://aokolish.me/blog/2014/11/16/8-reasons-i-won't-be-choosing-ember.js-for-my-next-app/

!! transactions, CRUD
http://bigbinary.com/videos/learn-ember-js/crud-application-in-ember-js  <-- video!
http://blog.trackets.com/2013/02/02/using-transactions-in-ember-data.html
http://blog.trackets.com/2013/01/27/ember-data-in-depth.html
http://embersherpa.com/articles/crud-example-app-without-ember-data/
http://discuss.emberjs.com/t/beginner-guidance-on-building-crud/4990
http://stackoverflow.com/questions/18691644/crud-operations-using-ember-model

!! sample real time web app 
http://www.codeproject.com/Articles/511031/A-sample-real-time-web-application-using-Ember-js

!! HTMLBars
http://www.lynda.com/Emberjs-tutorials/About-HTMLBars/178116/191855-4.html

!! ember and d3.js
https://corner.squareup.com/2012/04/building-analytics.html


! backbone.js 

http://stackoverflow.com/questions/16284724/what-does-var-app-app-do
<<<
If app is already defined, then it does nothing. If app is not defined, then it's equivalent to var app = {};
<<<
https://www.quora.com/What-are-the-pros-of-using-Handlebars-template-over-Underscore-js
https://engineering.linkedin.com/frontend/client-side-templating-throwdown-mustache-handlebars-dustjs-and-more
http://www.pluralsight.com/courses/choosing-javascript-framework
http://www.pluralsight.com/search/?searchTerm=backbone.js

I didn't really like backbone at all. It was a pain. https://news.ycombinator.com/item?id=4427556

!! backbone.js and d3.js 
Sam Selikoff - Using D3 with Backbone, Angular and Ember https://www.youtube.com/watch?v=ca3pQWc2-Xs <-- good stuff
https://github.com/samselikoff/talks/tree/master/4-apr2014-using-d3-backbone-angular-ember <-- good stuff
Backbone and D3 in a large, complex app https://groups.google.com/forum/#!topic/d3-js/3gmyzPOXNBM
D3 with Backbone / D3 with Angular / D3 with Ember http://stackoverflow.com/questions/17050921/d3-with-backbone-d3-with-angular-d3-with-ember

!! react.js as a view - Integrating React With Backbone
http://www.slideshare.net/RyanRoemer/backbonejs-with-react-views-server-rendering-virtual-dom-and-more <-- good stuff
http://timecounts.github.io/backbone-react-redux/#61
http://www.thomasboyt.com/2013/12/17/using-reactjs-as-a-backbone-view.html
https://blog.engineyard.com/2015/integrating-react-with-backbone
http://joelburget.com/backbone-to-react/
https://blog.mayflower.de/3937-Backbone-React.html
http://clayallsopp.com/posts/from-backbone-to-react/
http://leoasis.github.io/posts/2014/03/22/from_backbone_views_to_react/



! react.js 
http://www.pluralsight.com/search/?searchTerm=react.js

!! react as a view in ember 
http://discuss.emberjs.com/t/can-reactjs-be-used-as-a-view-within-emberjs/3470

!! react and d3.js 
http://nicolashery.com/integrating-d3js-visualizations-in-a-react-app/
https://www.codementor.io/reactjs/tutorial/3-steps-scalable-data-visualization-react-js-d3-js
http://10consulting.com/2014/02/19/d3-plus-reactjs-for-charting/

!! react vs ember 
Choosing Ember over React in 2016 https://blog.instant2fa.com/choosing-ember-over-react-in-2016-41a2e7fd341#.1712iqvw8
https://grantnorwood.com/why-i-chose-ember-over-react/
Check this React vs. Ember presentation by Alex Matchneer, a lot of good points on uni-directional flow. http://bit.ly/2fk0Ybe
http://www.creativebloq.com/web-design/react-goes-head-head-emberjs-31514361
http://www.slideshare.net/mraible/comparing-hot-javascript-frameworks-angularjs-emberjs-and-reactjs-springone-2gx-2015



! RoR
https://www.quora.com/Which-is-superior-between-Node-js-vs-RoR-vs-Go
http://www.hostingadvice.com/blog/nodejs-vs-golang/
https://www.codementor.io/learn-programming/ruby-on-rails-vs-node-js-backend-language-for-beginners
https://hackhands.com/use-ruby-rails-node-js-next-projectstartup/
https://www.quora.com/Which-server-side-programming-language-is-the-best-for-a-starting-programmer-Perl-PHP-Python-Ruby-JavaScript-Node-Scala-Java-Go-ASP-NET-or-ColdFusion
https://www.quora.com/Which-is-the-best-option-for-a-Ruby-on-Rails-developer-AngularJS-or-Ember-js






! references
https://en.wikipedia.org/wiki/Comparison_of_JavaScript_frameworks








http://www.cyberciti.biz/tips/what-is-devshm-and-its-practical-usage.html
http://superuser.com/questions/45342/when-should-i-use-dev-shm-and-when-should-i-use-tmp
http://download.oracle.com/docs/cd/B28359_01/server.111/b32009/appi_vlm.htm

tanel mentioned he used /dev/shm as storage when he was doing a migration on one database because it needed to do fast writes, so he put the redo log on /dev/shm.. this is dangerous because data residing in /dev/shm is not persistent across an OS reboot, so when the server crashes you have to do a restore/recover..
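
a quick sanity check of /dev/shm on a box (standard commands):
{{{
# /dev/shm is a tmpfs mount, sized at half of RAM by default
df -h /dev/shm
mount | grep /dev/shm

# resize it on the fly (lost on reboot unless also set in /etc/fstab)
mount -o remount,size=8g /dev/shm
}}}
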
<<showtoc>>


! install software on node2
{{{
[root@node2 scripts]# ./install_krb5.sh
}}}

! test login 
{{{
[root@node2 scripts]# kinit admin/admin
Password for admin/admin@EXAMPLE.COM: 
[root@node2 scripts]# 
[root@node2 scripts]# klist
Ticket cache: KEYRING:persistent:0:0
Default principal: admin/admin@EXAMPLE.COM

Valid starting       Expires              Service principal
01/08/2019 17:47:41  01/09/2019 17:47:41  krbtgt/EXAMPLE.COM@EXAMPLE.COM
[root@node2 scripts]# 
}}}

! configure kerberos in ambari 
[img(90%,90%)[https://i.imgur.com/GTkXrtL.png]]

[img(90%,90%)[https://i.imgur.com/u4opvr0.png]]

[img(90%,90%)[https://i.imgur.com/VbQmRUQ.png]]

[img(90%,90%)[https://i.imgur.com/oHHdXC1.png]]

[img(90%,90%)[https://i.imgur.com/MlbhMgw.png]]

[img(90%,90%)[https://i.imgur.com/7i7JRTT.png]]

[img(90%,90%)[https://i.imgur.com/o7RhsLh.png]]

[img(90%,90%)[https://i.imgur.com/yx6ORX2.png]]

* restart ambari server
[img(90%,90%)[https://i.imgur.com/dBtX7my.png]]

* manually restart other services 
[img(90%,90%)[https://i.imgur.com/GiWRr6d.png]]


! test kerberos from hdfs 
* it errors because only the admin user is configured / has credentials
{{{

[vagrant@node1 data]$ hadoop fs -ls
19/01/08 18:15:59 WARN ipc.Client: Exception encountered while connecting to the server : javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]
ls: Failed on local exception: java.io.IOException: javax.security.sasl.SaslException: GSS initiate failed [Caused by GSSException: No valid credentials provided (Mechanism level: Failed to find any Kerberos tgt)]; Host Details : local host is: "node1.example.com/192.168.199.2"; destination host is: "node1.example.com":8020; 

}}}

! from KDC host, add principal on other users 
* kerberos can be linked to an existing Active Directory through a TRUST (which needs to be configured), so AD users are automatically recognized
* here we are adding the vagrant user as a principal
{{{

-- summary commands 
sudo su - 
klist
kinit admin/admin
kadmin.local -q "addprinc vagrant"


-- detail 
[root@node2 scripts]# klist
klist: No credentials cache found (filename: /tmp/krb5cc_0)
[root@node2 scripts]# 
[root@node2 scripts]# 
[root@node2 scripts]# kinit admin/admin
Password for admin/admin@EXAMPLE.COM: 
[root@node2 scripts]# 
[root@node2 scripts]# 
[root@node2 scripts]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: admin/admin@EXAMPLE.COM

Valid starting       Expires              Service principal
01/08/2019 18:34:19  01/09/2019 18:34:19  krbtgt/EXAMPLE.COM@EXAMPLE.COM
[root@node2 scripts]# 
[root@node2 scripts]# 
[root@node2 scripts]# kadmin.local -q "addprinc vagrant"
Authenticating as principal admin/admin@EXAMPLE.COM with password.
WARNING: no policy specified for vagrant@EXAMPLE.COM; defaulting to no policy
Enter password for principal "vagrant@EXAMPLE.COM": <USE THE VAGRANT USER PASSWORD>
Re-enter password for principal "vagrant@EXAMPLE.COM": <USE THE VAGRANT USER PASSWORD>
Principal "vagrant@EXAMPLE.COM" created.

}}}
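
* optionally, extract a keytab for the new principal so it can kinit without a password; a sketch using standard kadmin commands (the /tmp path is just an example, and -norandkey keeps the existing password valid)
{{{
kadmin.local -q "xst -norandkey -k /tmp/vagrant.keytab vagrant@EXAMPLE.COM"
kinit -kt /tmp/vagrant.keytab vagrant@EXAMPLE.COM
}}}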


! test new user principal 
{{{
[vagrant@node1 ~]$ kinit
Password for vagrant@EXAMPLE.COM: 
[vagrant@node1 ~]$ 
[vagrant@node1 ~]$ klist
Ticket cache: FILE:/tmp/krb5cc_1000
Default principal: vagrant@EXAMPLE.COM

Valid starting       Expires              Service principal
01/08/2019 18:35:52  01/09/2019 18:35:52  krbtgt/EXAMPLE.COM@EXAMPLE.COM
[vagrant@node1 ~]$ 
[vagrant@node1 ~]$ hadoop fs -ls
Found 3 items
-rw-r--r--   3 vagrant vagrant   16257213 2019-01-08 06:27 salaries.csv
-rw-r--r--   3 vagrant vagrant   16257213 2019-01-08 06:31 salaries2.csv
drwxr-xr-x   - vagrant vagrant          0 2019-01-08 06:58 test

}}}


! list principals 
{{{
kadmin.local -q "list_principals"

[root@node2 scripts]# kadmin.local -q "list_principals"
Authenticating as principal admin/admin@EXAMPLE.COM with password.
HTTP/node1.example.com@EXAMPLE.COM
HTTP/node2.example.com@EXAMPLE.COM
HTTP/node3.example.com@EXAMPLE.COM
K/M@EXAMPLE.COM
activity_analyzer/node1.example.com@EXAMPLE.COM
activity_explorer/node1.example.com@EXAMPLE.COM
admin/admin@EXAMPLE.COM
ambari-qa-hadoop@EXAMPLE.COM
ambari-server-hadoop@EXAMPLE.COM
amshbase/node1.example.com@EXAMPLE.COM
amszk/node1.example.com@EXAMPLE.COM
dn/node1.example.com@EXAMPLE.COM
dn/node2.example.com@EXAMPLE.COM
dn/node3.example.com@EXAMPLE.COM
hdfs-hadoop@EXAMPLE.COM
hive/node1.example.com@EXAMPLE.COM
hive/node2.example.com@EXAMPLE.COM
hive/node3.example.com@EXAMPLE.COM
jhs/node2.example.com@EXAMPLE.COM
kadmin/admin@EXAMPLE.COM
kadmin/changepw@EXAMPLE.COM
kadmin/node2.example.com@EXAMPLE.COM
keyadmin@EXAMPLE.COM
kiprop/node2.example.com@EXAMPLE.COM
krbtgt/EXAMPLE.COM@EXAMPLE.COM
nm/node1.example.com@EXAMPLE.COM
nm/node2.example.com@EXAMPLE.COM
nm/node3.example.com@EXAMPLE.COM
nn/node1.example.com@EXAMPLE.COM
nn/node2.example.com@EXAMPLE.COM
nn@EXAMPLE.COM
ranger@EXAMPLE.COM
rangeradmin/node2.example.com@EXAMPLE.COM
rangerlookup/node2.example.com@EXAMPLE.COM
rangertagsync/node1.example.com@EXAMPLE.COM
rangertagsync/node2.example.com@EXAMPLE.COM
rangerusersync/node2.example.com@EXAMPLE.COM
rm/node2.example.com@EXAMPLE.COM
vagrant@EXAMPLE.COM
yarn/node2.example.com@EXAMPLE.COM
zookeeper/node1.example.com@EXAMPLE.COM
zookeeper/node2.example.com@EXAMPLE.COM
zookeeper/node3.example.com@EXAMPLE.COM
}}}
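
* other handy kadmin.local queries (standard kadmin commands, shown for reference):
{{{
kadmin.local -q "getprinc vagrant"    # show expiry, kvno, policy for one principal
kadmin.local -q "cpw vagrant"         # change a principal's password
kadmin.local -q "delprinc vagrant"    # delete a principal
}}}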



! other references 
https://community.pivotal.io/s/article/Kerberos-Cheat-Sheet




















.








<<showtoc>>

! SPNEGO

!! search for "auth" in hdfs advanced config 
* make sure all settings are configured as follows
[img(90%,90%)[ https://i.imgur.com/wAtMrPd.png ]]


!! test using curl 
{{{


[root@node2 scripts]# curl -u : --negotiate http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS
{"FileStatuses":{"FileStatus":[
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16392,"group":"hadoop","length":0,"modificationTime":1546970946566,"owner":"yarn","pathSuffix":"app-logs","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16418,"group":"hdfs","length":0,"modificationTime":1546739119560,"owner":"hdfs","pathSuffix":"apps","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16389,"group":"hadoop","length":0,"modificationTime":1546738288975,"owner":"yarn","pathSuffix":"ats","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16399,"group":"hdfs","length":0,"modificationTime":1546738301288,"owner":"hdfs","pathSuffix":"hdp","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16395,"group":"hdfs","length":0,"modificationTime":1546738294255,"owner":"mapred","pathSuffix":"mapred","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16397,"group":"hadoop","length":0,"modificationTime":1546738323395,"owner":"mapred","pathSuffix":"mr-history","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":6,"fileId":16386,"group":"hdfs","length":0,"modificationTime":1546971003969,"owner":"hdfs","pathSuffix":"tmp","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":5,"fileId":16387,"group":"hdfs","length":0,"modificationTime":1546928769061,"owner":"hdfs","pathSuffix":"user","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"}
]}}
[root@node2 scripts]# 
[root@node2 scripts]# 
[root@node2 scripts]# kdestroy
[root@node2 scripts]# 
[root@node2 scripts]# curl -u : --negotiate http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/>
<title>Error 401 Authentication required</title>
</head>
<body><h2>HTTP ERROR 401</h2>
<p>Problem accessing /webhdfs/v1/. Reason:
<pre>    Authentication required</pre></p><hr /><i><small>Powered by Jetty://</small></i><br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                

</body>
</html>


[root@node2 scripts]# curl -u : --negotiate http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS -vvvvv
* About to connect() to node1.example.com port 50070 (#0)
*   Trying 192.168.199.2...
* Connected to node1.example.com (192.168.199.2) port 50070 (#0)
> GET /webhdfs/v1/?op=LISTSTATUS HTTP/1.1
> User-Agent: curl/7.29.0
> Host: node1.example.com:50070
> Accept: */*
> 
< HTTP/1.1 401 Authentication required
< Cache-Control: must-revalidate,no-cache,no-store
< Date: Tue, 08 Jan 2019 19:49:42 GMT
< Pragma: no-cache
< Date: Tue, 08 Jan 2019 19:49:42 GMT
< Pragma: no-cache
< Content-Type: text/html; charset=iso-8859-1
< X-FRAME-OPTIONS: SAMEORIGIN
* gss_init_sec_context() failed: : No Kerberos credentials available (default cache: /tmp/krb5cc_0)
< WWW-Authenticate: Negotiate
< Set-Cookie: hadoop.auth=; Path=/; HttpOnly
< Content-Length: 1404
< Server: Jetty(6.1.26.hwx)
< 
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"/>
<title>Error 401 Authentication required</title>
</head>
<body><h2>HTTP ERROR 401</h2>
<p>Problem accessing /webhdfs/v1/. Reason:
<pre>    Authentication required</pre></p><hr /><i><small>Powered by Jetty://</small></i><br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                
<br/>                                                

</body>
</html>
* Connection #0 to host node1.example.com left intact



[root@node2 scripts]# curl -u : --negotiate http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS -vvvvv
* About to connect() to node1.example.com port 50070 (#0)
*   Trying 192.168.199.2...
* Connected to node1.example.com (192.168.199.2) port 50070 (#0)
> GET /webhdfs/v1/?op=LISTSTATUS HTTP/1.1
> User-Agent: curl/7.29.0
> Host: node1.example.com:50070
> Accept: */*
> 
< HTTP/1.1 401 Authentication required
< Cache-Control: must-revalidate,no-cache,no-store
< Date: Tue, 08 Jan 2019 19:50:46 GMT
< Pragma: no-cache
< Date: Tue, 08 Jan 2019 19:50:46 GMT
< Pragma: no-cache
< Content-Type: text/html; charset=iso-8859-1
< X-FRAME-OPTIONS: SAMEORIGIN
< WWW-Authenticate: Negotiate
< Set-Cookie: hadoop.auth=; Path=/; HttpOnly
< Content-Length: 1404
< Server: Jetty(6.1.26.hwx)
< 
* Ignoring the response-body
* Connection #0 to host node1.example.com left intact
* Issue another request to this URL: 'http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS'
* Found bundle for host node1.example.com: 0x1762e90
* Re-using existing connection! (#0) with host node1.example.com
* Connected to node1.example.com (192.168.199.2) port 50070 (#0)
* Server auth using GSS-Negotiate with user ''
> GET /webhdfs/v1/?op=LISTSTATUS HTTP/1.1
> Authorization: Negotiate YIICZQYJKoZIhvcSAQICAQBuggJUMIICUKADAgEFoQMCAQ6iBwMFACAAAACjggFhYYIBXTCCAVmgAwIBBaENGwtFWEFNUExFLkNPTaIkMCKgAwIBA6EbMBkbBEhUVFAbEW5vZGUxLmV4YW1wbGUuY29to4IBGzCCARegAwIBEqEDAgEBooIBCQSCAQXLZTgGMbj4xkzKM2CMLYH5zCAciK7lFaCnUvhul79oo/Id5YP2e8lW96h69TZHjp227eHfO1oKgyX1NJqvzDp6QJ5cOGo6QXKNfmx3dEkKPJgsg09w6FcvDaWflhclfH/pN4OKCBoo23IkcR8uv+FmAwKlhT0eA5a0yV9zeoGstRSAPrBA+t63xdBf8hZB9RtAI6ISLDI329OZkblKnTbwBesh7naY8hJtNNqPiLS2n5dd+KsG+cSnSD1EwOytBsnsVN0gRVg6718N95M70Da7DV64bPhaEfWimIfjOX+zaNOJCbpiIzwe34Oeo8MAimZvhahdIWFM/wUFy19FeTIZBtGE/lykgdUwgdKgAwIBEqKBygSBx5uFXt9DLbTQn8FDDz007/VG0EDw7J4o+erYUSejz6ylv4ueEFXo83xGK0I5Nag4DD3RtHXB44jdLmiRmW+Vx0zAck+M/0MqNg3X5xD4p0RKFicVklJw17FLMprpLHeWg1jcsKpCyHdNt8KQeB4modt2DY8okBCyJSMS3snCPt2mDLM0Erfd/MiHYOW2038mUSIPxv8vuEJYUv9zchJ6XAjMWCGA7UqvS5mU49jAsWyXhfTi4sIFWbNm4ftmS4o7d6eCPIvuqcQ=
> User-Agent: curl/7.29.0
> Host: node1.example.com:50070
> Accept: */*
> 
< HTTP/1.1 200 OK
< Cache-Control: no-cache
< Expires: Tue, 08 Jan 2019 19:50:46 GMT
< Date: Tue, 08 Jan 2019 19:50:46 GMT
< Pragma: no-cache
< Expires: Tue, 08 Jan 2019 19:50:46 GMT
< Date: Tue, 08 Jan 2019 19:50:46 GMT
< Pragma: no-cache
< Content-Type: application/json
< X-FRAME-OPTIONS: SAMEORIGIN
< WWW-Authenticate: Negotiate YGoGCSqGSIb3EgECAgIAb1swWaADAgEFoQMCAQ+iTTBLoAMCARKiRARChwZbpr515XQ6+c68a4ZMAPjEGIHhnQJjRn8yt4jQ9qe3DHOozQIWOkQyj6nexCoqhKPWKbc4YG0cMZ/ZcCOnA4g5
< Set-Cookie: hadoop.auth="u=admin&p=admin/admin@EXAMPLE.COM&t=kerberos&e=1547013046868&s=nx4sCU8jegk52hkosxLZaWgouLk="; Path=/; HttpOnly
< Transfer-Encoding: chunked
< Server: Jetty(6.1.26.hwx)
< 
{"FileStatuses":{"FileStatus":[
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16392,"group":"hadoop","length":0,"modificationTime":1546970946566,"owner":"yarn","pathSuffix":"app-logs","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16418,"group":"hdfs","length":0,"modificationTime":1546739119560,"owner":"hdfs","pathSuffix":"apps","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16389,"group":"hadoop","length":0,"modificationTime":1546738288975,"owner":"yarn","pathSuffix":"ats","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16399,"group":"hdfs","length":0,"modificationTime":1546738301288,"owner":"hdfs","pathSuffix":"hdp","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":1,"fileId":16395,"group":"hdfs","length":0,"modificationTime":1546738294255,"owner":"mapred","pathSuffix":"mapred","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":2,"fileId":16397,"group":"hadoop","length":0,"modificationTime":1546738323395,"owner":"mapred","pathSuffix":"mr-history","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":6,"fileId":16386,"group":"hdfs","length":0,"modificationTime":1546971003969,"owner":"hdfs","pathSuffix":"tmp","permission":"777","replication":0,"storagePolicy":0,"type":"DIRECTORY"},
{"accessTime":0,"blockSize":0,"childrenNum":5,"fileId":16387,"group":"hdfs","length":0,"modificationTime":1546928769061,"owner":"hdfs","pathSuffix":"user","permission":"755","replication":0,"storagePolicy":0,"type":"DIRECTORY"}
]}}
* Closing connection 0

}}}
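
* to summarize the happy path above: the curl only succeeds when a TGT is already in the credential cache, so the minimal sequence is
{{{
kinit admin/admin
curl -u : --negotiate "http://node1.example.com:50070/webhdfs/v1/?op=LISTSTATUS"
}}}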



! Knox 
* another way of securing authentication is to put Knox in front as a gateway, see [[apache sentry vs ranger vs knox]]; a sketch of a client call through the gateway follows the screenshots
[img(90%,90%)[https://i.imgur.com/5TdfGUh.png]]
[img(90%,90%)[https://i.imgur.com/BPUJVlB.jpg]]
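
* a rough sketch of a WebHDFS call through a default Knox topology (the gateway host/port and the guest user are illustrative assumptions, not from this cluster):
{{{
curl -k -u guest:guest-password \
  "https://knox-host:8443/gateway/default/webhdfs/v1/?op=LISTSTATUS"
}}}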










<<showtoc>>

! architecture 
[img(90%,90%)[https://i.imgur.com/W50LYmu.png]]
[img(90%,90%)[https://i.imgur.com/vmGmoYO.png]]


! installation 

* On the node1 ambari server, configure the MySQL JDBC connector
{{{
yum -y install mysql-connector-java
ambari-server setup --jdbc-db=mysql --jdbc-driver=/usr/share/java/mysql-connector-java.jar
}}}

* On ambari, add service 

[img(90%,90%)[https://i.imgur.com/VAyNRPj.png]]

* On node2, configure the mysql root account with /usr/bin/mysql_secure_installation
* Keep in mind the "Disallow root login remotely?" prompt (the answer should be n)
{{{
[root@node2 scripts]# /usr/bin/mysql_secure_installation 

NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
      SERVERS IN PRODUCTION USE!  PLEASE READ EACH STEP CAREFULLY!

In order to log into MariaDB to secure it, we'll need the current
password for the root user.  If you've just installed MariaDB, and
you haven't set the root password yet, the password will be blank,
so you should just press enter here.

Enter current password for root (enter for none): 
OK, successfully used password, moving on...

Setting the root password ensures that nobody can log into the MariaDB
root user without the proper authorisation.

Set root password? [Y/n] y
New password: 
Re-enter new password: 
Password updated successfully!
Reloading privilege tables..
 ... Success!


By default, a MariaDB installation has an anonymous user, allowing anyone
to log into MariaDB without having to have a user account created for
them.  This is intended only for testing, and to make the installation
go a bit smoother.  You should remove them before moving into a
production environment.

Remove anonymous users? [Y/n] n
 ... skipping.

Normally, root should only be allowed to connect from 'localhost'.  This
ensures that someone cannot guess at the root password from the network.

Disallow root login remotely? [Y/n] n
 ... skipping.

By default, MariaDB comes with a database named 'test' that anyone can
access.  This is also intended only for testing, and should be removed
before moving into a production environment.

Remove test database and access to it? [Y/n] n
 ... skipping.

Reloading the privilege tables will ensure that all changes made so far
will take effect immediately.

Reload privilege tables now? [Y/n] y
 ... Success!

Cleaning up...

All done!  If you've completed all of the above steps, your MariaDB
installation should now be secure.

Thanks for using MariaDB!
}}}


* Configure ranger user and create ranger database
{{{
mysql -u root -proot
CREATE USER 'ranger'@'localhost' IDENTIFIED BY 'ranger';
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'localhost';
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'node2.example.com';
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'%';
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'localhost' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'node2.example.com' IDENTIFIED BY 'ranger' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'localhost' IDENTIFIED BY 'ranger' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'%' WITH GRANT OPTION;
GRANT ALL PRIVILEGES ON *.* TO 'ranger'@'%' IDENTIFIED BY 'ranger' WITH GRANT OPTION;
FLUSH PRIVILEGES;

system mysql -u ranger -pranger
SELECT CURRENT_USER();
create database ranger;
}}}


* Check users and passwords
{{{
[root@node2 admin]# mysql -u root -proot
Welcome to the MariaDB monitor.  Commands end with ; or \g.
Your MariaDB connection id is 976
Server version: 5.5.60-MariaDB MariaDB Server

Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

MariaDB [(none)]> SELECT User, Host, Password FROM mysql.user;
+---------------+-------------------+-------------------------------------------+
| User          | Host              | Password                                  |
+---------------+-------------------+-------------------------------------------+
| root          | localhost         | *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B |
| root          | node2.example.com | *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B |
| root          | 127.0.0.1         | *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B |
| root          | ::1               | *81F5E21E35407D884A6CD4A731AEBFB6AF209E1B |
| rangerinstall | %                 | *BA6F33B6015522D04D1B2CD0774983FEE64526DD |
| hive          | %                 | *4DF1D66463C18D44E3B001A8FB1BBFBEA13E27FC |
| rangeradmin   | %                 | *93E5B68E67576EF3867192792A3FA17A35376774 |
| rangeradmin   | localhost         | *93E5B68E67576EF3867192792A3FA17A35376774 |
| rangeradmin   | node2.example.com | *93E5B68E67576EF3867192792A3FA17A35376774 |
| ranger        | %                 | *84BB87F6BF7F61703B24CE1C9AA9C0E3F2286900 |
| ranger        | localhost         | *84BB87F6BF7F61703B24CE1C9AA9C0E3F2286900 |
| ranger        | node2.example.com | *84BB87F6BF7F61703B24CE1C9AA9C0E3F2286900 |
+---------------+-------------------+-------------------------------------------+
12 rows in set (0.06 sec)
}}}


* I don't think this is necessary, but I also added ranger as a principal
{{{
[vagrant@node2 ~]$ sudo su -
Last login: Tue Jan  8 17:45:59 UTC 2019 on pts/0
[root@node2 ~]# 
[root@node2 ~]# 
[root@node2 ~]# kinit admin/admin
Password for admin/admin@EXAMPLE.COM: 
[root@node2 ~]# 
[root@node2 ~]# 
[root@node2 ~]# 
[root@node2 ~]# kadmin.local -q "addprinc ranger"
Authenticating as principal admin/admin@EXAMPLE.COM with password.
WARNING: no policy specified for ranger@EXAMPLE.COM; defaulting to no policy
Enter password for principal "ranger@EXAMPLE.COM": 
Re-enter password for principal "ranger@EXAMPLE.COM": 
Principal "ranger@EXAMPLE.COM" created.
}}}


[img(50%,50%)[https://i.imgur.com/KOWWdq0.png]]
* All Ranger components, KDC host, and mysql are on node2. The ambari-server is on node1
[img(90%,90%)[https://i.imgur.com/K4q5JGG.png]]
[img(90%,90%)[https://i.imgur.com/ZS98uJV.png]]
[img(90%,90%)[https://i.imgur.com/1vaHE8X.png]]
[img(90%,90%)[https://i.imgur.com/Ft6otqX.png]]
* Plugins will be installed after Ranger installation
[img(90%,90%)[https://i.imgur.com/qYZBkZX.png]]
* Uncheck the previously configured properties, click OK
[img(90%,90%)[https://i.imgur.com/9WX7NtI.png]]
[img(90%,90%)[https://i.imgur.com/94XHb8O.png]]
[img(90%,90%)[https://i.imgur.com/gvlLrPg.png]]
[img(90%,90%)[https://i.imgur.com/TEL3NTo.png]]
[img(90%,90%)[https://i.imgur.com/anjL7Pb.png]]
[img(90%,90%)[https://i.imgur.com/eW8REAC.png]]
* Go to http://192.168.199.3:6080 , then login as admin/admin
[img(50%,50%)[https://i.imgur.com/5dp5nUb.png]]
[img(90%,90%)[https://i.imgur.com/ycJQYPL.png]]



! install plugins 
* Go to Ranger - Configs - Ranger Plugin, select the HDFS and Hive plugins and click Save
[img(90%,90%)[https://i.imgur.com/8n22vUY.png]]
* Click OK
[img(90%,90%)[https://i.imgur.com/DoR5lOF.png]]
[img(90%,90%)[https://i.imgur.com/spaodiJ.png]]
* Stop and Start all services
[img(90%,90%)[https://i.imgur.com/ccTY5GM.png]]
[img(40%,40%)[https://i.imgur.com/cY2AKHn.png]]
[img(90%,90%)[https://i.imgur.com/WINcwWk.png]]




! errors 
!! ranger admin process is failing with connection failed 
{{{

        Connection failed to http://node2.example.com:6080/login.jsp 
        (Execution of 'curl --location-trusted -k --negotiate -u : -b /var/lib/ambari-agent/tmp/cookies/70a1480a-b71c-4152-815d-8a171bd0b85e 
        -c /var/lib/ambari-agent/tmp/cookies/70a1480a-b71c-4152-815d-8a171bd0b85e -w '%{http_code}' http://node2.example.com:6080/login.jsp 
        --connect-timeout 5 --max-time 7 -o /dev/null 1>/tmp/tmp5YAC3n 2>/tmp/tmpSeQnlb' returned 28.   
        % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:01 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:02 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:03 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:04 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:05 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:06 --:--:--     0
  0     0    0     0    0     0      0      0 --:--:--  0:00:07 --:--:--     0
curl: (28) Operation timed out after 7022 milliseconds with 0 out of -1 bytes received
000)
      
}}}
!!! troubleshooting and fix
* check the directory /var/log/ranger/admin
* read the catalina.out file 
{{{
[root@node2 admin]# cat catalina.out 
OpenJDK 64-Bit Server VM warning: ignoring option MaxPermSize=256m; support was removed in 8.0
OpenJDK 64-Bit Server VM warning: INFO: os::commit_memory(0x00000000eab00000, 357564416, 0) failed; error='Cannot allocate memory' (errno=12)
#
# There is insufficient memory for the Java Runtime Environment to continue.
# Native memory allocation (mmap) failed to map 357564416 bytes for committing reserved memory.
# An error report file with more information is saved as:
# /usr/hdp/2.6.5.1050-37/ranger-admin/ews/hs_err_pid1286.log
}}}
* add a 4GB swap file
{{{
dd if=/dev/zero of=/opt/swapfile bs=1024k count=4096
mkswap /opt/swapfile
chmod 0600 /opt/swapfile

# add to /etc/fstab:
/opt/swapfile               swap                    swap    defaults        0 0

swapon -a

[root@node2 scripts]# free -h
              total        used        free      shared  buff/cache   available
Mem:           3.7G        3.3G        222M        9.9M        215M        190M
Swap:          5.0G        2.0G        3.0G
}}}
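* verify the swap file is active and will come back after reboot
{{{
swapon -s                    # active swap devices/files
grep swapfile /etc/fstab     # confirm the fstab entry
}}}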





! other references 
Hadoop Certification - HDPCA - Install and Configure Ranger https://www.youtube.com/watch?v=2zeVvnw_bZs&t=1s 

















.


<<showtoc>>


[img(40%,40%)[ https://i.imgur.com/zuT36IY.png ]]


! background info - Ranger KMS (Key Management System) 
[img(50%,50%)[https://i.imgur.com/khqEFuj.png]]
[img(50%,50%)[https://i.imgur.com/EQJ97Vc.png]]
[img(50%,50%)[https://i.imgur.com/g5fUv8w.png]]
[img(50%,50%)[https://i.imgur.com/xfCR6dm.png]]

! install and configure



01
[img(90%,90%)[https://i.imgur.com/gKjnTZf.png]]
02
[img(90%,90%)[https://i.imgur.com/vGsbzJH.png]]
03
[img(90%,90%)[https://i.imgur.com/tvts1Up.png]]
04
[img(90%,90%)[https://i.imgur.com/HLf1SZk.png]]
05
* On the Advanced config -> "custom kms-site", add the keyadmin proxy user settings (last three lines) for kerberos authentication; the three properties are sketched after the screenshot
[img(90%,90%)[https://i.imgur.com/tfCKduW.png]]
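* for reference, the three lines follow the standard hadoop.kms.proxyuser pattern; a sketch with the permissive values typical of sandbox setups:
{{{
hadoop.kms.proxyuser.keyadmin.groups=*
hadoop.kms.proxyuser.keyadmin.hosts=*
hadoop.kms.proxyuser.keyadmin.users=*
}}}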
06
[img(90%,90%)[https://i.imgur.com/qa1a6JQ.png]]
07
[img(90%,90%)[https://i.imgur.com/nC83kAh.png]]
08
[img(90%,90%)[https://i.imgur.com/77MW6NV.png]]
09
[img(90%,90%)[https://i.imgur.com/j15oGwA.png]]
10
[img(90%,90%)[https://i.imgur.com/dZUEb8p.png]]
11
[img(90%,90%)[https://i.imgur.com/kgo36fo.png]]
12
[img(90%,90%)[https://i.imgur.com/IGfqToA.png]]
13
[img(90%,90%)[https://i.imgur.com/oFBvaKY.png]]
14
* Edit the hadoop_kms config, add EXAMPLE.COM
[img(90%,90%)[https://i.imgur.com/oKyKOw9.png]]
15
[img(90%,90%)[https://i.imgur.com/iUXg2v6.png]]
16
[img(90%,90%)[https://i.imgur.com/FoJSoOT.png]]
17
* Go to key manager, add a new key mykey01 to be used to create an encryption zone 
[img(90%,90%)[https://i.imgur.com/Z5JbTjh.png]]
18
[img(90%,90%)[https://i.imgur.com/DiGvYeD.png]]
19
[img(90%,90%)[https://i.imgur.com/8Kl7KVf.png]]
20
[img(90%,90%)[https://i.imgur.com/jISoHhM.png]]
21
* Go back to Access Manager, create a new policy that uses the created key mykey01, and grant users access to it
[img(90%,90%)[https://i.imgur.com/JBnnxVs.png]]
22
[img(90%,90%)[https://i.imgur.com/0iBlArv.png]]
23
[img(90%,90%)[https://i.imgur.com/Vq5bAWV.png]]
24
[img(90%,90%)[https://i.imgur.com/6znPj2p.png]]


! create hdfs encryption zone (/encrypted folder only accessible by user vagrant)


!! Keytabs are stored in /etc/security/keytabs/ 
* these are binary files that can be used for kerberos authentication
{{{
 ls /etc/security/keytabs/
dn.service.keytab              jhs.service.keytab             rangeradmin.service.keytab     rangertagsync.service.keytab   smokeuser.headless.keytab      zk.service.keytab
hdfs.headless.keytab           nm.service.keytab              rangerkms.service.keytab       rangerusersync.service.keytab  spnego.service.keytab          
hive.service.keytab            nn.service.keytab              rangerlookup.service.keytab    rm.service.keytab              yarn.service.keytab            

less /etc/security/keytabs/hdfs.headless.keytab 
"/etc/security/keytabs/hdfs.headless.keytab" may be a binary file.  See it anyway? 
}}}

!! To list the principals in the keytab 
{{{
[root@node2 ~]# klist -kt /etc/security/keytabs/hdfs.headless.keytab 
Keytab name: FILE:/etc/security/keytabs/hdfs.headless.keytab
KVNO Timestamp           Principal
---- ------------------- ------------------------------------------------------
   1 01/08/2019 18:00:32 hdfs-hadoop@EXAMPLE.COM
   1 01/08/2019 18:00:32 hdfs-hadoop@EXAMPLE.COM
   1 01/08/2019 18:00:32 hdfs-hadoop@EXAMPLE.COM
   1 01/08/2019 18:00:32 hdfs-hadoop@EXAMPLE.COM
   1 01/08/2019 18:00:32 hdfs-hadoop@EXAMPLE.COM
}}}

!! Switching principal, from admin/admin to hdfs-hadoop
{{{
[root@node2 ~]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: admin/admin@EXAMPLE.COM

Valid starting       Expires              Service principal
01/09/2019 01:53:15  01/10/2019 01:53:15  krbtgt/EXAMPLE.COM@EXAMPLE.COM
[root@node2 ~]# 

[root@node2 ~]# kdestroy
[root@node2 ~]# klist
klist: No credentials cache found (filename: /tmp/krb5cc_0)

[root@node2 ~]# kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs-hadoop
[root@node2 ~]# 
[root@node2 ~]# klist
Ticket cache: FILE:/tmp/krb5cc_0
Default principal: hdfs-hadoop@EXAMPLE.COM

Valid starting       Expires              Service principal
01/10/2019 00:53:19  01/11/2019 00:53:19  krbtgt/EXAMPLE.COM@EXAMPLE.COM
}}}


!! listing the encryption keys 
{{{
# "kinit -kt" is similar to using kinit but you'll NOT have to input a password because a keytab file is used
# user hdfs-hadoop errors with not allowed to do 'GET_KEYS'

[root@node2 ~]# kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs-hadoop
[root@node2 ~]# hadoop key list -metadata
Cannot list keys for KeyProvider: KMSClientProvider[http://node2.example.com:9292/kms/v1/]: org.apache.hadoop.security.authorize.AuthorizationException: User:hdfs not allowed to do 'GET_KEYS'


# keyadmin is the most powerful user: it has access to all keys and can view encrypted data, so protect this user
# even kinit admin/admin will not have access to the keys

[root@node2 ~]# kinit keyadmin
Password for keyadmin@EXAMPLE.COM: 

[root@node2 ~]# hadoop key list -metadata
Listing keys for KeyProvider: KMSClientProvider[http://node2.example.com:9292/kms/v1/]
mykey01 : cipher: AES/CTR/NoPadding, length: 128, description: , created: Thu Jan 10 00:43:26 UTC 2019, version: 1, attributes: [key.acl.name=mykey01] 
}}}
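
* keys can also be managed from the CLI instead of the Ranger UI; standard hadoop key subcommands, assuming the keyadmin ticket from above (mykey02 is a hypothetical name):
{{{
hadoop key create mykey02 -size 128   # create a new 128-bit key
hadoop key roll mykey01               # roll an existing key to a new version
}}}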



!! create the new directory "encrypted" using hdfs-hadoop principal
{{{
[root@node2 ~]# kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs-hadoop

[root@node2 ~]# hadoop fs -ls
Found 1 items
drwxr-xr-x   - hdfs hdfs          0 2019-01-09 17:14 .hiveJars

[root@node2 ~]# hadoop fs -ls /
Found 9 items
drwxrwxrwx   - yarn   hadoop          0 2019-01-09 17:48 /app-logs
drwxr-xr-x   - hdfs   hdfs            0 2019-01-06 01:45 /apps
drwxr-xr-x   - yarn   hadoop          0 2019-01-06 01:31 /ats
drwxr-xr-x   - hdfs   hdfs            0 2019-01-06 01:31 /hdp
drwxr-xr-x   - mapred hdfs            0 2019-01-06 01:31 /mapred
drwxrwxrwx   - mapred hadoop          0 2019-01-06 01:32 /mr-history
drwxr-xr-x   - hdfs   hdfs            0 2019-01-09 08:49 /ranger
drwxrwxrwx   - hdfs   hdfs            0 2019-01-08 18:10 /tmp
drwxr-xr-x   - hdfs   hdfs            0 2019-01-09 17:14 /user

[root@node2 ~]# hadoop fs -mkdir /encrypted

[root@node2 ~]# hadoop fs -ls /
Found 10 items
drwxrwxrwx   - yarn   hadoop          0 2019-01-09 17:48 /app-logs
drwxr-xr-x   - hdfs   hdfs            0 2019-01-06 01:45 /apps
drwxr-xr-x   - yarn   hadoop          0 2019-01-06 01:31 /ats
drwxr-xr-x   - hdfs   hdfs            0 2019-01-10 00:59 /encrypted
drwxr-xr-x   - hdfs   hdfs            0 2019-01-06 01:31 /hdp
drwxr-xr-x   - mapred hdfs            0 2019-01-06 01:31 /mapred
drwxrwxrwx   - mapred hadoop          0 2019-01-06 01:32 /mr-history
drwxr-xr-x   - hdfs   hdfs            0 2019-01-09 08:49 /ranger
drwxrwxrwx   - hdfs   hdfs            0 2019-01-08 18:10 /tmp
drwxr-xr-x   - hdfs   hdfs            0 2019-01-09 17:14 /user

[root@node2 ~]# hadoop fs -chown vagrant:vagrant /encrypted

[root@node2 ~]# hadoop fs -ls /
Found 10 items
drwxrwxrwx   - yarn    hadoop           0 2019-01-09 17:48 /app-logs
drwxr-xr-x   - hdfs    hdfs             0 2019-01-06 01:45 /apps
drwxr-xr-x   - yarn    hadoop           0 2019-01-06 01:31 /ats
drwxr-xr-x   - vagrant vagrant          0 2019-01-10 00:59 /encrypted
drwxr-xr-x   - hdfs    hdfs             0 2019-01-06 01:31 /hdp
drwxr-xr-x   - mapred  hdfs             0 2019-01-06 01:31 /mapred
drwxrwxrwx   - mapred  hadoop           0 2019-01-06 01:32 /mr-history
drwxr-xr-x   - hdfs    hdfs             0 2019-01-09 08:49 /ranger
drwxrwxrwx   - hdfs    hdfs             0 2019-01-08 18:10 /tmp
drwxr-xr-x   - hdfs    hdfs             0 2019-01-09 17:14 /user
}}}


!! create the encryption zone on the "encrypted" folder using mykey01
{{{
[root@node2 ~]# hdfs crypto -createZone -keyName mykey01 -path /encrypted 
Added encryption zone /encrypted

[root@node2 ~]# hadoop fs -ls /
Found 10 items
drwxrwxrwx   - yarn    hadoop           0 2019-01-09 17:48 /app-logs
drwxr-xr-x   - hdfs    hdfs             0 2019-01-06 01:45 /apps
drwxr-xr-x   - yarn    hadoop           0 2019-01-06 01:31 /ats
drwxr-xr-x   - vagrant vagrant          0 2019-01-10 01:01 /encrypted
drwxr-xr-x   - hdfs    hdfs             0 2019-01-06 01:31 /hdp
drwxr-xr-x   - mapred  hdfs             0 2019-01-06 01:31 /mapred
drwxrwxrwx   - mapred  hadoop           0 2019-01-06 01:32 /mr-history
drwxr-xr-x   - hdfs    hdfs             0 2019-01-09 08:49 /ranger
drwxrwxrwx   - hdfs    hdfs             0 2019-01-08 18:10 /tmp
drwxr-xr-x   - hdfs    hdfs             0 2019-01-09 17:14 /user
}}}
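
* to verify the zone, list all encryption zones and their keys (requires the hdfs superuser):
{{{
hdfs crypto -listZones
}}}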


!! put files in the encryption zone and read them back
{{{
[vagrant@node2 ~]$ kinit
Password for vagrant@EXAMPLE.COM: 

[vagrant@node2 ~]$ hadoop fs -ls /encrypted
Found 1 items
drwxrwxrwt   - hdfs vagrant          0 2019-01-10 01:01 /encrypted/.Trash

[vagrant@node2 ~]$ hadoop fs -put /vagrant/data/constitution.txt /encrypted/constitution.txt

[vagrant@node2 ~]$ hadoop fs -cat /encrypted/constitution.txt | head
We the People of the United States, in Order to form a more perfect Union,
establish Justice, insure domestic Tranquility, provide for the common
defence, promote the general Welfare, and secure the Blessings of Liberty to
ourselves and our Posterity, do ordain and establish this Constitution for the
United States of America.
}}}


!! get the block locations of the encrypted file in the encryption zone
{{{
[vagrant@node2 ~]$ hadoop fsck /encrypted/constitution.txt -files -blocks -locations
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

Connecting to namenode via http://node1.example.com:50070/fsck?ugi=vagrant&files=1&blocks=1&locations=1&path=%2Fencrypted%2Fconstitution.txt
FSCK started by vagrant (auth:KERBEROS_SSL) from /192.168.199.3 for path /encrypted/constitution.txt at Thu Jan 10 01:12:38 UTC 2019
/encrypted/constitution.txt 44841 bytes, 1 block(s):  OK
0. BP-534825236-192.168.199.2-1546738263299:blk_1073741963_1146 len=44841 repl=3 [DatanodeInfoWithStorage[192.168.199.3:1019,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK], DatanodeInfoWithStorage[192.168.199.4:1019,DS-a66628de-4daa-433f-9aa2-d3a8c400d5c5,DISK], DatanodeInfoWithStorage[192.168.199.2:1019,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK]]

Status: HEALTHY
 Total size:	44841 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	1 (avg. block size 44841 B)
 Minimally replicated blocks:	1 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	0 (0.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	3.0
 Corrupt blocks:		0
 Missing replicas:		0 (0.0 %)
 Number of data-nodes:		3
 Number of racks:		1
FSCK ended at Thu Jan 10 01:12:38 UTC 2019 in 12 milliseconds


The filesystem under path '/encrypted/constitution.txt' is HEALTHY
}}}


!! check if the file inside the encryption zone is really encrypted
{{{
[vagrant@node2 ~]$ sudo su -
Last login: Thu Jan 10 01:03:59 UTC 2019 on pts/2
[root@node2 ~]# 
[root@node2 ~]# find /hadoop/hdfs/data/ -iname "blk_1073741963"
/hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741963
[root@node2 ~]# 
[root@node2 ~]# 
[root@node2 ~]# head /hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741963
t?ʌF??7h?0?Ɇ??Y???+5e????{??j?,=?(?>?>4e)??l?0?cfC۟??V???<5s?T??Y?Z?.?n9??,
}}}



!! copy the file from the encryption zone to an outside folder
{{{
[vagrant@node2 ~]$ hadoop fs -cp /encrypted/constitution.txt /tmp/constitution_copied.txt


# now log in just as the regular hdfs user, and you can read the file even without keys, meaning the file is no longer encrypted once outside the encryption zone

[hdfs@node2 ~]$ hadoop fs -cat /tmp/constitution_copied.txt | head
We the People of the United States, in Order to form a more perfect Union,
establish Justice, insure domestic Tranquility, provide for the common
defence, promote the general Welfare, and secure the Blessings of Liberty to
ourselves and our Posterity, do ordain and establish this Constitution for the
United States of America.


[hdfs@node2 ~]$ hadoop fsck /tmp/constitution_copied.txt -files -blocks -locations
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it.

Connecting to namenode via http://node1.example.com:50070/fsck?ugi=hdfs&files=1&blocks=1&locations=1&path=%2Ftmp%2Fconstitution_copied.txt
FSCK started by hdfs (auth:KERBEROS_SSL) from /192.168.199.3 for path /tmp/constitution_copied.txt at Thu Jan 10 01:18:08 UTC 2019
/tmp/constitution_copied.txt 44841 bytes, 1 block(s):  OK
0. BP-534825236-192.168.199.2-1546738263299:blk_1073741964_1147 len=44841 repl=3 [DatanodeInfoWithStorage[192.168.199.3:1019,DS-3390c406-9c65-467c-88b7-d2bdc6b7330b,DISK], DatanodeInfoWithStorage[192.168.199.2:1019,DS-f7935053-711f-4558-9c30-57a0fe071bde,DISK], DatanodeInfoWithStorage[192.168.199.4:1019,DS-a66628de-4daa-433f-9aa2-d3a8c400d5c5,DISK]]

Status: HEALTHY
 Total size:	44841 B
 Total dirs:	0
 Total files:	1
 Total symlinks:		0
 Total blocks (validated):	1 (avg. block size 44841 B)
 Minimally replicated blocks:	1 (100.0 %)
 Over-replicated blocks:	0 (0.0 %)
 Under-replicated blocks:	0 (0.0 %)
 Mis-replicated blocks:		0 (0.0 %)
 Default replication factor:	3
 Average block replication:	3.0
 Corrupt blocks:		0
 Missing replicas:		0 (0.0 %)
 Number of data-nodes:		3
 Number of racks:		1
FSCK ended at Thu Jan 10 01:18:08 UTC 2019 in 1 milliseconds


The filesystem under path '/tmp/constitution_copied.txt' is HEALTHY

[vagrant@node2 ~]$ sudo su -

[root@node2 ~]# find /hadoop/hdfs/data/ -iname "blk_1073741964"
/hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741964

[root@node2 ~]# head /hadoop/hdfs/data/current/BP-534825236-192.168.199.2-1546738263299/current/finalized/subdir0/subdir0/blk_1073741964
We the People of the United States, in Order to form a more perfect Union,
establish Justice, insure domestic Tranquility, provide for the common
defence, promote the general Welfare, and secure the Blessings of Liberty to
ourselves and our Posterity, do ordain and establish this Constitution for the
United States of America.
}}}


!! hdfs user can create subdirectories under the encryption zone but can't create files
{{{
[hdfs@node2 ~]$ hadoop fs -mkdir /encrypted/subdir

[hdfs@node2 ~]$ hadoop fs -put /vagrant/data/constitution.txt /encrypted/subdir/constitution2.txt
put: User:hdfs not allowed to do 'DECRYPT_EEK' on 'mykey01'
19/01/10 01:10:56 ERROR hdfs.DFSClient: Failed to close inode 24384
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /encrypted/subdir/constitution2.txt._COPYING_ (inode 24384): File does not exist. Holder DFSClient_NONMAPREDUCE_1910722416_1 does not have any open files.
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3697)
	at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3785)
}}}


!! keyadmin can read any encrypted file in any encryption zone
{{{
[hdfs@node2 ~]$ kinit keyadmin@EXAMPLE.COM
Password for keyadmin@EXAMPLE.COM: 

[hdfs@node2 ~]$ hdfs fs -cat /encrypted/constitution.txt | head
Error: Could not find or load main class fs
[hdfs@node2 ~]$ hadoop fs -cat /encrypted/constitution.txt | head
We the People of the United States, in Order to form a more perfect Union,
establish Justice, insure domestic Tranquility, provide for the common
defence, promote the general Welfare, and secure the Blessings of Liberty to
ourselves and our Posterity, do ordain and establish this Constitution for the
United States of America.


# admin/admin will not be able to read any encrypted file 
[hdfs@node2 ~]$ kinit admin/admin
Password for admin/admin@EXAMPLE.COM: 

[hdfs@node2 ~]$ klist
Ticket cache: FILE:/tmp/krb5cc_1006
Default principal: admin/admin@EXAMPLE.COM

Valid starting       Expires              Service principal
01/10/2019 01:30:08  01/11/2019 01:30:08  krbtgt/EXAMPLE.COM@EXAMPLE.COM

[hdfs@node2 ~]$ hadoop fs -cat /encrypted/constitution.txt | head
cat: User:admin not allowed to do 'DECRYPT_EEK' on 'mykey01'
}}}













! troubleshooting 

!! kms install properties file 
<<<
/usr/hdp/current/ranger-kms/install.properties
<<<

!! ranger kms install error "unable to connect to DB"
!!! error message 
{{{

stderr: 
Traceback (most recent call last):
  File "/var/lib/ambari-agent/cache/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py", line 121, in <module>
    KmsServer().execute()
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 375, in execute
    method(env)
  File "/var/lib/ambari-agent/cache/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py", line 48, in install
    self.configure(env)
  File "/usr/lib/ambari-agent/lib/resource_management/libraries/script/script.py", line 120, in locking_configure
    original_configure(obj, *args, **kw)
  File "/var/lib/ambari-agent/cache/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py", line 90, in configure
    kms()
  File "/var/lib/ambari-agent/cache/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms.py", line 183, in kms
    Execute(db_connection_check_command, path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10, environment=env_dict)
  File "/usr/lib/ambari-agent/lib/resource_management/core/base.py", line 166, in __init__
    self.env.run()
  File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 160, in run
    self.run_action(resource, action)
  File "/usr/lib/ambari-agent/lib/resource_management/core/environment.py", line 124, in run_action
    provider_action()
  File "/usr/lib/ambari-agent/lib/resource_management/core/providers/system.py", line 262, in action_run
    tries=self.resource.tries, try_sleep=self.resource.try_sleep)
  File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 72, in inner
    result = function(command, **kwargs)
  File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 102, in checked_call
    tries=tries, try_sleep=try_sleep, timeout_kill_strategy=timeout_kill_strategy)
  File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 150, in _call_wrapper
    result = _call(command, **kwargs_copy)
  File "/usr/lib/ambari-agent/lib/resource_management/core/shell.py", line 303, in _call
    raise ExecutionFailed(err_msg, code, out, err)
resource_management.core.exceptions.ExecutionFailed: Execution of '/usr/lib/jvm/jre//bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/hdp/current/ranger-kms/ews/webapp/lib/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification 'jdbc:mysql://node2:3306/rangerkms' rangerkms [PROTECTED] com.mysql.jdbc.Driver' returned 1. ERROR: Unable to connect to the DB. Please check DB connection properties.
com.mysql.jdbc.exceptions.jdbc4.CommunicationsException: Communications link failure
}}}
!!! fix
* On the Advanced config -> "custom kms-site" add the keyadmin proxy user settings (last three lines) for kerberos authentication
[img(90%,90%)[ https://i.imgur.com/tUpTVUL.png ]]





















.




http://learnxinyminutes.com/docs/javascript/
https://developer.mozilla.org/en-US/docs/Web/JavaScript/A_re-introduction_to_JavaScript

https://en.wikipedia.org/wiki/Oracle_Exadata#Hardware_Configurations
Vagrant + Docker 
http://en.wikipedia.org/wiki/Vagrant_%28software%29
http://www.slideshare.net/3dgiordano/vagrant-docker
http://www.quora.com/What-is-the-difference-between-Docker-and-Vagrant-When-should-you-use-each-one
http://www.scriptrock.com/articles/docker-vs-vagrant


https://www.vagrantup.com/downloads.html
{{{
vagrant up node1 node2 node3
vagrant suspend 
vagrant destroy 

vagrant global-status
}}}
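
a few more day-to-day subcommands worth remembering:
{{{
vagrant status            # state of machines in the current Vagrantfile
vagrant ssh node1         # shell into a guest
vagrant halt              # graceful shutdown instead of suspend
vagrant reload node1      # restart and re-apply the Vagrantfile
}}}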

https://stackoverflow.com/questions/10953070/how-to-debug-vagrant-cannot-forward-the-specified-ports-on-this-vm-message
password for geerlingguy/centos7  https://github.com/geerlingguy/drupal-vm/issues/1203

https://docs.oracle.com/database/121/ADMIN/cdb_create.htm#ADMIN13514

<<<
A CDB contains the following files:

One control file

One active online redo log for a single-instance CDB, or one active online redo log for each instance of an Oracle RAC CDB

One set of temp files

There is one default temporary tablespace for the root and for each PDB.

One active undo tablespace for a single-instance CDB, or one active undo tablespace for each instance of an Oracle RAC CDB

Sets of system data files

The primary physical difference between a CDB and a non-CDB is in the non-undo data files. A non-CDB has only one set of system data files. In contrast, a CDB includes one set of system data files for each container in the CDB, including a set of system data files for each PDB. In addition, a CDB has one set of user-created data files for each container.

Sets of user-created data files

Each PDB has its own set of non-system data files. These data files contain the user-defined schemas and database objects for the PDB.

For backup and recovery of a CDB, Recovery Manager (RMAN) is recommended. PDB point-in-time recovery (PDB PITR) must be performed with RMAN. By default, RMAN turns on control file autobackup for a CDB. It is strongly recommended that control file autobackup is enabled for a CDB, to ensure that PDB PITR can undo data file additions or deletions.


<<<
https://leetcode.com/problems/two-sum/
{{{
Given an array of integers, return indices of the two numbers such that they add up to a specific target.

You may assume that each input would have exactly one solution, and you may not use the same element twice.

Example:

Given nums = [2, 7, 11, 15], target = 9,

Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].

}}}


{{{
# brute force O(n^2): try every pair until one sums to target
# class Solution:
#     def twoSum(self, nums, target):
#         for i in range(len(nums)):
#             for j in range(i + 1, len(nums)):
#                 if nums[i] + nums[j] == target:
#                     return [i, j]


# hash map O(n): remember the complement (target - num) of every number
# seen so far; when a later number equals a stored complement, the pair is found
class Solution:
    def twoSum(self, nums, target):

        if len(nums) <= 1:
            return False

        kv_hmap = dict()

        for i in range(len(nums)):
            num = nums[i]
            key = target - num      # the value that would complete the pair

            if num in kv_hmap:      # num completes an earlier number
                return [kv_hmap[num], i]
            else:
                kv_hmap[key] = i    # wait for num's complement to show up
}}}
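
quick check against the example from the problem statement (assuming the class above is saved as two_sum.py in the current directory):
{{{
python -c "
from two_sum import Solution
print(Solution().twoSum([2, 7, 11, 15], 9))   # [0, 1]
"
}}}
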
turbo mode is disabled 
{{{
          <!-- Turbo Mode -->
          <!-- Description: Turbo Mode. -->
          <!-- Possible Values: "Disabled", "Enabled" -->
          <Turbo_Mode>Disabled</Turbo_Mode>
}}}

! cpu_topology script
{{{
[root@enkx3cel01 ~]# sh cpu_topology
        Product Name: SUN FIRE X4270 M3
        Product Name: ASSY,MOTHERBOARD,2U
model name      : Intel(R) Xeon(R) CPU E5-2630L 0 @ 2.00GHz
processors  (OS CPU count)          0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
physical id (processor socket)      0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1
siblings    (logical CPUs/socket)   12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12 12
core id     (# assigned to a core)  0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3 4 5
cpu cores   (physical cores/socket) 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
}}}

! intel cpu topology tool
{{{
[root@enkx3cel01 cpu-topology]# ./cpu_topology64.out


        Advisory to Users on system topology enumeration

This utility is for demonstration purpose only. It assumes the hardware topology
configuration within a coherent domain does not change during the life of an OS
session. If an OS support advanced features that can change hardware topology
configurations, more sophisticated adaptation may be necessary to account for
the hardware configuration change that might have added and reduced the number
of logical processors being managed by the OS.

User should also be aware that the system topology enumeration algorithm is
based on the assumption that CPUID instruction will return raw data reflecting
the native hardware configuration. When an application runs inside a virtual
machine hosted by a Virtual Machine Monitor (VMM), any CPUID instructions
issued by an app (or a guest OS) are trapped by the VMM and it is the VMM's
responsibility and decision to emulate/supply CPUID return data to the virtual
machines. When deploying topology enumeration code based on querying CPUID
inside a VM environment, the user must consult with the VMM vendor on how an VMM
will emulate CPUID instruction relating to topology enumeration.



        Software visible enumeration in the system:
Number of logical processors visible to the OS: 24
Number of logical processors visible to this process: 24
Number of processor cores visible to this process: 12
Number of physical packages visible to this process: 2


        Hierarchical counts by levels of processor topology:
 # of cores in package  0 visible to this process: 6 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .
         # of logical processors in Core  4 visible to this process: 2 .
         # of logical processors in Core  5 visible to this process: 2 .
 # of cores in package  1 visible to this process: 6 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .
         # of logical processors in Core  4 visible to this process: 2 .
         # of logical processors in Core  5 visible to this process: 2 .


        Affinity masks per SMT thread, per core, per package:
Individual:
        P:0, C:0, T:0 --> 1
        P:0, C:0, T:1 --> 1z3

Core-aggregated:
        P:0, C:0 --> 1001
Individual:
        P:0, C:1, T:0 --> 2
        P:0, C:1, T:1 --> 2z3

Core-aggregated:
        P:0, C:1 --> 2002
Individual:
        P:0, C:2, T:0 --> 4
        P:0, C:2, T:1 --> 4z3

Core-aggregated:
        P:0, C:2 --> 4004
Individual:
        P:0, C:3, T:0 --> 8
        P:0, C:3, T:1 --> 8z3

Core-aggregated:
        P:0, C:3 --> 8008
Individual:
        P:0, C:4, T:0 --> 10
        P:0, C:4, T:1 --> 1z4

Core-aggregated:
        P:0, C:4 --> 10010
Individual:
        P:0, C:5, T:0 --> 20
        P:0, C:5, T:1 --> 2z4

Core-aggregated:
        P:0, C:5 --> 20020

Pkg-aggregated:
        P:0 --> 3f03f
Individual:
        P:1, C:0, T:0 --> 40
        P:1, C:0, T:1 --> 4z4

Core-aggregated:
        P:1, C:0 --> 40040
Individual:
        P:1, C:1, T:0 --> 80
        P:1, C:1, T:1 --> 8z4

Core-aggregated:
        P:1, C:1 --> 80080
Individual:
        P:1, C:2, T:0 --> 100
        P:1, C:2, T:1 --> 1z5

Core-aggregated:
        P:1, C:2 --> 100100
Individual:
        P:1, C:3, T:0 --> 200
        P:1, C:3, T:1 --> 2z5

Core-aggregated:
        P:1, C:3 --> 200200
Individual:
        P:1, C:4, T:0 --> 400
        P:1, C:4, T:1 --> 4z5

Core-aggregated:
        P:1, C:4 --> 400400
Individual:
        P:1, C:5, T:0 --> 800
        P:1, C:5, T:1 --> 8z5

Core-aggregated:
        P:1, C:5 --> 800800

Pkg-aggregated:
        P:1 --> fc0fc0


        APIC ID listings from affinity masks
OS cpu   0, Affinity mask 00000001 - apic id 0
OS cpu   1, Affinity mask 00000002 - apic id 2
OS cpu   2, Affinity mask 00000004 - apic id 4
OS cpu   3, Affinity mask 00000008 - apic id 6
OS cpu   4, Affinity mask 00000010 - apic id 8
OS cpu   5, Affinity mask 00000020 - apic id a
OS cpu   6, Affinity mask 00000040 - apic id 20
OS cpu   7, Affinity mask 00000080 - apic id 22
OS cpu   8, Affinity mask 00000100 - apic id 24
OS cpu   9, Affinity mask 00000200 - apic id 26
OS cpu  10, Affinity mask 00000400 - apic id 28
OS cpu  11, Affinity mask 00000800 - apic id 2a
OS cpu  12, Affinity mask 00001000 - apic id 1
OS cpu  13, Affinity mask 00002000 - apic id 3
OS cpu  14, Affinity mask 00004000 - apic id 5
OS cpu  15, Affinity mask 00008000 - apic id 7
OS cpu  16, Affinity mask 00010000 - apic id 9
OS cpu  17, Affinity mask 00020000 - apic id b
OS cpu  18, Affinity mask 00040000 - apic id 21
OS cpu  19, Affinity mask 00080000 - apic id 23
OS cpu  20, Affinity mask 00100000 - apic id 25
OS cpu  21, Affinity mask 00200000 - apic id 27
OS cpu  22, Affinity mask 00400000 - apic id 29
OS cpu  23, Affinity mask 00800000 - apic id 2b


Package 0 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
L1D is Level 1 Data cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 6
L1I is Level 1 Instruction cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 6
L2 is Level 2 Unified cache, size(KBytes)= 256,  Cores/cache= 2, Caches/package= 6
L3 is Level 3 Unified cache, size(KBytes)= 15360,  Cores/cache= 12, Caches/package= 1
      +-------------+-------------+-------------+-------------+-------------+-------------+
Cache |   L1D       |   L1D       |   L1D       |   L1D       |   L1D       |   L1D       |
Size  |   32K       |   32K       |   32K       |   32K       |   32K       |   32K       |
OScpu#|     0     12|     1     13|     2     14|     3     15|     4     16|     5     17|
Core  | c0_t0  c0_t1| c1_t0  c1_t1| c2_t0  c2_t1| c3_t0  c3_t1| c4_t0  c4_t1| c5_t0  c5_t1|
AffMsk|     1    1z3|     2    2z3|     4    4z3|     8    8z3|    10    1z4|    20    2z4|
CmbMsk|  1001       |  2002       |  4004       |  8008       | 10010       | 20020       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |   L1I       |   L1I       |   L1I       |   L1I       |   L1I       |   L1I       |
Size  |   32K       |   32K       |   32K       |   32K       |   32K       |   32K       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |    L2       |    L2       |    L2       |    L2       |    L2       |    L2       |
Size  |  256K       |  256K       |  256K       |  256K       |  256K       |  256K       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |    L3                                                                             |
Size  |   15M                                                                             |
CmbMsk| 3f03f                                                                             |
      +-----------------------------------------------------------------------------------+

Combined socket AffinityMask= 0x3f03f


Package 1 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
      +-------------+-------------+-------------+-------------+-------------+-------------+
Cache |   L1D       |   L1D       |   L1D       |   L1D       |   L1D       |   L1D       |
Size  |   32K       |   32K       |   32K       |   32K       |   32K       |   32K       |
OScpu#|     6     18|     7     19|     8     20|     9     21|    10     22|    11     23|
Core  | c0_t0  c0_t1| c1_t0  c1_t1| c2_t0  c2_t1| c3_t0  c3_t1| c4_t0  c4_t1| c5_t0  c5_t1|
AffMsk|    40    4z4|    80    8z4|   100    1z5|   200    2z5|   400    4z5|   800    8z5|
CmbMsk| 40040       | 80080       |100100       |200200       |400400       |800800       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |   L1I       |   L1I       |   L1I       |   L1I       |   L1I       |   L1I       |
Size  |   32K       |   32K       |   32K       |   32K       |   32K       |   32K       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |    L2       |    L2       |    L2       |    L2       |    L2       |    L2       |
Size  |  256K       |  256K       |  256K       |  256K       |  256K       |  256K       |
      +-------------+-------------+-------------+-------------+-------------+-------------+

Cache |    L3                                                                             |
Size  |   15M                                                                             |
CmbMsk|fc0fc0                                                                             |
      +-----------------------------------------------------------------------------------+
}}}
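
The ''Extended Hex'' notation above abbreviates trailing zeroes, so '8z5' means 0x800000. A minimal SQL sketch to expand such a mask back into a number (assumes at most one 'z' marker per value):
{{{
-- expand Intel "extended hex" ('8z5' -> 0x800000 -> 8388608)
-- sketch only: assumes at most one 'z' marker in the input string
select s,
       to_number(
         case
           when instr(s, 'z') = 0 then s
           else substr(s, 1, instr(s, 'z') - 1) ||
                rpad('0', to_number(substr(s, instr(s, 'z') + 1)), '0')
         end,
         'XXXXXXXXXXXXXXXX') as mask_decimal
  from (select '8z5' as s from dual);
}}}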


! intel turbostat
{{{
[root@enkx3cel01 ~]# ./turbostat
pkg core CPU   %c0   GHz  TSC   %c1    %c3    %c6    %c7   %pc2   %pc3   %pc6   %pc7
               4.22 2.00 2.00  95.78   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0   0   3.85 2.00 2.00  96.15   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0  12   2.74 2.00 2.00  97.26   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1   1  24.62 2.00 2.00  75.38   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1  13  26.93 2.00 2.00  73.07   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   2   2   2.68 2.00 2.00  97.32   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   2  14   3.15 2.00 2.00  96.85   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   3   3   2.10 2.00 2.00  97.90   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   3  15   1.44 2.00 2.00  98.56   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   4   4   2.66 2.00 2.00  97.34   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   4  16   1.99 2.00 2.00  98.01   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   5   5   1.88 2.00 2.00  98.12   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   5  17   2.34 2.00 2.00  97.66   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0   6   3.10 2.00 2.00  96.90   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0  18   2.28 2.00 2.00  97.72   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1   7   2.73 2.00 2.00  97.27   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1  19   2.28 2.00 2.00  97.72   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   2   8   1.94 2.00 2.00  98.06   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   2  20   1.41 2.00 2.00  98.59   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   3   9   2.45 2.00 2.00  97.55   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   3  21   2.26 2.00 2.00  97.74   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   4  10   1.41 2.00 2.00  98.59   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   4  22   1.48 2.00 2.00  98.52   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   5  11   1.59 2.00 2.00  98.41   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   5  23   1.87 2.00 2.00  98.13   0.00   0.00   0.00   0.00   0.00   0.00   0.00
}}}




! cpu_topology script
{{{
[root@enkx3db01 cpu-topology]# sh ~root/cpu_topology
        Product Name: SUN FIRE X4170 M3
        Product Name: ASSY,MOTHERBOARD,1U
model name      : Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz
processors  (OS CPU count)          0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
physical id (processor socket)      0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1
siblings    (logical CPUs/socket)   16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16 16
core id     (# assigned to a core)  0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
cpu cores   (physical cores/socket) 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8
}}}


! intel cpu topology tool
{{{
[root@enkx3db01 cpu-topology]# ./cpu_topology64.out


        Advisory to Users on system topology enumeration

This utility is for demonstration purpose only. It assumes the hardware topology
configuration within a coherent domain does not change during the life of an OS
session. If an OS support advanced features that can change hardware topology
configurations, more sophisticated adaptation may be necessary to account for
the hardware configuration change that might have added and reduced the number
of logical processors being managed by the OS.

User should also be aware that the system topology enumeration algorithm is
based on the assumption that CPUID instruction will return raw data reflecting
the native hardware configuration. When an application runs inside a virtual
machine hosted by a Virtual Machine Monitor (VMM), any CPUID instructions
issued by an app (or a guest OS) are trapped by the VMM and it is the VMM's
responsibility and decision to emulate/supply CPUID return data to the virtual
machines. When deploying topology enumeration code based on querying CPUID
inside a VM environment, the user must consult with the VMM vendor on how an VMM
will emulate CPUID instruction relating to topology enumeration.



        Software visible enumeration in the system:
Number of logical processors visible to the OS: 32
Number of logical processors visible to this process: 32
Number of processor cores visible to this process: 16
Number of physical packages visible to this process: 2


        Hierarchical counts by levels of processor topology:
 # of cores in package  0 visible to this process: 8 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .
         # of logical processors in Core  4 visible to this process: 2 .
         # of logical processors in Core  5 visible to this process: 2 .
         # of logical processors in Core  6 visible to this process: 2 .
         # of logical processors in Core  7 visible to this process: 2 .
 # of cores in package  1 visible to this process: 8 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .
         # of logical processors in Core  4 visible to this process: 2 .
         # of logical processors in Core  5 visible to this process: 2 .
         # of logical processors in Core  6 visible to this process: 2 .
         # of logical processors in Core  7 visible to this process: 2 .


        Affinity masks per SMT thread, per core, per package:
Individual:
        P:0, C:0, T:0 --> 1
        P:0, C:0, T:1 --> 1z4

Core-aggregated:
        P:0, C:0 --> 10001
Individual:
        P:0, C:1, T:0 --> 2
        P:0, C:1, T:1 --> 2z4

Core-aggregated:
        P:0, C:1 --> 20002
Individual:
        P:0, C:2, T:0 --> 4
        P:0, C:2, T:1 --> 4z4

Core-aggregated:
        P:0, C:2 --> 40004
Individual:
        P:0, C:3, T:0 --> 8
        P:0, C:3, T:1 --> 8z4

Core-aggregated:
        P:0, C:3 --> 80008
Individual:
        P:0, C:4, T:0 --> 10
        P:0, C:4, T:1 --> 1z5

Core-aggregated:
        P:0, C:4 --> 100010
Individual:
        P:0, C:5, T:0 --> 20
        P:0, C:5, T:1 --> 2z5

Core-aggregated:
        P:0, C:5 --> 200020
Individual:
        P:0, C:6, T:0 --> 40
        P:0, C:6, T:1 --> 4z5

Core-aggregated:
        P:0, C:6 --> 400040
Individual:
        P:0, C:7, T:0 --> 80
        P:0, C:7, T:1 --> 8z5

Core-aggregated:
        P:0, C:7 --> 800080

Pkg-aggregated:
        P:0 --> ff00ff
Individual:
        P:1, C:0, T:0 --> 100
        P:1, C:0, T:1 --> 1z6

Core-aggregated:
        P:1, C:0 --> 1000100
Individual:
        P:1, C:1, T:0 --> 200
        P:1, C:1, T:1 --> 2z6

Core-aggregated:
        P:1, C:1 --> 2000200
Individual:
        P:1, C:2, T:0 --> 400
        P:1, C:2, T:1 --> 4z6

Core-aggregated:
        P:1, C:2 --> 4000400
Individual:
        P:1, C:3, T:0 --> 800
        P:1, C:3, T:1 --> 8z6

Core-aggregated:
        P:1, C:3 --> 8000800
Individual:
        P:1, C:4, T:0 --> 1z3
        P:1, C:4, T:1 --> 1z7

Core-aggregated:
        P:1, C:4 --> 10001z3
Individual:
        P:1, C:5, T:0 --> 2z3
        P:1, C:5, T:1 --> 2z7

Core-aggregated:
        P:1, C:5 --> 20002z3
Individual:
        P:1, C:6, T:0 --> 4z3
        P:1, C:6, T:1 --> 4z7

Core-aggregated:
        P:1, C:6 --> 40004z3
Individual:
        P:1, C:7, T:0 --> 8z3
        P:1, C:7, T:1 --> 8z7

Core-aggregated:
        P:1, C:7 --> 80008z3

Pkg-aggregated:
        P:1 --> ff00ff00


        APIC ID listings from affinity masks
OS cpu   0, Affinity mask 0000000001 - apic id 0
OS cpu   1, Affinity mask 0000000002 - apic id 2
OS cpu   2, Affinity mask 0000000004 - apic id 4
OS cpu   3, Affinity mask 0000000008 - apic id 6
OS cpu   4, Affinity mask 0000000010 - apic id 8
OS cpu   5, Affinity mask 0000000020 - apic id a
OS cpu   6, Affinity mask 0000000040 - apic id c
OS cpu   7, Affinity mask 0000000080 - apic id e
OS cpu   8, Affinity mask 0000000100 - apic id 20
OS cpu   9, Affinity mask 0000000200 - apic id 22
OS cpu  10, Affinity mask 0000000400 - apic id 24
OS cpu  11, Affinity mask 0000000800 - apic id 26
OS cpu  12, Affinity mask 0000001000 - apic id 28
OS cpu  13, Affinity mask 0000002000 - apic id 2a
OS cpu  14, Affinity mask 0000004000 - apic id 2c
OS cpu  15, Affinity mask 0000008000 - apic id 2e
OS cpu  16, Affinity mask 0000010000 - apic id 1
OS cpu  17, Affinity mask 0000020000 - apic id 3
OS cpu  18, Affinity mask 0000040000 - apic id 5
OS cpu  19, Affinity mask 0000080000 - apic id 7
OS cpu  20, Affinity mask 0000100000 - apic id 9
OS cpu  21, Affinity mask 0000200000 - apic id b
OS cpu  22, Affinity mask 0000400000 - apic id d
OS cpu  23, Affinity mask 0000800000 - apic id f
OS cpu  24, Affinity mask 0001000000 - apic id 21
OS cpu  25, Affinity mask 0002000000 - apic id 23
OS cpu  26, Affinity mask 0004000000 - apic id 25
OS cpu  27, Affinity mask 0008000000 - apic id 27
OS cpu  28, Affinity mask 0010000000 - apic id 29
OS cpu  29, Affinity mask 0020000000 - apic id 2b
OS cpu  30, Affinity mask 0040000000 - apic id 2d
OS cpu  31, Affinity mask 0080000000 - apic id 2f


Package 0 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
L1D is Level 1 Data cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 8
L1I is Level 1 Instruction cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 8
L2 is Level 2 Unified cache, size(KBytes)= 256,  Cores/cache= 2, Caches/package= 8
L3 is Level 3 Unified cache, size(KBytes)= 20480,  Cores/cache= 16, Caches/package= 1
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+
Cache |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |
Size  |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |
OScpu#|       0       16|       1       17|       2       18|       3       19|       4       20|       5       21|       6       22|       7       23|
Core  |   c0_t0    c0_t1|   c1_t0    c1_t1|   c2_t0    c2_t1|   c3_t0    c3_t1|   c4_t0    c4_t1|   c5_t0    c5_t1|   c6_t0    c6_t1|   c7_t0    c7_t1|
AffMsk|       1      1z4|       2      2z4|       4      4z4|       8      8z4|      10      1z5|      20      2z5|      40      4z5|      80      8z5|
CmbMsk|   10001         |   20002         |   40004         |   80008         |  100010         |  200020         |  400040         |  800080         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |
Size  |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |
Size  |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |      L3                                                                                                                                       |
Size  |     20M                                                                                                                                       |
CmbMsk|  ff00ff                                                                                                                                       |
      +-----------------------------------------------------------------------------------------------------------------------------------------------+

Combined socket AffinityMask= 0xff00ff


Package 1 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+
Cache |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |     L1D         |
Size  |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |
OScpu#|       8       24|       9       25|      10       26|      11       27|      12       28|      13       29|      14       30|      15       31|
Core  |   c0_t0    c0_t1|   c1_t0    c1_t1|   c2_t0    c2_t1|   c3_t0    c3_t1|   c4_t0    c4_t1|   c5_t0    c5_t1|   c6_t0    c6_t1|   c7_t0    c7_t1|
AffMsk|     100      1z6|     200      2z6|     400      4z6|     800      8z6|     1z3      1z7|     2z3      2z7|     4z3      4z7|     8z3      8z7|
CmbMsk| 1000100         | 2000200         | 4000400         | 8000800         | 10001z3         | 20002z3         | 40004z3         | 80008z3         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |     L1I         |
Size  |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |     32K         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |      L2         |
Size  |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |    256K         |
      +-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+-----------------+

Cache |      L3                                                                                                                                       |
Size  |     20M                                                                                                                                       |
CmbMsk|ff00ff00                                                                                                                                       |
      +-----------------------------------------------------------------------------------------------------------------------------------------------+
}}}


! intel turbostat
{{{
[root@enkx3db01 ~]# ./turbostat
pkg core CPU   %c0   GHz  TSC   %c1    %c3    %c6    %c7   %pc2   %pc3   %pc6   %pc7
               0.73 1.99 2.89  99.27   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0   0   1.71 1.86 2.89  98.29   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0  16   0.82 1.88 2.89  99.18   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1   1   3.66 1.60 2.89  96.34   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1  17   3.34 1.97 2.89  96.66   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   2   2   0.20 2.12 2.89  99.80   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   2  18   0.32 2.68 2.89  99.68   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   3   3   0.43 2.28 2.89  99.57   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   3  19   0.32 1.47 2.89  99.68   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   4   4   0.14 2.61 2.89  99.86   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   4  20   0.14 1.90 2.89  99.86   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   5   5   0.09 1.98 2.89  99.91   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   5  21   0.18 1.80 2.89  99.82   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   6   6   0.14 1.94 2.89  99.86   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   6  22   0.03 2.12 2.89  99.97   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   7   7   0.02 2.28 2.89  99.98   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   7  23   0.02 2.02 2.89  99.98   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0   8   3.49 2.37 2.89  96.51   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0  24   1.30 2.48 2.89  98.70   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1   9   0.85 2.39 2.89  99.15   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1  25   0.54 2.66 2.89  99.46   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   2  10   0.49 1.92 2.89  99.51   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   2  26   0.23 2.17 2.89  99.77   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   3  11   0.24 2.18 2.89  99.76   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   3  27   0.57 1.65 2.89  99.43   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   4  12   0.22 2.30 2.89  99.78   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   4  28   0.28 2.10 2.89  99.72   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   5  13   0.44 1.79 2.89  99.56   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   5  29   0.10 2.02 2.89  99.90   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   6  14   0.05 2.46 2.89  99.95   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   6  30   0.06 2.44 2.89  99.94   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   7  15   2.24 1.44 2.89  97.76   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   7  31   0.70 2.23 2.89  99.30   0.00   0.00   0.00   0.00   0.00   0.00   0.00
}}}






turbo mode is disabled (from the BIOS configuration dump):
{{{
          <!-- Turbo Mode -->
          <!-- Description: Turbo Mode. -->
          <!-- Possible Values: "Disabled", "Enabled" -->
          <Turbo_Mode>Disabled</Turbo_Mode>
}}}

! cpu_topology script
{{{
[root@enkx3db02 cpu-topology]# sh ~root/cpu_topology
        Product Name: SUN FIRE X4170 M3
        Product Name: ASSY,MOTHERBOARD,1U
model name      : Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz
processors  (OS CPU count)          0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
physical id (processor socket)      0 0 0 0 1 1 1 1 0 0 0 0 1 1 1 1
siblings    (logical CPUs/socket)   8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8
core id     (# assigned to a core)  0 1 6 7 0 1 6 7 0 1 6 7 0 1 6 7
cpu cores   (physical cores/socket) 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
}}}


! intel cpu topology tool
{{{
[root@enkx3db02 cpu-topology]# ./cpu_topology64.out


        Advisory to Users on system topology enumeration

This utility is for demonstration purpose only. It assumes the hardware topology
configuration within a coherent domain does not change during the life of an OS
session. If an OS support advanced features that can change hardware topology
configurations, more sophisticated adaptation may be necessary to account for
the hardware configuration change that might have added and reduced the number
of logical processors being managed by the OS.

User should also be aware that the system topology enumeration algorithm is
based on the assumption that CPUID instruction will return raw data reflecting
the native hardware configuration. When an application runs inside a virtual
machine hosted by a Virtual Machine Monitor (VMM), any CPUID instructions
issued by an app (or a guest OS) are trapped by the VMM and it is the VMM's
responsibility and decision to emulate/supply CPUID return data to the virtual
machines. When deploying topology enumeration code based on querying CPUID
inside a VM environment, the user must consult with the VMM vendor on how an VMM
will emulate CPUID instruction relating to topology enumeration.



        Software visible enumeration in the system:
Number of logical processors visible to the OS: 16
Number of logical processors visible to this process: 16
Number of processor cores visible to this process: 8
Number of physical packages visible to this process: 2


        Hierarchical counts by levels of processor topology:
 # of cores in package  0 visible to this process: 4 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .
 # of cores in package  1 visible to this process: 4 .
         # of logical processors in Core 0 visible to this process: 2 .
         # of logical processors in Core  1 visible to this process: 2 .
         # of logical processors in Core  2 visible to this process: 2 .
         # of logical processors in Core  3 visible to this process: 2 .


        Affinity masks per SMT thread, per core, per package:
Individual:
        P:0, C:0, T:0 --> 1
        P:0, C:0, T:1 --> 100

Core-aggregated:
        P:0, C:0 --> 101
Individual:
        P:0, C:1, T:0 --> 2
        P:0, C:1, T:1 --> 200

Core-aggregated:
        P:0, C:1 --> 202
Individual:
        P:0, C:2, T:0 --> 4
        P:0, C:2, T:1 --> 400

Core-aggregated:
        P:0, C:2 --> 404
Individual:
        P:0, C:3, T:0 --> 8
        P:0, C:3, T:1 --> 800

Core-aggregated:
        P:0, C:3 --> 808

Pkg-aggregated:
        P:0 --> f0f
Individual:
        P:1, C:0, T:0 --> 10
        P:1, C:0, T:1 --> 1z3

Core-aggregated:
        P:1, C:0 --> 1010
Individual:
        P:1, C:1, T:0 --> 20
        P:1, C:1, T:1 --> 2z3

Core-aggregated:
        P:1, C:1 --> 2020
Individual:
        P:1, C:2, T:0 --> 40
        P:1, C:2, T:1 --> 4z3

Core-aggregated:
        P:1, C:2 --> 4040
Individual:
        P:1, C:3, T:0 --> 80
        P:1, C:3, T:1 --> 8z3

Core-aggregated:
        P:1, C:3 --> 8080

Pkg-aggregated:
        P:1 --> f0f0


        APIC ID listings from affinity masks
OS cpu   0, Affinity mask   000001 - apic id 0
OS cpu   1, Affinity mask   000002 - apic id 2
OS cpu   2, Affinity mask   000004 - apic id c
OS cpu   3, Affinity mask   000008 - apic id e
OS cpu   4, Affinity mask   000010 - apic id 20
OS cpu   5, Affinity mask   000020 - apic id 22
OS cpu   6, Affinity mask   000040 - apic id 2c
OS cpu   7, Affinity mask   000080 - apic id 2e
OS cpu   8, Affinity mask   000100 - apic id 1
OS cpu   9, Affinity mask   000200 - apic id 3
OS cpu  10, Affinity mask   000400 - apic id d
OS cpu  11, Affinity mask   000800 - apic id f
OS cpu  12, Affinity mask   001000 - apic id 21
OS cpu  13, Affinity mask   002000 - apic id 23
OS cpu  14, Affinity mask   004000 - apic id 2d
OS cpu  15, Affinity mask   008000 - apic id 2f


Package 0 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
L1D is Level 1 Data cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 4
L1I is Level 1 Instruction cache, size(KBytes)= 32,  Cores/cache= 2, Caches/package= 4
L2 is Level 2 Unified cache, size(KBytes)= 256,  Cores/cache= 2, Caches/package= 4
L3 is Level 3 Unified cache, size(KBytes)= 20480,  Cores/cache= 8, Caches/package= 1
      +-----------+-----------+-----------+-----------+
Cache |  L1D      |  L1D      |  L1D      |  L1D      |
Size  |  32K      |  32K      |  32K      |  32K      |
OScpu#|    0     8|    1     9|    2    10|    3    11|
Core  |c0_t0 c0_t1|c1_t0 c1_t1|c2_t0 c2_t1|c3_t0 c3_t1|
AffMsk|    1   100|    2   200|    4   400|    8   800|
CmbMsk|  101      |  202      |  404      |  808      |
      +-----------+-----------+-----------+-----------+

Cache |  L1I      |  L1I      |  L1I      |  L1I      |
Size  |  32K      |  32K      |  32K      |  32K      |
      +-----------+-----------+-----------+-----------+

Cache |   L2      |   L2      |   L2      |   L2      |
Size  | 256K      | 256K      | 256K      | 256K      |
      +-----------+-----------+-----------+-----------+

Cache |   L3                                          |
Size  |  20M                                          |
CmbMsk|  f0f                                          |
      +-----------------------------------------------+

Combined socket AffinityMask= 0xf0f


Package 1 Cache and Thread details


Box Description:
Cache  is cache level designator
Size   is cache size
OScpu# is cpu # as seen by OS
Core   is core#[_thread# if > 1 thread/core] inside socket
AffMsk is AffinityMask(extended hex) for core and thread
CmbMsk is Combined AffinityMask(extended hex) for hw threads sharing cache
       CmbMsk will differ from AffMsk if > 1 hw_thread/cache
Extended Hex replaces trailing zeroes with 'z#'
       where # is number of zeroes (so '8z5' is '0x800000')
      +-----------+-----------+-----------+-----------+
Cache |  L1D      |  L1D      |  L1D      |  L1D      |
Size  |  32K      |  32K      |  32K      |  32K      |
OScpu#|    4    12|    5    13|    6    14|    7    15|
Core  |c0_t0 c0_t1|c1_t0 c1_t1|c2_t0 c2_t1|c3_t0 c3_t1|
AffMsk|   10   1z3|   20   2z3|   40   4z3|   80   8z3|
CmbMsk| 1010      | 2020      | 4040      | 8080      |
      +-----------+-----------+-----------+-----------+

Cache |  L1I      |  L1I      |  L1I      |  L1I      |
Size  |  32K      |  32K      |  32K      |  32K      |
      +-----------+-----------+-----------+-----------+

Cache |   L2      |   L2      |   L2      |   L2      |
Size  | 256K      | 256K      | 256K      | 256K      |
      +-----------+-----------+-----------+-----------+

Cache |   L3                                          |
Size  |  20M                                          |
CmbMsk| f0f0                                          |
      +-----------------------------------------------+
}}}


! intel turbostat
{{{
[root@enkx3db02 ~]# ./turbostat
pkg core CPU   %c0   GHz  TSC   %c1    %c3    %c6    %c7   %pc2   %pc3   %pc6   %pc7
               2.05 2.42 2.89  97.95   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0   0   3.19 1.93 2.89  96.81   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   0   8   2.09 1.93 2.89  97.91   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1   1   4.14 2.22 2.89  95.86   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   1   9  10.10 2.66 2.89  89.90   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   6   2   0.89 1.98 2.89  99.11   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   6  10   5.12 2.79 2.89  94.88   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   7   3   0.40 2.26 2.89  99.60   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   0   7  11   0.46 2.33 2.89  99.54   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0   4   1.86 2.07 2.89  98.14   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   0  12   0.53 2.33 2.89  99.47   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1   5   0.57 2.45 2.89  99.43   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   1  13   0.95 2.55 2.89  99.05   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   6   6   0.58 1.62 2.89  99.42   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   6  14   1.04 2.68 2.89  98.96   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   7   7   0.31 2.18 2.89  99.69   0.00   0.00   0.00   0.00   0.00   0.00   0.00
   1   7  15   0.58 2.75 2.89  99.42   0.00   0.00   0.00   0.00   0.00   0.00   0.00
}}}
http://rnm1978.wordpress.com/2011/02/02/instrumenting-obiee-for-tracing-oracle-db-calls/
http://rnm1978.wordpress.com/2010/01/26/identify-your-users-by-setting-client-id-in-oracle/

http://www.oracle-base.com/articles/10g/SQLTrace10046TrcsessAndTkprof10g.php
http://www.oracle-base.com/articles/10g/PerformanceTuningEnhancements10g.php

http://method-r.com/software/mrtools
http://method-r.com/component/content/article/115 <-- mrls
http://method-r.com/component/content/article/116 <-- mrnl
http://method-r.com/component/content/article/117 <-- mrskew

http://appsdba.com/docs/orcl_event_6340.html <-- trace file event timeline 
http://www.appsdba.com/blog/?category_name=oracle-dba&paged=2
http://www.appsdba.com/blog/?p=109 <-- trace file execution tree
http://appsdba.com/utilities_resource.htm 

http://www.juliandyke.com/Diagnostics/Trace/EnablingTrace.html
http://www.rittmanmead.com/2005/04/tracing-parallel-execution/
http://www.antognini.ch/2012/08/event-10046-full-list-of-levels/
http://www.sagecomputing.com.au/papers_presentations/lostwithoutatrace.pdf   <- good stuff, with sample codes
http://www.oracle-base.com/articles/8i/DBMS_APPLICATION_INFO.php    <- DBMS_APPLICATION_INFO : For Code Instrumentation
http://www.oracle-base.com/articles/misc/DBMS_SESSION.php <- DBMS_SESSION : Managing Sessions From a Connection Pool in Oracle Databases
http://www.oracle-base.com/articles/10g/SQLTrace10046TrcsessAndTkprof10g.php
http://www.petefinnigan.com/ramblings/how_to_set_trace.htm
http://psoug.org/reference/dbms_monitor.html
http://psoug.org/reference/dbms_applic_info.html
http://asktom.oracle.com/pls/apex/f?p=100:11:0::::P11_QUESTION_ID:49818662859946
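
Putting the instrumentation links above into practice: a minimal sketch (the module/action/service names below are made-up examples) that tags a session with DBMS_APPLICATION_INFO so DBMS_MONITOR can later scope tracing to just that workload:
{{{
-- tag the session; the names are example placeholders
begin
  dbms_application_info.set_module(module_name => 'BATCH_LOAD',
                                   action_name => 'LOAD_ORDERS');
end;
/

-- scope tracing to that module/action instead of a whole session
exec dbms_monitor.serv_mod_act_trace_enable(service_name => 'MYSVC', -
       module_name => 'BATCH_LOAD', action_name => 'LOAD_ORDERS', -
       waits => true, binds => true);
}}}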

How to: Trace the SQL executed by SYSMAN Using a Trigger [ID 400937.1]
{{{
CREATE OR REPLACE TRIGGER logontrig AFTER logon ON database 
begin 
if ora_login_user = 'SYSMAN' then 
execute immediate 'alter session set tracefile_identifier = '||'SYSMAN'; 
execute immediate 'Alter session set events ''10046 trace name context forever, level 12'''; 
end if; 
end;
/
}}}
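
Once the traces are collected, remove the trigger so future SYSMAN logons are no longer traced; a session that is already tracing can also switch the event off itself:
{{{
drop trigger logontrig;

-- inside an already-traced session
alter session set events '10046 trace name context off';
}}}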


Capture 10046 Traces Upon User Login (without using a trigger) [ID 371678.1]
http://dbmentors.blogspot.com/2011/09/using-dbmsmonitor.html
http://docs.oracle.com/cd/B28359_01/network.111/b28531/app_context.htm <- application context
https://method-r.fogbugz.com/default.asp?method-r.11.139.2 <- hotsos ILO 
http://www.databasejournal.com/features/oracle/article.php/3435431/Oracle-Session-Tracing-Part-I.htm   <- Oracle Session Tracing Part I







''per module''
{{{
exec DBMS_MONITOR.serv_mod_act_trace_enable (service_name => 'FSTSTAH', module_name => 'EX_APPROVAL');
exec DBMS_MONITOR.serv_mod_act_trace_disable (service_name => 'FSTSTAH', module_name => 'EX_APPROVAL');
trcsess output=client.trc module=EX_APPROVAL *.trc
./orasrp --aggregate=no --binds=0 --recognize-idle-events=no --sys=no client.trc fsprd.html
tkprof client.trc client.tkprof sort=exeela 
}}}
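
Note that serv_mod_act_trace_enable defaults to capturing waits but not binds (the DBA_ENABLED_TRACES output further below shows WAITS=TRUE, BINDS=FALSE); both can be requested explicitly:
{{{
exec DBMS_MONITOR.serv_mod_act_trace_enable (service_name => 'FSTSTAH', -
       module_name => 'EX_APPROVAL', waits => true, binds => true);
}}}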

''grep tkprof SQLs''
{{{
less client.tkprof-webapp | grep -B3 -A30 "SELECT L2.TREE_NODE_NUM" | egrep "SQL ID|total" | less

SQL ID: 9gxa3r2v0mkzp Plan Hash: 751140913
total       24      3.65       3.65          0       9103          0        4294
SQL ID: 9zssps0292n9m Plan Hash: 2156210208
total       17      2.64       2.64          0     206748          0        2901
SQL ID: 034a6u0h7psb1 Plan Hash: 2156210208
total        3      0.18       0.18          0       8929          0           4
SQL ID: 2yr2m4xfb14z0 Plan Hash: 4136997945
total        3      0.18       0.18          0       9102          0           3
SQL ID: 0rurft7y2paks Plan Hash: 3656446192
total       14      3.62       3.62          0       9102          0        2391
SQL ID: 99ugjzcz1j1r4 Plan Hash: 2156210208
total       24      2.62       2.62          0     206749          0        4337
SQL ID: 5fgb0cvhqy8w2 Plan Hash: 2156210208
total       28      3.26       3.26          0     215957          0        5077
SQL ID: amrb5fkaysu2r Plan Hash: 2156210208
total        3      0.14       0.14          0      11367          0           3
SQL ID: 3d6u5vjh1y5ny Plan Hash: 2156210208
total       20      3.26       3.27          0     215956          0        3450

}}}


{{{
select service_name, module from v$session where module = 'EX_APPROVAL'
 
SERVICE_NAME                                                     MODULE
---------------------------------------------------------------- ----------------------------------------------------------------
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
FSPRDOL                                                          EX_APPROVAL
 
9 rows selected.
 
 
 
SYS@fsprd2> SELECT * FROM DBA_ENABLED_TRACES ;
SYS@fsprd2>
SYS@fsprd2> /
 
no rows selected
 
SYS@fsprd2>
SYS@fsprd2>
SYS@fsprd2> exec DBMS_MONITOR.serv_mod_act_trace_enable (service_name => 'FSPRDOL', module_name => 'EX_APPROVAL');
 
PL/SQL procedure successfully completed.
 
 
SELECT 
TRACE_TYPE,
PRIMARY_ID,
QUALIFIER_ID1,
waits,
binds
FROM DBA_ENABLED_TRACES;
 
 
TRACE_TYPE            PRIMARY_ID                                                       QUALIFIER_ID1                                WAITS BINDS
--------------------- ---------------------------------------------------------------- ------------------------------------------------ ----- -----
SERVICE_MODULE        FSPRDOL                                                          EX_APPROVAL                                  TRUE  FALSE
 


--To disable
 exec DBMS_MONITOR.serv_mod_act_trace_disable (service_name => 'FSPRDOL', module_name => 'EX_APPROVAL');
}}}
<<showtoc>>


! 10046 and 10053

* when both events are set in the same session, the 10046 and 10053 data end up in one trace file
* you can parse this combined file with tv10053.exe but not with lab128 (v10053.exe)

* you may have to regenerate separate 10046 and 10053 traces to get a less noisy session call graph on the 10046 report
* if you want separate 10046 and 10053 runs, remove the 10053 event from the testcase file and use DBMS_SQLDIAG.DUMP_TRACE at the end of the SQL execution, as shown here [[10053]]

{{{

+++10046_10053++++
sqlplus <app user>/<pwd>

alter session set timed_statistics = true;
alter session set statistics_level=ALL;
alter session set max_dump_file_size=UNLIMITED;
alter session set tracefile_identifier='10046_10053';
alter session set events '10046 trace name context forever, level 12';
alter session set events '10053 trace name context forever, level 1';

-- >>> run the query here

-- run a dummy query to close the cursor
select 1 from dual;

exit;

Find trc with suffix "10046_10053" in <diag> directory and upload it to the SR.

To find all trace files for the current instance >>>>> SELECT VALUE FROM V$DIAG_INFO WHERE NAME = 'Diag Trace';


select tracefile from v$process where addr=(select paddr from v$session where sid=sys_context('userenv','sid'));


}}}
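
If the session is kept open rather than exited, switch both events off explicitly before reusing it:
{{{
alter session set events '10046 trace name context off';
alter session set events '10053 trace name context off';
}}}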



! time series short_stack 

[[genstack loop, short_stack loop, time series short_stack]]




! perf and flamegraph

[[Flamegraph using SQL]]


<<showtoc>>


11g
http://structureddata.org/2011/08/18/creating-optimizer-trace-files/?utm_source=rss&utm_medium=rss&utm_campaign=creating-optimizer-trace-files

Examining the Oracle Database 10053 Trace Event Dump File
http://www.databasejournal.com/features/oracle/article.php/3894901/article.htm

Don Seiler
http://seilerwerks.wordpress.com/2007/08/17/dr-statslove-or-how-i-learned-to-stop-guessing-and-love-the-10053-trace/



! new way 
{{{

-- execute the SQL here 


-- put this at the end of the testcase file
BEGIN
  DBMS_SQLDIAG.DUMP_TRACE (
      p_sql_id    => 'd4cdk8w5sazzq',
      p_child_number=> 0,
      p_component => 'Compiler',
      p_file_id   => 'TESTCASE_COLUMN_GROUP_C0');
END;
/

BEGIN
  DBMS_SQLDIAG.DUMP_TRACE (
      p_sql_id    => 'd4cdk8w5sazzq',
      p_child_number=> 1,
      p_component => 'Compiler',
      p_file_id   => 'TESTCASE_COLUMN_GROUP_C1');
END;
/


select value from v$diag_info where name = 'Default Trace File';

select tracefile from v$process where addr=(select paddr from v$session where sid=sys_context('userenv','sid'));

rm  /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/*TESTCASE*

ls -ltr /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/*TESTCASE*

$ ls -ltr /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/*TESTCASE*
-rw-r-----. 1 oracle oinstall 326371 Aug 17 10:49 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20180_TESTCASE_COLUMN_GROUP_C0.trm
-rw-r-----. 1 oracle oinstall 760584 Aug 17 10:49 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20180_TESTCASE_COLUMN_GROUP_C0.trc
-rw-r-----. 1 oracle oinstall 323253 Aug 17 10:49 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20180_TESTCASE_COLUMN_GROUP_C1.trm
-rw-r-----. 1 oracle oinstall 751171 Aug 17 10:49 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20180_TESTCASE_COLUMN_GROUP_C1.trc
-rw-r-----. 1 oracle oinstall 318794 Aug 17 10:50 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20274_TESTCASE_NO_COLUMN_GROUP_C0.trm
-rw-r-----. 1 oracle oinstall 745873 Aug 17 10:50 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20274_TESTCASE_NO_COLUMN_GROUP_C0.trc
-rw-r-----. 1 oracle oinstall 318767 Aug 17 10:50 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20274_TESTCASE_NO_COLUMN_GROUP_C1.trm
-rw-r-----. 1 oracle oinstall 745875 Aug 17 10:50 /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_20274_TESTCASE_NO_COLUMN_GROUP_C1.trc

}}}


! generic 
{{{
99g0fgyrhb4n7

BEGIN
  DBMS_SQLDIAG.DUMP_TRACE (
      p_sql_id    => 'bmd4dk0p4r0pc',
      p_child_number=> 0,
      p_component => 'Compiler',
      p_file_id   => 'bmd4dk0p4r0pc');
END;
/

select tracefile from v$process where addr=(select paddr from v$session where sid=sys_context('userenv','sid'));


mv /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_24762_TC_NOLOB_PEEKED.trc . 

cat orclcdb_ora_19285_TCPEEKED.trc | grep -hE "^DP|^AP"


mv /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_25322_TC_NOLOB_ACTUAL.trc .

cat orclcdb_ora_25322_TC_NOLOB_ACTUAL.trc | grep -hE "^DP|^AP"


cat /u01/app/oracle/diag/rdbms/orclcdb/orclcdb/trace/orclcdb_ora_3776_bmd4dk0p4r0pc.trc  | grep -hE "^DP|^AP"
}}}


! your own session
{{{
trace the session

ALTER SESSION SET TRACEFILE_IDENTIFIER='LIO_TRACE';
ALTER SESSION SET EVENTS '10200 TRACE NAME CONTEXT FOREVER, LEVEL 1';

Then take the occurrence of the LIO reasons

$ less emrep_ora_9946_WATCH_CONSISTENT.trc | grep "started for block" | awk '{print $1} ' | sort | uniq -c
    324 ktrget2():
     44 ktrgtc2():


I found this too which more on tracking the objects
http://hoopercharles.wordpress.com/2011/01/24/watching-consistent-gets-10200-trace-file-parser/
}}}

! another session
{{{

1) create the files ss.sql and getlio.awk (see below)

2) get the sid and serial# and trace file name

SELECT s.sid, 
s.serial#,
s.server, 
lower( 
CASE 
WHEN s.server IN ('DEDICATED','SHARED') THEN 
i.instance_name || '_' || 
nvl(pp.server_name, nvl(ss.name, 'ora')) || '_' || 
p.spid || '.trc' 
ELSE NULL 
END 
) AS trace_file_name 
FROM v$instance i, 
v$session s, 
v$process p, 
v$px_process pp, 
v$shared_server ss 
WHERE s.paddr = p.addr 
AND s.sid = pp.sid (+) 
AND s.paddr = ss.paddr(+) 
AND s.type = 'USER' 
ORDER BY s.sid;

3) to start trace, set the 10200 event level 1

exec sys.dbms_system.set_ev(200, 11667, 10200, 1, '');

4) monitor the file size

while : ; do du -sm dw_ora_18177.trc ; echo "--" ; sleep 2 ; done

5) execute ss.sql on the sid for 5 times

6) to stop trace, set the 10200 event level 0

exec sys.dbms_system.set_ev(200, 11667, 10200, 0, '');

7) process the trace file and the oradebug output

-- get the top objects
awk -v trcfile=dw_ora_18177.trc -f getlio.awk

-- get the function names
less dw_ora_18177.trc | grep "started for block" | awk '{print $1} ' | sort | uniq -c

8) SQL to get the object names

	SELECT
	  OBJECT_NAME,
	  DATA_OBJECT_ID,
	  TO_CHAR(DATA_OBJECT_ID, 'XXXXX') HEX_DATA_OBJECT_ID
	FROM
	  DBA_OBJECTS
	WHERE
	  DATA_OBJECT_ID IN(
	    TO_NUMBER('15ced', 'XXXXX'))
	/

	OBJECT_NAME                                                                                                                      DATA_OBJECT_ID HEX_DA
	-------------------------------------------------------------------------------------------------------------------------------- -------------- ------
	OBJ$                                                                                                                                         18     12


	Summary obj for file: dw_ora_18177.trc
	---------------------------------
	0x00000012 2781466


	2781466 ktrget2():



#### ss.sql and getlio.awk scripts below

cat ss.sql
oradebug setospid &spid
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack
oradebug short_stack



$ cat getlio.awk
BEGIN {
   FS ="[ \t<>:]+"
    print "Details for file: " trcfile
   print "---------------------------------"
   while ( (getline < trcfile) > 0 ) {   # returns 1 per line, 0 at EOF, -1 on error
      if ( $0 ~ /started for block/ ) {
      rdba[$6]+=1
      obj[$8]+=1
      both[$6","$8]+=1
      #print $6 " " rdba[$6] ", " $8 " " obj[$8]
      }
   }
   close (trcfile)
   print ""

   print ""
   print "Summary rdba and obj for file: " trcfile
   print "---------------------------------"
   for ( var in both) {
      #print var " " both[var]
   }

   print ""
   print "Summary obj for file: " trcfile
   print "---------------------------------"
   for ( var in obj ) {
      print var " " obj[var]
   }
}

}}}
https://leetcode.com/problems/customers-who-bought-all-products/
{{{
1045. Customers Who Bought All Products
Medium

Table: Customer

+-------------+---------+
| Column Name | Type    |
+-------------+---------+
| customer_id | int     |
| product_key | int     |
+-------------+---------+
product_key is a foreign key to Product table.

Table: Product

+-------------+---------+
| Column Name | Type    |
+-------------+---------+
| product_key | int     |
+-------------+---------+
product_key is the primary key column for this table.

 

Write an SQL query for a report that provides the customer ids from the Customer table that bought all the products in the Product table.

For example:

Customer table:
+-------------+-------------+
| customer_id | product_key |
+-------------+-------------+
| 1           | 5           |
| 2           | 6           |
| 3           | 5           |
| 3           | 6           |
| 1           | 6           |
+-------------+-------------+

Product table:
+-------------+
| product_key |
+-------------+
| 5           |
| 6           |
+-------------+

Result table:
+-------------+
| customer_id |
+-------------+
| 1           |
| 3           |
+-------------+
The customers who bought all the products (5 and 6) are customers with id 1 and 3.

}}}

{{{
-- approach 1: compare sums of distinct keys; works here because product_key
-- values are positive and (via the foreign key) customers can only reference
-- keys that exist in product, so equal sums imply equal sets
select customer_id
from customer
group by customer_id
having sum(distinct product_key) = (select sum(distinct product_key) from product);

-- a plain join cannot express "bought ALL products"
-- select a.customer_id
-- from customer a, product b
-- where a.product_key = b.product_key;

-- approach 2 (the standard one): count distinct products per customer
-- and compare with the total product count
select customer_id
from customer
group by customer_id
having count(distinct product_key) = (select count(*) from product);
}}}
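
Another common formulation is relational division with a double NOT EXISTS; a sketch that returns the same result without aggregation:
{{{
-- customers for whom there is no product they did not buy
select distinct c.customer_id
from customer c
where not exists (
        select 1
        from product p
        where not exists (
                select 1
                from customer c2
                where c2.customer_id = c.customer_id
                  and c2.product_key = p.product_key));
}}}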
http://www.freelists.org/post/oracle-l/SQL-High-version-count-because-of-too-many-varchar2-columns,12
http://t31808.db-oracle-general.databasetalk.us/sql-high-version-count-because-of-too-many-varchar2-columns-t31808.html

SQLs With Bind Variable Has Very High Version Count (Doc ID 258742.1)
{{{
event="10503 trace name context forever, level " 

For example, if the maximum length of a character bind variable in the application is 128, then 

event="10503 trace name context forever, level 128" 

The EVENT 10503 was added as a result of BUG:2450264 
This fix introduces the EVENT 10503 which enables users to specify a character bind buffer length. 
Depending on the length used, the character binds in the child cursor can all be created 
using the same bind length; 
skipping bind graduation and keeping the child chain relatively small. 
This helps to alleviate a potential cursor-sharing problem related to graduated binds. 

The level of the event is the bind length to use, in bytes. 
It is relevant for binds of types: 

Character (but NOT ANSI Fixed CHAR (type 96 == DTYAFC)) 
Raw 
Long Raw 
Long 

* There is no limit for the EVENT 10503 other than the limits of the above datatypes: 
for non-PL/SQL calls, the maximum bind buffer size is 4001 bytes; for PL/SQL, 
it is 32K. 

* Specifying a buffer length which is greater than the pre-set maximum will cause the 
pre-set maximum to be used. To go back to using the pre-set lengths, specify '0' for the buffer 
length. 


Test the patch and event in development environment before implementing in the production environment. 
}}}
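
Before setting the event, V$SQL_SHARED_CURSOR helps confirm that bind length graduation is actually what drives the version count, and the event can be tested at session level first. A sketch (the sql_id substitution variable is a placeholder; exact column names such as BIND_LENGTH_UPGRADEABLE vary by version):
{{{
-- why are the children of this cursor not shared? (sql_id is a placeholder)
select child_number, bind_mismatch, bind_length_upgradeable
from v$sql_shared_cursor
where sql_id = '&sql_id';

-- session-level test before putting the event in the spfile
alter session set events '10503 trace name context forever, level 128';
}}}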
! tuning
http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
http://www.oracle.com/technetwork/server-storage/vm/ovm3-10gbe-perf-1900032.pdf
http://dak1n1.com/blog/7-performance-tuning-intel-10gbe
-- careful: the unbounded CONNECT BY below builds an infinite hierarchy and will hog the server's memory in no time
{{{
select count(*) from dual connect by 1=1;
}}}
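
For a row generator that does not blow up, bound the hierarchy depth instead:
{{{
-- generates exactly 1,000,000 rows and stops
select count(*) from dual connect by level <= 1000000;
}}}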

http://www.pythian.com/news/26003/rdbms-online-patching/

''Online Patching is a new feature introduced in 11.1.0.6; it is delivered starting with RDBMS 11.2.0.2.0.''

http://goo.gl/2U3H3

http://apex.oracle.com/pls/apex/f?p=44785:24:0:::24:P24_CONTENT_ID,P24_PREV_PAGE:4679,1

RDBMS Online Patching Aka Hot Patching [ID 761111.1]
''Quick guide to package ORA- errors with ADRCI'' http://www.evernote.com/shard/s48/sh/e6086cd4-ab4e-4065-b145-323cfa545f80/a831bef2f6480f43c96bb23749df2710


http://goo.gl/mNnaD

''quick step by step'' https://support.oracle.com/CSP/main/article?cmd=show&type=ATT&id=443529.1:Steps&inline=1
How to Build a Testcase for Oracle Data Server Support to Reproduce ORA-600 and ORA-7445 Errors (Doc ID 232963.1)


To change the ADR base
<<<
ADR base = "/u01/app/oracle/product/11.2.0.3/dbhome_1/log"
adrci>
adrci>
''adrci> set base /u01/app/oracle''
adrci>
adrci> show home
ADR Homes:
diag/asm/+asm/+ASM4
diag/tnslsnr/pd01db04/listener
diag/tnslsnr/pd01db04/listener_fsprd
diag/tnslsnr/pd01db04/listener_temp
diag/tnslsnr/pd01db04/listener_mtaprd11
diag/tnslsnr/pd01db04/listener_scan2
diag/tnslsnr/pd01db04/listener_mvwprd
diag/tnslsnr/pd01db04/stat
diag/rdbms/dbm/dbm4
diag/rdbms/dbfsprd/DBFSPRD4
diag/rdbms/mtaprd11/mtaprd112
diag/rdbms/fsprd/fsprd2
diag/rdbms/fsqacdc/fsqa2
diag/rdbms/fsprddal/fsprd2
diag/rdbms/mtaprd11dal/mtaprd112
diag/rdbms/mvwprd/mvwprd2
diag/rdbms/mvwprddal/mvwprd2
diag/clients/user_oracle/host_783020838_80
diag/clients/user_oracle/host_783020838_11
<<<
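
The same locations can be confirmed from inside the database:
{{{
select name, value
from v$diag_info
where name in ('ADR Base', 'ADR Home', 'Diag Trace');
}}}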


{{{
Use ADRCI or SWB steps to create IPS packages

ADRCI
1. Enter ADRCI
   # adrci
2. Show the existing ADR homes
   adrci> show home
3. Set the ADR home
   adrci> set home
4. Show all problems
   adrci> show problem
5. Show all incidents
   adrci> show incident
6. Package the diagnostic information for an incident
   adrci> ips pack incident <incident id>

SWB
1. Log in to Enterprise Manager
2. Click the 'Support Workbench' link
3. Select 'All Active' problems
4. Click the 'Problem ID' to view the corresponding incidents
5. Select the appropriate incident
6. Click 'Quick Package'
7. Enter the package name and description, and choose whether to upload to Oracle Support
8. Review the package information
9. Select 'Immediately' to create the package, and click the 'Submit' button

For more information, please read the following notes.
Note 422893.1 - 11g Understanding Automatic Diagnostic Repository.
Note 1091653.1 - "11g Quick Steps - How to create an IPS package using Support Workbench" [Video]
Note 443529.1 - 11g Quick Steps to Package and Send Critical Error Diagnostic Information to Support [Video] 
}}}
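The interactive ADRCI steps above can also be run in one shot from the shell; a sketch (the home path and incident id are illustrative):
{{{
# non-interactive packaging; substitute your own home and incident id
adrci exec="set home diag/rdbms/dbm/dbm4; ips pack incident 12345 in /tmp"
}}}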

! purge
http://www.runshell.com/2013/01/oracle-how-to-purge-old-trace-and-dump.html


11g : Active Database Duplication
 	Doc ID:	Note:568034.1



-- DATABASE REPLAY

Oracle Database Replay Client Provisioning - Platform Download Matrix
  	Doc ID: 	815567.1

How To Find Database Replay Divergence Details [ID 1388309.1]


Oracle Database 11g: Interactive Quick Reference http://goo.gl/rQejT
{{{

New Products Installed in 11g:
------------------------------

1) Oracle APEX
	**- Installed by default

2) Oracle Warehouse Builder
	**- Installed by default

3) Oracle Configuration Manager
	- Offered, not installed by default
		two options:
			connected mode	
			disconnected mode

4) SQL Developer
	- Installed by default with template-based database installations
	- It is also installed with database client

5) Database Vault
	- Installed by default (OPTIONAL component - custom installation)



Changes in Install Options:
---------------------------

1) Oracle Configuration Manager
	- Starting 11g, Integrated with OUI (OPTIONAL component)

2) Oracle Data Mining
	- Selected on Enterprise Edition Installation type

3) Oracle Database Vault
	- Starting 11g, Integrated with OUI (OPTIONAL component - custom installation)

4) Oracle HTTP Server
	- Starting 11g, Available on separate media

5) Oracle Ultra Search
	- Starting 11g, Integrated with the Oracle Database

6) Oracle XML DB
	- Starting 11g, Installed by default



New Parameters:
---------------

MEMORY_TARGET
DIAGNOSTIC_DEST



New in ASM:
-----------

Automatic Storage Management Fast Mirror Resync
	see: Oracle Database Storage Administrator's Guide
SYSASM privilege
OSASM group



New Directories:
----------------

ADR_base/diag	<-- automatic diagnostic repository



Deprecated Components: 
----------------------

iSQL*Plus
Oracle Workflow
Oracle Data Mining Scoring Engine
Oracle Enterprise Manager Java Console




Overview of Installation:
-------------------------

CSS (Cluster Synchronization Services) does the synchronization between ASM and database instance
	for RAC, resides on Clusterware Home
	for Single Node-Single System, resides on home directory of ASM instance


Automatic Storage Management
	can be used starting 10.1.0.3 or later
	also, an 11.1 database can use an ASM instance from 10.1


Database Management Options:
	either you use:
	1) Enterprise Manager Grid Control
		Oracle Management Repository & Service --> Install Management Agent on each computer
	2) Local Database Control


Upgrading the database using RHEL 2.1 OS
	www.oracle.com/technology/tech/linux/pdf/rhel_23_upgrade.pdf



Preinstallation:
----------------


1) Logging In to the System as root

2) Checking the Hardware Requirements
	**NEW-parameters:
		memory_max_target
		memory_target

3) Checking the Software Requirements
	# Operating System Requirements
	# Kernel Requirements
	# Package Requirements
rpm -qa | grep -i "binutils"
rpm -qa | grep -i "compat-libstdc++"
rpm -qa | grep -i "elfutils-libelf"
rpm -qa | grep -i "elfutils-libelf-devel"
rpm -qa | grep -i "glibc"
rpm -qa | grep -i "glibc-common"
rpm -qa | grep -i "glibc-devel"
rpm -qa | grep -i "gcc"
rpm -qa | grep -i "gcc-c++"
rpm -qa | grep -i "libaio"
rpm -qa | grep -i "libaio-devel" 
rpm -qa | grep -i "libgcc"
rpm -qa | grep -i "libstdc++" 
rpm -qa | grep -i "libstdc++-devel"
rpm -qa | grep -i "make"
rpm -qa | grep -i "sysstat"
rpm -qa | grep -i "unixODBC"
rpm -qa | grep -i "unixODBC-devel"


NOT DISCOVERED:
rpm -qa | grep -i "elfutils-libelf-devel"
	dep: elfutils-libelf-devel-static-0.125-3.el5.i386.rpm
rpm -qa | grep -i "libaio-devel"
rpm -qa | grep -i "sysstat"
rpm -qa | grep -i "unixODBC"
rpm -qa | grep -i "unixODBC-devel"

	# Compiler Requirements
	# Additional Software Requirements

4) Preinstallation Requirements for Oracle Configuration Manager

5) Checking the Network Setup
	# Configuring Name Resolution
	# Installing on DHCP Computers
	# Installing on Multihomed Computers
	# Installing on Computers with Multiple Aliases
	# Installing on Non-Networked Computers

6) Creating Required Operating System Groups and Users
	**NEW-group:
		OSASM group, usually named "asmadmin"
		this group is for ASM storage administrators

groupadd oinstall
groupadd dba
groupadd oper
groupadd asmadmin
useradd -g oinstall -G dba,oper,asmadmin oracle

7) Configuring Kernel Parameters

in /etc/sysctl.conf
	# Controls the maximum shared segment size, in bytes
	kernel.shmmax = 4294967295
	
	# Controls the total amount of shared memory allowed, in pages
	kernel.shmall = 268435456
	
	fs.file-max = 102552
	kernel.shmmni = 4096
	kernel.sem = 250 32000 100 128
	net.ipv4.ip_local_port_range = 1024 65000
	net.core.rmem_default = 4194304
	net.core.rmem_max = 4194304
	net.core.wmem_default = 262144
	net.core.wmem_max = 262144

to increase shell limits:
in /etc/security/limits.conf
	oracle              soft    nproc   2047
	oracle              hard    nproc   16384
	oracle              soft    nofile  1024
	oracle              hard    nofile  65536

in /etc/pam.d/login
	session    required     /lib/security/pam_limits.so
	session    required     pam_limits.so

in /etc/profile
	if [ $USER = "oracle" ]; then
		if [ $SHELL = "/bin/ksh" ]; then
		ulimit -p 16384
		ulimit -n 65536
		else
		ulimit -u 16384 -n 65536
		fi
	fi

8) Identifying Required Software Directories

9) Identifying or Creating an Oracle Base Directory
[root@localhost ~]# mkdir -p /u01/app
[root@localhost ~]# chown -R oracle:oinstall /u01/app
[root@localhost ~]# chmod -R 775 /u01/app

10) Choosing a Storage Option for Oracle Database and Recovery Files

11) Creating Directories for Oracle Database or Recovery Files
[root@localhost oracle]# mkdir flash_recovery_area
[root@localhost oracle]# chown oracle:oinstall flash_recovery_area/
[root@localhost oracle]# chmod 775 flash_recovery_area/

12) Preparing Disk Groups for an Automatic Storage Management Installation
13) Stopping Existing Oracle Processes
14) Configuring the oracle User's Environment
umask 022

export ORACLE_HOME=/u01/app/oracle/product/11.1.0/db_1
export ORACLE_BASE=/u01/app/oracle
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export ORACLE_SID=ora11

PATH=$ORACLE_HOME/bin:$PATH


}}}
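Once the instance is up, the new parameters called out above can be sanity-checked from SQL*Plus; a quick sketch using the 11g advice view:
{{{
show parameter memory_target
show parameter diagnostic_dest

-- how would db time change if memory_target were resized?
select * from v$memory_target_advice order by memory_size;
}}}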
Creating an Oracle ACFS File System https://docs.oracle.com/database/121/OSTMG/GUID-4C98CF06-8CCC-45F1-9316-C40FB3EFF268.htm#OSTMG94787
http://www.oracle-base.com/articles/11g/ACFS_11gR2.php

ACFS Technical Overview and Deployment Guide [ID 948187.1]  ''<-- ACFS now supports RMAN, DataPump on 11.2.0.3 and above... BTW, it does not support archivelogs… You still have to have the FRA diskgroup to put your archivelogs/redo. At least you can have the ACFS as a container for backupsets and data pump files''

''update''
11.2.0.3 now supports almost everything
http://docs.oracle.com/cd/E11882_01/server.112/e18951/asmfilesystem.htm#CACJFGCD
Starting with Oracle Automatic Storage Management 11g Release 2 (11.2.0.3), Oracle ACFS supports RMAN backups (BACKUPSET file type), archive logs (ARCHIVELOG file type), and Data Pump dumpsets (DUMPSET file type). Note that Oracle ACFS snapshots are not supported with these files.

''update 08/2014''
ACFS supported on Exadata
<<<
Creating ACFS file systems on Exadata storage requires the following:

Oracle Linux
Grid Infrastructure 12.1.0.2
Database files stored in ACFS on Exadata storage are subject to the following guidelines and restrictions:

Supported database versions are 10.2.0.4, 10.2.0.5, 11.2.0.4, and 12.1.
Hybrid Columnar Compression (HCC) support (for 11.2 and 12.1) requires fix for bug 19136936.
Exadata-offload features such as Smart Scan, Storage Indexes, IORM, Network RM, etc. are not supported.
Exadata Smart Flash Cache will cache read operations. Caching of write operations is expected in a later release.
No specialized cache hints are passed from the Database to the Exadata Storage layer, which means the Smart Flash Cache heuristics are based on I/O size, similar to any other block storage caching technology.
Exadata Smart Flash Logging is not supported.
Hardware Assisted Resilient Data (HARD) checks are not performed.
<<<


How To Install/Reinstall Or Deinstall ACFS Modules/Installation Manually? [ID 1371067.1]

http://www.oracle-base.com/articles/11g/DBFS_11gR2.php
http://ronnyegner.wordpress.com/2009/10/08/the-oracle-database-file-system-dbfs/
http://www.pythian.com/news/17849/chopt-utility/
http://perumal.org/enabling-and-disabling-database-options/
http://juliandyke.wordpress.com/2010/10/06/oracle-11-2-0-2-requires-multicasting-on-the-interconnect/
http://dbastreet.com/blog/?p=515
http://blog.ronnyegner-consulting.de/oracle-11g-release-2-install-guide/
{{{
the only difference it makes on databases that have DBV and TDE configured is that when 
DBAs try to create a user, it has to go through the dvadmin user. Other databases that don't have the 
DV schemas created and configured will still behave as is. 

Below is a sample of creating a user in a DBV environment

SYS@dbv_1> select username from dba_users order by 1;

USERNAME
------------------------------
ANONYMOUS
APEX_030200
APEX_PUBLIC_USER
APPQOSSYS
BI
CTXSYS
DBSNMP
DIP
DVADMIN
DVF
DVOWNER
DVSYS

SYS@dbv_1> conn / as sysdba 
SYS@dbv_1> create user karlarao identified by karlarao;


create user karlarao identified by karlarao
                                   *
ERROR at line 1:
ORA-01031: insufficient privileges


SYS@dbv_1> conn dvadmin/<password>
Connected.
DVADMIN@dbv_1> create user karlarao identified by karlarao;

User created.
}}}
http://www.dpriver.com/blog/list-of-demos-illustrate-how-to-use-general-sql-parser/oracle-sql-query-rewrite/

{{{
1. (NOT) IN sub-query to (NOT) EXISTS sub-query

2. (NOT) EXISTS sub-query to (NOT) IN sub-query

3. Separate outer joined inline view using UNION ALL or add hint for the inline view

4. IN clause to UNION ALL statement

5. OR clause to UNION ALL statement

6. NVL function to UNION ALL statement

7. Re-write suppressed joined columns in the WHERE clause

8. VIEW expansion

9. NOT EXISTS to NOT IN hash anti-join

10. Make columns suppressed using RTRIM function or '+0'

11. Add hint to the statement

12. Co-related sub-query to inline View
}}}
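As an illustration of rewrite #1 above, an IN sub-query and its EXISTS equivalent (EMP/DEPT are just the usual demo tables):
{{{
-- IN form
select * from dept d
 where d.deptno in (select e.deptno from emp e);

-- equivalent EXISTS form
select * from dept d
 where exists (select null from emp e where e.deptno = d.deptno);
}}}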


! 2021 
Common Coding and Design mistakes (that really mess up performance) https://www.slideshare.net/SageComputing/optmistakesora11dist









https://balazspapp.wordpress.com/2018/04/05/oracle-18c-recover-standby-database-from-service/
https://www.virtual-dba.com/blog/refreshing-physical-standby-using-recover-from-service-on-12c/
https://dbtut.com/index.php/2019/12/27/recover-datbase-using-service-refresh-standby-database-in-oracle-12c/


Restoring and Recovering Files Over the Network (from SERVICE)
https://docs.oracle.com/database/121/BRADV/rcmadvre.htm#BRADV685

Creating a Physical Standby database using RMAN restore database from service (Doc ID 2283978.1)
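At its core the technique is a couple of RMAN commands run against the mounted standby; a minimal sketch, assuming a TNS alias PRIM_SVC pointing at the primary (see the docs/notes above for the full procedure):
{{{
# on the standby host, with the standby instance mounted
RMAN> connect target /
RMAN> recover database from service 'PRIM_SVC' noredo using compressed backupset;
}}}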
http://emarcel.com/upgrade-oracle-database-12c-with-asm-12-1-0-1-to-12-1-0-2/
{{{
LISTENER =
  (ADDRESS_LIST=
        (ADDRESS=(PROTOCOL=tcp)(HOST=localhost)(PORT=1521))
        (ADDRESS=(PROTOCOL=ipc)(KEY=EXTPROC1521)))

SID_LIST_LISTENER=
   (SID_LIST=
        (SID_DESC=
          (GLOBAL_DBNAME=orcl)
          (SID_NAME=orcl)
          (ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1)
        )
        (SID_DESC=
          (GLOBAL_DBNAME=noncdb)
          (SID_NAME=noncdb)
          (ORACLE_HOME=/u01/app/oracle/product/12.1.0/dbhome_1)
        )

      )

SECURE_REGISTER_LISTENER = (IPC)


}}}

https://martincarstenbach.wordpress.com/2012/06/20/little-things-worth-knowing-static-and-dynamic-listener-registration/
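On the dynamic registration side, the instance can be checked and nudged from SQL*Plus; a small sketch:
{{{
-- which listener does the instance register with?
show parameter local_listener

-- force re-registration now instead of waiting for the next cycle
alter system register;
}}}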
http://kerryosborne.oracle-guy.com/papers/12c_Adaptive_Optimization.pdf
https://oracle-base.com/articles/12c/adaptive-plans-12cr1


https://blog.dbi-services.com/sql-monitoring-12102-shows-adaptive-plans/
https://blog.dbi-services.com/oracle-12c-adaptive-plan-inflexion-point/
https://blogs.oracle.com/letthesunshinein/sql-monitor-now-tells-you-whether-the-execution-plan-was-adaptive-or-not
https://oracle.readthedocs.io/en/latest/sql/plans/adaptive-query-optimization.html#sql-adaptive
<<showtoc>> 

! 12.1 

!! optimizer_adaptive_features 
* In 12.1, adaptive optimization as a whole is controlled by the dynamic parameter optimizer_adaptive_features, which defaults to TRUE. All of the features it controls are enabled when optimizer_features_enable >= 12.1


! 12.2 

!! optimizer_adaptive_features has been obsoleted, replaced by two new parameters

!! optimizer_adaptive_plans, defaults to TRUE
* The optimizer_adaptive_plans parameter controls whether the optimizer creates adaptive plans and defaults to TRUE.
* The most commonly seen use of adaptive plans is where different sub-plans that may use different join methods are selected at run time. For example, a nested loops join may be converted to a hash join once execution information has identified that it provides better performance. The plan has been adapted according to the data presented.

!! optimizer_adaptive_statistics, defaults to FALSE
* The optimizer_adaptive_statistics parameter controls whether the optimizer uses adaptive statistics and defaults to FALSE
* The creation of automatic extended statistics is controlled by the table-level statistics preference AUTO_STAT_EXTENSIONS, which defaults to OFF.  (AUTO_STAT_EXTENSIONS can be set using DBMS_STATS procedures like SET_TABLE_PREFS and SET_GLOBAL_PREFS.) These defaults have been chosen to place emphasis on achieving stable SQL execution plans
* Setting optimizer_features_enable has no effect on the features controlled by optimizer_adaptive_statistics. The creation of automatic extended statistics is controlled by the table-level statistics preference AUTO_STAT_EXTENSIONS, which defaults to OFF.
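A minimal sketch of checking and setting the two 12.2 parameters and the statistics preference described above (the values shown are the defaults):
{{{
show parameter optimizer_adaptive

alter system set optimizer_adaptive_plans = true scope=both;
alter system set optimizer_adaptive_statistics = false scope=both;

-- table-level preference controlling automatic extended statistics
exec dbms_stats.set_global_prefs('AUTO_STAT_EXTENSIONS','OFF')
select dbms_stats.get_prefs('AUTO_STAT_EXTENSIONS') from dual;
}}}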




Monitoring Business Applications http://docs.oracle.com/cd/E24628_01/install.121/e24215/bussapps.htm#BEIBBHFH

It’s kind of a Service Type that combines information from:
* Systems (PSFT systems for example), 
* Service tests, 
* Real User experience Insight data and 
* Business Transaction Management data. 

http://hemantoracledba.blogspot.sg/2013/07/concepts-features-overturned-in-12c.html

Oracle Database 12c Release 1 Information Center (Doc ID 1595421.2)
Release Schedule of Current Database Releases (Doc ID 742060.1)

Master Note For Oracle Database 12c Release 1 (12.1) Database/Client Installation/Upgrade/Migration Standalone Environment (Non-RAC) (Doc ID 1520299.1)
Master Note of Linux OS Requirements for Database Server (Doc ID 851598.1)
Requirements for Installing Oracle Database 12.1 on RHEL5 or OL5 64-bit (x86-64) (Doc ID 1529433.1)
Requirements for Installing Oracle Database 12.1 on RHEL6 or OL6 64-bit (x86-64) (Doc ID 1529864.1)

Exadata 12.1.1.1.0 release and patch (16980054 ) (Doc ID 1571789.1)
http://ermanarslan.blogspot.com/2014/02/rac-listener-configuration-in-oracle.html
<<showtoc>>

<<<
12c, single instance installation featuring Oracle 12.1.0.2.0 on Oracle Linux 6.6. 
The system is configured with 8 GB of RAM and 2 virtual CPUs. 
The username and password match for the oracle account. Root password is r00t. 
The ORACLE_HOME is in /u01/app/oracle/product/12.1.0.2/dbhome_1
<<<


! LAB X: OEM EXPRESS
{{{
0) create a swingbench schema

method a: lights out using swingbench installation

$> ./oewizard -scale 1 -dbap change_on_install -u soe_master -p soe_master -cl -cs //localhost/NCDB -ts SOE -create
SwingBench Wizard
Author  :	 Dominic Giles
Version :	 2.5.0.949

Running in Lights Out Mode using config file : oewizard.xml

============================================
|           Datagenerator Run Stats        |
============================================
Connection Time                        0:00:00.004
Data Generation Time                   0:00:20.889
DDL Creation Time                      0:00:56.606
Total Run Time                         0:01:17.503
Rows Inserted per sec                      579,546
Data Generated (MB) per sec                   47.2
Actual Rows Generated                   13,007,340


Post Creation Validation Report
===============================
The schema appears to have been created successfully.

Valid Objects
=============
Valid Tables : 'ORDERS','ORDER_ITEMS','CUSTOMERS','WAREHOUSES','ORDERENTRY_METADATA','INVENTORIES','PRODUCT_INFORMATION','PRODUCT_DESCRIPTIONS','ADDRESSES','CARD_DETAILS'
Valid Indexes : 'PRD_DESC_PK','PROD_NAME_IX','PRODUCT_INFORMATION_PK','PROD_SUPPLIER_IX','PROD_CATEGORY_IX','INVENTORY_PK','INV_PRODUCT_IX','INV_WAREHOUSE_IX','ORDER_PK','ORD_SALES_REP_IX','ORD_CUSTOMER_IX','ORD_ORDER_DATE_IX','ORD_WAREHOUSE_IX','ORDER_ITEMS_PK','ITEM_ORDER_IX','ITEM_PRODUCT_IX','WAREHOUSES_PK','WHS_LOCATION_IX','CUSTOMERS_PK','CUST_EMAIL_IX','CUST_ACCOUNT_MANAGER_IX','CUST_FUNC_LOWER_NAME_IX','ADDRESS_PK','ADDRESS_CUST_IX','CARD_DETAILS_PK','CARDDETAILS_CUST_IX'
Valid Views : 'PRODUCTS','PRODUCT_PRICES'
Valid Sequences : 'CUSTOMER_SEQ','ORDERS_SEQ','ADDRESS_SEQ','LOGON_SEQ','CARD_DETAILS_SEQ'
Valid Code : 'ORDERENTRY'
Schema Created

Method b) exp/imp

FYI - the export information

[enkdb03:oracle:MBACH] /home/oracle/mbach/swingbench/bin
> expdp system/manager directory=oradir logfile=exp_soe_master.txt dumpfile=exp_soe_master.dmp schemas=soe_master

Export: Release 12.1.0.2.0 - Production on Mon Jun 8 05:20:55 2015

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

Connected to: Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, Real Application Clusters, Automatic Storage Management, OLAP,
Advanced Analytics and Real Application Testing options
Starting "SYSTEM"."SYS_EXPORT_SCHEMA_01":  system/******** directory=oradir logfile=exp_soe_master.txt dumpfile=exp_soe_master.dmp schemas=soe_master
Estimate in progress using BLOCKS method...
Processing object type SCHEMA_EXPORT/TABLE/TABLE_DATA
Total estimation using BLOCKS method: 1.219 GB
Processing object type SCHEMA_EXPORT/USER
Processing object type SCHEMA_EXPORT/SYSTEM_GRANT
Processing object type SCHEMA_EXPORT/ROLE_GRANT
Processing object type SCHEMA_EXPORT/DEFAULT_ROLE
Processing object type SCHEMA_EXPORT/TABLESPACE_QUOTA
Processing object type SCHEMA_EXPORT/PRE_SCHEMA/PROCACT_SCHEMA
Processing object type SCHEMA_EXPORT/SEQUENCE/SEQUENCE
Processing object type SCHEMA_EXPORT/TABLE/TABLE
Processing object type SCHEMA_EXPORT/PACKAGE/PACKAGE_SPEC
Processing object type SCHEMA_EXPORT/PACKAGE/COMPILE_PACKAGE/PACKAGE_SPEC/ALTER_PACKAGE_SPEC
Processing object type SCHEMA_EXPORT/VIEW/VIEW
Processing object type SCHEMA_EXPORT/PACKAGE/PACKAGE_BODY
Processing object type SCHEMA_EXPORT/TABLE/INDEX/INDEX
Processing object type SCHEMA_EXPORT/TABLE/INDEX/FUNCTIONAL_INDEX/INDEX
Processing object type SCHEMA_EXPORT/TABLE/CONSTRAINT/CONSTRAINT
Processing object type SCHEMA_EXPORT/TABLE/INDEX/STATISTICS/INDEX_STATISTICS
Processing object type SCHEMA_EXPORT/TABLE/INDEX/STATISTICS/FUNCTIONAL_INDEX/INDEX_STATISTICS
Processing object type SCHEMA_EXPORT/TABLE/CONSTRAINT/REF_CONSTRAINT
Processing object type SCHEMA_EXPORT/TABLE/STATISTICS/TABLE_STATISTICS
Processing object type SCHEMA_EXPORT/STATISTICS/MARKER
. . exported "SOE_MASTER"."ORDER_ITEMS"                  228.4 MB 4290312 rows
. . exported "SOE_MASTER"."ADDRESSES"                    110.4 MB 1500000 rows
. . exported "SOE_MASTER"."CUSTOMERS"                    108.0 MB 1000000 rows
. . exported "SOE_MASTER"."ORDERS"                       129.1 MB 1429790 rows
. . exported "SOE_MASTER"."INVENTORIES"                  15.26 MB  901254 rows
. . exported "SOE_MASTER"."CARD_DETAILS"                 63.88 MB 1500000 rows
. . exported "SOE_MASTER"."LOGON"                        51.24 MB 2382984 rows
. . exported "SOE_MASTER"."PRODUCT_DESCRIPTIONS"         216.8 KB    1000 rows
. . exported "SOE_MASTER"."PRODUCT_INFORMATION"          188.1 KB    1000 rows
. . exported "SOE_MASTER"."ORDERENTRY_METADATA"          5.617 KB       4 rows
. . exported "SOE_MASTER"."WAREHOUSES"                   35.70 KB    1000 rows
Master table "SYSTEM"."SYS_EXPORT_SCHEMA_01" successfully loaded/unloaded
******************************************************************************
Dump file set for SYSTEM.SYS_EXPORT_SCHEMA_01 is:
  /home/oracle/mbach/oradir/exp_soe_master.dmp
Job "SYSTEM"."SYS_EXPORT_SCHEMA_01" successfully completed at Mon Jun 8 05:23:39 2015 elapsed 0 00:02:39


-> this needs to be imported into NCDB, taken from /u01/software

1) enable OEM Express for your database if you haven't already.

Check if enabled: 
  select dbms_xdb.getHttpPort() from dual;
  select dbms_xdb_config.getHttpsPort() from dual;

If neither query returns a result, set it up
  exec dbms_xdb_config.sethttpsport(5500);

2) start charbench on the command line

 create an AWR snapshot (exec dbms_workload_repository.create_snapshot)

 ./charbench -u soe_master -p soe_master -cs //localhost/NCDB -uc 10 -min -10 -max 100 -stats full -rt 0:10 -bs 0:01 -a 

 create another AWR snapshot (exec dbms_workload_repository.create_snapshot)

3) view the activity with your OEM express

if you need to use port-forwarding:
	ssh -L <oem express port>:localhost:<oem express port> oracle@<VM-IP>

Then point your browser to it: https://<VM-IP>:<oem express port>/em

4) explore OEM express

Look at the performance overview page
Review the performance hub and look at the various panes available to you

5) Create an active-html AWR report

Review and admire it
}}}

! LAB X) SQL Monitor reports

{{{
SQL Monitor reports are a very useful performance monitoring and tuning tool. In this lab you will start experimenting with them. In order to do so you need a query. In the first step you'll create one of your own liking based on the SOE schema you imported earlier. Be sure to supply the /*+ monitor */ hint when executing it!

0) run a large query

select /*+ monitor gather_plan_statistics sqlmon001 */
count(*) 
from customers c, 
 addresses a, 
 orders o, 
 order_items oi
where o.order_id = oi.order_id
and o.customer_id = c.customer_id
and a.customer_id = c.customer_id
and c.credit_limit = 
  (select max(credit_limit) from customers);

1) Create a SQL Monitor report from OEM express

Navigate the User Interface and find your monitored query. Take a note of the SQL ID, you will need it in step 3

2) Create a text version of the same SQL report

The graphical monitoring report requires a GUI and once retrieved, also relies on loading data from Oracle's website. In secure environments you may not have access to the Internet. In this step you need to look up the documentation for dbms_sqltune.report_sql_monitor and produce a text version of the report.

select dbms_sqltune.report_sql_monitor('&sqlID') from dual;

Review the reports and have a look around
}}}
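Besides the text version, the self-contained ACTIVE (HTML) report can be spooled straight from SQL*Plus; a sketch:
{{{
set long 1000000 longchunksize 1000000 pagesize 0 trimspool on linesize 1000
spool sqlmon_active.html
select dbms_sqltune.report_sql_monitor(sql_id => '&sqlID', type => 'ACTIVE', report_level => 'ALL') from dual;
spool off
}}}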

! LAB X: OTHER DEVELOPMENT FEATURES

{{{
This is a large-ish lab where you are going to explore various development-related features with the database. 

1) Advanced index compression

The first lab will introduce you to index compression. It's based on a table created as a subset of soe.order_items. Copy the following script and execute it in your environment. 

SET ECHO ON;

DROP TABLE t1 purge;

CREATE TABLE t1 NOLOGGING AS 
SELECT * FROM ORDER_ITEMS WHERE ROWNUM <= 1e6;

CREATE INDEX t1_i1 ON t1 (order_id,line_item_id,product_id);

CREATE INDEX t1_i2 ON t1 (order_id, line_item_id);

CREATE INDEX t1_i3 ON t1 (order_id,line_item_id,product_id,unit_price);

CREATE INDEX t1_i4 ON t1 (order_id);

COL segment_name FOR A5 HEA "INDEX";

SET ECHO OFF;
SPO index.txt;
PRO NO COMPRESS

SELECT segment_name,
       blocks
  FROM user_segments
 WHERE segment_name LIKE 'T1%'
   AND segment_type = 'INDEX'
 ORDER BY
       segment_name;

SPO OFF;

SET ECHO ON;

/*
DROP TABLE t1 purge;

CREATE TABLE t1 NOLOGGING 
AS 
SELECT * FROM ORDER_ITEMS WHERE ROWNUM <= 1e6;
*/

DROP INDEX t1_i1;
DROP INDEX t1_i2;
DROP INDEX t1_i3;
DROP INDEX t1_i4;

CREATE INDEX t1_i1 ON t1 (order_id,line_item_id,product_id) COMPRESS 2;

CREATE INDEX t1_i2 ON t1 (order_id, line_item_id) COMPRESS 1;

CREATE INDEX t1_i3 ON t1 (order_id,line_item_id,product_id,unit_price) COMPRESS 3;

CREATE INDEX t1_i4 ON t1 (order_id) COMPRESS 1;

SET ECHO OFF;
SPO index.txt APP;
PRO PREFIX COMPRESSION

SELECT segment_name,
       blocks
  FROM user_segments
 WHERE segment_name LIKE 'T1%'
   AND segment_type = 'INDEX'
 ORDER BY
       segment_name;

SPO OFF;

SET ECHO ON;

DROP INDEX t1_i1;
DROP INDEX t1_i2;
DROP INDEX t1_i3;
DROP INDEX t1_i4;

CREATE INDEX t1_i1 ON t1 (order_id,line_item_id,product_id) COMPRESS ADVANCED LOW;

CREATE INDEX t1_i2 ON t1 (order_id, line_item_id) COMPRESS ADVANCED LOW;

CREATE INDEX t1_i3 ON t1 (order_id,line_item_id,product_id,unit_price) COMPRESS ADVANCED LOW;

CREATE INDEX t1_i4 ON t1 (order_id) COMPRESS ADVANCED LOW;

SET ECHO OFF;
SPO index.txt APP;
PRO ADVANCED COMPRESSION

SELECT segment_name,
       blocks
  FROM user_segments
 WHERE segment_name LIKE 'T1%'
   AND segment_type = 'INDEX'
 ORDER BY
       segment_name;

SPO OFF;

SET ECHO ON;

Review file index.txt and have a look at the various compression results.


2) Sequences as default values

In this part of the lab you will create a few tables and experiment with sequences as default values for surrogate keys. You will need to create the following:

- table the_old_way: make sure it has an "ID" column as primary key
- create a sequence
- create a trigger that populates the ID if not supplied in the insert command 
- insert 100000 rows

One Potential Solution:

Create a sequence to allow the population of the table using default values.

create sequence s cache 10000 noorder;

Create a simple table to hold an ID column to be used as a primary key. Add a few more columns such as a timestamp and a vc (varchar2) to store information. Next, create a before-insert trigger that captures the insert and sets the ID's value to sequence.nextval, but only if the ID column is not part of the insert statement! The next step is to create an anonymous PL/SQL block to insert 100000 rows into the table.

create table the_old_way (
  id number primary key,
   d  timestamp not null,
  vc varchar2(50) not null
)
/

create or replace trigger the_old_way_bit
before insert on the_old_way for each row
declare
begin
 if :new.id is null then
  :new.id := s.nextval;
 end if;
end;
/

begin
   for i in 1..100000 loop
    insert into the_old_way (d, vc) values (systimestamp, 'with trigger');
   end loop;
end;
/

Note down the time for the execution of the PL/SQL block

Part two of the lab is a test with sequences as default values for the column. Create another table similar to the first one created, but this time without the trigger. Ensure that the ID column is used as a primary key and that it has the sequence's next value as its default value. Then insert 100000 rows and note the time.

drop sequence s;

create sequence s cache 10000 noorder;

create table the_12c_way (
   id number default s.nextval primary key,
   d  timestamp not null,
   vc varchar2(50) not null
)
/

begin
   for i in 1..100000 loop
    insert into the_12c_way (d, vc) values (systimestamp, 'with default');
   end loop;
end;
/

Finally create yet another table, but this time with identity columns. Ensure that the identity column is defined in the same way as the sequence you created earlier. Then insert again and note the time.

create table the_12c_way_with_id (
   id number generated always as identity (
     start with 1 cache 100000),
   d  timestamp not null,
   vc varchar2(50) not null
)
/

begin
   for i in 1..100000 loop
    insert into the_12c_way_with_id (d, vc) values (systimestamp, 'with identity');
   end loop;
end;
/

Before finishing this section review the objects created as part of the identity table's DDL.

col IDENTITY_OPTIONS for a50 wrap
col SEQUENCE_NAME    for a30
col COLUMN_NAME      for a15

select column_name, generation_type, sequence_name, identity_options from USER_TAB_IDENTITY_COLS;

3) Embed a function in the WITH clause

Create a statement that selects from t1 and uses a function declared in the with-clause of the query to return a truncated date.

with
 function silly_little_function (pi_d in date) 
 return date is
 begin 
  return trunc(pi_d); 
 end;
select order_id, silly_little_function(dispatch_date)
 from t1 where rownum < 11
/

4) Automatic gathering of table statistics

Create table t2 as select * from t1 and check the table statistics. Are they current? Why are table statistics gathered during a CTAS statement?

SQL> create table t2 as select * from t1 sample (50);

Table created.

Elapsed: 00:00:00.73
SQL> select table_name, partitioned, num_rows from tabs where table_name = 'T2';

TABLE_NAME                     PAR   NUM_ROWS
------------------------------ --- ----------
T2                             NO      500736

Elapsed: 00:00:00.04

SQL> select count(*) from t2;

  COUNT(*)
----------
    500736

Elapsed: 00:00:00.10
SQL> select sql_id from v$sql where sql_text = 'create table t2 as select * from t1 sample (50)';

SQL_ID
-------------
0h72ryws535xf

SQL> select * from table(dbms_xplan.display_cursor('0h72ryws535xf',null));

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------
SQL_ID  0h72ryws535xf, child number 0
-------------------------------------
create table t2 as select * from t1 sample (50)

Plan hash value: 2307360015

-----------------------------------------------------------------------------------------
| Id  | Operation                        | Name | Rows  | Bytes | Cost (%CPU)| Time     |
-----------------------------------------------------------------------------------------
|   0 | CREATE TABLE STATEMENT           |      |       |       |  2780 (100)|          |
|   1 |  LOAD AS SELECT                  |      |       |       |            |          |
|   2 |   OPTIMIZER STATISTICS GATHERING |      |   500K|    24M|  2132   (1)| 00:00:01 |
|   3 |    TABLE ACCESS STORAGE SAMPLE   | T1   |   500K|    24M|  2132   (1)| 00:00:01 |
-----------------------------------------------------------------------------------------

Note
-----
   - automatic DOP: Computed Degree of Parallelism is 1 because of parallel threshold


19 rows selected.

5) Top-N queries and pagination

Top-N queries used to be interesting in Oracle before 12c. In this lab you will appreciate their ease of use.

- list how many rows there are in table t1

select count(*) from t1;

- what are the min and max dispatch dates in the table?

SQL> alter session set nls_date_format='dd.mm.yyyy hh24:mi:ss';

SQL> select min(dispatch_date), max(dispatch_date) from t1;

MIN(DISPATCH_DATE)  MAX(DISPATCH_DATE)
------------------- -------------------
01.01.2012 00:00:00 03.05.2012 00:00:00

- Create a query that orders rows in t1 by dispatch date and shows the first 15 rows only

select order_id, dispatch_date, gift_wrap from t1 order by dispatch_date fetch first 15 rows only;

- create a query that orders rows in t1 by dispatch date and shows rows 150 to 155

select order_id, dispatch_date, gift_wrap from t1 order by dispatch_date offset 150 rows fetch next 5 rows only;

- rewrite the last query with the pre-12c syntax and compare results

http://www.oracle.com/technetwork/issue-archive/2006/06-sep/o56asktom-086197.html

select * 
  from ( select /*+ FIRST_ROWS(n) */ 
  a.*, ROWNUM rnum 
      from ( select order_id, dispatch_date, gift_wrap from t1 order by dispatch_date ) a 
      where ROWNUM <= 155 ) 
where rnum  > 150;

- Compare execution times and plans


SQL> select * from table(dbms_xplan.display_cursor);

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------
SQL_ID  b20zp6jrn2yag, child number 0
-------------------------------------
select *   from ( select /*+ FIRST_ROWS(n) */   a.*, ROWNUM rnum
from ( select order_id, dispatch_date, gift_wrap from t1 order by
dispatch_date ) a       where ROWNUM <= 155 ) where rnum  > 150

Plan hash value: 2771300550

---------------------------------------------------------------------------------------------------------
| Id  | Operation                                | Name | Rows  | Bytes |TempSpc| Cost (%CPU)| Time     |
---------------------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT                         |      |       |       |       |  8202 (100)|          |
|*  1 |  VIEW                                    |      |   155 |  7285 |       |  8202   (1)| 00:00:01 |
|*  2 |   COUNT STOPKEY                          |      |       |       |       |            |          |
|   3 |    VIEW                                  |      |  1000K|    32M|       |  8202   (1)| 00:00:01 |
|*  4 |     SORT ORDER BY STOPKEY                |      |  1000K|    19M|    30M|  8202   (1)| 00:00:01 |
|   5 |      TABLE ACCESS STORAGE FULL FIRST ROWS| T1   |  1000K|    19M|       |  2134   (1)| 00:00:01 |
---------------------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter("RNUM">150)
   2 - filter(ROWNUM<=155)
   4 - filter(ROWNUM<=155)

Note
-----
   - automatic DOP: Computed Degree of Parallelism is 1 because of parallel threshold

SQL> select * from table(dbms_xplan.display_cursor);

PLAN_TABLE_OUTPUT
------------------------------------------------------------------------------------------------------------------------
SQL_ID  0xhpxrmzzbwkp, child number 0
-------------------------------------
select order_id, dispatch_date, gift_wrap from t1 order by
dispatch_date offset 150 rows fetch next 5 rows only

Plan hash value: 2433988517

--------------------------------------------------------------------------------------------
| Id  | Operation                   | Name | Rows  | Bytes |TempSpc| Cost (%CPU)| Time     |
--------------------------------------------------------------------------------------------
|   0 | SELECT STATEMENT            |      |       |       |       |  8202 (100)|          |
|*  1 |  VIEW                       |      |  1000K|    53M|       |  8202   (1)| 00:00:01 |
|*  2 |   WINDOW SORT PUSHED RANK   |      |  1000K|    19M|    30M|  8202   (1)| 00:00:01 |
|   3 |    TABLE ACCESS STORAGE FULL| T1   |  1000K|    19M|       |  2134   (1)| 00:00:01 |
--------------------------------------------------------------------------------------------

Predicate Information (identified by operation id):
---------------------------------------------------

   1 - filter(("from$_subquery$_002"."rowlimit_$$_rownumber"<=CASE  WHEN (150>=0)
              THEN 150 ELSE 0 END +5 AND "from$_subquery$_002"."rowlimit_$$_rownumber">150))
   2 - filter(ROW_NUMBER() OVER ( ORDER BY "DISPATCH_DATE")<=CASE  WHEN (150>=0)
              THEN 150 ELSE 0 END +5)

Note
-----
   - automatic DOP: Computed Degree of Parallelism is 1 because of parallel threshold
}}}

! LAB X: PLUGGABLE DATABASES

{{{
Unlike the Database In-Memory option, we can use the lab to experiment with Pluggable Databases. 

1) create a CDB

Use dbca in silent mode or any other technique you like to create a CDB with 1 PDB. Specify the data file location to be /u01/oradata and the FRA to go to /u01/fra. It is recommended to use Oracle Managed Files for the database but you are free to choose whichever method you are most comfortable with.

2) log in to the CDB root

Once the CDB is created connect to it as SYSDBA and list all of the PDBs in the database. Where can you find them?

SQL> show pdbs

SQL> select con_id, name, open_mode, total_size from v$pdbs;

SQL> select pdb_id, pdb_name, status, logging, force_logging, force_nologging from dba_pdbs;

3) create a new PDB named MASTER from the seed

 - check if you are using OMF

  SQL> show parameter db_create_file_dest
  SQL> create pluggable database master admin user master_admin identified by secret roles=(dba) 
    2  default tablespace users datafile size 20m;
  SQL> alter pluggable database master open;

4) list the MASTER PDBs data files

 - from the root
  SQL> select name from v$datafile where con_id = (select con_id from v$pdbs where name = 'MASTER');

 - from the PDB
  SQL> select name, bytes/power(1024,2) m from v$datafile;

  --> what is odd here? Compare with DBA_DATA_FILES

  SQL> select con_id, name, bytes/power(1024,2) m from v$datafile;

5) get familiar with the new dictionary views

The new architecture introduces new views and columns to existing views. Explore these, focusing on the CDB% views and how they differ from the DBA views. Also check how many V$ views have a new column. Can you find evidence for linking packages in the PDB to the root? 

 SQL> desc cdb_data_files
 Name                                      Null?    Type
 ----------------------------------------- -------- ----------------------------
 FILE_NAME                                          VARCHAR2(513)
 FILE_ID                                            NUMBER
 TABLESPACE_NAME                                    VARCHAR2(30)
 BYTES                                              NUMBER
 BLOCKS                                             NUMBER
 STATUS                                             VARCHAR2(9)
 RELATIVE_FNO                                       NUMBER
 AUTOEXTENSIBLE                                     VARCHAR2(3)
 MAXBYTES                                           NUMBER
 MAXBLOCKS                                          NUMBER
 INCREMENT_BY                                       NUMBER
 USER_BYTES                                         NUMBER
 USER_BLOCKS                                        NUMBER
 ONLINE_STATUS                                      VARCHAR2(7)
 CON_ID                                             NUMBER

SQL> desc dba_data_files
 Name                                      Null?    Type
 ----------------------------------------- -------- ----------------------------
 FILE_NAME                                          VARCHAR2(513)
 FILE_ID                                            NUMBER
 TABLESPACE_NAME                                    VARCHAR2(30)
 BYTES                                              NUMBER
 BLOCKS                                             NUMBER
 STATUS                                             VARCHAR2(9)
 RELATIVE_FNO                                       NUMBER
 AUTOEXTENSIBLE                                     VARCHAR2(3)
 MAXBYTES                                           NUMBER
 MAXBLOCKS                                          NUMBER
 INCREMENT_BY                                       NUMBER
 USER_BYTES                                         NUMBER
 USER_BLOCKS                                        NUMBER
 ONLINE_STATUS                                      VARCHAR2(7)

SQL> desc v$datafile
 Name														   Null?    Type
 ----------------------------------------------------------------------------------------------------------------- -------- ----------------------------------------------------------------------------
 FILE#															    NUMBER
 CREATION_CHANGE#													    NUMBER
 CREATION_TIME														    DATE
 TS#															    NUMBER
 RFILE# 														    NUMBER
 STATUS 														    VARCHAR2(7)
 ENABLED														    VARCHAR2(10)
 CHECKPOINT_CHANGE#													    NUMBER
 CHECKPOINT_TIME													    DATE
 UNRECOVERABLE_CHANGE#													    NUMBER
 UNRECOVERABLE_TIME													    DATE
 LAST_CHANGE#														    NUMBER
 LAST_TIME														    DATE
 OFFLINE_CHANGE#													    NUMBER
 ONLINE_CHANGE# 													    NUMBER
 ONLINE_TIME														    DATE
 BYTES															    NUMBER
 BLOCKS 														    NUMBER
 CREATE_BYTES														    NUMBER
 BLOCK_SIZE														    NUMBER
 NAME															    VARCHAR2(513)
 PLUGGED_IN														    NUMBER
 BLOCK1_OFFSET														    NUMBER
 AUX_NAME														    VARCHAR2(513)
 FIRST_NONLOGGED_SCN													    NUMBER
 FIRST_NONLOGGED_TIME													    DATE
 FOREIGN_DBID														    NUMBER
 FOREIGN_CREATION_CHANGE#												    NUMBER
 FOREIGN_CREATION_TIME													    DATE
 PLUGGED_READONLY													    VARCHAR2(3)
 PLUGIN_CHANGE# 													    NUMBER
 PLUGIN_RESETLOGS_CHANGE#												    NUMBER
 PLUGIN_RESETLOGS_TIME													    DATE
 CON_ID 														    NUMBER

SQL> select object_name, object_type, namespace, sharing, oracle_maintained from dba_objects where object_name = 'DBMS_REPCAT_AUTH';

OBJECT_NAME			 OBJECT_TYPE		  NAMESPACE SHARING	  O
-------------------------------- ----------------------- ---------- ------------- -
DBMS_REPCAT_AUTH		 PACKAGE			  1 METADATA LINK Y
DBMS_REPCAT_AUTH		 PACKAGE BODY			  2 METADATA LINK Y
DBMS_REPCAT_AUTH		 SYNONYM			  1 METADATA LINK Y
DBMS_REPCAT_AUTH		 PACKAGE			  1 METADATA LINK Y
DBMS_REPCAT_AUTH		 PACKAGE BODY			  2 METADATA LINK Y

6) switch to the PDB as root

Explore ways to switch to the newly created PDB from the root without logging in again

  SQL> alter session set container = MASTER;

  SQL> select sys_context('userenv', 'con_name') from dual;

  SQL> select sys_context('userenv', 'con_id') from dual;

7) connect to the PDB using Oracle Net

Now try to connect to the PDB over Oracle Net as MASTER_ADMIN. Ensure that you are connected to the correct container! Can you see that the user has the DBA role granted?

  $ sqlplus master_admin/secret@localhost/MASTER

  SQL> select sys_context('userenv', 'con_name') from dual;

  SQL> select sys_context('userenv', 'con_id') from dual;

  SQL> select * from session_privs;

8) view the privileges granted to MASTER_ADMIN

Now let's look a bit closer at the privileges granted to MASTER_ADMIN. Find the hierarchy of roles and grants. Which is the primary role granted to the user? How are the roles specified in the create pluggable database command linked to this role?

 SQL> select * from dba_role_privs where grantee = user;

GRANTEE                        GRANTED_ROLE                   ADM DEL DEF COM
------------------------------ ------------------------------ --- --- --- ---
MASTER_ADMIN                   PDB_DBA                        YES NO  YES NO

SQL> select * from dba_role_privs where grantee = 'PDB_DBA';

GRANTEE                        GRANTED_ROLE                   ADM DEL DEF COM
------------------------------ ------------------------------ --- --- --- ---
PDB_DBA                        DBA                            NO  NO  YES NO

Is it really the DBA role?

 SQL> select * from dba_sys_privs where grantee = 'DBA'; 

9) view the connection to the PDB from the root

You can see anyone connected to the PDBs from the root. In a separate session, connect to the MASTER PDB and try to identify that particular session from the CDB$ROOT.

  - connect to the PDB
   $ sqlplus master_admin/secret@localhost/MASTER
   SQL> exec dbms_application_info.set_client_info('find me!')

  - in another session, connect to the root
   $ sqlplus / as sysdba
   SQL> select username,sid,serial#,client_info,con_id from v$session where con_id = (select con_id from v$pdbs where name = 'MASTER');
 
USERNAME                              SID    SERIAL# CLIENT_INFO              CON_ID
------------------------------ ---------- ---------- -------------------- ----------
MASTER_ADMIN                           21      26215 find me!                      3

10) limit the maximum size of the PDB to 50M

PDBs are often used for consolidation. When consolidating, users pay for storage. We don't want them to use more than they pay for. Can you think of a way to limit the space available to a PDB? Can you test if that limit is enforced?

 - what is the minimum size you can set it to?

 SQL> alter pluggable database MASTER storage (maxsize 800M);
  
 - what is the PDB_MASTER's default tablespace?

 SQL> select default_tablespace from dba_users where username = user;

DEFAULT_TABLESPACE
------------------------------
USERS

 - check if the limit is enforced

 SQL> grant unlimited tablespace to master_admin;

 SQL> create table t1 nologging as select a.*, rpad(object_name, 200, 'x') large_c from dba_objects a;
 
 (may have to allow users to autoextend)

11) create a PDB from the MASTER

Creating a PDB from the SEED is only one way of creating a PDB. In the next step, create a PDB named PDB1 as a clone of MASTER. But first create a golden image of a database you'd like to use. To do so, create the following accounts in the MASTER PDB:

  + MONITORING
  + BACKUP
  + APPL_USER

Grant whichever privileges you like to them. APPL_USER must have 3 tables in his schema: T1, T2 and T3. While you perform these tasks, tail the alert.log in a different session.

SQL> create user monitoring identified by monitoring;

User created.

SQL> grant select any dictionary to monitoring;

Grant succeeded.

SQL> create user backup identified by backup;

User created.

SQL> grant create session to backup;

Grant succeeded.

SQL> create user appl_user identified by appl_user;

User created.

SQL> alter user appl_user quota unlimited on users;

User altered.

SQL> grant connect , resource to appl_user;

Grant succeeded.

SQL> conn appl_user/appl_user@localhost/MASTER
Connected.

SQL> create table t1 as select * from all_objects ;

Table created.

SQL> select count(*) from t1;

  COUNT(*)
----------
     73704

SQL> create table t2 as select * from all_objects where rownum < 11 ;

Table created.

SQL> c.t2.t3
  1* create table t3 as select * from all_objects where rownum < 11
SQL> r
  1* create table t3 as select * from all_objects where rownum < 11

Table created.

SQL> show user
USER is "APPL_USER"
SQL>

- prepare the PDB for cloning

 alter pluggable database master close immediate;
 alter pluggable database master open read only;

- view the alert log

 adrci> set home CDB1
 adrci> show alert -tail -f

- clone the PDB

 SQL> create pluggable database pdb1 from master;

 (are you still tailing the alert.log?)

 SQL> alter pluggable database PDB1 open;

 + do you see the users you created? Do they have data in the tables?

SQL> conn appl_user/appl_user@localhost/PDB1
Connected.
SQL> select count(*) from t1;

  COUNT(*)
----------
     73704

SQL> select count(*) from t2;

  COUNT(*)
----------
        10

SQL> select count(*) from t3;

  COUNT(*)
----------
        10

 + perform any further validations you like

12) Create a metadata only clone

Since 12.1.0.2 it is possible to perform a metadata only clone. Try to perform one based on MASTER. Ensure that the tables in the new PDB have no data!

 - as SYSDBA
 
 SQL> create pluggable database pdb2 from master no data;

 SQL> alter pluggable database pdb2 open;

SQL> conn appl_user/appl_user@localhost/PDB2
Connected.
SQL> select count(*) from t1;

  COUNT(*)
----------
         0

13) Unplug and plug

In this lab you will unplug a PDB and plug it back in. Usually you'd perform these steps on a different CDB but due to space constraints it'll be the same one you will experiment with. Note that it is crucial to drop the PDB once unplugged. This isn't documented that clearly in the official documentation set but is nevertheless required. https://blogs.oracle.com/UPGRADE/entry/recent_news_about_pluggable_databases

The steps to perform are:
 a) unplug the PDB
 b) review the metadata file
 c) check for plug-in-compatibility (a formality in our case but important in real life)
 d) drop the PDB _keeping_ data files
 e) create the new PDB by plugging it in

All the while you are tailing the alert.log

- unplug the PDB

SQL> alter pluggable database pdb2 close immediate;

SQL> alter pluggable database pdb2 unplug into '/home/oracle/pdb2.xml';

-> keep tailing the alert.log!

- verify the contents of the XML file 

[oracle@server3 ~]$ cat /home/oracle/pdb2.xml
<?xml version="1.0" encoding="UTF-8"?>
<PDB>
  <xmlversion>1</xmlversion>
  <pdbname>PDB2</pdbname>
  <cid>5</cid>
  <byteorder>1</byteorder>
  <vsn>202375680</vsn>
  <vsns>
    <vsnnum>12.1.0.2.0</vsnnum>
    <cdbcompt>12.1.0.2.0</cdbcompt>
    <pdbcompt>12.1.0.2.0</pdbcompt>
    <vsnlibnum>0.0.0.0.22</vsnlibnum>
    <vsnsql>22</vsnsql>
    <vsnbsv>8.0.0.0.0</vsnbsv>
  </vsns>
  <dbid>1858507191</dbid>
  <ncdb2pdb>0</ncdb2pdb>
  <cdbid>628942599</cdbid>
  <guid>18135BAD243A6341E0530C64A8C0B88F</guid>
  <uscnbas>1675596</uscnbas>
  <uscnwrp>0</uscnwrp>
  <rdba>4194824</rdba>
  <tablespace>
    <name>SYSTEM</name>
    <type>0</type>
    <tsn>0</tsn>
    <status>1</status>
    <issft>0</issft>
    <file>
      <path>/u01/oradata/CDB2/18135BAD243A6341E0530C64A8C0B88F/datafile/o1_mf_system_bqfdbtg0_.dbf</path>
      <afn>17</afn>
      <rfn>1</rfn>
      <createscnbas>1674775</createscnbas>
      <createscnwrp>0</createscnwrp>
      <status>1</status>
      <fileblocks>32000</fileblocks>
      <blocksize>8192</blocksize>
      <vsn>202375680</vsn>
      <fdbid>1858507191</fdbid>
      <fcpsw>0</fcpsw>
      <fcpsb>1675592</fcpsb>
      <frlsw>0</frlsw>
      <frlsb>1594143</frlsb>
      <frlt>881895559</frlt>
    </file>
  </tablespace>
  <tablespace>
    <name>SYSAUX</name>
    <type>0</type>
    <tsn>1</tsn>
    <status>1</status>
    <issft>0</issft>
    <file>
      <path>/u01/oradata/CDB2/18135BAD243A6341E0530C64A8C0B88F/datafile/o1_mf_sysaux_bqfdbtg1_.dbf</path>
      <afn>18</afn>
      <rfn>4</rfn>
      <createscnbas>1674799</createscnbas>
      <createscnwrp>0</createscnwrp>
      <status>1</status>
      <fileblocks>65280</fileblocks>
      <blocksize>8192</blocksize>
      <vsn>202375680</vsn>
      <fdbid>1858507191</fdbid>
      <fcpsw>0</fcpsw>
      <fcpsb>1675592</fcpsb>
      <frlsw>0</frlsw>
      <frlsb>1594143</frlsb>
      <frlt>881895559</frlt>
    </file>
  </tablespace>
  <tablespace>
    <name>TEMP</name>
    <type>1</type>
    <tsn>2</tsn>
    <status>1</status>
    <issft>0</issft>
    <bmunitsize>128</bmunitsize>
    <file>
      <path>/u01/oradata/CDB2/18135BAD243A6341E0530C64A8C0B88F/datafile/o1_mf_temp_bqfdbtg1_.dbf</path>
      <afn>5</afn>
      <rfn>1</rfn>
      <createscnbas>1674776</createscnbas>
      <createscnwrp>0</createscnwrp>
      <status>0</status>
      <fileblocks>2560</fileblocks>
      <blocksize>8192</blocksize>
      <vsn>202375680</vsn>
      <autoext>1</autoext>
      <maxsize>4194302</maxsize>
      <incsize>80</incsize>
    </file>
  </tablespace>
  <tablespace>
    <name>USERS</name>
    <type>0</type>
    <tsn>3</tsn>
    <status>1</status>
    <issft>0</issft>
    <file>
      <path>/u01/oradata/CDB2/18135BAD243A6341E0530C64A8C0B88F/datafile/o1_mf_users_bqfdbtg1_.dbf</path>
      <afn>19</afn>
      <rfn>10</rfn>
      <createscnbas>1674802</createscnbas>
      <createscnwrp>0</createscnwrp>
      <status>1</status>
      <fileblocks>2560</fileblocks>
      <blocksize>8192</blocksize>
      <vsn>202375680</vsn>
      <fdbid>1858507191</fdbid>
      <fcpsw>0</fcpsw>
      <fcpsb>1675592</fcpsb>
      <frlsw>0</frlsw>
      <frlsb>1594143</frlsb>
      <frlt>881895559</frlt>
    </file>
  </tablespace>
  <optional>
    <ncdb2pdb>0</ncdb2pdb>
    <csid>178</csid>
    <ncsid>2000</ncsid>
    <options>
      <option>APS=12.1.0.2.0</option>
      <option>CATALOG=12.1.0.2.0</option>
      <option>CATJAVA=12.1.0.2.0</option>
      <option>CATPROC=12.1.0.2.0</option>
      <option>CONTEXT=12.1.0.2.0</option>
      <option>DV=12.1.0.2.0</option>
      <option>JAVAVM=12.1.0.2.0</option>
      <option>OLS=12.1.0.2.0</option>
      <option>ORDIM=12.1.0.2.0</option>
      <option>OWM=12.1.0.2.0</option>
      <option>SDO=12.1.0.2.0</option>
      <option>XDB=12.1.0.2.0</option>
      <option>XML=12.1.0.2.0</option>
      <option>XOQ=12.1.0.2.0</option>
    </options>
    <olsoid>0</olsoid>
    <dv>0</dv>
    <APEX>4.2.5.00.08:1</APEX>
    <parameters>
      <parameter>processes=300</parameter>
      <parameter>nls_language='ENGLISH'</parameter>
      <parameter>nls_territory='UNITED KINGDOM'</parameter>
      <parameter>sga_target=1073741824</parameter>
      <parameter>db_block_size=8192</parameter>
      <parameter>compatible='12.1.0.2.0'</parameter>
      <parameter>open_cursors=300</parameter>
      <parameter>pga_aggregate_target=536870912</parameter>
      <parameter>enable_pluggable_database=TRUE</parameter>
    </parameters>
    <tzvers>
      <tzver>primary version:18</tzver>
      <tzver>secondary version:0</tzver>
    </tzvers>
    <walletkey>0</walletkey>
    <opatches>
      <opatch>19769480</opatch>
      <opatch>20299022</opatch>
      <opatch>20299023</opatch>
      <opatch>20415564</opatch>
    </opatches>
    <hasclob>1</hasclob>
    <awr>
      <loadprofile>CPU Usage Per Sec=0.000000</loadprofile>
      <loadprofile>DB Block Changes Per Sec=0.000000</loadprofile>
      <loadprofile>Database Time Per Sec=0.000000</loadprofile>
      <loadprofile>Executions Per Sec=0.000000</loadprofile>
      <loadprofile>Hard Parse Count Per Sec=0.000000</loadprofile>
      <loadprofile>Logical Reads Per Sec=0.000000</loadprofile>
      <loadprofile>Logons Per Sec=0.000000</loadprofile>
      <loadprofile>Physical Reads Per Sec=0.000000</loadprofile>
      <loadprofile>Physical Writes Per Sec=0.000000</loadprofile>
      <loadprofile>Redo Generated Per Sec=0.000000</loadprofile>
      <loadprofile>Total Parse Count Per Sec=0.000000</loadprofile>
      <loadprofile>User Calls Per Sec=0.000000</loadprofile>
      <loadprofile>User Rollbacks Per Sec=0.000000</loadprofile>
      <loadprofile>User Transaction Per Sec=0.000000</loadprofile>
    </awr>
    <hardvsnchk>0</hardvsnchk>
  </optional>
</PDB>

- check for compatibility


DECLARE
  compatible CONSTANT VARCHAR2(3) :=
    CASE DBMS_PDB.CHECK_PLUG_COMPATIBILITY(
           pdb_descr_file => '/home/oracle/pdb2.xml',
           pdb_name       => 'PDB2')
    WHEN TRUE THEN 'YES'
    ELSE 'NO'
END;
BEGIN
  DBMS_OUTPUT.PUT_LINE(compatible);
END;
/

- If you get a YES, drop the source PDB (keeping its data files), then plug it back in 

SQL> drop pluggable database pdb2 keep datafiles;

SQL> create pluggable database pdb2 using '/home/oracle/pdb2.xml' nocopy tempfile reuse;

14) drop a PDB

You use the drop pluggable database command to drop the PDB.

SQL> alter pluggable database PDB2 close immediate;

SQL> drop pluggable database PDB2;

- what happens to its data files? Do you get an error? how do you correct the error?

LAB 5: RMAN and PDBs

1) Connect to the CDB$ROOT as RMAN and "report schema"

RMAN> report schema;

using target database control file instead of recovery catalog
Report of database schema for database with db_unique_name CDB2

List of Permanent Datafiles
===========================
File Size(MB) Tablespace           RB segs Datafile Name
---- -------- -------------------- ------- ------------------------
1    780      SYSTEM               YES     /u01/oradata/CDB2/datafile/o1_mf_system_bqf3ktdf_.dbf
3    600      SYSAUX               NO      /u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3jpvv_.dbf
4    355      UNDOTBS1             YES     /u01/oradata/CDB2/datafile/o1_mf_undotbs1_bqf3lz4q_.dbf
5    250      PDB$SEED:SYSTEM      NO      /u01/oradata/CDB2/datafile/o1_mf_system_bqf3phmo_.dbf
6    5        USERS                NO      /u01/oradata/CDB2/datafile/o1_mf_users_bqf3lxrp_.dbf
7    490      PDB$SEED:SYSAUX      NO      /u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3ph5z_.dbf
8    250      MASTER:SYSTEM        NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_system_bqf4tntv_.dbf
9    510      MASTER:SYSAUX        NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_sysaux_bqf4tnv2_.dbf
10   20       MASTER:USERS         NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_users_bqf4v3cm_.dbf
14   250      PDB1:SYSTEM          NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_system_bqfd57b5_.dbf
15   510      PDB1:SYSAUX          NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_sysaux_bqfd57b7_.dbf
16   20       PDB1:USERS           NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf

List of Temporary Files
=======================
File Size(MB) Tablespace           Maxsize(MB) Tempfile Name
---- -------- -------------------- ----------- --------------------
1    60       TEMP                 32767       /u01/oradata/CDB2/datafile/o1_mf_temp_bqf3p96h_.tmp
2    20       PDB$SEED:TEMP        32767       /u01/oradata/CDB2/datafile/pdbseed_temp012015-06-09_02-59-59-AM.dbf
3    20       MASTER:TEMP          32767       /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_temp_bqf4tnv3_.dbf
4    20       PDB1:TEMP            32767       /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_temp_bqfd57b8_.dbf

RMAN>

- What do you notice? How is the output different from a non-CDB?

2) Review the configuration settings

Have a look at the RMAN configuration settings. There is one item that is different from non-CDBs. Can you spot it? (Hint: compare the CONTROLFILE AUTOBACKUP default with what a non-CDB shows.)

RMAN> show all;

RMAN configuration parameters for database with db_unique_name CDB2 are:
CONFIGURE RETENTION POLICY TO REDUNDANCY 1; # default
CONFIGURE BACKUP OPTIMIZATION OFF; # default
CONFIGURE DEFAULT DEVICE TYPE TO DISK; # default
CONFIGURE CONTROLFILE AUTOBACKUP ON; # default
CONFIGURE CONTROLFILE AUTOBACKUP FORMAT FOR DEVICE TYPE DISK TO '%F'; # default
CONFIGURE DEVICE TYPE DISK PARALLELISM 1 BACKUP TYPE TO BACKUPSET; # default
CONFIGURE DATAFILE BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE ARCHIVELOG BACKUP COPIES FOR DEVICE TYPE DISK TO 1; # default
CONFIGURE MAXSETSIZE TO UNLIMITED; # default
CONFIGURE ENCRYPTION FOR DATABASE OFF; # default
CONFIGURE ENCRYPTION ALGORITHM 'AES128'; # default
CONFIGURE COMPRESSION ALGORITHM 'BASIC' AS OF RELEASE 'DEFAULT' OPTIMIZE FOR LOAD TRUE ; # default
CONFIGURE RMAN OUTPUT TO KEEP FOR 7 DAYS; # default
CONFIGURE ARCHIVELOG DELETION POLICY TO NONE; # default
CONFIGURE SNAPSHOT CONTROLFILE NAME TO '/u01/app/oracle/product/12.1.0.2/dbhome_1/dbs/snapcf_CDB2.f'; # default


3) Back up the CDB

It is possible to back up the whole CDB, the CDB$ROOT only, or individual PDBs. In this step you back up the entire CDB - it is always good to have a full backup. If the database is not yet in archivelog mode, enable it first, then perform a full backup (incremental level 0 or full does not matter).
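
For reference (not required for this step), RMAN backups can also be taken at container granularity; a sketch:

RMAN> backup pluggable database PDB1;
RMAN> backup database root;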

RMAN> shutdown immediate

database closed
database dismounted
Oracle instance shut down

RMAN> startup mount

connected to target database (not started)
Oracle instance started
database mounted

Total System Global Area    1073741824 bytes

Fixed Size                     2932632 bytes
Variable Size                377487464 bytes
Database Buffers             687865856 bytes
Redo Buffers                   5455872 bytes

RMAN> alter database archivelog;

Statement processed

RMAN> alter database open;

Statement processed

RMAN> configure channel device type disk format '/u01/oraback/CDB2/%U';

new RMAN configuration parameters:
CONFIGURE CHANNEL DEVICE TYPE DISK FORMAT   '/u01/oraback/CDB2/%U';
new RMAN configuration parameters are successfully stored

RMAN> CONFIGURE DEVICE TYPE DISK PARALLELISM 2;

new RMAN configuration parameters:
CONFIGURE DEVICE TYPE DISK PARALLELISM 2 BACKUP TYPE TO BACKUPSET;
new RMAN configuration parameters are successfully stored

RMAN> backup database plus archivelog;


Starting backup at 09-JUN-15
current log archived
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=16 device type=DISK
allocated channel: ORA_DISK_2
channel ORA_DISK_2: SID=27 device type=DISK
channel ORA_DISK_1: starting archived log backup set
channel ORA_DISK_1: specifying archived log(s) in backup set
input archived log thread=1 sequence=18 RECID=1 STAMP=881907051
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/01q91lbd_1_1 tag=TAG20150609T061052 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:01
Finished backup at 09-JUN-15

Starting backup at 09-JUN-15
using channel ORA_DISK_1
using channel ORA_DISK_2
channel ORA_DISK_1: starting full datafile backup set
channel ORA_DISK_1: specifying datafile(s) in backup set
input datafile file number=00001 name=/u01/oradata/CDB2/datafile/o1_mf_system_bqf3ktdf_.dbf
input datafile file number=00004 name=/u01/oradata/CDB2/datafile/o1_mf_undotbs1_bqf3lz4q_.dbf
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_2: starting full datafile backup set
channel ORA_DISK_2: specifying datafile(s) in backup set
input datafile file number=00003 name=/u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3jpvv_.dbf
input datafile file number=00006 name=/u01/oradata/CDB2/datafile/o1_mf_users_bqf3lxrp_.dbf
channel ORA_DISK_2: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/02q91lbf_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:25
channel ORA_DISK_1: starting full datafile backup set
channel ORA_DISK_1: specifying datafile(s) in backup set
input datafile file number=00009 name=/u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_sysaux_bqf4tnv2_.dbf
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_2: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/03q91lbf_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_2: backup set complete, elapsed time: 00:00:25
channel ORA_DISK_2: starting full datafile backup set
channel ORA_DISK_2: specifying datafile(s) in backup set
input datafile file number=00015 name=/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_sysaux_bqfd57b7_.dbf
channel ORA_DISK_2: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/04q91lc8_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:08
channel ORA_DISK_1: starting full datafile backup set
channel ORA_DISK_1: specifying datafile(s) in backup set
input datafile file number=00007 name=/u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3ph5z_.dbf
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_2: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/05q91lc9_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_2: backup set complete, elapsed time: 00:00:10
channel ORA_DISK_2: starting full datafile backup set
channel ORA_DISK_2: specifying datafile(s) in backup set
input datafile file number=00008 name=/u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_system_bqf4tntv_.dbf
input datafile file number=00010 name=/u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_users_bqf4v3cm_.dbf
channel ORA_DISK_2: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/06q91lcj_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:08
channel ORA_DISK_1: starting full datafile backup set
channel ORA_DISK_1: specifying datafile(s) in backup set
input datafile file number=00014 name=/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_system_bqfd57b5_.dbf
input datafile file number=00016 name=/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_2: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/07q91lcj_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_2: backup set complete, elapsed time: 00:00:07
channel ORA_DISK_2: starting full datafile backup set
channel ORA_DISK_2: specifying datafile(s) in backup set
input datafile file number=00005 name=/u01/oradata/CDB2/datafile/o1_mf_system_bqf3phmo_.dbf
channel ORA_DISK_2: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/08q91lcr_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:07
channel ORA_DISK_2: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/09q91lcr_1_1 tag=TAG20150609T061054 comment=NONE
channel ORA_DISK_2: backup set complete, elapsed time: 00:00:07
Finished backup at 09-JUN-15

Starting backup at 09-JUN-15
current log archived
using channel ORA_DISK_1
using channel ORA_DISK_2
channel ORA_DISK_1: starting archived log backup set
channel ORA_DISK_1: specifying archived log(s) in backup set
input archived log thread=1 sequence=19 RECID=2 STAMP=881907108
channel ORA_DISK_1: starting piece 1 at 09-JUN-15
channel ORA_DISK_1: finished piece 1 at 09-JUN-15
piece handle=/u01/oraback/CDB2/0aq91ld5_1_1 tag=TAG20150609T061148 comment=NONE
channel ORA_DISK_1: backup set complete, elapsed time: 00:00:01
Finished backup at 09-JUN-15

Starting Control File and SPFILE Autobackup at 09-JUN-15
piece handle=/u01/fra/CDB2/autobackup/2015_06_09/o1_mf_s_881907110_bqfgzb3n_.bkp comment=NONE
Finished Control File and SPFILE Autobackup at 09-JUN-15

RMAN>


4) Try to cause some trouble and get out unscathed

Assume someone removed an essential file belonging to PDB1. Time to recover! In this part of the lab you
 a) close PDB1 (a sketch of this step follows below)
 b) remove a data file
 c) perform a full recovery (admittedly not strictly needed, but a good test)
 d) open the database without data loss 
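
A sketch of step a), assuming you are connected to the CDB$ROOT (the transcript below picks up from here):

SQL> alter pluggable database pdb1 close immediate;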

SQL> set lines 200
SQL> select name from v$datafile where con_id = (select con_id from v$pdbs where name = 'PDB1');

NAME
---------------------------------------------------------------------------------------------------------------------------
/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_system_bqfd57b5_.dbf
/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_sysaux_bqfd57b7_.dbf
/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf

[oracle@server3 ~]$ rm /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf
[oracle@server3 ~]$

SQL> alter pluggable database pdb1 open;
alter pluggable database pdb1 open
*
ERROR at line 1:
ORA-01157: cannot identify/lock data file 16 - see DBWR trace file
ORA-01110: data file 16:
'/u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf'

- perform full recovery

[oracle@server3 ~]$ rman target sys/change_on_install@localhost/PDB1

Recovery Manager: Release 12.1.0.2.0 - Production on Tue Jun 9 06:48:03 2015

Copyright (c) 1982, 2014, Oracle and/or its affiliates.  All rights reserved.

connected to target database: CDB2 (DBID=628942599, not open)

RMAN> run {
2> restore database;
3> recover database;
4> alter database open;
5> }

Starting restore at 09-JUN-15
using target database control file instead of recovery catalog
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=255 device type=DISK
allocated channel: ORA_DISK_2
channel ORA_DISK_2: SID=269 device type=DISK

skipping datafile 14; already restored to file /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_system_bqfd57b5_.dbf
skipping datafile 15; already restored to file /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_sysaux_bqfd57b7_.dbf
channel ORA_DISK_1: starting datafile backup set restore
channel ORA_DISK_1: specifying datafile(s) to restore from backup set
channel ORA_DISK_1: restoring datafile 00016 to /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfd57b8_.dbf
channel ORA_DISK_1: reading from backup piece /u01/oraback/CDB2/08q91lcr_1_1
channel ORA_DISK_1: piece handle=/u01/oraback/CDB2/08q91lcr_1_1 tag=TAG20150609T061054
channel ORA_DISK_1: restored backup piece 1
channel ORA_DISK_1: restore complete, elapsed time: 00:00:01
Finished restore at 09-JUN-15

Starting recover at 09-JUN-15
using channel ORA_DISK_1
using channel ORA_DISK_2

starting media recovery
media recovery complete, elapsed time: 00:00:00

Finished recover at 09-JUN-15

Statement processed

RMAN>

- check if that worked

[oracle@server3 ~]$ sqlplus appl_user/appl_user@localhost/pdb1

SQL*Plus: Release 12.1.0.2.0 Production on Tue Jun 9 06:50:32 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Last Successful login time: Tue Jun 09 2015 06:50:12 -04:00

Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, OLAP, Advanced Analytics and Real Application Testing options

SQL> select count(*) from t1;

  COUNT(*)
----------
     73704

SQL> select tablespace_name from tabs;

TABLESPACE_NAME
------------------------------
USERS
USERS
USERS

SQL>

5) Beware of PDB backups when dropping PDBs!

This is an example of behaviour that might be considered a bug but is in fact expected. Assume you accidentally dropped a PDB, including its data files. How can you get it back? 

- create a PDB we don't really care about with a default tablespace named USERS

SQL> create pluggable database I_AM_AN_EX_PARROT admin user martin identified by secret default tablespace users datafile size 10m;

Pluggable database created.

SQL> alter pluggable database I_AM_AN_EX_PARROT open;

Pluggable database altered.

- create a level 0 backup of the CDB and make sure I_AM_AN_EX_PARROT has been backed up. Validate both the PDB backup and the archivelogs.

RMAN> backup incremental level 0 database plus archivelog delete all input;

...

RMAN> list backup of pluggable database I_AM_AN_EX_PARROT;


List of Backup Sets
===================


BS Key  Type LV Size       Device Type Elapsed Time Completion Time
------- ---- -- ---------- ----------- ------------ ---------------
26      Incr 0  395.59M    DISK        00:00:02     11-JUN-15
        BP Key: 26   Status: AVAILABLE  Compressed: NO  Tag: TAG20150611T060056
        Piece Name: /u01/oraback/CDB2/0pq96tij_1_1
  List of Datafiles in backup set 26
  Container ID: 6, PDB Name: I_AM_AN_EX_PARROT
  File LV Type Ckp SCN    Ckp Time  Name
  ---- -- ---- ---------- --------- ----
  26   0  Incr 2345577    11-JUN-15 /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_sysaux_bqlowk9f_.dbf

BS Key  Type LV Size       Device Type Elapsed Time Completion Time
------- ---- -- ---------- ----------- ------------ ---------------
29      Incr 0  203.38M    DISK        00:00:02     11-JUN-15
        BP Key: 29   Status: AVAILABLE  Compressed: NO  Tag: TAG20150611T060056
        Piece Name: /u01/oraback/CDB2/0tq96tjc_1_1
  List of Datafiles in backup set 29
  Container ID: 6, PDB Name: I_AM_AN_EX_PARROT
  File LV Type Ckp SCN    Ckp Time  Name
  ---- -- ---- ---------- --------- ----
  25   0  Incr 2345609    11-JUN-15 /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_system_bqlowk93_.dbf
  27   0  Incr 2345609    11-JUN-15 /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_users_bqloxjmr_.dbf

- the backup exists!

RMAN> restore pluggable database I_AM_AN_EX_PARROT validate;

Starting restore at 11-JUN-15
using channel ORA_DISK_1
using channel ORA_DISK_2

channel ORA_DISK_1: starting validation of datafile backup set
channel ORA_DISK_2: starting validation of datafile backup set
channel ORA_DISK_1: reading from backup piece /u01/oraback/CDB2/0pq96tij_1_1
channel ORA_DISK_2: reading from backup piece /u01/oraback/CDB2/0tq96tjc_1_1
channel ORA_DISK_1: piece handle=/u01/oraback/CDB2/0pq96tij_1_1 tag=TAG20150611T060056
channel ORA_DISK_1: restored backup piece 1
channel ORA_DISK_1: validation complete, elapsed time: 00:00:01
channel ORA_DISK_2: piece handle=/u01/oraback/CDB2/0tq96tjc_1_1 tag=TAG20150611T060056
channel ORA_DISK_2: restored backup piece 1
channel ORA_DISK_2: validation complete, elapsed time: 00:00:01
Finished restore at 11-JUN-15

RMAN> RESTORE ARCHIVELOG ALL VALIDATE;

Starting restore at 11-JUN-15
using channel ORA_DISK_1
using channel ORA_DISK_2

channel ORA_DISK_1: starting validation of archived log backup set
channel ORA_DISK_2: starting validation of archived log backup set
channel ORA_DISK_1: reading from backup piece /u01/oraback/CDB2/0gq96tfe_1_1
channel ORA_DISK_2: reading from backup piece /u01/oraback/CDB2/0hq96tff_1_1
channel ORA_DISK_1: piece handle=/u01/oraback/CDB2/0gq96tfe_1_1 tag=TAG20150611T060013
channel ORA_DISK_1: restored backup piece 1
channel ORA_DISK_1: validation complete, elapsed time: 00:00:01
channel ORA_DISK_1: starting validation of archived log backup set
channel ORA_DISK_2: piece handle=/u01/oraback/CDB2/0hq96tff_1_1 tag=TAG20150611T060013
channel ORA_DISK_2: restored backup piece 1
channel ORA_DISK_2: validation complete, elapsed time: 00:00:01
channel ORA_DISK_2: starting validation of archived log backup set
channel ORA_DISK_1: reading from backup piece /u01/oraback/CDB2/0iq96tg9_1_1
channel ORA_DISK_2: reading from backup piece /u01/oraback/CDB2/0vq96tjp_1_1
channel ORA_DISK_1: piece handle=/u01/oraback/CDB2/0iq96tg9_1_1 tag=TAG20150611T060013
channel ORA_DISK_1: restored backup piece 1
channel ORA_DISK_1: validation complete, elapsed time: 00:00:01
channel ORA_DISK_2: piece handle=/u01/oraback/CDB2/0vq96tjp_1_1 tag=TAG20150611T060233
channel ORA_DISK_2: restored backup piece 1
channel ORA_DISK_2: validation complete, elapsed time: 00:00:01
Finished restore at 11-JUN-15

- drop the PDB including data files. We have a backup, so we should be OK even if we made a mistake. Tail the alert.log while executing the steps

RMAN> alter pluggable database I_AM_AN_EX_PARROT close;

Statement processed

RMAN> drop pluggable database I_AM_AN_EX_PARROT including datafiles;

Statement processed

2015-06-11 06:19:47.088000 -04:00
alter pluggable database I_AM_AN_EX_PARROT close
ALTER SYSTEM: Flushing buffer cache inst=0 container=6 local
2015-06-11 06:19:59.242000 -04:00
Pluggable database I_AM_AN_EX_PARROT closed
Completed: alter pluggable database I_AM_AN_EX_PARROT close
2015-06-11 06:20:15.885000 -04:00
drop pluggable database I_AM_AN_EX_PARROT including datafiles
2015-06-11 06:20:20.655000 -04:00
Deleted Oracle managed file /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_users_bqloxjmr_.dbf
Deleted Oracle managed file /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_temp_bqlowk9g_.dbf
Deleted Oracle managed file /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_sysaux_bqlowk9f_.dbf
Deleted Oracle managed file /u01/oradata/CDB2/183BC85FA4F548B1E0530C64A8C04B67/datafile/o1_mf_system_bqlowk93_.dbf
Completed: drop pluggable database I_AM_AN_EX_PARROT including datafiles

- oops, that was a mistake! Call from the users: restore the PDB, it is production critical!

RMAN> report schema;

Report of database schema for database with db_unique_name CDB2

List of Permanent Datafiles
===========================
File Size(MB) Tablespace           RB segs Datafile Name
---- -------- -------------------- ------- ------------------------
1    780      SYSTEM               YES     /u01/oradata/CDB2/datafile/o1_mf_system_bqf3ktdf_.dbf
3    680      SYSAUX               NO      /u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3jpvv_.dbf
4    355      UNDOTBS1             YES     /u01/oradata/CDB2/datafile/o1_mf_undotbs1_bqf3lz4q_.dbf
5    250      PDB$SEED:SYSTEM      NO      /u01/oradata/CDB2/datafile/o1_mf_system_bqf3phmo_.dbf
6    5        USERS                NO      /u01/oradata/CDB2/datafile/o1_mf_users_bqf3lxrp_.dbf
7    490      PDB$SEED:SYSAUX      NO      /u01/oradata/CDB2/datafile/o1_mf_sysaux_bqf3ph5z_.dbf
8    250      MASTER:SYSTEM        NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_system_bqf4tntv_.dbf
9    510      MASTER:SYSAUX        NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_sysaux_bqf4tnv2_.dbf
10   20       MASTER:USERS         NO      /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_users_bqf4v3cm_.dbf
14   250      PDB1:SYSTEM          NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_system_bqfd57b5_.dbf
15   520      PDB1:SYSAUX          NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_sysaux_bqfd57b7_.dbf
16   20       PDB1:USERS           NO      /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_users_bqfk42sq_.dbf
23   260      PDBSBY:SYSTEM        NO      /u01/oradata/CDB2/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_system_bqfmtn2d_.dbf
24   520      PDBSBY:SYSAUX        NO      /u01/oradata/CDB2/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_sysaux_bqfmtn2l_.dbf

List of Temporary Files
=======================
File Size(MB) Tablespace           Maxsize(MB) Tempfile Name
---- -------- -------------------- ----------- --------------------
1    60       TEMP                 32767       /u01/oradata/CDB2/datafile/o1_mf_temp_bqf3p96h_.tmp
2    20       PDB$SEED:TEMP        32767       /u01/oradata/CDB2/datafile/pdbseed_temp012015-06-09_02-59-59-AM.dbf
3    20       MASTER:TEMP          32767       /u01/oradata/CDB2/18119189D3265B51E0530C64A8C0A3AE/datafile/o1_mf_temp_bqf4tnv3_.dbf
4    20       PDB1:TEMP            32767       /u01/oradata/CDB2/1813503BE37C62FEE0530C64A8C02F2C/datafile/o1_mf_temp_bqfd57b8_.dbf
5    20       PDBSBY:TEMP          32767       /u01/oradata/CDB2/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_temp_bqfmtn2l_.dbf

RMAN> run {
2> restore pluggable database I_AM_AN_EX_PARROT;
3> recover pluggable database I_AM_AN_EX_PARROT;
4> }

Starting restore at 11-JUN-15
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=280 device type=DISK
allocated channel: ORA_DISK_2
channel ORA_DISK_2: SID=55 device type=DISK
RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-03002: failure of restore command at 06/11/2015 06:22:07
RMAN-06813: could not translate pluggable database I_AM_AN_EX_PARROT

- Why? The backup was there a minute ago! Check the controlfile for the PDB backup:

RMAN> list backup of pluggable database I_AM_AN_EX_PARROT;

RMAN-00571: ===========================================================
RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS ===============
RMAN-00571: ===========================================================
RMAN-03002: failure of list command at 06/11/2015 06:22:52
RMAN-06813: could not translate pluggable database I_AM_AN_EX_PARROT

- And indeed the backup is gone, along with all the metadata describing it. Dropping a PDB removes its backup records from the control file, which is why RMAN can no longer translate the PDB name (RMAN-06813). A recovery catalog retains this metadata outside the control file and would at least let you find the backups; without one, your remaining option is to restore the whole CDB to a point before the drop in an auxiliary instance.
}}}

! LAB X: Data Guard

{{{
Data Guard is an essential part of data protection, and CDBs can be protected by Data Guard as well. In this lab you will learn how. It is a bit more involved than the previous labs, which is why the most time is allotted to it. The steps below guide you through what needs to be done; the examples may have to be adapted to your environment.

1) create a physical standby of the CDB you used

- connect to the CDB as root and enable automatic standby_file_management (see the sketch after the tnsnames.ora example below)
- make sure that you use an SPFILE
- edit tnsnames.ora in $ORACLE_HOME to include the new standby database

CDBSBY =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = class<n>)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = CDBSBY)
    )
  )
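
- and a sketch for the first two preparation items (values assumed):

SQL> alter system set standby_file_management = AUTO scope=both;
SQL> show parameter spfile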

- modify $ORACLE_HOME/network/admin/listener.ora and reload the listener. Ensure the names match your environment!

SID_LIST_LISTENER =
  (SID_LIST =
    (SID_DESC =
      (GLOBAL_DBNAME = CDB2)
      (ORACLE_HOME = /u01/app/oracle/product/12.1.0.2/dbhome_1)
      (SID_NAME = CDB2)
    )
    (SID_DESC =
      (GLOBAL_DBNAME = CDB2_DGMGRL)
      (ORACLE_HOME = /u01/app/oracle/product/12.1.0.2/dbhome_1)
      (SID_NAME = CDB2)
    )
    (SID_DESC =
      (GLOBAL_DBNAME = CDBSBY)
      (ORACLE_HOME = /u01/app/oracle/product/12.1.0.2/dbhome_1)
      (SID_NAME = CDBSBY)
    )
    (SID_DESC =
      (GLOBAL_DBNAME = CDBSBY_DGMGRL)
      (ORACLE_HOME = /u01/app/oracle/product/12.1.0.2/dbhome_1)
      (SID_NAME = CDBSBY)
    )
  )

- use "lsnrctl services" to ensure the services are registered

- update oratab with the new standby database

CDBSBY:/u01/app/oracle/product/12.1.0.2/dbhome_1:N

- create a minimal pfile for the clone 

*.audit_file_dest='/u01/app/oracle/admin/CDBSBY/adump'
*.audit_trail='db'
*.compatible='12.1.0.2.0'
*.db_block_size=8192
*.db_create_file_dest='/u01/oradata'
*.db_domain=''
*.db_name='CDB2'
*.db_unique_name='CDBSBY'
*.db_recovery_file_dest='/u01/fra'
*.db_recovery_file_dest_size=4560m
*.diagnostic_dest='/u01/app/oracle'
*.dispatchers='(PROTOCOL=TCP) (SERVICE=CDBSBYXDB)'
*.enable_pluggable_database=true
*.nls_language='ENGLISH'
*.nls_territory='UNITED KINGDOM'
*.open_cursors=300
*.pga_aggregate_target=512m
*.processes=300
*.remote_login_passwordfile='EXCLUSIVE'
*.sga_target=1024m
*.standby_file_management='AUTO'
*.undo_tablespace='UNDOTBS1'

- ensure the audit file dest is created

mkdir -vp /u01/app/oracle/admin/CDBSBY/adump

- copy the pwfile to allow remote login

[oracle@server3 dbs]$ cp orapwCDB2 orapwCDBSBY

- duplicate

[oracle@server3 ~]$  rman target sys/password@cdb2 auxiliary sys/password@cdbsby

RMAN> startup clone nomount

....

RMAN> duplicate target database for standby;

- Make sure to note down the control files; their names are in the RMAN output:

executing Memory Script

Starting restore at 09-JUN-15
using channel ORA_AUX_DISK_1
using channel ORA_AUX_DISK_2

channel ORA_AUX_DISK_1: starting datafile backup set restore
channel ORA_AUX_DISK_1: restoring control file
channel ORA_AUX_DISK_1: reading from backup piece /u01/fra/CDB2/autobackup/2015_06_09/o1_mf_s_881907110_bqfgzb3n_.bkp
channel ORA_AUX_DISK_1: piece handle=/u01/fra/CDB2/autobackup/2015_06_09/o1_mf_s_881907110_bqfgzb3n_.bkp tag=TAG20150609T061150
channel ORA_AUX_DISK_1: restored backup piece 1
channel ORA_AUX_DISK_1: restore complete, elapsed time: 00:00:01
output file name=/u01/oradata/CDBSBY/controlfile/o1_mf_bqflhy0t_.ctl
output file name=/u01/fra/CDBSBY/controlfile/o1_mf_bqflhydr_.ctl
Finished restore at 09-JUN-15

- In this case they are:

output file name=/u01/oradata/CDBSBY/controlfile/o1_mf_bqflhy0t_.ctl
output file name=/u01/fra/CDBSBY/controlfile/o1_mf_bqflhydr_.ctl

- modify the pfile to include these. You can also use "show parameter control_files".

- create spfile from pfile and restart the standby
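
A sketch of this step (pfile location assumed):

SQL> create spfile from pfile='/u01/app/oracle/product/12.1.0.2/dbhome_1/dbs/initCDBSBY.ora';
SQL> shutdown immediate
SQL> startup mount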

2) add the database into the broker configuration

- Enable the broker on primary and standby

SQL> alter system set dg_broker_start = true;

- add the databases to the broker configuration

[oracle@server3 ~]$ dgmgrl /
DGMGRL for Linux: Version 12.1.0.2.0 - 64bit Production

Copyright (c) 2000, 2013, Oracle. All rights reserved.

Welcome to DGMGRL, type "help" for information.
Connected as SYSDBA.

DGMGRL>  CREATE CONFIGURATION twelve as PRIMARY DATABASE IS 'CDB2' CONNECT IDENTIFIER IS 'CDB2';
Configuration "twelve" created with primary database "CDB2"

DGMGRL> add database 'CDBSBY' AS CONNECT IDENTIFIER IS 'CDBSBY';
Database "CDBSBY" added

- create standby redo logs on each database

Check their size in v$log, then create the standby redo logs on each database. The following should work; as a rule of thumb you create one more SRL per thread than there are online redo log groups (there is only one thread in single-instance Oracle).
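
A quick way to check the sizes (a sketch):

SQL> select group#, thread#, bytes/1024/1024 as mb from v$log;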

SQL> begin
  2  for i in 1..4 loop
  3   execute immediate 'alter database add standby logfile size 52428800';
  4  end loop;
  5  end;
  6  /
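
- verify the result on each database (a sketch):

SQL> select group#, bytes/1024/1024 as mb, status from v$standby_log;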

- enable the configuration

DGMGRL> enable configuration
Enabled.

DGMGRL> show configuration

Configuration - twelve

  Protection Mode: MaxPerformance
  Members:
  CDB2   - Primary database
    CDBSBY - Physical standby database

Fast-Start Failover: DISABLED

Configuration Status:
SUCCESS   (status updated 4 seconds ago)


3) create a new PDB on the primary, tail the alert.log to see what's happening on the standby

- be sure to have standby_file_management set to auto

DGMGRL> show database 'CDB2' standbyfilemanagement
  StandbyFileManagement = 'AUTO'
DGMGRL> show database 'CDBSBY' standbyfilemanagement
  StandbyFileManagement = 'AUTO'

SQL> select name, db_unique_name, database_role from v$database;

NAME      DB_UNIQUE_NAME                 DATABASE_ROLE
--------- ------------------------------ ----------------
CDB2      CDB2                           PRIMARY

SQL> create pluggable database PDBSBY admin user PDBSBY_ADMIN identified by secret;

- tail the primary alert.log

2015-06-09 07:34:43.265000 -04:00
create pluggable database PDBSBY admin user PDBSBY_ADMIN identified by *
 APEX_040200.WWV_FLOW_ADVISOR_CHECKS (CHECK_STATEMENT) - CLOB populated
2015-06-09 07:35:05.280000 -04:00
****************************************************************
Pluggable Database PDBSBY with pdb id - 5 is created as UNUSABLE.
If any errors are encountered before the pdb is marked as NEW,
then the pdb must be dropped
****************************************************************
Database Characterset for PDBSBY is WE8MSWIN1252
2015-06-09 07:35:06.834000 -04:00
Deleting old file#5 from file$
Deleting old file#7 from file$
Adding new file#23 to file$(old file#5)
Adding new file#24 to file$(old file#7)
2015-06-09 07:35:08.031000 -04:00
Successfully created internal service pdbsby at open
2015-06-09 07:35:12.391000 -04:00
ALTER SYSTEM: Flushing buffer cache inst=0 container=5 local
2015-06-09 07:35:20.225000 -04:00
****************************************************************
Post plug operations are now complete.
Pluggable database PDBSBY with pdb id - 5 is now marked as NEW.
****************************************************************
Completed: create pluggable database PDBSBY admin user PDBSBY_ADMIN identified by *

- tail the standby alert.log

2015-06-09 07:34:58.636000 -04:00
Recovery created pluggable database PDBSBY
2015-06-09 07:35:03.499000 -04:00
Recovery copied files for tablespace SYSTEM
Recovery successfully copied file /u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_system_bqfmtn2d_.dbf from /u01/oradata/CDBSBY/datafile/o1_mf_system_bqflkyry_.dbf
2015-06-09 07:35:05.219000 -04:00
Successfully added datafile 23 to media recovery
Datafile #23: '/u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_system_bqfmtn2d_.dbf'
2015-06-09 07:35:13.119000 -04:00
Recovery copied files for tablespace SYSAUX
Recovery successfully copied file /u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_sysaux_bqfmtn2l_.dbf from /u01/oradata/CDBSBY/datafile/o1_mf_sysaux_bqflkpc2_.dbf
2015-06-09 07:35:17.968000 -04:00
Successfully added datafile 24 to media recovery
Datafile #24: '/u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_sysaux_bqfmtn2l_.dbf'


4) switch over to CDBSBY

Using the broker, connect to CDBSBY as sysdba. Then verify switchover readiness:

[oracle@server3 ~]$ dgmgrl
DGMGRL for Linux: Version 12.1.0.2.0 - 64bit Production

Copyright (c) 2000, 2013, Oracle. All rights reserved.

Welcome to DGMGRL, type "help" for information.
DGMGRL> connect sys@cdbsby
Password:
Connected as SYSDBA.
DGMGRL> validate database 'CDBSBY';

  Database Role:     Physical standby database
  Primary Database:  CDB2

  Ready for Switchover:  Yes
  Ready for Failover:    Yes (Primary Running)

  Temporary Tablespace File Information:
    CDB2 TEMP Files:    5
    CDBSBY TEMP Files:  4

  Flashback Database Status:
    CDB2:    Off
    CDBSBY:  Off

  Current Log File Groups Configuration:
    Thread #  Online Redo Log Groups  Standby Redo Log Groups Status
              (CDB2)                  (CDBSBY)
    1         3                       3                       Insufficient SRLs

  Future Log File Groups Configuration:
    Thread #  Online Redo Log Groups  Standby Redo Log Groups Status
              (CDBSBY)                (CDB2)
    1         3                       0                       Insufficient SRLs
    Warning: standby redo logs not configured for thread 1 on CDB2

DGMGRL>

If you see "ready for switchover", do it:

DGMGRL> switchover to 'CDBSBY';
Performing switchover NOW, please wait...
New primary database "CDBSBY" is opening...
Oracle Clusterware is restarting database "CDB2" ...
Switchover succeeded, new primary is "CDBSBY"
DGMGRL>

5) check if you can access PDBSBY

SQL> select name,db_unique_name,database_role from v$database;

NAME      DB_UNIQUE_NAME                 DATABASE_ROLE
--------- ------------------------------ ----------------
CDB2      CDBSBY                         PRIMARY

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         2 PDB$SEED                       READ ONLY  NO
         3 MASTER                         READ WRITE NO
         4 PDB1                           READ WRITE NO
         5 PDBSBY                         READ WRITE NO

SQL> select name from v$datafile
  2  /

NAME
---------------------------------------------------------------------------------------------------------
/u01/oradata/CDBSBY/datafile/o1_mf_undotbs1_bqfljffy_.dbf
/u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_system_bqfmtn2d_.dbf
/u01/oradata/CDBSBY/1815250A88E57497E0530C64A8C01A28/datafile/o1_mf_sysaux_bqfmtn2l_.dbf

SQL> select sys_context('userenv', 'con_name') from dual;

SYS_CONTEXT('USERENV','CON_NAME')
----------------------------------------------------------------------------------------------------------
PDBSBY


- keep the standby database! It will be needed later on.
}}}

! LAB X: CDB Resource Manager

{{{
1) Create a CDB resource plan

Consolidation requires the creation of a CDB resource manager plan. Please ensure you have the following PDBs in your CDB:
- MASTER
- PDB1
- PDBSBY

Create a CDB plan for your CDB and set the distribution of CPU shares and utilisation limits as follows:
- MASTER: 1 share, limit 30
- PDB1: 5 shares, limit 100
- PDBSBY: 3 shares, limit 70

There is no need to limit PQ. To keep the lab simple, no PDB plans are needed.

Unfortunately, due to the limited number of CPUs in the lab environment, we cannot test the plans in action!
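
If you could generate enough load, the per-PDB Resource Manager metrics would show the plan at work; a sketch (view and column names as of 12.1, adjust to your version):

SQL> select con_id, cpu_consumed_time, avg_cpu_utilization from v$rsrcpdbmetric;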

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         2 PDB$SEED                       READ ONLY  NO
         3 MASTER                         READ WRITE NO
         4 PDB1                           READ WRITE NO
         5 PDBSBY                         READ WRITE NO

- make sure you are in the ROOT

SQL> select sys_context('userenv','con_name') from dual;

declare
 v_plan_name varchar2(50) := 'ENKITEC_CDB_PLAN';
begin
 dbms_resource_manager.clear_pending_area;
 dbms_resource_manager.create_pending_area;

 dbms_resource_manager.create_cdb_plan(
  plan => v_plan_name,
  comment => 'A CDB plan for the 12c class'
 );

 dbms_resource_manager.create_cdb_plan_directive(
  plan => v_plan_name,
  pluggable_database => 'MASTER',
  shares => 1,
  utilization_limit => 30);

 dbms_resource_manager.create_cdb_plan_directive(
  plan => v_plan_name,
  pluggable_database => 'PDB1',
  shares => 5,
  utilization_limit => 100);

 dbms_resource_manager.create_cdb_plan_directive(
  plan => v_plan_name,
  pluggable_database => 'PDBSBY',
  shares => 3,
  utilization_limit => 70);

 dbms_resource_manager.validate_pending_area;
 dbms_resource_manager.submit_pending_area;
end;
/

2) Query the CDB Resource Plan dictionary information

COLUMN PLAN FORMAT A30
COLUMN STATUS FORMAT A10
COLUMN COMMENTS FORMAT A35
 
SELECT PLAN, STATUS, COMMENTS FROM DBA_CDB_RSRC_PLANS ORDER BY PLAN;

3) Query the CDB Plan directives in the dictionary

COLUMN PLAN HEADING 'Plan' FORMAT A26
COLUMN PLUGGABLE_DATABASE HEADING 'Pluggable|Database' FORMAT A25
COLUMN SHARES HEADING 'Shares' FORMAT 999
COLUMN UTILIZATION_LIMIT HEADING 'Utilization|Limit' FORMAT 999
COLUMN PARALLEL_SERVER_LIMIT HEADING 'Parallel|Server|Limit' FORMAT 999
 
SELECT PLAN, 
       PLUGGABLE_DATABASE, 
       SHARES, 
       UTILIZATION_LIMIT,
       PARALLEL_SERVER_LIMIT
  FROM DBA_CDB_RSRC_PLAN_DIRECTIVES
  ORDER BY PLAN;


4) Set the CDB resource plan

Set the new plan in the CDB$ROOT

SQL> alter system set RESOURCE_MANAGER_PLAN = 'ENKITEC_CDB_PLAN' scope=both;

System altered.
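
- optionally confirm the active plan (a sketch):

SQL> select name, is_top_plan from v$rsrc_plan;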

5) create a DBRM plan for the MASTER PDB

Create a new database resource plan with two new consumer groups for the users LOWPRIO and HIGHPRIO. The consumer group mappings are to be based on the Oracle user. Do not forget to add the SYS_GROUP and OTHER_GROUPS.

The various CPU entitlements are as follows:
- SYS_GROUP      - level 1 - 70
- HIGHPRIO_GROUP - level 1 - 100
- LOWPRIO_GROUP  - level 1 - 25
- OTHER_GROUPS   - level 1 - 45
- ORA$AUTOTASK   - level 1 - 15 

(Within a single level the management percentages must sum to no more than 100, so the sample solution below uses scaled-down values.)

No other plan directives are needed, and you can only have plan directives at level 1 in a PDB. Make sure you are connected to the PDB when executing the commands! Start by creating the users and granting them the connect role. Then define the mappings based on the Oracle user and grant both users the privilege to switch to their consumer groups. In the last step, create the plan and the plan directives.

create user LOWPRIO identified by lowprio;
create user HIGHPRIO identified by highprio;

grant connect to LOWPRIO;
grant connect to HIGHPRIO;


begin
 dbms_resource_manager.clear_pending_area;
 dbms_resource_manager.create_pending_area;

 dbms_resource_manager.create_consumer_group('LOWPRIO_GROUP', 'for low priority processing');
 dbms_resource_manager.create_consumer_group('HIGHPRIO_GROUP', 'we will starve you');

 dbms_resource_manager.validate_pending_area();
 dbms_resource_manager.submit_pending_area();
end;
/

begin
 dbms_resource_manager.create_pending_area();
 dbms_resource_manager.set_consumer_group_mapping(
		dbms_resource_manager.oracle_user, 'LOWPRIO', 'LOWPRIO_GROUP');
 dbms_resource_manager.set_consumer_group_mapping(
		dbms_resource_manager.oracle_user, 'HIGHPRIO', 'HIGHPRIO_GROUP');
 dbms_resource_manager.submit_pending_area();
end;
/

begin
 dbms_resource_manager_privs.grant_switch_consumer_group('LOWPRIO','LOWPRIO_GROUP', true);
 dbms_resource_manager_privs.grant_switch_consumer_group('HIGHPRIO','HIGHPRIO_GROUP', true);
end;
/

BEGIN
 dbms_resource_manager.clear_pending_area();
 dbms_resource_manager.create_pending_area();
 
 dbms_resource_manager.create_plan(
 	plan => 'ENKITEC_MASTER_PDB_PLAN',
 	comment => 'sample DBRM plan for the training classes'
 );

 dbms_resource_manager.create_plan_directive(
  plan => 'ENKITEC_MASTER_PDB_PLAN',
  comment => 'sys_group is level 1',
  group_or_subplan => 'SYS_GROUP',
  mgmt_p1 => 50);

 dbms_resource_manager.create_plan_directive(
  plan => 'ENKITEC_MASTER_PDB_PLAN',
  group_or_subplan => 'HIGHPRIO_GROUP',
  comment => 'us before anyone else',
  mgmt_p1 => 30
 );

 -- artificially limit the resources
 dbms_resource_manager.create_plan_directive(
  plan => 'ENKITEC_MASTER_PDB_PLAN',
  group_or_subplan => 'LOWPRIO_GROUP',
  comment => 'then the LOWPRIO group',
  mgmt_p1 => 10
 );

 -- finally anyone not in a previous consumer group will be mapped to the
 -- OTHER_GROUPS
 dbms_resource_manager.create_plan_directive(
  plan => 'ENKITEC_MASTER_PDB_PLAN',
  group_or_subplan => 'OTHER_GROUPS',
  comment => 'all the rest',
  mgmt_p1 => 5
 );
 
 dbms_resource_manager.validate_pending_area();
 dbms_resource_manager.submit_pending_area();
end;
/

6) enable the PDB resource plan

alter system set resource_manager_plan = 'ENKITEC_MASTER_PDB_PLAN';

7) Verify the resource manager plans are correct in their respective containers

SQL> select sys_context('userenv','con_name') from dual;

SYS_CONTEXT('USERENV','CON_NAME')
--------------------------------------------------------------------------------
CDB$ROOT

SQL> show parameter resource_manager_plan

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
resource_manager_plan                string      ENKITEC_CDB_PLAN
SQL>

SQL> alter session set container = master;

Session altered.

SQL> select sys_context('userenv','con_name') from dual;

SYS_CONTEXT('USERENV','CON_NAME')
--------------------------------------------------------------------------------
MASTER

SQL> show parameter resource_manager_plan

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
resource_manager_plan                string      ENKITEC_MASTER_PDB_PLAN
SQL>

8) connect as either highprio or lowprio to the PDB and check if the mapping works

[oracle@server3 ~]$ sqlplus highprio/highprio@localhost/master

SQL*Plus: Release 12.1.0.2.0 Production on Wed Jun 10 05:58:55 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Last Successful login time: Wed Jun 10 2015 05:57:33 -04:00

Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, OLAP, Advanced Analytics and Real Application Testing options


- in a different session

SQL> select con_id, resource_consumer_group from v$session where username = 'HIGHPRIO';

    CON_ID RESOURCE_CONSUMER_GROUP
---------- --------------------------------
         3 HIGHPRIO_GROUP
}}}

! LAB X: Generic RMAN Enhancements

{{{
In this lab you learn how to perform a table point-in-time recovery. 

1) create a table in a schema of your choice in the non-CDB and populate it with data

SQL> sho user
USER is "MARTIN"

SQL> create table recoverme tablespace users as select * from dba_objects;

Table created.

SQL> select table_name from tabs;

TABLE_NAME
--------------------------------------------------------------------------------
RECOVERME

2) get some information about the database; the most useful item is the SCN. This is the SCN to recover to in the next steps, so take a note of it.

SQL> select db_unique_name, database_role, cdb, current_scn from v$database;

DB_UNIQUE_NAME                    DATABASE_ROLE CDB CURRENT_SCN
------------------------------ ---------------- --- -----------
NCDB                                    PRIMARY  NO     1766295

3) ensure there were rows in the table at this particular SCN

SQL> select count(*) from recoverme;

COUNT(*)
----------
91858

4) truncate the table to simulate something daft

SQL> truncate table recoverme;

table truncated.

5) try to salvage the table without having to revert to a restore

SQL> flashback table recoverme to scn 1766304;
flashback table recoverme to scn 1766304
*
ERROR at line 1:
ORA-08189: cannot flashback the table because row movement is not enabled


SQL> alter table recoverme enable row movement;

Table altered.

SQL> flashback table recoverme to scn 1766304;
flashback table recoverme to scn 1766304
*
ERROR at line 1:
ORA-01466: unable to read data - table definition has changed

6) After this proved unsuccessful, perform a table point-in-time recovery

NB: which other recovery technique could you have tried in step 5? (Hint: Flashback Database, had it been enabled, is one candidate.)

RECOVER TABLE MARTIN.RECOVERME
UNTIL SCN 1766295
AUXILIARY DESTINATION '/u02/oradata/adata/oraback/NCDB/temp'
REMAP TABLE 'MARTIN'.'RECOVERME':'RECOVERME_RESTRD';
....
executing Memory Script

Oracle instance shut down

Performing import of tables...
IMPDP> Master table "SYS"."TSPITR_IMP_onvy_iEei" successfully loaded/unloaded
IMPDP> Starting "SYS"."TSPITR_IMP_onvy_iEei":
IMPDP> Processing object type TABLE_EXPORT/TABLE/TABLE
IMPDP> Processing object type TABLE_EXPORT/TABLE/TABLE_DATA
IMPDP> . . imported "MARTIN"."RECOVERME_RESTRD" 10.01 MB 91858 rows
IMPDP> Processing object type TABLE_EXPORT/TABLE/STATISTICS/TABLE_STATISTICS
IMPDP> Processing object type TABLE_EXPORT/TABLE/STATISTICS/MARKER
IMPDP> Job "SYS"."TSPITR_IMP_onvy_iEei" successfully completed at Thu Jun 11 09:42:52 2015 elapsed 0 00:00:15
Import completed


Removing automatic instance
Automatic instance removed
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/NCDB/datafile/o1_mf_temp_bqlldwvd_.tmp deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/ONVY_PITR_NCDB/onlinelog/o1_mf_3_bqllgqf9_.log deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/ONVY_PITR_NCDB/onlinelog/o1_mf_2_bqllgq2t_.log deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/ONVY_PITR_NCDB/onlinelog/o1_mf_1_bqllgpq2_.log deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/ONVY_PITR_NCDB/datafile/o1_mf_users_bqllgnnh_.dbf deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/NCDB/datafile/o1_mf_sysaux_bqllddlj_.dbf deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/NCDB/datafile/o1_mf_undotbs1_bqllddln_.dbf deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/NCDB/datafile/o1_mf_system_bqllddlb_.dbf deleted
auxiliary instance file /u02/oradata/adata/oraback/NCDB/temp/NCDB/controlfile/o1_mf_bqlld6c7_.ctl deleted
auxiliary instance file tspitr_onvy_78692.dmp deleted
Finished recover at 11.06.2015 09:42:54

7) check that the table was imported OK and has the rows needed.

SQL> conn martin/secret
Connected.
SQL> select table_name from tabs;

TABLE_NAME
--------------------------------------------------------------------------------
RECOVERME
RECOVERME_RESTRD

SQL> select count(*) from RECOVERME_RESTRD;

COUNT(*)
----------
91858

SQL> select count(*) from RECOVERME;

COUNT(*)
----------
0
}}}

! LAB X: Threaded Execution

{{{
In the final lab you will experiment with the changes introduced by threaded execution.

1) Verify your current settings

Start your NCDB if it is not open. Have a look at all the OS process IDs. How many are started?

[oracle@server3 ~]$ ps -ef | grep NCDB
oracle   19728 19658  0 06:27 pts/0    00:00:00 screen -S NCDB
oracle   19729 19728  0 06:27 ?        00:00:00 SCREEN -S NCDB
oracle   19963     1  0 06:28 ?        00:00:00 ora_pmon_NCDB
oracle   19965     1  0 06:28 ?        00:00:00 ora_psp0_NCDB
oracle   19967     1  2 06:28 ?        00:00:11 ora_vktm_NCDB
oracle   19971     1  0 06:28 ?        00:00:00 ora_gen0_NCDB
oracle   19973     1  0 06:28 ?        00:00:00 ora_mman_NCDB
oracle   19977     1  0 06:28 ?        00:00:00 ora_diag_NCDB
oracle   19979     1  0 06:28 ?        00:00:00 ora_dbrm_NCDB
oracle   19981     1  0 06:28 ?        00:00:00 ora_vkrm_NCDB
oracle   19983     1  0 06:28 ?        00:00:00 ora_dia0_NCDB
oracle   19985     1  0 06:28 ?        00:00:02 ora_dbw0_NCDB
oracle   19987     1  0 06:28 ?        00:00:03 ora_lgwr_NCDB
oracle   19989     1  0 06:28 ?        00:00:00 ora_ckpt_NCDB
oracle   19991     1  0 06:28 ?        00:00:00 ora_lg00_NCDB
oracle   19993     1  0 06:28 ?        00:00:00 ora_smon_NCDB
oracle   19995     1  0 06:28 ?        00:00:00 ora_lg01_NCDB
oracle   19997     1  0 06:28 ?        00:00:00 ora_reco_NCDB
oracle   19999     1  0 06:28 ?        00:00:00 ora_lreg_NCDB
oracle   20001     1  0 06:28 ?        00:00:00 ora_pxmn_NCDB
oracle   20003     1  0 06:28 ?        00:00:00 ora_rbal_NCDB
oracle   20005     1  0 06:28 ?        00:00:00 ora_asmb_NCDB
oracle   20007     1  0 06:28 ?        00:00:01 ora_mmon_NCDB
oracle   20009     1  0 06:28 ?        00:00:00 ora_mmnl_NCDB
oracle   20013     1  0 06:28 ?        00:00:00 ora_mark_NCDB
oracle   20015     1  0 06:28 ?        00:00:00 ora_d000_NCDB
oracle   20017     1  0 06:28 ?        00:00:00 ora_s000_NCDB
oracle   20021     1  0 06:28 ?        00:00:00 ora_dmon_NCDB
oracle   20033     1  0 06:28 ?        00:00:00 ora_o000_NCDB
oracle   20039     1  0 06:28 ?        00:00:00 ora_o001_NCDB
oracle   20043     1  0 06:28 ?        00:00:00 ora_rvwr_NCDB
oracle   20045     1  0 06:28 ?        00:00:00 ora_insv_NCDB
oracle   20047     1  0 06:28 ?        00:00:00 ora_nsv1_NCDB
oracle   20049     1  0 06:28 ?        00:00:00 ora_fsfp_NCDB
oracle   20054     1  0 06:28 ?        00:00:00 ora_rsm0_NCDB
oracle   20056     1  0 06:29 ?        00:00:00 ora_tmon_NCDB
oracle   20058     1  0 06:29 ?        00:00:00 ora_arc0_NCDB
oracle   20060     1  0 06:29 ?        00:00:00 ora_arc1_NCDB
oracle   20062     1  0 06:29 ?        00:00:00 ora_arc2_NCDB
oracle   20064     1  0 06:29 ?        00:00:00 ora_arc3_NCDB
oracle   20066     1  0 06:29 ?        00:00:00 ora_o002_NCDB
oracle   20068     1  0 06:29 ?        00:00:00 ora_o003_NCDB
oracle   20074     1  0 06:29 ?        00:00:00 ora_o004_NCDB
oracle   20078     1  0 06:29 ?        00:00:00 ora_tt00_NCDB
oracle   20080     1  0 06:29 ?        00:00:00 ora_tt01_NCDB
oracle   20091     1  0 06:29 ?        00:00:00 ora_p000_NCDB
oracle   20093     1  0 06:29 ?        00:00:00 ora_p001_NCDB
oracle   20095     1  0 06:29 ?        00:00:00 ora_p002_NCDB
oracle   20097     1  0 06:29 ?        00:00:00 ora_p003_NCDB
oracle   20099     1  0 06:29 ?        00:00:00 ora_smco_NCDB
oracle   20101     1  0 06:29 ?        00:00:00 ora_w000_NCDB
oracle   20103     1  0 06:29 ?        00:00:00 ora_w001_NCDB
oracle   20107     1  0 06:29 ?        00:00:00 ora_aqpc_NCDB
oracle   20111     1  0 06:29 ?        00:00:00 ora_p004_NCDB
oracle   20113     1  0 06:29 ?        00:00:00 ora_p005_NCDB
oracle   20115     1  0 06:29 ?        00:00:00 ora_p006_NCDB
oracle   20117     1  0 06:29 ?        00:00:00 ora_p007_NCDB
oracle   20119     1  0 06:29 ?        00:00:00 ora_cjq0_NCDB
oracle   20121     1  0 06:29 ?        00:00:00 ora_qm02_NCDB
oracle   20125     1  0 06:29 ?        00:00:00 ora_q002_NCDB
oracle   20127     1  0 06:29 ?        00:00:00 ora_q003_NCDB
oracle   20131     1  0 06:29 ?        00:00:00 oracleNCDB (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
oracle   20305     1  1 06:30 ?        00:00:03 ora_m005_NCDB
oracle   20412     1  0 06:33 ?        00:00:00 ora_m004_NCDB
oracle   20481     1  0 06:34 ?        00:00:00 ora_j000_NCDB
oracle   20483     1  0 06:34 ?        00:00:00 ora_j001_NCDB
oracle   20508 20415  0 06:36 pts/15   00:00:00 grep --color=auto NCDB

[oracle@server3 ~]$ ps -ef | grep NCDB | grep -v grep | wc -l
66

- keep that number in mind

2) switch to threaded_execution

Connect as sysdba and check whether the instance is using threaded execution. If not, enable it and bounce the instance for the parameter to take effect. What do you notice when the database restarts?

SQL> show parameter threaded_

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
threaded_execution                   boolean     FALSE
SQL> alter system set threaded_execution=true scope=spfile;

System altered.

SQL> shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup
ERROR:
ORA-01017: invalid username/password; logon denied


ORA-01017: invalid username/password; logon denied
SQL>

3) how can you start the database?

With threaded_execution=true, operating system authentication no longer works - hence the ORA-01017 above - so connect with password authentication instead.

SQL> conn sys/change_on_install as sysdba
Connected.

SQL> alter database mount;

Database altered.

SQL> alter database open;

Database altered.

SQL>

4) check the OS processes now - how many are there?

[oracle@server3 ~]$ ps -ef | grep NCDB | egrep -vi "grep|screen" | nl
     1  oracle   20858     1  0 06:40 ?        00:00:00 ora_pmon_NCDB
     2  oracle   20860     1  0 06:40 ?        00:00:00 ora_psp0_NCDB
     3  oracle   20862     1  2 06:40 ?        00:00:05 ora_vktm_NCDB
     4  oracle   20866     1  0 06:40 ?        00:00:01 ora_u004_NCDB
     5  oracle   20872     1  6 06:40 ?        00:00:15 ora_u005_NCDB
     6  oracle   20879     1  0 06:40 ?        00:00:00 ora_dbw0_NCDB
     7  oracle   20959     1  0 06:42 ?        00:00:00 oracleNCDB (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
[oracle@server3 ~]$

5) Can you think of a reason why there are so few? Can you make the others appear? Clue: check man ps

[oracle@server3 ~]$ ps -eLf | grep NCDB | egrep -vi "grep|screen" | grep -v grep
oracle   20858     1 20858  0    1 06:40 ?        00:00:00 ora_pmon_NCDB
oracle   20860     1 20860  0    1 06:40 ?        00:00:00 ora_psp0_NCDB
oracle   20862     1 20862  2    1 06:40 ?        00:00:06 ora_vktm_NCDB
oracle   20866     1 20866  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20867  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20868  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20869  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20875  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20880  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20881  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20882  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20883  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20884  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20886  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20888  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20889  0   14 06:40 ?        00:00:00 ora_u004_NCDB
oracle   20866     1 20928  0   14 06:41 ?        00:00:00 ora_u004_NCDB
oracle   20872     1 20872  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20873  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20874  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20876  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20877  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20885  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20887  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20890  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20891  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20892  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20893  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20896  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20897  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20898  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20900  0   45 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20922  0   45 06:41 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20925  0   45 06:41 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20932  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20934  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20935  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20936  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20937  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20938  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20939  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20940  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20941  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20942  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20943  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20944  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20945  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20947  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20948  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20949  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20950  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20951  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20952  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20953  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20954  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20955  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21088  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21089  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21091  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21092  0   45 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21138  0   45 06:44 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21139  0   45 06:44 ?        00:00:00 ora_u005_NCDB
oracle   20879     1 20879  0    1 06:40 ?        00:00:00 ora_dbw0_NCDB
oracle   20959     1 20959  0    1 06:42 ?        00:00:00 oracleNCDB (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))

6) create a session over the network (Oracle Net) - is the session a process or a thread?

[oracle@server3 ~]$ sqlplus martin/secret@ncdb

SQL*Plus: Release 12.1.0.2.0 Production on Thu Jun 11 06:45:57 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Last Successful login time: Thu Apr 23 2015 04:11:21 -04:00

Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, Automatic Storage Management, OLAP, Advanced Analytics
and Real Application Testing options

SQL> select userenv('sid') from dual;

USERENV('SID')
--------------
            15


- in another session

SQL> select pid,sosid,spid,stid,execution_type from v$process where addr = (select paddr from v$session where sid = 15);

       PID SOSID                    SPID                     STID                     EXECUTION_
---------- ------------------------ ------------------------ ------------------------ ----------
        30 21178                    21178                    21178                    PROCESS

- this appears to be a process. Confirm on the OS-level:

[oracle@server3 ~]$ ps -eLf | grep 21178 | grep -v grep
oracle   21178     1 21178  0    1 06:45 ?        00:00:00 oracleNCDB (LOCAL=NO)

6) Now enable new sessions to be created as threads. To do so, change the listener configuration to add DEDICATED_THROUGH_BROKER_listener = ON, then reload the listener.
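
- for illustration (my note, assuming the listener is named "listener"; the parameter suffix is always the listener name), listener.ora gets the entry below, followed by a reload:

DEDICATED_THROUGH_BROKER_listener = ON

[oracle@server3 ~]$ lsnrctl reload listener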

7) connect again

[oracle@server3 ~]$ sqlplus martin/secret@ncdb

SQL*Plus: Release 12.1.0.2.0 Production on Thu Jun 11 06:56:38 2015

Copyright (c) 1982, 2014, Oracle.  All rights reserved.

Last Successful login time: Thu Jun 11 2015 06:45:58 -04:00

Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.2.0 - 64bit Production
With the Partitioning, Automatic Storage Management, OLAP, Advanced Analytics
and Real Application Testing options

SQL> select userenv('sid') from dual;

USERENV('SID')
--------------
            31


- verify if it's a process or a thread

SQL> select pid,sosid,spid,stid,execution_type from v$process where addr = (select paddr from v$session where sid = 31);

       PID SOSID                    SPID                     STID                     EXECUTION_
---------- ------------------------ ------------------------ ------------------------ ----------
        30 20872_21481              20872                    21481                    THREAD

- it is. Can you see this on the OS too?

[oracle@server3 ~]$ ps -eLf | egrep 21481
oracle   20872     1 21481  0   46 06:56 ?        00:00:00 ora_u005_NCDB

- Note that 21481 is the STID (the OS thread ID). The SPID (20872) is the ora_u005 container process that hosts all the threads:

[oracle@server3 ~]$ ps -eLf | egrep 20872
oracle   20872     1 20872  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20873  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20874  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20876  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20877  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20885  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20887  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20890  0   49 06:40 ?        00:00:01 ora_u005_NCDB
oracle   20872     1 20891  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20892  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20893  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20896  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20898  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20900  0   49 06:40 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20922  0   49 06:41 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20925  0   49 06:41 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20932  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20934  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20935  0   49 06:42 ?        00:00:01 ora_u005_NCDB
oracle   20872     1 20936  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20937  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20938  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20939  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20940  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20941  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20942  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20943  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20944  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20945  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20947  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20948  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20949  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20950  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20951  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20952  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20953  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20954  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 20955  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21088  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21089  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21091  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21092  0   49 06:42 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21481  0   49 06:56 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21494  0   49 06:57 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21499  0   49 06:57 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21500  0   49 06:57 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21521  0   49 06:59 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21522  0   49 06:59 ?        00:00:00 ora_u005_NCDB
oracle   20872     1 21536  0   49 07:00 ?        00:00:00 ora_u005_NCDB

8) kill the user session with SID 31 ON THE OS LEVEL. Do not use alter system disconnect session!

[oracle@server3 ~]$ ps -eLf | egrep 21481
oracle   20872     1 21481  0   48 06:56 ?        00:00:00 ora_u005_NCDB
oracle   21578 21236 21578  0    1 07:01 pts/16   00:00:00 grep -E --color=auto 21481
[oracle@server3 ~]$ kill -9 21481
[oracle@server3 ~]$

- what do you see? Remember never to do this in production!
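
- expected outcome (my note, not captured output): on Linux, kill -9 against a thread ID like 21481 signals the whole thread group, i.e. the entire ora_u005 process 20872, so every session (and any background thread) hosted by that process dies with it, and the instance itself can crash. With threaded_execution the safe tool really is alter system kill/disconnect session.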


}}}


! end
http://facedba.blogspot.com/2017/10/recover-table-from-rman-backup-in.html
<<showtoc>>

! instrumentation for the before and after change
vi memcount.sh
{{{
echo "##### count the threads"
ps -eLf | grep $ORACLE_SID | wc -l
echo "##### count the processes"
ps -ef | grep $ORACLE_SID | wc -l
echo "##### CPU%, MEM%, VSZ, RSS for all users"
ps -A -o pcpu,pmem,vsz,rss | awk '{cpu += $1; mem += $2; vsz += $3; rss += $4} END {print cpu, mem, vsz/1024, rss/1024}'
echo "##### CPU%, MEM%, VSZ, RSS for oracle user"
ps -u $USER -o pcpu,pmem,vsz,rss | awk '{cpu += $1; mem += $2; vsz += $3; rss += $4} END {print cpu, mem, vsz/1024, rss/1024}'
echo "##### system memory"
free -m
echo "##### this sums the %MEM for all users"
ps aux | awk 'NR != 1 {x[$1] += $4} END{ for(z in x) {print z, x[z]"%"}}'
echo "##### this greps the current ORACLE_SID (excluding others) and sums the %MEM"
ps aux | grep $ORACLE_SID | awk 'NR != 1 {x[$1] += $4} END{ for(z in x) {print z, x[z]"%"}}'
}}}

! enable / disable
-- enable
alter system set threaded_execution=true scope=spfile sid='*';
srvctl stop database -d noncdb
srvctl start database -d noncdb
sqlplus sys/oracle@enkx4db01.enkitec.com/noncdb.enkitec.com as sysdba

-- disable
alter system set threaded_execution=false scope=spfile sid='*';
srvctl stop database -d noncdb
srvctl start database -d noncdb
sqlplus sys/oracle@enkx4db01.enkitec.com/noncdb.enkitec.com as sysdba
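
note: with threaded_execution=true, OS authentication is disabled, which is why the connects above use a password (a bequeath "sqlplus / as sysdba" typically fails with ORA-01017). A quick sketch to eyeball the process/thread mix after the restart:
{{{
select background, execution_type, count(*)
from v$process
group by background, execution_type
order by 1, 2;
}}}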


show parameter threaded

select sys_context('userenv','sid') from dual;
set lines 300
select s.username, s.sid, s.serial#, s.con_id, p.spid, p.sosid, p.stid, p.execution_type
from v$session s, v$process p
where s.sid = 270
and s.paddr = p.addr
/

select count(spid),spid,execution_type from v$process where background = 1 group by spid, execution_type;

select pname, pid, sosid, spid, stid, execution_type
from v$process where background = 1
order by pname
/

select pname, pid, sosid, spid, stid, execution_type
from v$process 
order by pname
/

ps -ef | grep noncdb
ps -eLf | grep noncdb


! before and after effect

-- BEFORE
{{{
$ sh memcount.sh
##### count the threads
49
##### count the processes
49
##### CPU%, MEM%, VSZ, RSS for all users
42.3 159.7 81873.2 1686.75
##### CPU%, MEM%, VSZ, RSS for oracle user
41 154.1 62122.9 1586.95
##### system memory
             total       used       free     shared    buffers     cached
Mem:           994        978         15          0          1        592
-/+ buffers/cache:        385        609
Swap:         1227        591        636
##### this sums the %MEM for all users
gdm 1.1%
oracle 153.9%
rpc 0%
dbus 0.1%
68 0.1%
rtkit 0%
postfix 0.1%
rpcuser 0%
root 4.2%
##### this greps the current ORACLE_SID (excluding others) and sums the %MEM
oracle 142.3%
}}}


-- AFTER
{{{
$ sh memcount.sh
##### count the threads
55
##### count the processes
7
##### CPU%, MEM%, VSZ, RSS for all users
58.3 92.9 56845.8 1005.93
##### CPU%, MEM%, VSZ, RSS for oracle user
57.1 87.3 37095.5 906.363
##### system memory
             total       used       free     shared    buffers     cached
Mem:           994        965         28          0          1        628
-/+ buffers/cache:        336        658
Swap:         1227        591        636
##### this sums the %MEM for all users
gdm 1.1%
oracle 87.4%
rpc 0%
dbus 0.1%
68 0.1%
rtkit 0%
postfix 0.1%
rpcuser 0%
root 4.2%
##### this greps the current ORACLE_SID (excluding others) and sums the %MEM
oracle 75.7%
}}}


! initial conclusions
* all in all the count of processes dropped from @@49 to 7@@, but what does this mean in terms of resource savings? I'd say it mostly affects memory:
** VSZ (virtual memory size) dropped from 62122.9 MB to 37095.5 MB for the oracle user, which is a ~40% decrease
** RSS (resident set size) dropped from 1586.95 MB to 906.363 MB for the oracle user, which is a ~43% decrease
** %MEM (ratio of the processes' resident set sizes to the physical memory on the machine) dropped from 142.3% to 75.7%, which is a ~47% decrease; note the sum exceeds 100% because each Oracle process's RSS counts the shared SGA pages again, so shared memory is double counted across processes

So when you consolidate, the savings gained from changing to threaded_execution will be more physical memory headroom for more instances, and even more when switched to the PDB (multi-tenant) architecture.

For CPU, there's really no effect. I'd say the CPU workload requirements of an app stay the same; they will only decrease if you 1) tune or 2) move to a faster CPU.
See slides 27-30 of this OOW presentation by Arup http://www.oracle.com/technetwork/oem/app-quality-mgmt/con8788-2088738.pdf

! updates to the initial conclusions 

Had a talk with Frits Hoogland on the memory part of 12c threaded_execution.

I did some research on the performance and here's the result https://twitter.com/karlarao/status/582053491079843840
For the memory, here's the before and after https://twitter.com/karlarao/status/581367258804396032

On the performance side, non-threaded execution is faster; I'm definite about that.
For the memory gains, yes, some of the sessions could still end up eating the same (SGA+PGA) memory, but there are still some memory gains with some background processes, although there are inconsistencies:
* on the VM test that I did, it showed a ~40% decrease in RSS memory
* but on the Exadata test, RSS memory actually increased (from ~27258.4MB to ~42487.8MB)

All in all, I don't like threaded_execution in terms of performance. For memory, that needs a little bit more investigation because I'm seeing different results on VM and non-VM environments. 



! FINAL: complete view of metrics for CPU speed comparison - elap_exec, lios_elap, us_lio
[img(95%,95%)[ http://i.imgur.com/WpgZHre.png ]]


! a lot of POLL syscalls which is an overhead causing slower overall performance
This blog post validated what I found: he showed that threaded_execution=true does a lot of poll() syscalls, an overhead that causes slower overall performance.
The ora_u00N process was also discovered to use sockets to communicate with its threads.
http://blog.ora-600.pl/2015/12/17/oracle-12c-internals-of-threaded-execution/

{{{
Oracle instance with threaded_execution=false:

[root@rico ~]# strace -cp 12168
Process 12168 attached
^CProcess 12168 detached
% time     seconds  usecs/call     calls    errors syscall
------ ----------- ----------- --------- --------- ----------------
  0.00    0.000000           0         2           read
  0.00    0.000000           0         2           write
  0.00    0.000000           0         1           semctl
  0.00    0.000000           0       159           getrusage
  0.00    0.000000           0        12           times
  0.00    0.000000           0         3           semtimedop
------ ----------- ----------- --------- --------- ----------------
100.00    0.000000                   179           total
Oracle instance with threaded_execution=true:

[root@rico fd]# strace -cp 12165
Process 12165 attached
^CProcess 12165 detached
% time     seconds  usecs/call     calls    errors syscall
------ ----------- ----------- --------- --------- ----------------
 84.22    0.113706           0    980840           poll
 10.37    0.014000        7000         2           read
  5.41    0.007310        1218         6           semtimedop
  0.00    0.000000           0         2           write
  0.00    0.000000           0         1           semctl
  0.00    0.000000           0       419           getrusage
  0.00    0.000000           0        12           times
------ ----------- ----------- --------- --------- ----------------
100.00    0.135016                981282           total
 
[root@rico fd]# strace -p 12165 -o /tmp/threaded_exec.out
Process 12165 attached
^CProcess 12165 detached
[root@rico fd]# grep poll /tmp/threaded_exec.out | tail
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)
poll([{fd=63, events=POLLIN|POLLRDNORM}], 1, 0) = 0 (Timeout)

The STID 12165 was assigned to SPID 8107:
SQL> get spid
  1  select spid, stid
  2  from v$process p, v$session s
  3  where p.addr=s.paddr
  4* and   s.sid=sys_context('userenv','sid')
SQL> /
 
SPID             STID
------------------------ ------------------------
8107             12165

Let’s check the file descriptors for this thread:
[root@rico ~]# cd /proc/8107/task/12165/fd
[root@rico fd]# ls -al | grep 63
lrwx------. 1 oracle oinstall 64 12-17 21:38 63 -> socket:[73968]
[root@rico fd]# lsof | grep 73968
ora_scmn_  8107    oracle   63u     IPv6              73968        0t0        TCP localhost:ncube-lm->localhost:32400 (ESTABLISHED)
[root@rico fd]# ps aux | grep 8107 | grep -v grep
oracle    8107  4.7 29.0 6155520 2901516 ?     Ssl  20:01   6:54 ora_u005_orclth
[root@rico fd]#

}}}












also check this tiddler [[12c New Features]]



https://docs.oracle.com/database/121/DWHSG/refresh.htm#DWHSG-GUID-51191C38-D52F-4A4D-B6FF-E631965AD69A
<<<
Types of Out-of-Place Refresh

There are three types of out-of-place refresh:

    out-of-place fast refresh

    This offers better availability than in-place fast refresh. It also offers better performance when changes affect a large part of the materialized view.

    out-of-place PCT refresh

    This offers better availability than in-place PCT refresh. There are two different approaches for partitioned and non-partitioned materialized views. If truncation and direct load are not feasible, you should use out-of-place refresh when the changes are relatively large. If truncation and direct load are feasible, in-place refresh is preferable in terms of performance. In terms of availability, out-of-place refresh is always preferable.

    out-of-place complete refresh

    This offers better availability than in-place complete refresh.

Using the refresh interface in the DBMS_MVIEW package, with method = ? and out_of_place = true, out-of-place fast refresh is attempted first, then out-of-place PCT refresh, and finally out-of-place complete refresh. An example is the following:

DBMS_MVIEW.REFRESH('CAL_MONTH_SALES_MV', method => '?', 
   atomic_refresh => FALSE, out_of_place => TRUE);
<<<
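
For reference, a sketch of the same call forcing one specific method instead of letting '?' cascade (same CAL_MONTH_SALES_MV as the doc example; out-of-place refresh requires atomic_refresh => FALSE):
{{{
BEGIN
   -- method => 'C' asks for a complete refresh; combined with
   -- out_of_place => TRUE this becomes an out-of-place complete refresh
   DBMS_MVIEW.REFRESH('CAL_MONTH_SALES_MV',
                      method         => 'C',
                      atomic_refresh => FALSE,
                      out_of_place   => TRUE);
END;
/
}}}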



http://karandba.blogspot.com/2014/10/out-of-place-refresh-option-for.html
http://horia-berca-oracledba.blogspot.com/2013/10/out-of-place-materialized-view-refresh.html
https://community.oracle.com/mosc/discussion/4281580/partition-truncate-with-deferred-invalidation-any-side-effects
https://blog.dbi-services.com/oracle-12cr2-ddl-deferred-invalidation/
truncate table https://docs.oracle.com/database/121/SQLRF/statements_10007.htm#SQLRF01707

https://blogs.oracle.com/optimizer/optimizer-adaptive-features-in-oracle-database-12c-release-2


http://kerryosborne.oracle-guy.com/2013/11/24/12c-adaptive-optimization-part-1/
http://kerryosborne.oracle-guy.com/2013/12/09/12c-adaptive-optimization-part-2-hints/












.
Interesting observation about 15sec Top Activity graph
http://oracleprof.blogspot.com/2010/07/oem-performance-tab-and-active-session.html
https://leetcode.com/problems/combine-two-tables/
{{{
175. Combine Two Tables
Easy

Table: Person

+-------------+---------+
| Column Name | Type    |
+-------------+---------+
| PersonId    | int     |
| FirstName   | varchar |
| LastName    | varchar |
+-------------+---------+
PersonId is the primary key column for this table.

Table: Address

+-------------+---------+
| Column Name | Type    |
+-------------+---------+
| AddressId   | int     |
| PersonId    | int     |
| City        | varchar |
| State       | varchar |
+-------------+---------+
AddressId is the primary key column for this table.

 

Write a SQL query for a report that provides the following information for each person in the Person table, regardless if there is an address for each of those people:

FirstName, LastName, City, State


}}}



{{{
/* Write your PL/SQL query statement below */

select a.FirstName, a.LastName, b.City, b.State from 
person a, address b
where a.personid = b.personid (+); 

}}}
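The same query in ANSI join syntax, for reference (a left outer join keeps people that have no address row):
{{{
select a.FirstName, a.LastName, b.City, b.State
from person a
left outer join address b
  on a.personid = b.personid;
}}}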
https://leetcode.com/problems/second-highest-salary/
{{{
Write a SQL query to get the second highest salary from the Employee table.

+----+--------+
| Id | Salary |
+----+--------+
| 1  | 100    |
| 2  | 200    |
| 3  | 300    |
+----+--------+

For example, given the above Employee table, the query should return 200 as the second highest salary. If there is no second highest salary, then the query should return null.

+---------------------+
| SecondHighestSalary |
+---------------------+
| 200                 |
+---------------------+

}}}



{{{


SELECT  MAX(salary) AS SecondHighestSalary
FROM    employee
WHERE   salary NOT IN (
                        SELECT  MAX(salary)
                        FROM    employee
                      );
                      
                      
--select id, salary from
--(
--select a.id, a.salary, 
--    dense_rank() over(order by a.salary desc) drank 
--from test a)
--where drank = 2;                      
}}}
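An alternative sketch using dense_rank: putting MAX() on top keeps the required NULL output when there is no second-highest salary, since an aggregate over zero rows returns NULL instead of no rows:
{{{
select max(salary) as SecondHighestSalary
from (
  select salary,
         dense_rank() over (order by salary desc) drank
  from employee
)
where drank = 2;
}}}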
https://leetcode.com/problems/nth-highest-salary/
{{{
177. Nth Highest Salary
Medium

Write a SQL query to get the nth highest salary from the Employee table.

+----+--------+
| Id | Salary |
+----+--------+
| 1  | 100    |
| 2  | 200    |
| 3  | 300    |
+----+--------+

For example, given the above Employee table, the nth highest salary where n = 2 is 200. If there is no nth highest salary, then the query should return null.

+------------------------+
| getNthHighestSalary(2) |
+------------------------+
| 200                    |
+------------------------+

}}}



{{{
CREATE or replace FUNCTION getNthHighestSalary(N IN NUMBER) RETURN NUMBER IS
result NUMBER;
BEGIN
    /* Write your PL/SQL query statement below */
    select nvl(null,salary) salary 
    into result
    from
    (
    select distinct a.salary, 
        dense_rank() over(order by a.salary desc) drank 
    from employee a)
    where drank = N;
    
    RETURN result;
END;
/
select getNthHighestSalary(2) from dual;


CREATE or replace FUNCTION getNthHighestSalary2(N IN NUMBER) RETURN NUMBER IS
result NUMBER;

BEGIN
    select salary into result 
    from (select distinct(salary),rank() over (order by salary desc) as r  
            from test group by salary) where r=N;
return result;
END;
/
select getNthHighestSalary2(2) from dual;




select * from test;


select nvl('x',null) from (
select 1/NULL a from dual);

select 1/nvl(null,1) from dual; -- if not null return 1st , if null return 1
select 1/nvl(0,1) from dual; -- errors if zero
select 1/nullif(nvl( nullif(21,0) ,1),0) from dual; 

SELECT NULLIF(0,0) FROM DUAL;



select nvl(null,salary) salary from
(
select a.id, a.salary, 
    dense_rank() over(order by a.salary desc) drank 
from test a)
where drank = 2;
}}}
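Note on the SELECT ... INTO in getNthHighestSalary: when there is no Nth salary the inner query returns no rows and NO_DATA_FOUND is raised; called from a SQL statement (as the leetcode harness does), that exception makes the function silently return NULL, which happens to be exactly the required behavior. Called from PL/SQL, the same function would raise ORA-01403 instead, so a defensive version would catch NO_DATA_FOUND and RETURN NULL explicitly.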
https://leetcode.com/problems/rank-scores/
{{{
Write a SQL query to rank scores. If there is a tie between two scores, both should have the same ranking. Note that after a tie, the next ranking number should be the next consecutive integer value. In other words, there should be no "holes" between ranks.

+----+-------+
| Id | Score |
+----+-------+
| 1  | 3.50  |
| 2  | 3.65  |
| 3  | 4.00  |
| 4  | 3.85  |
| 5  | 4.00  |
| 6  | 3.65  |
+----+-------+

For example, given the above Scores table, your query should generate the following report (order by highest score):

+-------+------+
| Score | Rank |
+-------+------+
| 4.00  | 1    |
| 4.00  | 1    |
| 3.85  | 2    |
| 3.65  | 3    |
| 3.65  | 3    |
| 3.50  | 4    |
+-------+------+

}}}

{{{
select  a.score score, 
        dense_rank() over(order by a.score desc) rank 
    from scores a;
}}}
https://leetcode.com/problems/employees-earning-more-than-their-managers/
{{{
The Employee table holds all employees including their managers. Every employee has an Id, and there is also a column for the manager Id.

+----+-------+--------+-----------+
| Id | Name  | Salary | ManagerId |
+----+-------+--------+-----------+
| 1  | Joe   | 70000  | 3         |
| 2  | Henry | 80000  | 4         |
| 3  | Sam   | 60000  | NULL      |
| 4  | Max   | 90000  | NULL      |
+----+-------+--------+-----------+

Given the Employee table, write a SQL query that finds out employees who earn more than their managers. For the above table, Joe is the only employee who earns more than his manager.

+----------+
| Employee |
+----------+
| Joe      |
+----------+

}}}

{{{
/* Write your PL/SQL query statement below */
select b.name as Employee 
from Employee a, Employee b
where a.id = b.managerid
and b.salary > a.salary;


select * from employeeslc a, employeeslc b
where a.employee_id = b.manager_id
and b.salary > a.salary;
}}}
https://leetcode.com/problems/department-highest-salary/

{{{
The Employee table holds all employees. Every employee has an Id, a salary, and there is also a column for the department Id.

+----+-------+--------+--------------+
| Id | Name  | Salary | DepartmentId |
+----+-------+--------+--------------+
| 1  | Joe   | 70000  | 1            |
| 2  | Jim   | 90000  | 1            |
| 3  | Henry | 80000  | 2            |
| 4  | Sam   | 60000  | 2            |
| 5  | Max   | 90000  | 1            |
+----+-------+--------+--------------+

The Department table holds all departments of the company.

+----+----------+
| Id | Name     |
+----+----------+
| 1  | IT       |
| 2  | Sales    |
+----+----------+

Write a SQL query to find employees who have the highest salary in each of the departments. For the above tables, your SQL query should return the following rows (order of rows does not matter).

+------------+----------+--------+
| Department | Employee | Salary |
+------------+----------+--------+
| IT         | Max      | 90000  |
| IT         | Jim      | 90000  |
| Sales      | Henry    | 80000  |
+------------+----------+--------+

Explanation:

Max and Jim both have the highest salary in the IT department and Henry has the highest salary in the Sales department.
}}}

{{{
/* Write your PL/SQL query statement below */
select department, employee, salary  from (
select a.departmentid, b.name department, a.name employee, a.salary, dense_rank() over(partition by a.departmentid order by a.salary desc) rank
from employee a, department b
where a.departmentid = b.id
)
where rank = 1;


-- in oracle
select department, employee, salary  from (
select a.department_id, b.department_name department, a.first_name employee, a.salary, dense_rank() over(partition by a.department_id order by a.salary desc) rank
from employeeslc a, departmentslc b
where a.department_id = b.department_id(+)
)
where rank = 1;


}}}
https://leetcode.com/problems/department-top-three-salaries/
{{{
The Employee table holds all employees. Every employee has an Id, and there is also a column for the department Id.

+----+-------+--------+--------------+
| Id | Name  | Salary | DepartmentId |
+----+-------+--------+--------------+
| 1  | Joe   | 85000  | 1            |
| 2  | Henry | 80000  | 2            |
| 3  | Sam   | 60000  | 2            |
| 4  | Max   | 90000  | 1            |
| 5  | Janet | 69000  | 1            |
| 6  | Randy | 85000  | 1            |
| 7  | Will  | 70000  | 1            |
+----+-------+--------+--------------+

The Department table holds all departments of the company.

+----+----------+
| Id | Name     |
+----+----------+
| 1  | IT       |
| 2  | Sales    |
+----+----------+

Write a SQL query to find employees who earn the top three salaries in each of the department. For the above tables, your SQL query should return the following rows (order of rows does not matter).

+------------+----------+--------+
| Department | Employee | Salary |
+------------+----------+--------+
| IT         | Max      | 90000  |
| IT         | Randy    | 85000  |
| IT         | Joe      | 85000  |
| IT         | Will     | 70000  |
| Sales      | Henry    | 80000  |
| Sales      | Sam      | 60000  |
+------------+----------+--------+

Explanation:

In IT department, Max earns the highest salary, both Randy and Joe earn the second highest salary, and Will earns the third highest salary. There are only two employees in the Sales department, Henry earns the highest salary while Sam earns the second highest salary.
}}}

{{{
select department, employee, salary from 
(
select b.name department, a.name employee, a.salary salary, dense_rank() over(partition by b.id order by a.salary desc) drank
from employee a, department b
where a.departmentid = b.id) a
where a.drank <= 3;


select * from employees;
select * from departments;


select b.department_name department, a.first_name employee, a.salary salary
from employees a, departments b
where a.department_id = b.department_id;


select department, employee, salary from 
(
select b.department_name department, a.first_name employee, a.salary salary, dense_rank() over(partition by b.department_name order by a.salary desc) drank
from employees a, departments b
where a.department_id = b.department_id) a
where a.drank <= 3;


}}}
https://docs.oracle.com/en/database/oracle/oracle-database/18/newft/new-features.html#GUID-04A4834D-848F-44D5-8C34-36237D40F194

https://docs.oracle.com/en/database/oracle/oracle-database/19/newft/new-features.html#GUID-06A15128-1172-48E5-8493-CD670B9E57DC


! issues 


!! upgrade 
https://jonathanlewis.wordpress.com/2019/04/08/describe-upgrade/


!! 19c RAC limitations / licensing 
<<<
We know that a license of Oracle Database Standard Edition (DB SE) includes clustering services with Oracle Real Application Clusters (RAC) as a standard feature. Oracle RAC is not included in the Standard Edition of releases prior to Oracle Database 10g, nor is it an available option with those earlier releases. For Oracle DB SE versions that are no longer on the price list, the free RAC feature could be used to cluster up to a maximum of 4 sockets for eligible versions of DB SE.

For customers that are using Oracle DB SE, Oracle has now announced the de-support of RAC with Oracle DB SE 19c. If a customer attempts to upgrade to Oracle DB 19c, they will have 2 options (upgrade paths) to choose from:

OPTION 1:  Upgrade to DB EE, on which RAC 19c is a chargeable extra add-on option (as opposed to a standard feature with DB SE). Here, a customer will upgrade from Oracle RAC Standard Edition (SE) to Oracle RAC Enterprise Edition (EE). Note: if a customer attempts to install a RAC 19c database using Standard Edition, the Oracle Universal Installer will prevent the installation.

OR

OPTION 2:  Convert Oracle RAC Standard Edition to a Single Instance (Non-RAC) Standard Edition


There is another consideration. Most real-life requirements are for business-critical HA, that is Active/Passive. If this is the real requirement then you can also use Oracle Clusterware, which comes included at no charge when you buy Oracle Linux support. If you are using Red Hat you don't have to reinstall the OS; Oracle will just take over supporting your current Red Hat OS and you get to use Clusterware for free. Best part is that Oracle Linux is lower in price than Red Hat. Overall a much lower solution cost. Many Oracle customers are choosing this option.
<<<
* Auto STS Capture Task	
https://mikedietrichde.com/2020/05/28/do-you-love-unexpected-surprises-sys_auto_sts-in-oracle-19-7-0/
<<<
As far as I can see, the starting point for this is Bug 30001331 - CAPTURE SQL STATEMENTS INTO STS FOR PLAN STABILITY. It directed me to Bug 30260530 - CONTENT INCLUSION OF 30001331 IN DATABASE RU 19.7.0.0.0. So this seems to be present since 19.7.0, and the capture into it happens by default.
<<<
http://www.evernote.com/shard/s48/sh/1a9c1779-94ec-4e5a-a26f-ba92ea08988e/3bb10603e76f4fb346d7df4328882dcd

Also check out this thread at oracle-l for options on 10GbE on V2 http://www.freelists.org/post/oracle-l/Exadata-V2-Compute-Node-10GigE-PCI-card-installation







{{{
create table parallel_t1(c1 int, c2 char(100));

insert into parallel_t1
select level, 'x'
from dual
connect by level <= 8000
;

commit;


alter system set db_file_multiblock_read_count=128;
-- these underscore parameters were set in the pfile (hence the "startup pfile=..." below):
*._db_block_prefetch_limit=0
*._db_block_prefetch_quota=0
*._db_file_noncontig_mblock_read_count=0

alter system flush buffer_cache;


-- generate one full-scan query (drives the multiblock reads being tested)
select count(*) from parallel_t1;


16:28:36 SYS@orcl> shutdown abort
ORACLE instance shut down.
16:29:21 SYS@orcl> startup pfile='/home/oracle/app/oracle/product/11.2.0/dbhome_2/dbs/initorcl.ora'
ORACLE instance started.

Total System Global Area  456146944 bytes
Fixed Size                  1344840 bytes
Variable Size             348129976 bytes
Database Buffers          100663296 bytes
Redo Buffers                6008832 bytes
Database mounted.
Database opened.
16:29:33 SYS@orcl> alter system flush buffer_cache;

System altered.

16:29:38 SYS@orcl> show parameter db_file_multi

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_file_multiblock_read_count        integer     128
16:29:47 SYS@orcl>
16:29:47 SYS@orcl> set lines 300
16:29:51 SYS@orcl> col "Parameter" FOR a40
16:29:51 SYS@orcl> col "Session Value" FOR a20
16:29:51 SYS@orcl> col "Instance Value" FOR a20
16:29:51 SYS@orcl> col "Description" FOR a50
16:29:51 SYS@orcl> SELECT a.ksppinm "Parameter", b.ksppstvl "Session Value", c.ksppstvl "Instance Value", a.ksppdesc "Description"
16:29:51   2  FROM x$ksppi a, x$ksppcv b, x$ksppsv c
16:29:51   3  WHERE a.indx = b.indx AND a.indx = c.indx
16:29:51   4  AND substr(ksppinm,1,1)='_'
16:29:51   5  AND a.ksppinm like '%&parameter%'
16:29:51   6  /
Enter value for parameter: read_count

Parameter                                Session Value        Instance Value       Description
---------------------------------------- -------------------- -------------------- --------------------------------------------------
_db_file_exec_read_count                 128                  128                  multiblock read count for regular clients
_db_file_optimizer_read_count            128                  128                  multiblock read count for regular clients
_db_file_noncontig_mblock_read_count     0                    0                    number of noncontiguous db blocks to be prefetched
_sort_multiblock_read_count              2                    2                    multi-block read count for sort

16:29:54 SYS@orcl>
16:29:54 SYS@orcl> @mystat

628 rows created.


SNAP_DATE_END
-------------------
2014-09-08 16:29:57


SNAP_DATE_BEGIN
-------------------



no rows selected


no rows selected


0 rows deleted.

16:29:57 SYS@orcl> select count(*) from parallel_t1;

  COUNT(*)
----------
      8000

16:30:03 SYS@orcl> @mystat

628 rows created.


SNAP_DATE_END
-------------------
2014-09-08 16:30:05


SNAP_DATE_BEGIN
-------------------
2014-09-08 16:29:57


      Difference Statistics Name
---------------- --------------------------------------------------------------
               2 CPU used by this session
               4 CPU used when call started
               3 DB time
             628 HSC Heap Segment Block Changes
              10 SQL*Net roundtrips to/from client
              80 buffer is not pinned count
           3,225 bytes received via SQL*Net from client
           2,308 bytes sent via SQL*Net to client
              15 calls to get snapshot scn: kcmgss
               1 calls to kcmgas
              32 calls to kcmgcs
       1,097,728 cell physical IO interconnect bytes
               4 cluster key scan block gets
               4 cluster key scans
             672 consistent changes
             250 consistent gets
              12 consistent gets - examination
             250 consistent gets from cache
             211 consistent gets from cache (fastpath)
               1 cursor authentications
           1,307 db block changes
             703 db block gets
             703 db block gets from cache
              10 db block gets from cache (fastpath)
              18 enqueue releases
              19 enqueue requests
              14 execute count
             530 file io wait time
             149 free buffer requested
               5 index fetch by key
               2 index scans kdiixs1
             218 no work - consistent read gets
              42 non-idle wait count
              19 opened cursors cumulative
               5 parse count (failures)
              12 parse count (hard)
              19 parse count (total)
               1 parse time elapsed
              32 physical read IO requests
       1,097,728 physical read bytes
              32 physical read total IO requests
       1,097,728 physical read total bytes
             134 physical reads
             134 physical reads cache
             102 physical reads cache prefetch
              56 recursive calls
             629 redo entries
          88,372 redo size
             953 session logical reads
               3 shared hash latch upgrades - no wait
               3 sorts (memory)
               2 sorts (rows)
               5 sql area purged
               1 table fetch by rowid
             211 table scan blocks gotten
          13,560 table scan rows gotten
               4 table scans (short tables)
          42,700 undo change vector size
              17 user calls
               3 workarea executions - optimal
               4 workarea memory allocated

61 rows selected.


SNAP_DATE_BEGIN     SNAP_DATE_END
------------------- -------------------
2014-09-08 16:29:57 2014-09-08 16:30:05


1256 rows deleted.

16:30:05 SYS@orcl> set lines 300
16:30:38 SYS@orcl> col "Parameter" FOR a40
16:30:38 SYS@orcl> col "Session Value" FOR a20
16:30:38 SYS@orcl> col "Instance Value" FOR a20
16:30:38 SYS@orcl> col "Description" FOR a50
16:30:38 SYS@orcl> SELECT a.ksppinm "Parameter", b.ksppstvl "Session Value", c.ksppstvl "Instance Value", a.ksppdesc "Description"
16:30:38   2  FROM x$ksppi a, x$ksppcv b, x$ksppsv c
16:30:38   3  WHERE a.indx = b.indx AND a.indx = c.indx
16:30:38   4  AND substr(ksppinm,1,1)='_'
16:30:38   5  AND a.ksppinm like '%&parameter%'
16:30:38   6  /
Enter value for parameter: prefetch

Parameter                                Session Value        Instance Value       Description
---------------------------------------- -------------------- -------------------- --------------------------------------------------
_db_block_prefetch_quota                 0                    0                    Prefetch quota as a percent of cache size
_db_block_prefetch_limit                 0                    0                    Prefetch limit in blocks


}}}
{{{

-- CREATE THE JOB 
-- 1min interval --   repeat_interval => 'FREQ=MINUTELY;BYSECOND=0',
-- 2mins interval -- repeat_interval => 'FREQ=MINUTELY;INTERVAL=2;BYSECOND=0',
-- 10secs interval -- repeat_interval => 'FREQ=SECONDLY;INTERVAL=10',

BEGIN
    SYS.DBMS_SCHEDULER.CREATE_JOB (
            job_name => '"SYSTEM"."AWR_1MIN_SNAP"',
            job_type => 'PLSQL_BLOCK',
            job_action => 'BEGIN
dbms_workload_repository.create_snapshot;
END;',
            number_of_arguments => 0,
            start_date => SYSTIMESTAMP,
            repeat_interval => 'FREQ=MINUTELY;BYSECOND=0',
            end_date => NULL,
            job_class => '"SYS"."DEFAULT_JOB_CLASS"',
            enabled => FALSE,
            auto_drop => FALSE,
            comments => 'AWR_1MIN_SNAP',
            credential_name => NULL,
            destination_name => NULL);

    SYS.DBMS_SCHEDULER.SET_ATTRIBUTE( 
             name => '"SYSTEM"."AWR_1MIN_SNAP"', 
             attribute => 'logging_level', value => DBMS_SCHEDULER.LOGGING_OFF);
          
    SYS.DBMS_SCHEDULER.enable(
             name => '"SYSTEM"."AWR_1MIN_SNAP"');

END; 
/


-- ENABLE JOB
BEGIN
    SYS.DBMS_SCHEDULER.enable(
             name => '"SYSTEM"."AWR_1MIN_SNAP"');
END;
/   


-- RUN JOB
BEGIN
	SYS.DBMS_SCHEDULER.run_job('"SYSTEM"."AWR_1MIN_SNAP"');
END;
/


-- DISABLE JOB
BEGIN
    SYS.DBMS_SCHEDULER.disable(
             name => '"SYSTEM"."AWR_1MIN_SNAP"');
END;
/   


-- DROP JOB
BEGIN
    SYS.DBMS_SCHEDULER.DROP_JOB(job_name => '"SYSTEM"."AWR_1MIN_SNAP"',
                                defer => false,
                                force => true);
END;
/




-- MONITOR JOB (note: logging_level was set to LOGGING_OFF above, so DBA_SCHEDULER_JOB_LOG stays empty unless you raise the logging level)
SELECT * FROM DBA_SCHEDULER_JOB_LOG WHERE job_name = 'AWR_1MIN_SNAP';

col JOB_NAME format a15
col START_DATE format a25
col LAST_START_DATE format a25
col NEXT_RUN_DATE format a25
SELECT job_name, enabled, start_date, last_start_date, next_run_date FROM DBA_SCHEDULER_JOBS WHERE job_name = 'AWR_1MIN_SNAP';

-- AWR get recent snapshot
select * from 
(SELECT s0.instance_number, s0.snap_id, s0.startup_time,
  TO_CHAR(s0.END_INTERVAL_TIME,'YYYY-Mon-DD HH24:MI:SS') snap_start,
  TO_CHAR(s1.END_INTERVAL_TIME,'YYYY-Mon-DD HH24:MI:SS') snap_end,
  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) ela_min
FROM dba_hist_snapshot s0,
  dba_hist_snapshot s1
WHERE s1.snap_id           = s0.snap_id + 1
ORDER BY snap_id DESC)
where rownum < 11;

}}}
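
For comparison, the supported knob for snapshot cadence is DBMS_WORKLOAD_REPOSITORY.MODIFY_SNAPSHOT_SETTINGS, but its minimum interval is 10 minutes, which is exactly why the 1-minute snaps above need a scheduler job. A sketch (example values):
{{{
-- interval and retention are both given in minutes
BEGIN
  DBMS_WORKLOAD_REPOSITORY.MODIFY_SNAPSHOT_SETTINGS(
    interval  => 10,            -- 10 is the minimum AWR accepts
    retention => 8 * 24 * 60);  -- keep 8 days of snapshots
END;
/
}}}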




.
A short video about it, worth watching whenever you get some time, only 12 min:
https://www.ansible.com/quick-start-video


https://www.doag.org/formes/pubfiles/7375105/2015-K-INF-Frits_Hoogland-Automating__DBA__tasks_with_Ansible-Praesentation.pdf
https://fritshoogland.wordpress.com/2014/09/14/using-ansible-for-executing-oracle-dba-tasks/

https://learnxinyminutes.com/docs/ansible/

{{{
oracle@localhost.localdomain:/u01/oracle:orcl
$ s1

SQL*Plus: Release 12.1.0.1.0 Production on Tue Dec 16 00:53:22 2014

Copyright (c) 1982, 2013, Oracle.  All rights reserved.


Connected to:
Oracle Database 12c Enterprise Edition Release 12.1.0.1.0 - 64bit Production
With the Partitioning, OLAP, Advanced Analytics and Real Application Testing options

00:53:23 SYS@orcl>  select name, cdb, con_id from v$database;

NAME      CDB     CON_ID
--------- --- ----------
ORCL      YES          0

00:53:23 SYS@orcl> select INSTANCE_NAME, STATUS, CON_ID from v$instance;

INSTANCE_NAME    STATUS           CON_ID
---------------- ------------ ----------
orcl             OPEN                  0

00:53:39 SYS@orcl> col name format A20
00:54:24 SYS@orcl> select name, con_id from v$services;

NAME                     CON_ID
-------------------- ----------
pdb1                          3
orclXDB                       1
orcl                          1
SYS$BACKGROUND                1
SYS$USERS                     1

00:54:30 SYS@orcl> select CON_ID, NAME, OPEN_MODE from v$pdbs;

    CON_ID NAME                 OPEN_MODE
---------- -------------------- ----------
         2 PDB$SEED             READ ONLY
         3 PDB1                 READ WRITE

00:57:49 SYS@orcl> show con_name

CON_NAME
------------------------------
CDB$ROOT
00:58:19 SYS@orcl> show con_id

CON_ID
------------------------------
1
00:58:25 SYS@orcl> SELECT sys_context('userenv','CON_NAME') from dual;

SYS_CONTEXT('USERENV','CON_NAME')
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
CDB$ROOT

00:58:36 SYS@orcl> SELECT sys_context('userenv','CON_ID') from dual;

SYS_CONTEXT('USERENV','CON_ID')
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
1

00:58:44 SYS@orcl> col PDB_NAME format a8
00:59:43 SYS@orcl> col CON_ID format 99
00:59:51 SYS@orcl> select PDB_ID, PDB_NAME, DBID, GUID, CON_ID from cdb_pdbs;

    PDB_ID PDB_NAME       DBID GUID                             CON_ID
---------- -------- ---------- -------------------------------- ------
         2 PDB$SEED 4080030308 F081641BB43F0F7DE045000000000001      1
         3 PDB1     3345156736 F0832BAF14721281E045000000000001      1

01:00:07 SYS@orcl> col MEMBER format A40
01:00:42 SYS@orcl> select GROUP#, CON_ID, MEMBER from v$logfile;

    GROUP# CON_ID MEMBER
---------- ------ ----------------------------------------
         3      0 /u01/app/oracle/oradata/ORCL/onlinelog/o
                  1_mf_3_9fxn1pmn_.log

         3      0 /u01/app/oracle/fast_recovery_area/ORCL/
                  onlinelog/o1_mf_3_9fxn1por_.log

         2      0 /u01/app/oracle/oradata/ORCL/onlinelog/o
                  1_mf_2_9fxn1lmy_.log

         2      0 /u01/app/oracle/fast_recovery_area/ORCL/
                  onlinelog/o1_mf_2_9fxn1lox_.log

    GROUP# CON_ID MEMBER
---------- ------ ----------------------------------------

         1      0 /u01/app/oracle/oradata/ORCL/onlinelog/o
                  1_mf_1_9fxn1dq4_.log

         1      0 /u01/app/oracle/fast_recovery_area/ORCL/
                  onlinelog/o1_mf_1_9fxn1dsx_.log


6 rows selected.

01:00:49 SYS@orcl> col NAME format A60
01:01:28 SYS@orcl> select NAME , CON_ID from v$controlfile;

NAME                                                         CON_ID
------------------------------------------------------------ ------
/u01/app/oracle/oradata/ORCL/controlfile/o1_mf_9fxn1csd_.ctl      0
/u01/app/oracle/fast_recovery_area/ORCL/controlfile/o1_mf_9f      0
xn1d0k_.ctl


01:01:35 SYS@orcl> col file_name format A50
01:02:01 SYS@orcl> col tablespace_name format A8
01:02:10 SYS@orcl> col file_id format 9999
01:02:18 SYS@orcl> col con_id format 999
01:02:26 SYS@orcl> select FILE_NAME, TABLESPACE_NAME, FILE_ID, con_id from cdb_data_files order by con_id ;

FILE_NAME                                          TABLESPA FILE_ID CON_ID
-------------------------------------------------- -------- ------- ------
/u01/app/oracle/oradata/ORCL/datafile/o1_mf_system SYSTEM         1      1
_9fxmx6s1_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_mf_sysaux SYSAUX         3      1
_9fxmvhl3_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_mf_users_ USERS          6      1
9fxn0t8s_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_mf_undotb UNDOTBS1       4      1
s1_9fxn0vgg_.dbf

FILE_NAME                                          TABLESPA FILE_ID CON_ID
-------------------------------------------------- -------- ------- ------

/u01/app/oracle/oradata/ORCL/datafile/o1_mf_system SYSTEM         5      2
_9fxn22po_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_mf_sysaux SYSAUX         7      2
_9fxn22p3_.dbf

/u01/app/oracle/oradata/ORCL/F0832BAF14721281E0450 USERS         13      3
00000000001/datafile/o1_mf_users_9fxvoh6n_.dbf

/u01/app/oracle/oradata/ORCL/F0832BAF14721281E0450 SYSAUX        12      3

FILE_NAME                                          TABLESPA FILE_ID CON_ID
-------------------------------------------------- -------- ------- ------
00000000001/datafile/o1_mf_sysaux_9fxvnjdl_.dbf

/u01/app/oracle/oradata/ORCL/F0832BAF14721281E0450 APEX_226      14      3
00000000001/datafile/o1_mf_apex_226_9gfgd96o_.dbf  45286309
                                                   61551

/u01/app/oracle/oradata/ORCL/F0832BAF14721281E0450 SYSTEM        11      3
00000000001/datafile/o1_mf_system_9fxvnjdq_.dbf


10 rows selected.

01:02:40 SYS@orcl> col file_name format A42
01:03:49 SYS@orcl> select FILE_NAME, TABLESPACE_NAME, FILE_ID from dba_data_files;

FILE_NAME                                  TABLESPA FILE_ID
------------------------------------------ -------- -------
/u01/app/oracle/oradata/ORCL/datafile/o1_m SYSTEM         1
f_system_9fxmx6s1_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_m SYSAUX         3
f_sysaux_9fxmvhl3_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_m USERS          6
f_users_9fxn0t8s_.dbf

/u01/app/oracle/oradata/ORCL/datafile/o1_m UNDOTBS1       4
f_undotbs1_9fxn0vgg_.dbf

FILE_NAME                                  TABLESPA FILE_ID
------------------------------------------ -------- -------


01:03:56 SYS@orcl> col NAME format A12
01:07:23 SYS@orcl> select FILE#, ts.name, ts.ts#, ts.con_id
01:07:24   2  from v$datafile d, v$tablespace ts
01:07:30   3  where d.ts#=ts.ts#
01:07:39   4  and d.con_id=ts.con_id
01:07:46   5  order by 4,3;

     FILE# NAME                TS# CON_ID
---------- ------------ ---------- ------
         1 SYSTEM                0      1
         3 SYSAUX                1      1
         4 UNDOTBS1              2      1
         6 USERS                 4      1
         5 SYSTEM                0      2
         7 SYSAUX                1      2
        11 SYSTEM                0      3
        12 SYSAUX                1      3
        13 USERS                 3      3
        14 APEX_2264528          4      3
           630961551

     FILE# NAME                TS# CON_ID
---------- ------------ ---------- ------


10 rows selected.

01:07:52 SYS@orcl> col file_name format A47
01:08:23 SYS@orcl> select FILE_NAME, TABLESPACE_NAME, FILE_ID
01:08:30   2  from cdb_temp_files;

FILE_NAME                                       TABLESPA FILE_ID
----------------------------------------------- -------- -------
/u01/app/oracle/oradata/ORCL/datafile/o1_mf_tem TEMP           1
p_9fxn206l_.tmp

/u01/app/oracle/oradata/ORCL/F0832BAF14721281E0 TEMP           3
45000000000001/datafile/o1_mf_temp_9fxvnznp_.db
f

/u01/app/oracle/oradata/ORCL/datafile/pdbseed_t TEMP           2
emp01.dbf


01:08:36 SYS@orcl> col username format A22
01:09:09 SYS@orcl> select username, common, con_id from cdb_users
01:09:17   2  where username ='SYSTEM';

USERNAME               COM CON_ID
---------------------- --- ------
SYSTEM                 YES      1
SYSTEM                 YES      3
SYSTEM                 YES      2

01:09:22 SYS@orcl> select distinct username from cdb_users
01:09:37   2  where common ='YES';

USERNAME
----------------------
SPATIAL_WFS_ADMIN_USR
OUTLN
CTXSYS
SYSBACKUP
APEX_REST_PUBLIC_USER
ORACLE_OCM
APEX_PUBLIC_USER
MDDATA
GSMADMIN_INTERNAL
SYSDG
ORDDATA

USERNAME
----------------------
APEX_040200
DVF
MDSYS
GSMUSER
FLOWS_FILES
AUDSYS
DVSYS
OJVMSYS
APPQOSSYS
SI_INFORMTN_SCHEMA
ANONYMOUS

USERNAME
----------------------
LBACSYS
WMSYS
DIP
SYSKM
XS$NULL
OLAPSYS
SPATIAL_CSW_ADMIN_USR
APEX_LISTENER
SYSTEM
ORDPLUGINS
DBSNMP

USERNAME
----------------------
ORDSYS
XDB
GSMCATUSER
SYS

37 rows selected.

01:09:43 SYS@orcl> select distinct username, con_id from cdb_users
01:10:07   2  where common ='NO';

USERNAME               CON_ID
---------------------- ------
HR                          3
OE                          3
ADMIN                       3
PMUSER                      3
OBE                         3

01:10:26 SYS@orcl> select username, con_id from cdb_users
01:10:51   2  where common ='NO';

USERNAME               CON_ID
---------------------- ------
PMUSER                      3
HR                          3
ADMIN                       3
OE                          3
OBE                         3

01:10:59 SYS@orcl> col role format A30
01:11:34 SYS@orcl> select role, common, con_id from cdb_roles;

ROLE                           COM CON_ID
------------------------------ --- ------
CONNECT                        YES      1
RESOURCE                       YES      1
DBA                            YES      1
AUDIT_ADMIN                    YES      1
AUDIT_VIEWER                   YES      1
SELECT_CATALOG_ROLE            YES      1
EXECUTE_CATALOG_ROLE           YES      1
DELETE_CATALOG_ROLE            YES      1
CAPTURE_ADMIN                  YES      1
EXP_FULL_DATABASE              YES      1
IMP_FULL_DATABASE              YES      1

... output snipped ...

ROLE                           COM CON_ID
------------------------------ --- ------
DV_PATCH_ADMIN                 YES      2
DV_STREAMS_ADMIN               YES      2
DV_GOLDENGATE_ADMIN            YES      2
DV_XSTREAM_ADMIN               YES      2
DV_GOLDENGATE_REDO_ACCESS      YES      2
DV_AUDIT_CLEANUP               YES      2
DV_DATAPUMP_NETWORK_LINK       YES      2
DV_REALM_RESOURCE              YES      2
DV_REALM_OWNER                 YES      2

251 rows selected.

01:11:40 SYS@orcl> desc sys.system_privilege_map
 Name                                                                                                                                                  Null?    Type
 ----------------------------------------------------------------------------------------------------------------------------------------------------- -------- ----------------------------------------------------------------------------------------------------
 PRIVILEGE                                                                                                                                             NOT NULL NUMBER
 NAME                                                                                                                                                  NOT NULL VARCHAR2(40)
 PROPERTY                                                                                                                                              NOT NULL NUMBER

01:12:22 SYS@orcl> desc sys.table_privilege_map
 Name                                                                                                                                                  Null?    Type
 ----------------------------------------------------------------------------------------------------------------------------------------------------- -------- ----------------------------------------------------------------------------------------------------
 PRIVILEGE                                                                                                                                             NOT NULL NUMBER
 NAME                                                                                                                                                  NOT NULL VARCHAR2(40)

01:12:30 SYS@orcl> desc CDB_SYS_PRIVS
 Name                                                                                                                                                  Null?    Type
 ----------------------------------------------------------------------------------------------------------------------------------------------------- -------- ----------------------------------------------------------------------------------------------------
 GRANTEE                                                                                                                                                        VARCHAR2(128)
 PRIVILEGE                                                                                                                                                      VARCHAR2(40)
 ADMIN_OPTION                                                                                                                                                   VARCHAR2(3)
 COMMON                                                                                                                                                         VARCHAR2(3)
 CON_ID                                                                                                                                                         NUMBER

01:13:07 SYS@orcl> desc CDB_TAB_PRIVS
 Name                                                                                                                                                  Null?    Type
 ----------------------------------------------------------------------------------------------------------------------------------------------------- -------- ----------------------------------------------------------------------------------------------------
 GRANTEE                                                                                                                                                        VARCHAR2(128)
 OWNER                                                                                                                                                          VARCHAR2(128)
 TABLE_NAME                                                                                                                                                     VARCHAR2(128)
 GRANTOR                                                                                                                                                        VARCHAR2(128)
 PRIVILEGE                                                                                                                                                      VARCHAR2(40)
 GRANTABLE                                                                                                                                                      VARCHAR2(3)
 HIERARCHY                                                                                                                                                      VARCHAR2(3)
 COMMON                                                                                                                                                         VARCHAR2(3)
 TYPE                                                                                                                                                           VARCHAR2(24)
 CON_ID                                                                                                                                                         NUMBER

01:13:16 SYS@orcl> col grantee format A10
01:14:02 SYS@orcl> col granted_role format A28
01:14:09 SYS@orcl> select grantee, granted_role, common, con_id
01:14:16   2  from cdb_role_privs
01:14:22   3  where grantee='SYSTEM';

GRANTEE    GRANTED_ROLE                 COM CON_ID
---------- ---------------------------- --- ------
SYSTEM     DBA                          YES      1
SYSTEM     AQ_ADMINISTRATOR_ROLE        YES      1
SYSTEM     DBA                          YES      2
SYSTEM     AQ_ADMINISTRATOR_ROLE        YES      2
SYSTEM     DBA                          YES      3
SYSTEM     AQ_ADMINISTRATOR_ROLE        YES      3

6 rows selected.

01:14:29 SYS@orcl>

}}}
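A natural next step after browsing the CDB_* views from the root is switching containers (a sketch, using the PDB1 seen in the transcript):
{{{
-- move the session into the PDB; DBA_* views are then scoped to PDB1
alter session set container = PDB1;
show con_name
select file_name, tablespace_name from dba_data_files;

-- and back to the root
alter session set container = CDB$ROOT;
}}}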
https://docs.cloudera.com/HDPDocuments/HDP3/HDP-3.1.5/release-notes/content/upgrading_parent.html
https://docs.cloudera.com/HDPDocuments/HDP3/HDP-3.1.5/release-notes/content/known_issues.html
Oracle Mix - Oracle OpenWorld and Oracle Develop Suggest-a-Session
https://mix.oracle.com/oow10/faq
https://mix.oracle.com/oow10/streams


http://blogs.oracle.com/oracleopenworld/2010/06/missed_the_call_for_papers_dea.html
http://blogs.oracle.com/datawarehousing/2010/06/openworld_suggest-a-session_vo.html
http://structureddata.org/2010/07/13/oracle-openworld-2010-the-oracle-real-world-performance-group/
http://kevinclosson.wordpress.com/2010/08/26/whats-really-happening-at-openworld-2010/

BI 
http://www.rittmanmead.com/2010/09/03/rittman-mead-at-oracle-openworld-2010-san-francisco/
OCW 2010 photos by Karl Arao
http://www.flickr.com/photos/kylehailey/sets/72157625025196338/

Oracle Closed World 2010
http://www.flickr.com/photos/kylehailey/sets/72157625018583630/
-- scheduler builder username is karlara0
https://oracleus.wingateweb.com/scheduler/login.jsp


Volunteer geek work at RACSIG 9-10am Wed, Oct 5
http://en.wikibooks.org/wiki/RAC_Attack_-_Oracle_Cluster_Database_at_Home/Events

my notes ... http://www.evernote.com/shard/s48/sh/6591ce43-e00f-4b5c-ad12-b1f1547183a7/2a146737c4bfb7dab7453ba0bcdb4677

''bloggers meetup''
http://blogs.portrix-systems.de/brost/good-morning-san-francisco-5k-partner-fun-run/
http://dbakevlar.com/2011/10/oracle-open-world-2011-followup/
https://connor-mcdonald.com/2019/09/24/all-the-openworld-2019-downloads/
Data Mining for Business Analytics https://learning.oreilly.com/library/view/data-mining-for/9781119549840/
https://www.dataminingbook.com/book/python-edition
https://github.com/gedeck/dmba


Agile Data Science 2.0 https://learning.oreilly.com/library/view/agile-data-science/9781491960103/
https://gumroad.com/d/910c45fe02199287cc2ff23abcfcf821
https://github.com/rjurney/Agile_Data_Code_2





making use of smart scan made the run times faster and lowered CPU utilization, so more databases can be accommodated
http://www.evernote.com/shard/s48/sh/b1f43d49-1bcd-4319-b274-19a91cf338ac/f9f554d2d03b3f20db591d5e68392cbf

https://leetcode.com/problems/valid-anagram/
{{{
242. Valid Anagram
Easy

Given two strings s and t , write a function to determine if t is an anagram of s.

Example 1:

Input: s = "anagram", t = "nagaram"
Output: true

Example 2:

Input: s = "rat", t = "car"
Output: false

Note:
You may assume the string contains only lowercase alphabets.

Follow up:
What if the inputs contain unicode characters? How would you adapt your solution to such case?
}}}

{{{
class Solution:

    def isAnagram(self, s: str, t: str) -> bool:
        # normalize: drop spaces and lowercase (e.g. 'Dog ' vs 'God ')
        s = s.replace(' ', '').lower()
        t = t.replace(' ', '').lower()
        # two strings are anagrams iff their sorted characters are equal;
        # sorted() compares unicode code points too, which covers the follow-up question
        return sorted(s) == sorted(t)
}}}
{{{
Glenn Fawcett 
http://glennfawcett.files.wordpress.com/2013/06/ciops_data_x3-2.jpg
---
It wasn’t actually SLOB, but that might be interesting.
I used a mod of my blkhammer populate script to populate a bunch of tables OLTP style to 
show how WriteBack is used. As expected, Exadata is real good on 
“db file sequential read”… in the sub picosecond range if I am not mistaken :)
---
That was just a simple OLTP style insert test that spawns a bunch of PLSQL.  Yes for sure 
there were spills to disk... But the benefit was the coalescing of blocks.  DBWR is flushing really 
mostly random blocks, but the write back flash is pretty huge these days.  I was seeing average 
iosize to disk being around 800k but only about 8k to flash.
}}}
Backup and Recovery Performance and Best Practices for Exadata Cell and Oracle Exadata Database Machine  Oracle Database Release 11.2.0.2 and 11.2.0.3
http://www.oracle.com/technetwork/database/features/availability/maa-tech-wp-sundbm-backup-11202-183503.pdf

ODA (Oracle Database Appliance): HowTo Configure Multiple Public Network on GI (Grid Infrastructure) (Doc ID 1501039.1)
Data Guard: Redo Transport Services – How to use a separate network in a RAC environment. (Doc ID 1210153.1)
Data Guard Physical Standby 11.2 RAC Primary to RAC Standby using a second network (Doc ID 1349977.1)


https://blog.gruntwork.io/why-we-use-terraform-and-not-chef-puppet-ansible-saltstack-or-cloudformation-7989dad2865c


https://www.udemy.com/course/learn-devops-infrastructure-automation-with-terraform/learn/lecture/5890850#overview
https://www.udemy.com/course/building-oracle-cloud-infrastructure-using-terraform/
https://www.udemy.com/course/oracle-database-automation-using-ansible/
https://www.udemy.com/course/oracle-database-and-elk-stack-lets-do-data-visualization/
https://www.udemy.com/course/automate-file-processing-in-oracle-db-using-dbms-scheduler/


! short and sweet
https://www.linkedin.com/learning/learning-terraform-2/next-steps
https://www.udemy.com/course/learn-devops-infrastructure-automation-with-terraform/learn/lecture/5886134#overview



! OCI example stack (MuShop app)
https://oracle-quickstart.github.io/oci-cloudnative/introduction/
https://github.com/oracle-quickstart/oci-cloudnative

..
<<showtoc>>

! @@Create a new@@ PDB from the seed PDB
@@quickest way is via DBCA@@ (a scripted SQL sketch follows the list below)
DBCA options: 
* create a new PDB
* create new PDB from PDB Archive
* create PDB from PDB file set (RMAN backup and PDB XML metadata file)
<<<
1) Copies the data files from PDB$SEED 
2) Creates tablespaces SYSTEM, SYSAUX 
3) Creates a full catalog including metadata pointing to Oracle-supplied objects 
4) Creates common users: 
>	– Superuser SYS 
>	– SYSTEM 
5) Creates a local user (PDBA) 
> granted local PDB_DBA role 
6) Creates a new default service 
7) After PDB creation make sure TNS entry is created 
> CONNECT sys/oracle@pdb2 AS SYSDBA 
> CONNECT oracle/oracle@pdb2 
<<<
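If you'd rather script it than use DBCA, here's a minimal SQL sketch of the same create-from-seed operation (the PDB name, admin user/password, and FILE_NAME_CONVERT paths are made-up examples; with OMF/DB_CREATE_FILE_DEST set you can drop the convert clause):
{{{
-- from the root container, as a user with CREATE PLUGGABLE DATABASE privilege
ALTER SESSION SET CONTAINER = cdb$root;

-- copies the PDB$SEED data files and creates the local admin user with the PDB_DBA role
CREATE PLUGGABLE DATABASE pdb2
  ADMIN USER pdbadmin IDENTIFIED BY oracle
  FILE_NAME_CONVERT = ('/u01/app/oracle/oradata/CDB1/pdbseed/',
                       '/u01/app/oracle/oradata/CDB1/pdb2/');

-- the new PDB starts in MOUNTED state, so open it
ALTER PLUGGABLE DATABASE pdb2 OPEN;
}}}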

! @@Plug a non-CDB@@ in a CDB
options:
* TTS
* full export/import
* TDB (transportable database)
* DBMS_PDB package
* Clone a Remote Non-CDB (you can do it remotely)
* replication (Golden Gate)
<<<
using DBMS_PDB package below (running on the same server):
{{{

Cleanly shut down the non-CDB and start it in read-only mode.

sqlplus / as sysdba
SHUTDOWN IMMEDIATE;
STARTUP OPEN READ ONLY;

Describe the non-CDB using the DBMS_PDB.DESCRIBE procedure

BEGIN
  DBMS_PDB.DESCRIBE(
    pdb_descr_file => '/tmp/db12c.xml');
END;
/

Shutdown the non-CDB database.
SHUTDOWN IMMEDIATE;

Connect to an existing CDB and create a new PDB using the file describing the non-CDB database
CREATE PLUGGABLE DATABASE pdb4 USING '/tmp/db12c.xml'
  COPY;

ALTER SESSION SET CONTAINER=pdb4;
@$ORACLE_HOME/rdbms/admin/noncdb_to_pdb.sql

ALTER SESSION SET CONTAINER=pdb4;
ALTER PLUGGABLE DATABASE OPEN;

08:24:03 SYS@cdb21> ALTER SESSION SET CONTAINER=pdb4;

Session altered.

08:24:23 SYS@cdb21>
08:24:24 SYS@cdb21>
08:24:24 SYS@cdb21> select name from v$datafile;

NAME
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+DATA/CDB2/DATAFILE/undotbs1.340.868397457
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/system.391.868520259
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/sysaux.390.868520259
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/users.389.868520259

08:24:29 SYS@cdb21> conn / as sysdba
Connected.
08:24:41 SYS@cdb21> select name from v$datafile
08:24:45   2  ;

NAME
-----------------------------------------------------------------------------------------------------------------------------------------------------------
+DATA/CDB2/DATAFILE/system.338.868397411
+DATA/CDB2/DATAFILE/sysaux.337.868397377
+DATA/CDB2/DATAFILE/undotbs1.340.868397457
+DATA/CDB2/FD9AC20F64D244D7E043B6A9E80A2F2F/DATAFILE/system.346.868397513
+DATA/CDB2/DATAFILE/users.339.868397457
+DATA/CDB2/FD9AC20F64D244D7E043B6A9E80A2F2F/DATAFILE/sysaux.345.868397513
+DATA/CDB2/DATAFILE/undotbs2.348.868397775
+DATA/CDB2/DATAFILE/undotbs3.349.868397775
+DATA/CDB2/DATAFILE/undotbs4.350.868397775
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/system.391.868520259
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/sysaux.390.868520259
+DATA/CDB2/0C1D0762158FE2B6E053AA08A8C0D1F5/DATAFILE/users.389.868520259

12 rows selected.

08:24:46 SYS@cdb21> select FILE_NAME, TABLESPACE_NAME, FILE_ID, con_id from cdb_data_files order by con_id ;

FILE_NAME
-----------------------------------------------------------------------------------------------------------------------------------------------------------
TABLESPACE_NAME                   FILE_ID     CON_ID
------------------------------ ---------- ----------
+DATA/CDB2/DATAFILE/system.338.868397411
SYSTEM                                  1          1

+DATA/CDB2/DATAFILE/sysaux.337.868397377
SYSAUX                                  3          1

+DATA/CDB2/DATAFILE/undotbs1.340.868397457
UNDOTBS1                                4          1

+DATA/CDB2/DATAFILE/undotbs4.350.868397775
UNDOTBS4                               10          1

+DATA/CDB2/DATAFILE/undotbs2.348.868397775
UNDOTBS2                                8          1

+DATA/CDB2/DATAFILE/undotbs3.349.868397775
UNDOTBS3                                9          1

+DATA/CDB2/DATAFILE/users.339.868397457
USERS                                   6          1


7 rows selected.
}}}
<<<

! @@Clone a PDB@@ from another PDB
@@through SQL*Plus@@
<<<
{{{
This technique copies a source PDB and plugs the copy into a CDB. Here the source
PDB is in the local CDB.
The steps to clone a PDB within the same CDB are the following:
1. In init.ora, set DB_CREATE_FILE_DEST='PDB3dir' (OMF) or
PDB_FILE_NAME_CONVERT='PDB1dir','PDB3dir' (non-OMF).
2. Connect to the root of the CDB as a common user with the CREATE PLUGGABLE DATABASE
privilege.
3. Quiesce the source PDB used for the clone: close it with ALTER PLUGGABLE
DATABASE pdb1 CLOSE, then reopen it read-only with ALTER PLUGGABLE DATABASE
pdb1 OPEN READ ONLY.
4. Clone it with CREATE PLUGGABLE DATABASE pdb3 FROM pdb1.
5. Then open the new pdb3 with ALTER PLUGGABLE DATABASE pdb3 OPEN.
If you do not use OMF, in step 4 use CREATE PLUGGABLE DATABASE with the
clause FILE_NAME_CONVERT=('pdb1dir','pdb3dir') to define the directory of the
source files to copy from PDB1 and the target directory for the new files of PDB3.

quick step by step

alter session set container=cdb$root;
show con_name
set db_create_file_dest

15:09:04 SYS@orcl> ALTER PLUGGABLE DATABASE pdb2 close;

Pluggable database altered.

15:09:30 SYS@orcl> ALTER PLUGGABLE DATABASE pdb2 open read only;

Pluggable database altered.

15:09:40 SYS@orcl> CREATE PLUGGABLE DATABASE PDB3 FROM PDB2;

Pluggable database created.

15:12:25 SYS@orcl> ALTER PLUGGABLE DATABASE pdb3 open;

Pluggable database altered.

15:12:58 SYS@orcl> select CON_ID, dbid, NAME, OPEN_MODE from v$pdbs;

    CON_ID NAME                           OPEN_MODE
---------- ------------------------------ ----------
         2 PDB$SEED                       READ ONLY
         3 PDB1                           READ WRITE
         4 PDB2                           READ ONLY
         5 PDB3                           READ WRITE

 select FILE_NAME, TABLESPACE_NAME, FILE_ID, con_id from cdb_data_files order by con_id ;

15:13:33 SYS@orcl> show parameter db_create

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_create_file_dest                  string      /u01/app/oracle/oradata
db_create_online_log_dest_1          string
db_create_online_log_dest_2          string
db_create_online_log_dest_3          string
db_create_online_log_dest_4          string
db_create_online_log_dest_5          string
15:15:14 SYS@orcl>
15:15:17 SYS@orcl>
15:15:17 SYS@orcl> show parameter file_name

NAME                                 TYPE        VALUE
------------------------------------ ----------- ------------------------------
db_file_name_convert                 string
log_file_name_convert                string
pdb_file_name_convert                string

ALTER PLUGGABLE DATABASE pdb2 close;
ALTER PLUGGABLE DATABASE pdb2 open;

CONNECT sys/oracle@pdb3 AS SYSDBA
CONNECT oracle/oracle@pdb3 
}}}
<<<

! @@Plug an unplugged PDB@@ into another CDB
@@quickest is DBCA, just do the unplug and plug from the UI@@ (a scripted SQL sketch follows below)
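If you need to script it instead, a minimal unplug/plug sketch (the PDB name and XML manifest path are made-up examples; run the first part in the source CDB and the rest in the target CDB):
{{{
-- source CDB: close the PDB and unplug it into an XML manifest
ALTER PLUGGABLE DATABASE pdb5 CLOSE IMMEDIATE;
ALTER PLUGGABLE DATABASE pdb5 UNPLUG INTO '/tmp/pdb5.xml';
DROP PLUGGABLE DATABASE pdb5 KEEP DATAFILES;

-- target CDB: optionally verify compatibility before plugging in
SET SERVEROUTPUT ON
DECLARE
  l_ok BOOLEAN;
BEGIN
  l_ok := DBMS_PDB.CHECK_PLUG_COMPATIBILITY(pdb_descr_file => '/tmp/pdb5.xml');
  DBMS_OUTPUT.PUT_LINE(CASE WHEN l_ok THEN 'compatible' ELSE 'check PDB_PLUG_IN_VIOLATIONS' END);
END;
/

CREATE PLUGGABLE DATABASE pdb5 USING '/tmp/pdb5.xml' NOCOPY;
ALTER PLUGGABLE DATABASE pdb5 OPEN;
}}}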






! References:
http://oracle-base.com/articles/12c/multitenant-create-and-configure-pluggable-database-12cr1.php














http://kevinclosson.wordpress.com/2012/02/12/how-many-non-exadata-rac-licenses-do-you-need-to-match-exadata-performance/
{{{
kevinclosson
February 14, 2012 at 8:52 pm
Actually, Matt, I see nothing wrong with what the rep said. A single Exadata database grid host can drive a tremendous amount of storage throughput but it can only eat 3.2GB/s since there is but a single 40Gb HCA port active on each host. A single host can drive the storage grid nearly to saturation via Smart Scan…but as soon as the data flow back to the host approaches 3.2GB/s the Smart Scan will start to throttle. In fact a single session (non-Parallel Query) can drive Smart Scan to well over 10GB/s in a full rack but, in that case you’d have a single foreground process on a single core of WSM-EP so there wouldn’t be sufficient bandwidth to ingest much data..about 250MB/s can flow into a single session performing a Smart Scan. So the hypothetical there would be Smart Scan is churning through, let’s say, 10GB/s and Smart Scan is whittling down the payload by about 9.75GB/s through filtration and projection. Those are very close to realistic numbers I’ve just cited but I haven’t measured those sort of “atomics” in a year so I’m going by memory. Let’s say give or take 5% on my numbers.
}}}

http://forums.theregister.co.uk/forum/1/2011/12/12/ibm_vs_oracle_data_centre_optimisation/
{{{
Exadata: 2 Grids, 2 sets of roles.
>The Exadata storage nodes compress database files using a hybrid columnar algorithm so they take up less space and can be searched more quickly. They also run a chunk of the Oracle 11g code, pre-processing SQL queries on this compressed data before passing it off to the full-on 11g database nodes.
Exadata cells do not compress data. Data compression is done at load time (in the direct path) and compression (all varieties not just HCC) is code executed only on the RAC grid CPUS. Exadata users get no CPU help from the 168 cores in the storage grid when it comes to compressing data.
Exadata cells can, however, decompress HCC data (but not the other types of compressed data). I wrote "can" because cells monitor how busy they are and are constantly notified by the RAC servers about their respective CPU utilization. Since decompressing HCC data is murderously CPU-intensive the cells easily go processor-bound. At that time cells switch to "pass-through" mode shipping up to 40% of the HCC blocks to the RAC grid in compressed form. Unfortunately there are more CPUs in the storage grid than the RAC grid. There is a lot of writing on this matter on my blog and in the Expert Oracle Exadata book (Apress).
Also, while there are indeed 40Gb DDR InfiniBand paths to/from the RAC grid and the storage grid, there is only 3.2GB/s usable bandwidth for application payload between these grids. Therefore, the aggregate maximum data flow between the RAC grid and the cells is 25.6GB/s (3.2x8). There are 8 IB HCAs in either X2 model as well so the figure sticks for both. In the HP Oracle Database Machine days that figure was 12.8GB/s.
With a maximum of 25.6 GB/s for application payload (Oracle's iDB protocol as it is called) one has to quickly do the math to see the mandatory data reduction rate in storage. That is, if only 25.6 GB/s fits through the network between these two grids yet a full rack can scan combined HDD+FLASH at 75 GB/s then you have to write SQL that throws away at least 66% of the data that comes off disk. Now, I'll be the first to point out that 66% payload reduction from cells is common. Indeed, the cells filter (WHERE predicate) and project columns (only the cited and join columns need shipped). However, compression changes all of that.
If scanning HCC data on a full rack Exadata configuration, and that data is compressed at the commonly cited compression ratio of 10:1 then the "effective" scan rate is 750GB/s. Now use the same predicates and cite the same columns and you'll get 66% reduced payload--or 255GB/s that needs to flow over iDB. That's about 10x over-subscription of the available 25.6 GB/s iDB bandwidth. When this occurs, I/O is throttled. That is, if the filtered/projected data produced by the cells is greater than 25.6GB/s then I/O wanes. Don't expect 10x query speedup because the product only has to perform 10% the I/O it would in the non-compressed case (given a HCC compression ratio of 10:1).
That is how the product works. So long as your service levels are met, fine. Just don't expect to see 75GB/s of HCC storage throughput with complex queries because this asymmetrical MPP architecture (Exadata) cannot scale that way (for more info see: http://bit.ly/tFauDA )
}}}


http://kevinclosson.wordpress.com/2011/11/23/mark-hurd-knows-cios-i-know-trivia-cios-may-not-care-about-either-hang-on-im-booting-my-cell-phone/#comment-37527
{{{
kevinclosson
November 28, 2011 at 7:09 pm
“I can see the shared nothing vs shared everything point in a CPU + separate storage perspective.”

…actually, I don’t fester about with the shared-disk versus shared nothing as I really don’t think it matters. It’s true that Real Application Clusters requires shared disk but that is not a scalability hindrance–so long as one works out the storage bandwidth requirements–a task that is not all that difficult with modern storage networking options. So long as ample I/O flow is plumbed into RAC it scales DW/BI workloads. It is as simple as that. On the other hand, what doesn’t scale is asymmetry. Asymmetry has never scaled as would be obvious to even the casual observer. As long as all code can run on all CPUs (symmetry) scalability is within reach. What I’m saying is that RAC actually has better scalability characteristics when running with conventional storage than with Exadata! That’s a preposterous statement to the folks who don’t actually know the technology, as well as those who are dishonest about the technology, but obvious to the rest of us. It’s simple computer science. One cannot take the code path of query processing, chop it off at the knees (filtration/projection) and offload that to some arbitrary percentage of your CPU assets and pigeon-hole all the rest of the code to the remaining CPUs and cross fingers.

A query cannot be equally CPU-intensive in all query code all the time. There is natural ebb and tide. If the query plan is at the point of intensive join processing it is not beneficial to have over fifty percent of the CPUs in the rack unable to process join code (as is the case with Exadata).

To address this sort of ebb/tide imbalance Oracle has “released” a “feature” referred to as “passthrough” where Exadata cells stop doing their value-add (filtration and HCC decompression) for up to about 40% of the data flowing off storage when cells get too busy (CPU-wise). At that point they just send unfiltered, compressed data to the RAC grid. The RAC grid, unfortunately, has less CPU cores than the storage grid and has brutally CPU-intensive work of its own to do (table join, sort, agg). “Passthrough” is discussed in the Expert Oracle Exadata (Apress) book.

This passthrough feature does allow water to find its level, as it were. When Exadata falls back to passthrough mode the whole configuration does indeed utilize all CPU and since idle CPU doesn’t do well to increase query processing performance this is a good thing. However, if Exadata cells stop doing the “Secret Sauce” (a.k.a., Offload Processing) when they get busy then why not just build a really large database grid (e.g., with the CPU count of all servers in an Exadata rack) and feed it with conventional storage? That way all CPU power is “in the right place” all the time. Well, the answer to that is clearly RAC licensing. Very few folks can afford to license enough cores to run a large enough RAC grid to make any of this matter. Instead they divert some monies that could go for a bigger database grid into “intelligent storage” and hope for the best.
}}}



http://www.snia.org/sites/default/education/tutorials/2008/fall/networking/DrorGoldenberg-Fabric_Consolidation_InfiniBand.pdf
3.2 GB/s unidirectional theoretical limit; roughly the same 3.2 GB/s is measured in practice due to server IO limitations.
(The arithmetic lines up: a 4x QDR link signals at 40 Gbps, which after 8B/10B encoding carries 32 Gbps, or 4 GB/s of data per direction; the PCIe x8 slot the HCA sits in tops out at around the same 3.2 GB/s of usable payload.)


http://www.it-einkauf.de/images/PDF/677C777.pdf
{{{
INFINIBAND PHYSICAL-LAYER CHARACTERISTICS 
The InfiniBand physical-layer specification supports three data rates, designated 1X, 4X, and 12X, over both copper and fiber optic media. 
The base data rate, 1X single data rate (SDR), is clocked at 2.5 Gbps and is transmitted over two pairs of wires—transmit and receive—and 
yields an effective data rate of 2 Gbps full duplex (2 Gbps transmit, 2 Gbps receive). The 25 percent difference between data rate and  
clock rate is due to 8B/10B line encoding that dictates that for every 8 bits of data transmitted, an additional 2 bits of transmission 
overhead is incurred. 
}}}

infiniband cabling issues
{{{
InfiniBand cable presents a challenge within this environment because the cables are considerably thicker, heavier, and shorter in length 
to mitigate the effects of cross-talk and signal attenuation and achieve low bit error rates (BERs). To assure the operational integrity and 
performance of the HPC cluster, it is critically important to maintain the correct bend radius, or the integrity of the cable can be 
compromised such that the effects of cross-talk introduce unacceptable BERs. 
To address these issues, it is essential to thoroughly plan the InfiniBand implementation and provide a good cable management solution 
that enables easy expansion and replacement of failed cables and hardware. This is especially important when InfiniBand 12X or DDR 
technologies are being deployed because the high transmission rates are less tolerant to poor installation practices. 
}}}

http://www.redbooks.ibm.com/abstracts/tips0456.html
A single PCI Express serial link is a dual-simplex connection using two pairs of wires, one pair for transmit and one pair for receive, and can only transmit one bit per cycle. Although this sounds limiting, it can transmit at the extremely high speed of 2.5 Gbps, which equates to a burst mode of 320 MBps on a single connection. These two pairs of wires are called a lane.
{{{
Table: PCI Express maximum transfer rate
Lane width	Clock speed	Throughput (duplex, bits)	Throughput (duplex, bytes)	Initial expected uses
x1	2.5 GHz	5 Gbps	400 MBps	Slots, Gigabit Ethernet
x2	2.5 GHz	10 Gbps	800 MBps	
x4	2.5 GHz	20 Gbps	1.6 GBps	Slots, 10 Gigabit Ethernet, SCSI, SAS
x8	2.5 GHz	40 Gbps	3.2 GBps	
x16	2.5 GHz	80 Gbps	6.4 GBps	Graphics adapters
}}}


http://www.aiotestking.com/juniper/2011/07/when-using-a-40-gbps-switch-fabric-how-much-full-duplex-bandwidth-is-available-to-each-slot/
{{{
When using a 40 Gbps switch fabric, how much full duplex bandwidth is available to each slot?
A.
1.25 Gbps
}}}

Sun Blade 6048 InfiniBand QDR Switched Network Express Module Introduction
http://docs.oracle.com/cd/E19914-01/820-6705-10/chapter1.html
{{{
IB transfer rate (maximum)	
40 Gbps (QDR) per 4x IB port for the Sun Blade X6275 server module and 20 Gbps (DDR) per 4x IB port for the Sun Blade X6270 server module. There are two 4x IB ports per server module.

1,536 Gbps aggregate throughput
}}}


''email with Kevin''
<<<
on Exadata the 3.2 is established by the PCI slot the HCA is sitting in. I don't scrutinize QDR IB these days. It would be duplex...would have to look it up.
<<<

wikipedia
<<<
http://en.wikipedia.org/wiki/InfiniBand where it mentioned about "The SDR connection's signalling rate is 2.5 gigabit per second (Gbit/s) in each direction per connection"
<<<

''The flash and HCA cards use PCIe x8'' (matching the x8 row in the PCI Express table above: 40 Gbps, ~3.2 GBps duplex; the same 3.2 figure cited throughout)
http://jarneil.wordpress.com/2012/02/02/upgradingdowngrading-exadata-ilom-firmware/
http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&ved=0CFkQFjAA&url=http%3A%2F%2Fhusnusensoy.files.wordpress.com%2F2010%2F10%2Foracle-exadata-v2-fast-track.pptx&ei=M4zTT_mvLcGC2AWpufi6Dw&usg=AFQjCNFMAJgvIx9QuD3513dWS9nETkeXqw
{{{
IB Switches
3 x 36-port managed switches as opposed to Exadata v1 (2+1).
2 “leaf”
1 “spine” switches
Spine switch is only available for Full Rack because it is for connecting multiple full racks side by side.
A subnet manager running on one switch discovers the topology of the network.
HCA
Each node (RAC & Storage Cell) has a PCIe x8 40 Gbit HCA with two ports
Active-Standby Intracard Bonding.
}}}

F20 PCIe Card
{{{
Not a SATA/SAS SSD drive but an x8 PCIe device providing a SATA/SAS interface.
4 Solid State Flash Disk Modules (FMods), each 24 GB in size
256 MB Cache
SuperCap Power Reserve (Energy Storage Module) provides write-back operation mode.
ESM should be enabled for optimal write performance
Should be replaced every two years.
Can be monitored using various tools like ILOM
Embedded SAS/SATA configuration will expose 16 (4 cards x 4 FMods) Linux devices:
/dev/sdn
4K sector boundary for FMods
Each FMod consists of several NAND modules; best performance is reached with multithreading (32+ threads/FMod etc)

}}}







{{{

How To Avoid ORA-04030/ORA-12500 In 32 Bit Windows Environment
  	Doc ID: 	Note:373602.1

How to convert a 32-bit database to 64-bit database on Linux?
  	Doc ID: 	Note:341880.1 	
  	


-- PAE/AWE
		
		
		The model used for extended memory access under a 32-bit Operating System entails a substantial performance penalty. With a 64-bit OS, however, a flat linear memory model is used, with no need for PAE to access memory above 4 GB. Improved performance will be experienced for database SGA sizes greater than 3 GB, due to elimination of PAE overhead.
		
		
		MAXIMUM OF 4 GB OF ADDRESSABLE MEMORY FOR THE 32 BIT ARCHITECTURE. THIS IS A MAXIMUM PER PROCESS. THAT IS, EACH PROCESS MAY ALLOCATE UP TO 4 GB OF MEMORY
		
		2GB for OS
		2GB for USER THREADS
		
		1st workaround on 4GB limit: 
			- To expand the total memory used by Oracle above 2 GB, the /3GB flag may be set in the boot.ini file.	
				With the /3GB flag set, only 1 GB is used for the OS, and 3 GB is available for all user threads, including the Oracle SGA. 
			
		2nd workaround on 4GB limit: 
			- use the PAE, Intel 32-bit processors such as the Xeon processor support PAGING ADDRESS EXTENSIONS for large memory support
				MS Windows 2000 and 2003 support PAE through ADDRESS WINDOWING EXTENSIONS (AWE). PAE/AWE may be enabled by setting the /PAE flag in the boot.ini file. 
				The "USE_INDIRECT_DATA_BUFFERS=TRUE" parameter must also be set in the Oracle initialization file. In addition, the DB_BLOCK_BUFFERS parameter must be used 
				instead of the DB_CACHE_SIZE parameter in the Oracle initialization file. With this method, Windows 2000 Server and Windows Server 2003 versions can support 
				up to 8 GB of total memory.
				Windows Advanced Server and Data Center versions support up to 64 GB of addressable memory with PAE/AWE.
			- One limitation of AWE is that only the Data Buffer component of the SGA may be placed in extended memory. Threads for other 
				SGA components such as the Shared Pool and the Large Pool, as well as the PGA and all Oracle user sessions must still fit inside 
				a relatively small memory area. THERE IS AN AWE_WINDOW_SIZE REGISTRY KEY PARAMETER THAT IS USED TO SET THE SIZE OF A KIND OF "SWAP" AREA IN THE SGA. <-- swap area in SGA
				This "swap" area is used for mapping data blocks in upper memory to a lower memory location. By default, 
				this takes an additional 1 GB of low memory. This leaves only 2 GB of memory for everything other than the Buffer cache, assuming 
				the /3GB flag is set. If the /3GB flag is not set, only 1 GB of memory is available for the non-Buffer Cache components.
			- Note that the maximum addressable memory was limited to 16 GB of RAM
				Some relief may be obtained by setting the /3GB flag as well as the /PAE flag in Oracle. This at least assures that up to 2 GB of memory is available 
				for the Large Pool, the Shared Pool, the PGA, and all user threads, after the AWE_WINDOW_SIZE parameter is taken into account. However, Microsoft 
				recommends that the /3GB flag not be set if the /AWE flag is set. This is due to the fact that the total amount of RAM accessible for ALL purposes 
				is limited to 16 GB if the /3GB flag is set. RAM ABOVE 16 GB SIMPLY "DISAPPEARS" FROM THE VIEW OF THE OS. For PowerEdge 6850 servers that can support 
				up to 64 GB of RAM, a limitation to only 16 GB of RAM is unacceptable.
					This will give you (/3GB is set):
						3-4GB 	for Buffer Cache
						1GB 	for the swap area
						2GB 	for everything other than the Buffer Cache
						1GB 	for OS
					This will give you (/3GB is not set):
						3-4GB 	for Buffer Cache
						1GB 	for the swap area
						1GB 	for everything other than the Buffer Cache
						2GB 	for OS
			- Performance Tuning Corporation Benchmark:
					This will give you (/3GB is set):
						11GB 	for Buffer Cache
						.75GB 	for the swap area (AWE_MEMORY_WINDOW..minimum size that allowed the database to start)
						2.25GB 	for everything other than the Buffer Cache
						1GB 	for OS
					This will give you (/3GB is not set):
						11GB 	for Buffer Cache
						.75GB 	for the swap area (AWE_MEMORY_WINDOW..minimum size that allowed the database to start)
						1.25GB 	for everything other than the Buffer Cache
						2GB 	for OS

}}}
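Putting the notes above together, a sketch of the two pieces of configuration involved (the partition path and buffer count are made-up examples; verify against your exact OS and DB versions):
{{{
# boot.ini (32-bit Windows 2000/2003): enables PAE and the 3GB user address space
#   multi(0)disk(0)rdisk(0)partition(1)\WINDOWS="Windows Server 2003" /PAE /3GB

# init.ora: place the buffer cache in AWE memory above 4 GB
USE_INDIRECT_DATA_BUFFERS=TRUE
DB_BLOCK_BUFFERS=1048576   # sized in blocks, not bytes; DB_CACHE_SIZE cannot be used with AWE
}}}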

Using Large Pages for Oracle on Windows 64-bit (ORA_LPENABLE) http://blog.ronnyegner-consulting.de/2010/10/19/using-large-pages-for-oracle-on-windows-64-bit-ora_lpenable/
http://www.sketchup.com/download
<<<
3D XPoint (cross point) memory, which will be sold under the name Optane
<<<

https://www.intel.com/content/www/us/en/architecture-and-technology/intel-optane-technology.html
https://www.intel.com/content/www/us/en/architecture-and-technology/optane-memory.html
https://www.computerworld.com/article/3154051/data-storage/intel-unveils-its-optane-hyperfast-memory.html
https://www.computerworld.com/article/3082658/data-storage/intel-lets-slip-roadmap-for-optane-ssds-with-1000x-performance.html






http://docs.oracle.com/cd/E11857_01/em.111/e16790/ha_strategy.htm#EMADM9613
http://www.ibm.com/developerworks/linux/library/l-4kb-sector-disks/ <-- good stuff
http://ubuntuforums.org/showthread.php?t=1854524
http://ubuntuforums.org/showthread.php?t=1685666
http://ubuntuforums.org/showthread.php?t=1768635

also see [[Get BlockSize of OS]]


''Oracle related links''
1.8.1.4 Support 4 KB Sector Disk Drives http://docs.oracle.com/cd/E11882_01/server.112/e22487/chapter1.htm#FEATURENO08747 
Planning the Block Size of Redo Log Files http://docs.oracle.com/cd/E11882_01/server.112/e25494/onlineredo002.htm#ADMIN12891
Specifying the Sector Size for Drives http://docs.oracle.com/cd/E11882_01/server.112/e18951/asmdiskgrps.htm#OSTMG10203
Microsoft support policy for 4K sector hard drives in Windows http://support.microsoft.com/kb/2510009
ATA 4 KiB sector issues https://ata.wiki.kernel.org/index.php/ATA_4_KiB_sector_issues
http://en.wikipedia.org/wiki/Advanced_format
http://martincarstenbach.wordpress.com/2013/04/29/4k-sector-size-and-grid-infrastructure-11-2-installation-gotcha/
http://flashdba.com/4k-sector-size/
http://flashdba.com/install-cookbooks/installing-oracle-database-11-2-0-3-single-instance-using-4k-sector-size/
http://flashdba.com/2013/04/12/strange-asm-behaviour-with-4k-devices/
http://flashdba.com/2013/05/08/the-most-important-thing-you-need-to-know-about-flash/
http://www.storagenewsletter.com/news/disk/217-companies-hdd-since-1956
http://www.theregister.co.uk/2013/02/04/ihs_hdd_projections/
Alert: (Fix Is Ready + Additional Steps!) : After SAN Firmware Upgrade, ASM Diskgroups ( Using ASMLIB) Cannot Be Mounted Due To ORA-15085: ASM disk "" has inconsistent sector size. [1500460.1]


Design Tradeoffs for SSD Performance http://research.cs.wisc.edu/adsl/Publications/ssd-usenix08.pdf
Enabling Enterprise Solid State Disks Performance http://repository.cmu.edu/cgi/viewcontent.cgi?article=1732&context=compsci
https://flashdba.com/?s=4kb











http://karlarao.wordpress.com/2009/12/31/50-sql-performance-optimization-scenarios/

{{{
ORACLE SQL Performance Optimization Series (1)

1. The types of ORACLE optimizer
2. Ways of accessing a table
3. Shared SQL statements

ORACLE SQL Performance Optimization Series (2)

4. Choose the most efficient ordering of table names in the FROM clause (only effective in the rule-based optimizer)
5. Order of join conditions in the WHERE clause
6. Avoid using '*' in the SELECT clause
7. Reduce the number of accesses to the database

ORACLE SQL Performance Optimization Series (3)

8. Use the DECODE function to reduce processing time
9. Consolidate simple, unrelated database accesses
10. Remove duplicate records
11. Replace DELETE with TRUNCATE where possible
12. Use COMMIT as much as possible

ORACLE SQL Performance Optimization Series (4)

13. Counting the number of records
14. Replace HAVING clauses with WHERE clauses
15. Reduce the number of tables queried
16. Use internal functions to improve SQL efficiency

ORACLE SQL Performance Optimization Series (5)

17. Use the table alias (Alias)
18. Replace IN with EXISTS
19. Replace NOT IN with NOT EXISTS

ORACLE SQL performance optimization Series (6)

20. Replace EXISTS with table joins
21. Replace DISTINCT with EXISTS
22. Recognizing 'inefficient execution' in SQL statements
23. Use the TKPROF tool to examine SQL performance

ORACLE SQL Performance Optimization Series (7)

24. Analysis of SQL statements with EXPLAIN PLAN

ORACLE SQL Performance Optimization Series (8)

25. Use indexes to improve efficiency
26. Index operations

ORACLE SQL Performance Optimization Series (9)

27. Choosing the driving (base) table
28. Indexes of equal rank
29. Equality comparisons vs. range comparisons
30. Ambiguous index levels

ORACLE SQL Performance Optimization Series (10)

31. Forcing index failure
32. Avoid calculations on indexed columns
33. Automatic index selection
34. Avoid using NOT on indexed columns
35. Substitute '>=' for '>'

ORACLE SQL Performance Optimization Series (11)

36. Replace OR with UNION (for indexed columns)
37. Replace OR with IN
38. Avoid IS NULL and IS NOT NULL on indexed columns

ORACLE SQL Performance Optimization Series (12)

39. Always use the leading column of the index
40. ORACLE internal operations
41. Replace UNION with UNION ALL (if possible)
42. Using hints (Hints)

ORACLE SQL Performance Optimization Series (13)

43. Replace ORDER BY with WHERE where possible
44. Avoid changing the type of indexed columns
45. Be careful with the WHERE clause

ORACLE SQL Performance Optimization Series (14)

46. Joins with multiple scans
47. Make the CBO use a more selective index
48. Avoid the use of resource-intensive operations
49. GROUP BY Optimization
50. Using dates
51. Use explicit cursors (CURSORs)
52. Optimize EXPORT and IMPORT
53. Separate tables and indexes

ORACLE SQL Performance Optimization Series (15)

Is EXISTS / NOT EXISTS always more efficient than IN / NOT IN?

ORACLE SQL Performance Optimization Series (16)

Why are my query results wrong when querying through a view?

ORACLE SQL Performance Optimization Series (17)

Which way of writing pagination SQL is efficient?

ORACLE SQL Performance Optimization Series (18)

Which is more efficient, COUNT(rowid) or COUNT(pk)?

ORACLE SQL Performance Optimization Series (19)

ORACLE data type implicit conversions

ORACLE SQL Performance Optimization Series (20)

Three things to watch out for when using an INDEX

ORACLE Tips (HINT) use (Part 1) (21)

ORACLE Tips (HINT) use (Part 2) (22)

Analysis of function-based index (Part 1) (23)

Analysis of function-based index (Part 2) (24)

How to achieve efficient pagination queries (25)

How ORACLE implements SELECT TOP N (26)
}}}
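As a quick illustration of item 18 above (replace IN with EXISTS), a sketch using the classic emp/dept demo tables; note that a modern CBO usually transforms both forms into the same semi-join, so the tip mattered mostly under the rule-based optimizer this series was written against:
{{{
-- correlated EXISTS: can stop at the first matching row per department
select d.deptno, d.dname
from   dept d
where  exists (select null from emp e where e.deptno = d.deptno);

-- equivalent IN form
select d.deptno, d.dname
from   dept d
where  d.deptno in (select e.deptno from emp e);
}}}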
http://highscalability.com/blog/2013/11/25/how-to-make-an-infinitely-scalable-relational-database-manag.html
http://dimitrik.free.fr/blog/archives/2013/11/mysql-performance-over-1m-qps-with-innodb-memcached-plugin-in-mysql-57.html

Average Active Sessions (AAS) is a metric of the database load. This value should not go above the CPU count; if it does, the database is either working very hard or waiting a lot for something. 

''The AAS relative to the CPU count is used as a yardstick for a possible performance problem (I suggest reading Kyle's stuff about this); a query sketch follows the list:''
{{{
    if AAS < 1 
      -- Database is not blocked
    AAS ~= 0 
      -- Database basically idle
      -- Problems are in the APP not DB
    AAS < # of CPUs
      -- CPU available
      -- Database is probably not blocked
      -- Are any single sessions 100% active?
    AAS > # of CPUs
      -- Could have performance problems
    AAS >> # of CPUS
      -- There is a bottleneck
}}}
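A quick sketch of checking that yardstick straight from ASH (the 1-minute window is arbitrary, and querying ASH requires the Diagnostics Pack):
{{{
-- AAS over the last minute vs the host CPU count
select (select count(*) / 60
        from   v$active_session_history
        where  sample_time > sysdate - interval '1' minute) as aas,
       (select value from v$osstat where stat_name = 'NUM_CPUS') as cpu_count
from   dual;
}}}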

''AAS Formula''
--
{{{
* AAS is either dbtime/elapsed
* or sample count/samples (one ASH sample per second)
* in the case of dba_hist_, the count is count*10 since only 1 in 10 samples is written out: (19751*10)/600 = 329.18
}}}
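Both variants as query sketches (the snap ids and 600s interval below are made-up examples; these views need the Diagnostics Pack):
{{{
-- dbtime/elapsed variant: DB time is in microseconds in the time model
select (e.value - b.value) / 1e6 / 600 as aas
from   dba_hist_sys_time_model b, dba_hist_sys_time_model e
where  b.stat_name = 'DB time' and e.stat_name = 'DB time'
and    b.snap_id = 495 and e.snap_id = 496
and    b.dbid = e.dbid and b.instance_number = e.instance_number;

-- count/samples variant from the persisted ASH: note the *10
select count(*) * 10 / 600 as aas
from   dba_hist_active_sess_history
where  snap_id = 496;
}}}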
<<showtoc>>

This Tiddler will show you a new interesting metric included in the performance graph of Enterprise Manager 11g.. which is the ''CPU Wait'' or ''CPU + CPU Wait''

a little background.. 

I've done an IO test with the intention of bringing the system down to its knees and characterizing the IO performance at that level of stress. At the time I wanted to know the IO performance of my R&D server http://www.facebook.com/photo.php?pid=5272015&l=d5f2be4166&id=552113028 (on which I intend to run lots of VMs) having 8GB memory, an IntelCore2Quad Q9500 & 5 x 1TB short stroked disks (on the outer 100GB area). I was able to build from it an LVM stripe that produced about 900+ IOPS & 300+ MB/s on my ''Orion'' and ''dbms_resource_manager.calibrate_io'' runs, and validated those numbers against the database I created by actually running ''256 parallel sessions'' doing SELECT * on a 300GB table http://goo.gl/PYYyH (the same disks are used but as ASM disks on the next 100GB area - short stroked). 
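For reference, the calibrate_io run mentioned above looks roughly like this (num_physical_disks matches the 5-disk stripe; the 20ms max_latency is just an example value):
{{{
SET SERVEROUTPUT ON
DECLARE
  l_max_iops PLS_INTEGER;
  l_max_mbps PLS_INTEGER;
  l_latency  PLS_INTEGER;
BEGIN
  DBMS_RESOURCE_MANAGER.CALIBRATE_IO(
    num_physical_disks => 5,
    max_latency        => 20,
    max_iops           => l_max_iops,
    max_mbps           => l_max_mbps,
    actual_latency     => l_latency);
  DBMS_OUTPUT.PUT_LINE('iops='||l_max_iops||' mbps='||l_max_mbps||' latency='||l_latency);
END;
/
}}}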

I'll start off by showing you how AAS is computed.. then detail how it is graphed, and show you the behavior of AAS on IO and CPU bound workloads.. 

The tools I used for graphing the AAS: 
* Enterprise Manager 11g
** both the real time and historical graphs
* ASH Viewer by Alexander Kardapolov http://j.mp/dNidrB 
** this tool samples from the ASH itself and graphs it.. so it allows me to check the correctness and compare it with the ''real time'' graph of Enterprise Manager
* MS Excel and awr_topevents.sql 
** this tool samples from the DBA_HIST views and graphs it.. so it allows me to check the correctness and compare it with the ''historical'' graph of Enterprise Manager

Let's get started.. 


! How AAS is computed

AAS is the abstraction of database load and you can get it by the following means... 

!!!! 1) From ASH
<<<
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZtyRXwwiOI/AAAAAAAABLA/BYOUYtXO1Vo/AASFromASH.png]]
<<<

!!!! 2) From DBA_HIST_ACTIVE_SESS_HISTORY
* In the case of DBA_HIST_, the ''sample count'' is sample count*10 since only 1 in 10 samples is written out
<<<
[img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TZtyRcp7m_I/AAAAAAAABLI/sLqztbLY3Mw/AASFromDBA_HIST.png]]
<<<

!!!! 3) From the AWR Top Events
* The Top Events section unions the output of ''dba_hist_system_event'' (all the events) with the ''CPU'' from the time model (''dba_hist_sys_time_model''), then filters only the ''top 5'', and does this across the SNAP_IDs (see the sketch after this section)
** To get the ''high level AAS'' you have to divide DB Time / Elapsed Time
** To get the ''AAS for the Top Events'', you have to divide the ''time'' (from event or cpu) by ''elapsed time''
* You can see below that we are having ''the same'' AAS numbers compared to the ASH reports 
<<<
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TZtyRdPqm3I/AAAAAAAABLE/o23FMIG1yeQ/AASFromAWRTop.png]]
<<<
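A sketch of that union for a single snap interval (snap ids 495/496 and the 600s elapsed time are the values from this test; take the top 5 rows of the output):
{{{
select e.event_name, (e.time_waited_micro - b.time_waited_micro) / 1e6 / 600 as aas
from   dba_hist_system_event b, dba_hist_system_event e
where  b.snap_id = 495 and e.snap_id = 496
and    b.event_id = e.event_id
and    b.dbid = e.dbid and b.instance_number = e.instance_number
union all
select 'CPU', (e.value - b.value) / 1e6 / 600
from   dba_hist_sys_time_model b, dba_hist_sys_time_model e
where  b.stat_name = 'DB CPU' and e.stat_name = 'DB CPU'
and    b.snap_id = 495 and e.snap_id = 496
and    b.dbid = e.dbid and b.instance_number = e.instance_number
order  by 2 desc;
}}}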


! How AAS is being graphed
I have a dedicated blog post on this topic.. http://karlarao.wordpress.com/2010/07/25/graphing-the-aas-with-perfsheet-a-la-enterprise-manager/

So we already know how we get the AAS, and how is it graphed.. ''so what's my issue?''

''Remember I mentioned this on the blog post above.. ?''
<<<
"So what’s the effect? mm… on a high CPU activity period you’ll notice that there will be a higher AAS on the Top Activity Page compared to Performance Page. Simply because ASH samples every second and it does that quickly on every active session (the only way to see CPU usage realtime) while the time model CPU although it updates quicker (5secs I think) than v$sysstat “CPU used by this session” there could still be some lag time and it will still be based on Time Statistics (one of two ways to calculate AAS) which could be affected by averages."
<<<
I'll expound on that with test cases included.. ''see below!''


! AAS behavior on an IO bound load
* This is the graph of an IO bound load using ASH Viewer, this will be similar to the graph you will see on ''real time'' view of the Enterprise Manager 11g
<<<
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TZ9Cp2Kc8aI/AAAAAAAABN0/1konJAJZMUo/highio-3.png]]
[img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TZt3yMWQUCI/AAAAAAAABLM/8d-I2RqvF3I/AASIObound.png]]
<<<
* This is the graph of the same workload using MS Excel and the script awr_topevents.sql, this will be the similar graph you will see on the ''historical'' view of the Enterprise Manager 11g
<<<
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TZ9FJ6cXxRI/AAAAAAAABN4/eWRs8SQd0ws/highio-4.png]]
<<<

As you can see from the images above and the numbers below.. the database is doing a lot of ''direct path read'' and we don't have a high load average. Looking at the OS statistics, though, this IO intensive workload shows high IO WAIT on the CPU.

Looking at the data below from AWR and ASH.. ''we see no discrepancies''.. now, let's compare this to the workload below where the database server is CPU bound and has a really high load average. 

''AAS Data from AWR''
<<<
[img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TZ8-nKFw2pI/AAAAAAAABNk/oozsoEgnmeE/highio-1.png]]
<<<

''AAS Data from ASH''
<<<
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TZ8-nFhmP7I/AAAAAAAABNo/x5kIF-HuhnY/highio-2.png]]
<<<


! AAS behavior on a CPU bound load

This is the Enterprise Manager 11g graph of a CPU bound load 
<<<
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZt8ZxUeEUI/AAAAAAAABLY/gmclSmutRVg/AASCPUbound.png]]
<<<
This is the ASH Viewer graph of a CPU bound load 
* The dark green color you see below (18:30 - 22:00) is actually the ''CPU Wait'' metric that you are seeing on the Enterprise Manager graph above
* The light green color on the end part of the graph (22:00) is the ''Scheduler wait - resmgr: cpu quantum'' 
* The small hump on the 16:30-17:30 time frame is the IO bound load test case
<<<
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TZ6emvgui7I/AAAAAAAABNI/fxVzQryIwKc/highcpu-4.png]]
<<<
Below are the data from AWR and ASH of the same time period ''(21:50 - 22:00)''.. see the high level and drill down numbers below 
... it seems like if the database server is ''high on CPU/high on runqueue'' or the ''"wait for CPU"'' appears.. then the AAS numbers from the AWR and ASH reports don't match anymore but I would expect ASH to be bigger because it has fine grained samples of 1 second. But as you can see (below).. 
* the ASH top events correctly accounted the CPU time ''(95.37 AAS)'' which was tagged as ''CPU + Wait  for CPU''
* while the AWR CPU seems to be idle ''(.2 AAS)''. 
And what's even more interesting is 
* the high level AAS on AWR is ''356.7'' 
* while on the ASH it is ''329.18'' 
that's a huge gap! Well that could be because of 
* the high DB Time ''(215947.8)'' on AWR 
* compared to what Sample Count ASH has ''(197510)''. 
Do you have any idea why is this happening? Interesting right? 

''AAS Data from AWR''
<<<
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZ6BdKu23hI/AAAAAAAABMw/Nuwg_qTt6m8/highcpu-1.png]]

[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZ6BdrU46FI/AAAAAAAABM4/6Inv_8_Z5dc/highcpu-2.png]]
<<<

''AAS Data from ASH''
<<<
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TZ8rp2UTbWI/AAAAAAAABNg/6VBzvJxxApM/highcpu-3.png]]
<<<

''A picture is worth a thousand words...'' - To clearly explain this behavior of ''CPU not properly accounted'' I'll show you the graph of the data samples

__''AWR Top Events with CPU "not properly" accounted''__
<<<
* This is the high level AAS we are getting from the ''DB Time/Elapsed Time'' from the AWR report across SNAP_IDs.. this output comes from the script ''awr_genwl.sql'' (AAS column - http://goo.gl/MUWr) notice that there are AAS numbers as high as 350 and above.. the second occurrence of 350+ is from the SNAP_ID 495-496 mentioned above..
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZ61tG_iQ0I/AAAAAAAABNY/iKAy7j4Y534/highcpu-5.png]]
* Drilling down on the AAS components of that high level AAS we have to graph the output of the ''awr_topevents.sql''... given that this is still the same workload, you see here that only the ''Direct Path Read'' is properly accounted and when you look at the CPU time it seems to be idle... thus, giving lower AAS than the image above..
* Take note that SNAP_ID 495 the AWR ''CPU'' seems to be idle (.2 AAS) which is what is happening on this image
* Also on the 22:00 period, the database stopped waiting on CPU and started to wait on ''Scheduler''.. and then it matched again the high level AAS from the image above (AAS range of 320).. Interesting right? 
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZ53u_cLWLI/AAAAAAAABMY/9QP2C4S7AUI/highcpu-6.png]]
* We will also have this same behavior on Enterprise Manager 11g when we go to the ''Top Activity page'' and change the ''Real Time'' to ''Historical''... see the similarities on the graph from MS Excel? So when you go ''Real Time'' you are actually pulling from ASH.. then when you go ''Historical'' you are just pulling the Top Timed events across SNAP_IDs and graphing it.. but when you have issues like CPU time not properly accounted you'll see a really different graph and if you are not careful and don't know what it means you may end up with bad conclusions.. 
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TZ6fz5UzkVI/AAAAAAAABNM/9xL8IukSM4A/highcpu-10.png]]
<<<

__''AWR Top Events with CPU "properly" accounted''__
<<<
* Now, this is really interesting... the graph shown below is from the ''Performance page'' and is also ''Historical'' but produced a different graph from the ''Top Activity page''... 
* Why and how did it account for the ''CPU Wait''? where did it pull the data that the ''Top Activity page'' missed? 
* This is an improvement in the Enterprise Manager! So I'm curious how is this happening...
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TZ6ogMsAp0I/AAAAAAAABNQ/b9dTIxATxoY/highcpu-11.png]]
<<<

__''ASH with CPU "properly" accounted (well.. I say, ALWAYS!)''__

From the graph above & below where the CPU is properly accounted, you see the AAS is consistent at the range of 320.. 
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TZ53uvK1xkI/AAAAAAAABMU/7HThzn4uoEo/highcpu-7.png]]
What makes ASH different is the proper accounting of the ''CPU'' AAS component, unlike the chart coming from awr_topevents.sql (mentioned in the AWR Top Events with CPU "not properly" accounted section) where there's no CPU accounted at all... this points at the DBA_HIST_SYS_TIME_MODEL ''DB CPU'' metric: when the database server is high on runqueue and there are already scheduling issues in the OS, ''ASH is even more reliable'' at accounting for all the CPU time.. 

Another thing that bothers me is why the ''DB Time'', when applied to the AAS formula, gives a much higher AAS value than ASH does... that could also mean ''the DB Time is another reliable source'' when the database server is high on runqueue.. 

If this is the case, from a pure AWR perspective... what I would do is have the output of ''awr_genwl.sql''.. then run the ''awr_topevents.sql''.. 
and then if I would see that my AAS is high on awr_genwl.sql with a really high "OS Load" and "CPU Utilization" and then if I compare it with the output of awr_topevents.sql and see a big discrepancy that would give me an idea that I'm experiencing the same issue mentioned here, and I would investigate further with the ASH data to solidify my conclusions.. 

If you are curious about the output of Time model statistics on SNAP_ID 495-496
the CPU values found here do not help either because they are low..

{{{
   DB CPU = 126.70 sec
   BG CPU = 4.32 sec
   OS CPU (osstat) = 335.71 sec

Statistic Name                                       Time (s) % of DB Time
------------------------------------------ ------------------ ------------
sql execute elapsed time                            215,866.2        100.0
DB CPU                                                  126.7           .1
parse time elapsed                                       62.8           .0
hard parse elapsed time                                  60.0           .0
PL/SQL execution elapsed time                            33.9           .0
hard parse (sharing criteria) elapsed time                9.7           .0
sequence load elapsed time                                0.6           .0
PL/SQL compilation elapsed time                           0.2           .0
connection management call elapsed time                   0.0           .0
repeated bind elapsed time                                0.0           .0
hard parse (bind mismatch) elapsed time                   0.0           .0
DB time                                             215,947.9
background elapsed time                               1,035.5
background cpu time                                       4.3
          -------------------------------------------------------------
}}}

''Now we move on by splitting the ASH AAS components into their separate areas..''
* the ''CPU'' 
* and ''USER IO'' 
see the charts below.. 

This just shows that there is something about ASH properly accounting the ''CPU + WAIT FOR CPU'' whenever the database server is high on runqueue or OS load average... as well as the ''DB Time''
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TZ53wDeLd4I/AAAAAAAABMc/G5lodk6IAqE/highcpu-8.png]]
this is the ''USER IO'' AAS.. same as what is accounted in awr_topevents.sql
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TZ53wKTIMVI/AAAAAAAABMg/dAihs-LYGfY/highcpu-9.png]]


So the big question for me is...

How do ASH and the Enterprise Manager performance page account for the "CPU + WAIT FOR CPU"? Even if you drill down on V$ACTIVE_SESSION_HISTORY you will not find this metric. So I'm really interested in where they pull the data.. :)


''update''
... and then I asked a couple of people, and I had a recent problem on a client site running on Exadata where I was troubleshooting their ETL runs. I was running 10046 for every run and found out that my unaccounted-for time is due to the CPU wait that is shown on this tiddler. So using Mr. Tools, and given that I'm having a similar workload.. I had an idea that the unaccounted-for time is the CPU wait. See the write up here http://www.evernote.com/shard/s48/sh/3ccc1e38-b5ef-46f8-bc75-371156ade4b3/69066fa2741f780f93b86af1626a1bcd , and I was right all along ;)


''AAS investigation updates:  Answered questions + bits of interesting findings''
http://www.evernote.com/shard/s48/sh/b4ecaaf2-1ceb-43ea-b58e-6f16079a775c/cb2e28e651c3993b325e66cc858c3935


''I've updated the awr_topevents.sql script to show CPU wait to solve the unnaccounted DB Time issue'' see the write up on the link below:
awr_topevents_v2.sql - http://www.evernote.com/shard/s48/sh/a64a656f-6511-4026-be97-467dccc82688/de5991c75289f16eee73c26c249a60bf



Thanks to the following people for reading/listening about this research, and for the interesting discussions and ideas around this topic: 
- Kyle Hailey, Riyaj Shamsudeen, Dave Abercrombie, Cary Millsap, John Beresniewicz


''Here's the MindMap of the AAS investigation'' http://www.evernote.com/shard/s48/sh/90cdf56f-da52-4dc5-91d0-a9540905baa6/9eb34e881a120f82f2dab0f5424208bf



! update (rmoug 2012 slides on cpu wait)
[img(100%,100%)[https://i.imgur.com/xArySP8.png]]
[img(100%,100%)[https://i.imgur.com/j4FiOwY.png]]
[img(100%,100%)[https://i.imgur.com/hw7ttDe.png]]
[img(100%,100%)[https://i.imgur.com/AQqrZSn.png]]
[img(100%,100%)[https://i.imgur.com/POHSIQ5.png]]
[img(100%,100%)[https://i.imgur.com/GXPupKb.png]]
[img(100%,100%)[https://i.imgur.com/94TlhTh.jpg]]
[img(100%,100%)[https://i.imgur.com/jtWvg7Z.png]]
[img(100%,100%)[https://i.imgur.com/PzDdJGs.png]]
[img(100%,100%)[https://i.imgur.com/SKmcj4K.png]]
[img(100%,100%)[https://i.imgur.com/MHTcbPD.png]]
[img(100%,100%)[https://i.imgur.com/83Jfspe.png]]













.



http://www.evernote.com/shard/s48/sh/a0875f07-26e6-4ec7-ab31-2d946925ef73/6d2fe9d6adc6f716a40ec87e35a0b264
https://blogs.oracle.com/RobertGFreeman/entry/exadata_support_for_acfs_and
''Further Reading:'' @@Brewer@@ (http://www.infoq.com/articles/cap-twelve-years-later-how-the-rules-have-changed) and @@Gilbert and Lynch@@ (http://groups.csail.mit.edu/tds/papers/Gilbert/Brewer2.pdf) on the CAP Theorem; @@Vogels@@ (http://queue.acm.org/detail.cfm?id=1466448) on Eventual Consistency, @@Hamilton@@ (http://perspectives.mvdirona.com/2010/02/24/ILoveEventualConsistencyBut.aspx) on its limitations, and @@Bailis and Ghodsi@@ (https://queue.acm.org/detail.cfm?id=2462076) on measuring it and more; and @@Sirer@@ (http://hackingdistributed.com/2013/03/23/consistency-alphabet-soup/) on the multiple meanings of consistency in Computer Science. @@Liveness manifestos@@ (http://cs.nyu.edu/acsys/beyond-safety/liveness.htm) has interesting definition variants for liveness and safety.

! Big Data 4Vs + 1 
<<<
Volume - scale at which data is generated 
Variety - different forms of data
Velocity - data arrives in continuous stream 
Veracity - uncertainty: data is not always accurate 
Value - immediacy and hidden relationships
<<<


! ACID
* redo and undo in Oracle provide ACID (see the SQL sketch after this list)
<<<
Atomic - all statements in the transaction complete successfully, or none do
Consistent - integrity rules. ACID consistency is all about database rules.
Isolated - locking
Durable - committed work is guaranteed to persist
<<<
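A tiny sketch of atomicity and durability in action (acid_demo is a throwaway table name for illustration):
{{{
create table acid_demo (id number);   -- hypothetical demo table
insert into acid_demo values (1);
savepoint before_second;
insert into acid_demo values (2);
rollback to savepoint before_second;  -- undo reverses just the second insert
commit;                               -- redo guarantees the first insert survives a crash
select count(*) from acid_demo;       -- returns 1
drop table acid_demo purge;
}}}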

* ACID http://docs.oracle.com/database/122/CNCPT/glossary.htm#CNCPT89623 The basic properties of a database transaction that all Oracle Database transactions must obey. ACID is an acronym for atomicity, consistency, isolation, and durability.
* Transaction http://docs.oracle.com/database/122/CNCPT/glossary.htm#GUID-212D8EA1-D704-4D7B-A72D-72001965CE45 Logical unit of work that contains one or more SQL statements. All statements in a transaction commit or roll back together. The use of transactions is one of the most important ways that a database management system differs from a file system.
* Oracle Fusion Middleware Developing JTA Applications for Oracle WebLogic Server - ACID Properties of Transactions http://docs.oracle.com/middleware/12212/wls/WLJTA/gstrx.htm#WLJTA117
http://cacm.acm.org/magazines/2011/6/108651-10-rules-for-scalable-performance-in-simple-operation-datastores/fulltext
http://www.slideshare.net/jkanagaraj/oracle-vs-nosql-the-good-the-bad-and-the-ugly
http://highscalability.com/blog/2009/11/30/why-existing-databases-rac-are-so-breakable.html
Databases in the wild file:///C:/Users/karl/Downloads/Databases%20in%20the%20Wild%20(1).pdf




! CAP theorem
* CAP is a tool to explain trade-offs in distributed systems.
<<<
Consistent: All replicas of the same data will have the same value across a distributed system. CAP consistency promises that every replica of the same logical value, spread across nodes in a distributed system, has exactly the same value at all times. Note that this is a logical guarantee, rather than a physical one: due to the speed of light, it may take some non-zero time to replicate values across a cluster, but the cluster can still present a logically consistent view by preventing clients from seeing different values at different nodes.
Available: All live nodes in a distributed system can process operations and respond to queries.
Partition Tolerant: The system is designed to operate in the face of unplanned network connectivity loss between replicas. 
<<<
https://en.wikipedia.org/wiki/CAP_theorem
https://dzone.com/articles/better-explaining-cap-theorem
https://cloudplatform.googleblog.com/2017/02/inside-Cloud-Spanner-and-the-CAP-Theorem.html
http://guyharrison.squarespace.com/blog/2010/6/13/consistency-models-in-non-relational-databases.html  <- good stuff 
http://www.datastax.com/2014/08/comparing-oracle-rac-and-nosql <- good stuff
http://docs.oracle.com/database/121/GSMUG/toc.htm , http://www.oracle.com/technetwork/database/availability/global-data-services-12c-wp-1964780.pdf <- Database Global Data Services Concepts and Administration Guide [[Global Data Services]] 
http://www.oracle.com/technetwork/database/options/clustering/overview/backtothefuture-2192291.pdf  <- good stuff  Back to the Future with Oracle Database 12c
https://blogs.oracle.com/MAA/tags/cap  <- two parts good stuff
https://www.percona.com/live/mysql-conference-2013/sites/default/files/slides/aslett%20cap%20theorem.pdf  <- very good stuff
<<<
[img(100%,100%)[ http://i.imgur.com/q1QEtGI.png ]]
<<<
http://blog.nahurst.com/visual-guide-to-nosql-systems
<<<
[img(100%,100%)[ http://i.imgur.com/I7jYbVD.png ]]
<<<

http://www.ctodigest.com/2014/distributed-applications/the-distributed-relational-database-shattering-the-cap-theorem/
https://www.infoq.com/articles/cap-twelve-years-later-how-the-rules-have-changed
Spanner, TrueTime and the CAP Theorem https://research.google.com/pubs/pub45855.html , https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45855.pdf
https://www.voltdb.com/blog/disambiguating-acid-and-cap <- difference between two Cs (voltdb founder)
https://martin.kleppmann.com/2015/05/11/please-stop-calling-databases-cp-or-ap.html <- nice, a lot of references! author of "Designing Data-Intensive Applications"
<<<
https://aphyr.com/posts/322-call-me-maybe-mongodb-stale-reads
http://blog.thislongrun.com/2015/04/cap-availability-high-availability-and_16.html
https://github.com/jepsen-io/knossos
https://aphyr.com/posts/288-the-network-is-reliable
http://dbmsmusings.blogspot.co.uk/2010/04/problems-with-cap-and-yahoos-little.html
https://codahale.com/you-cant-sacrifice-partition-tolerance/
https://www.somethingsimilar.com/2013/01/14/notes-on-distributed-systems-for-young-bloods/
http://henryr.github.io/cap-faq/
http://henryr.github.io/distributed-systems-readings/
<<<
http://blog.thislongrun.com/2015/03/the-confusing-cap-and-acid-wording.html
https://news.ycombinator.com/item?id=9285751
[img(100%,100%)[http://i.imgur.com/G9vV8Qh.png ]]
http://www.slideshare.net/AerospikeDB/acid-cap-aerospike
Next Generation Databases: NoSQL, NewSQL, and Big Data https://www.safaribooksonline.com/library/view/next-generation-databases/9781484213292/9781484213308_Ch09.xhtml#Sec2  
https://www.pluralsight.com/courses/cqrs-theory-practice
https://www.pluralsight.com/blog/software-development/relational-non-relational-databases
https://www.amazon.com/Seven-Concurrency-Models-Weeks-Programmers-ebook/dp/B00MH6EMN6/ref=mt_kindle?_encoding=UTF8&me=
https://en.wikipedia.org/wiki/Michael_Stonebraker#Data_Analysis_.26_Extraction
http://scaledb.blogspot.com/2011/03/cap-theorem-event-horizon.html

! Think twice before dropping ACID and throw your CAP away
https://static.rainfocus.com/oracle/oow19/sess/1552610610060001frc7/PF/AG_%20Think%20twice%20before%20dropping%20ACID%20and%20throw%20your%20CAP%20away%202019_09_16%20-%20oco_1568777054624001jIWT.pdf


! BASE (eventual consistency)
BASE (Basically Available, Soft state, Eventually consistent) is an acronym coined to contrast this approach with the ACID transactions of a traditional RDBMS described above.
http://www.allthingsdistributed.com/2008/12/eventually_consistent.html  <- amazon cto



! NRW notation 
NRW notation describes at a high level how a distributed database will trade off consistency, read performance and write performance.  NRW stands for:
N: the number of copies of each data item that the database will maintain. 
R: the number of copies that the application will access when reading the data item 
W: the number of copies of the data item that must be written before the write can complete.  
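
A worked example, assuming the common quorum rule that R + W > N guarantees that read and write sets overlap:
{{{
N=3, W=2, R=2  ->  R + W = 4 > N: every read quorum intersects every write quorum,
                   so a read always sees the latest acknowledged write
N=3, W=1, R=1  ->  R + W = 2 <= N: quorums can miss each other, reads may be stale
                   (eventual consistency), but latency is lowest
N=3, W=3, R=1  ->  write-all/read-one: cheap consistent reads, but writes block
                   if any replica is down
}}}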






! Database test

!! jepsen 
A framework for distributed systems verification, with fault injection
https://github.com/jepsen-io/jepsen
https://www.youtube.com/watch?v=tRc0O9VgzB0


!! sqllogictest 
https://github.com/gregrahn/sqllogictest











.






{{{
connect / as sysdba

set serveroutput on

show user;
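
-- mailserver_acl: drop, re-create, and assign a network ACL in one call;
-- errors in each step are caught and reported via dbms_output instead of being raised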

create or replace procedure mailserver_acl(
  aacl       varchar2,
  acomment   varchar2,
  aprincipal varchar2,
  aisgrant   boolean,
  aprivilege varchar2,
  aserver    varchar2,
  aport      number)
is
begin  
  begin
    DBMS_NETWORK_ACL_ADMIN.DROP_ACL(aacl);
     dbms_output.put_line('ACL dropped.....'); 
  exception
    when others then
      dbms_output.put_line('Error dropping ACL: '||aacl);
      dbms_output.put_line(sqlerrm);
  end;
  begin
    DBMS_NETWORK_ACL_ADMIN.CREATE_ACL(aacl,acomment,aprincipal,aisgrant,aprivilege);
    dbms_output.put_line('ACL created.....'); 
  exception
    when others then
      dbms_output.put_line('Error creating ACL: '||aacl);
      dbms_output.put_line(sqlerrm);
  end;  
  begin
    DBMS_NETWORK_ACL_ADMIN.ASSIGN_ACL(aacl,aserver,aport);
    dbms_output.put_line('ACL assigned.....');         
  exception
    when others then
      dbms_output.put_line('Error assigning ACL: '||aacl);
      dbms_output.put_line(sqlerrm);
  end;    
  commit;
  dbms_output.put_line('ACL committed.....'); 
end;
/
show errors



select acl, host, lower_port, upper_port from dba_network_acls;

ACL                                      HOST                           LOWER_PORT UPPER_PORT
---------------------------------------- ------------------------------ ---------- ----------
/sys/acls/IFSAPP-PLSQLAP-Permission.xml  haiapp09.mfg.am.mds.       59080      59080

 select acl, principal, privilege, is_grant from dba_network_acl_privileges;

ACL                                      PRINCIPAL                      PRIVILE IS_GR
---------------------------------------- ------------------------------ ------- -----
/sys/acls/IFSAPP-PLSQLAP-Permission.xml  IFSAPP                         connect true
/sys/acls/IFSAPP-PLSQLAP-Permission.xml  IFSSYS                         connect true



begin
  mailserver_acl(
    '/sys/acls/IFSAPP-PLSQLAP-Permission.xml',
    'ACL for used Email Server to connect',
    'IFSAPP',
    TRUE,
    'connect',
    'haiapp09.mfg.am.mds.',
    59080);    
end;
/


begin
   DBMS_NETWORK_ACL_ADMIN.ADD_PRIVILEGE('/sys/acls/IFSAPP-PLSQLAP-Permission.xml','IFSSYS',TRUE,'connect');
   commit;
end;
/
}}}
{{{

Summary:
> Implement Instance Caging
> Enable Parallel Force Query and Parallel Statement Queuing
> A database trigger has to be created on the Active Data Guard side for all databases to enable Parallel Force Query at the session level upon login
> Create a new Resource Management Plan to limit per-session parallelism to 4
> Enable IORM and set the objective to AUTO on the Storage Cells

Commands to implement the recommended changes:
> Steps 1 and 2 need to be executed on each database of the Active Data Guard environment
> Step 3 needs to be executed on all the Storage Cells; use dcli and execute only on the 1st storage cell if passwordless ssh is configured
> Step 4 needs to be executed on each database (ECC, EWM, GTS, APO) of the Primary site to create the new Resource Management Plan
> Step 5 needs to be executed on each database of the Active Data Guard environment to activate the Resource Management Plan

The behavior:
	instance caging is set to CPU_COUNT of 40 (83% max CPU utilization)
	parallel 4 will be set for all users logged in as ENTERPRISE, no need for hints
	although hints override the session settings, non-ENTERPRISE users will be throttled at the resource management layer to a PX of 4 even if hints are set
		the RM plan has a PX limit of 4 for OTHER_GROUPS
		we can set a higher limit (say 8) for the ENTERPRISE users so they can override the PX 4 to a higher value through hints
	this configuration will be done on all 4 databases

Switchover steps - just in case the 4 DBs will switch over to Exadata:
	disable the px trigger
	alter the resource plan back to the SAP primary plan


######################################################################

1) instance caging 

alter system set cpu_count=40 scope=both sid='*';
alter system set resource_manager_plan=default_plan; 

2) statement queueing and create trigger

alter system set parallel_force_local=false scope=both sid='*';
alter system set parallel_max_servers=128 scope=both sid='*';
alter system set parallel_servers_target=64 scope=both sid='*';
alter system set parallel_min_servers=64 scope=both sid='*';
alter system set "_parallel_statement_queuing"=true scope=both sid='*';

-- alter trigger sys.adg_pxforce_trigger disable;

-- the trigger checks that the ENTERPRISE user is logged on, that the database is running as PHYSICAL STANDBY, and that it is running on the X4DP cluster

CREATE OR REPLACE TRIGGER adg_pxforce_trigger
AFTER LOGON ON database
WHEN (USER in ('ENTERPRISE'))
BEGIN
IF (SYS_CONTEXT('USERENV','DATABASE_ROLE') IN ('PHYSICAL STANDBY'))
AND (UPPER(SUBSTR(SYS_CONTEXT ('USERENV','SERVER_HOST'),1,4)) IN ('X4DP'))
THEN
execute immediate 'alter session force parallel query parallel 4';
END IF;
END;
/
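
-- illustrative check (not part of the original runbook): confirm the trigger is enabled
-- select owner, trigger_name, status from dba_triggers where trigger_name = 'ADG_PXFORCE_TRIGGER';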


3) IORM AUTO

-- execute on each storage cell
cellcli -e list iormplan detail
cellcli -e alter iormplan objective = auto
cellcli -e alter iormplan active

-- use these commands if passwordless ssh is configured 
dcli -g ~/cell_group -l root 'cellcli -e list iormplan detail'
dcli -g ~/cell_group -l root 'cellcli -e alter iormplan objective = auto'
dcli -g ~/cell_group -l root 'cellcli -e alter iormplan active'

######################################################################

4) RM plan to be created on the primary site


exec DBMS_RESOURCE_MANAGER.CLEAR_PENDING_AREA;

BEGIN
DBMS_RESOURCE_MANAGER.CREATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.CREATE_PLAN(PLAN => 'px_force', COMMENT => 'force parallel query parallel 4');
DBMS_RESOURCE_MANAGER.CREATE_CONSUMER_GROUP(CONSUMER_GROUP => 'CG_ENTERPRISE',    COMMENT => 'CG for ENTERPRISE users');
DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(PLAN =>'px_force', GROUP_OR_SUBPLAN => 'CG_ENTERPRISE', COMMENT => 'Directive for ENTERPRISE users', PARALLEL_DEGREE_LIMIT_P1 => 4);
DBMS_RESOURCE_MANAGER.CREATE_PLAN_DIRECTIVE(PLAN =>'px_force', GROUP_OR_SUBPLAN => 'OTHER_GROUPS', COMMENT => 'Low priority users', PARALLEL_DEGREE_LIMIT_P1 => 4);
DBMS_RESOURCE_MANAGER.VALIDATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.SUBMIT_PENDING_AREA();
END;
/ 

begin
 dbms_resource_manager_privs.grant_switch_consumer_group(grantee_name => 'ENTERPRISE',consumer_group => 'CG_ENTERPRISE', grant_option => FALSE);
end;
/ 

begin
DBMS_RESOURCE_MANAGER.CREATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.SET_INITIAL_CONSUMER_GROUP ('ENTERPRISE', 'CG_ENTERPRISE');
DBMS_RESOURCE_MANAGER.VALIDATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.SUBMIT_PENDING_AREA();
END;
/


-- check config 

set wrap off
set head on
set linesize 300
set pagesize 132
col comments format a64

-- show current resource plan
select * from  V$RSRC_PLAN;

-- show all resource plans
select PLAN,NUM_PLAN_DIRECTIVES,CPU_METHOD,substr(COMMENTS,1,64) "COMMENTS",STATUS,MANDATORY 
from dba_rsrc_plans 
order by plan;

-- show consumer groups
select CONSUMER_GROUP,CPU_METHOD,STATUS,MANDATORY,substr(COMMENTS,1,64) "COMMENTS" 
from DBA_RSRC_CONSUMER_GROUPS 
order by consumer_group;

-- show  category
SELECT consumer_group, category
FROM DBA_RSRC_CONSUMER_GROUPS
ORDER BY category;

-- show mappings
col value format a30
select ATTRIBUTE, VALUE, CONSUMER_GROUP, STATUS 
from DBA_RSRC_GROUP_MAPPINGS
order by 3;

-- show mapping priority 
select * from DBA_RSRC_MAPPING_PRIORITY;

-- show directives 
SELECT plan,group_or_subplan,cpu_p1,cpu_p2,cpu_p3, PARALLEL_DEGREE_LIMIT_P1, status 
FROM dba_rsrc_plan_directives 
order by 1,3 desc,4 desc,5 desc;

-- show grants
select * from DBA_RSRC_CONSUMER_GROUP_PRIVS order by grantee;
select * from DBA_RSRC_MANAGER_SYSTEM_PRIVS order by grantee;

-- show scheduler windows
select window_name, resource_plan, START_DATE, DURATION, WINDOW_PRIORITY, enabled, active from dba_scheduler_windows;


5) enforce on the standby site
connect / as sysdba
--ALTER SYSTEM SET RESOURCE_MANAGER_PLAN = 'FORCE:px_force';



-- revert
connect / as sysdba
exec DBMS_RESOURCE_MANAGER.CLEAR_PENDING_AREA;

ALTER SYSTEM SET RESOURCE_MANAGER_PLAN = 'default_plan';

BEGIN
  DBMS_RESOURCE_MANAGER.CREATE_PENDING_AREA();
  DBMS_RESOURCE_MANAGER.DELETE_PLAN_CASCADE ('px_force');
DBMS_RESOURCE_MANAGER.VALIDATE_PENDING_AREA();
DBMS_RESOURCE_MANAGER.SUBMIT_PENDING_AREA();
END;
/
}}}



Setting up Swingbench for Oracle Autonomous Data Warehousing (ADW) http://www.dominicgiles.com/blog/files/7fd178b363b32b85ab889edfca6cadb2-170.html
https://www.accenture.com/_acnmedia/pdf-108/accenture-destination-autonomous-oracle-database.pdf


! exploring ADW
https://content.dsp.co.uk/exploring-adw-part-1-uploading-more-than-1mb-to-object-storage
https://content.dsp.co.uk/exploring-autonomous-data-warehouse-loading-data


! how autonomous is ADW 
https://indico.cern.ch/event/757894/attachments/1720580/2777513/8b_AutonomousIsDataWarehouse_AntogniniSchnider.pdf







.
<<showtoc>>


! compression not explicitly set

{{{

-- compression not explicitly set -- for some reason this resulted in BASIC compression
set timing on 
alter session set optimizer_ignore_hints = false;

create table SD_HECHOS_COBERTURA_PP_HCC_TEST
compress parallel as select /*+ NO_GATHER_OPTIMIZER_STATISTICS full(sd_hechos_cobertura_pp) */ * from sd_hechos_cobertura_pp;

}}}
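
A quick way to confirm which compression the CTAS actually produced is to check the dictionary (table name from the example above):
{{{
select table_name, compression, compress_for
from   dba_tables
where  table_name = 'SD_HECHOS_COBERTURA_PP_HCC_TEST';
}}}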


! compression explicitly set
{{{
-- compression explicitly set

set timing on 
alter session set optimizer_ignore_hints = false;

create table SD_HECHOS_COBERTURA_PP_HCC_TEST
compress for QUERY HIGH ROW LEVEL LOCKING  parallel as select /*+ NO_GATHER_OPTIMIZER_STATISTICS full(sd_hechos_cobertura_pp) */ * from sd_hechos_cobertura_pp;

}}}
https://docs.oracle.com/en/database/oracle/oracle-database/21/nfcon/automatic-operations-256569003.html
<<<
! Automatic Operations
    Automatic Indexing Enhancements
    Automatic Index Optimization
    Automatic Materialized Views
    Automatic SQL Tuning Set
    Automatic Temporary Tablespace Shrink
    Automatic Undo Tablespace Shrink
    Automatic Zone Maps
    Object Activity Tracking System
    Sequence Dynamic Cache Resizing
<<<


.
{{{

-- registry 
SELECT /*+  NO_MERGE  */ /* 1a.15 */
       x.*
	   ,c.name con_name
  FROM cdb_registry x
       LEFT OUTER JOIN v$containers c ON c.con_id = x.con_id
ORDER BY
       x.con_id,
	   x.comp_id;


-- registry history 
SELECT /*+  NO_MERGE  */ /* 1a.17 */
       x.*
	   ,c.name con_name
  FROM cdb_registry_history x
       LEFT OUTER JOIN v$containers c ON c.con_id = x.con_id
 ORDER BY 1
	   ,x.con_id;


-- registry hierarchy
SELECT /*+  NO_MERGE  */ /* 1a.18 */
       x.*
	   ,c.name con_name
  FROM cdb_registry_hierarchy x
       LEFT OUTER JOIN v$containers c ON c.con_id = x.con_id
 ORDER BY
       1, 2, 3;       
}}}
{{{
set lines 400 pages 2000
col MESSAGE_TEXT format a200
col ORIGINATING_TIMESTAMP format a40
col MESSAGE_ARGUMENTS format a20

SELECT originating_timestamp,
       MESSAGE_TEXT
FROM v$diag_alert_ext
WHERE component_id = 'rdbms'
  AND originating_timestamp >= to_date('2021/11/23 21:00', 'yyyy/mm/dd hh24:mi')
  AND originating_timestamp <= to_date('2021/11/24 14:00', 'yyyy/mm/dd hh24:mi')
ORDER BY originating_timestamp;
}}}



{{{
SQL> set lines 400 pages 2000
SQL> col MESSAGE_TEXT format a200
SQL> col ORIGINATING_TIMESTAMP format a40
SQL> col MESSAGE_ARGUMENTS format a20
SQL> 
SQL> SELECT originating_timestamp,
  2         MESSAGE_TEXT
  3  FROM v$diag_alert_ext
  4  WHERE component_id = 'rdbms'
  5    AND originating_timestamp >= to_date('2021/11/23 21:00', 'yyyy/mm/dd hh24:mi')
  6    AND originating_timestamp <= to_date('2021/11/24 14:00', 'yyyy/mm/dd hh24:mi')
  7  ORDER BY originating_timestamp;

ORIGINATING_TIMESTAMP                    MESSAGE_TEXT                                                                                                                                                                                            
---------------------------------------- --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
24-NOV-21 05.49.29.724000000 AM GMT      Space search: ospid:105880 starts dumping trace                                                                                                                                                         

}}}
https://github.com/oracle/data-warehouse-etl-offload-samples
Monitor the Performance of Autonomous Data Warehouse
https://docs.oracle.com/en/cloud/paas/autonomous-data-warehouse-cloud/user/monitor-performance-intro.html#GUID-54CCC1C6-C32E-47F4-8EB6-64CD6EDB5938


! also you can dump the ASH (PDB level) and graph it
* the service account is created on the root CDB

{{{
C##CLOUD$SERVICE
}}}
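
A minimal sketch of the dump itself (assuming sufficient privileges in the PDB; the resulting CSV can then be graphed in a tool of choice):
{{{
set lines 300 pages 0 colsep ','
spool ash_pdb_dump.csv
select sample_time, session_id, session_state, wait_class, event, sql_id
from   v$active_session_history
where  sample_time > sysdate - 1
order  by sample_time;
spool off
}}}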
https://docs.oracle.com/en/cloud/paas/autonomous-database/adbsa/unavailable-oracle-database-features.html#GUID-B6FB5EFC-4828-43F4-BA63-72DA74FFDB87
<<<
Database Features Unavailable in Autonomous Database
Lists the Oracle Database features that are not available in Autonomous Database. Additionally, database features designed for administration are not available.

List of Unavailable Oracle Features

Oracle Real Application Testing (Database Replay)

Oracle Real Application Security Administration Console (RASADM)

Oracle OLAP: Not available in Autonomous Database. See Deprecation of Oracle OLAP for more information.

Oracle R capabilities of Oracle Advanced Analytics

Oracle Industry Data Models

Oracle Database Lifecycle Management Pack

Oracle Data Masking and Subsetting Pack

Oracle Cloud Management Pack for Oracle Database

Oracle Multimedia: Not available in Autonomous Database and deprecated in Oracle Database 18c.

Oracle Sharding

Java in DB

Oracle Workspace Manager
<<<
https://wiki.archlinux.org/index.php/AHCI
http://en.wikipedia.org/wiki/AHCI
http://en.wikipedia.org/wiki/NCQ

Disks from the Perspective of a File System - TCQ,NCQ,4KSectorSize,MRAM http://goo.gl/eWUK7


Power5
Power6	<-- most advanced processor at the time, starting clock is 4 GHz
Power7	

Hardware Virtualization (LPAR)
1) Standard Partition 
	4 LPARs, each have its own dedicated resources (processor, memory)

2) Micropartition
	4 LPARs can utilize a pool of 8 processors
	2 LPARs can utilize 1 processor


Note:
- Dynamic allocation can happen:
	CPU	5 seconds
	Memory	1 minute
http://www.oraclerant.com/?p=8
{{{
# Oracle Database environment variables
umask 022
export ORACLE_BASE='/oracle/app/oracle'
export ORACLE_HOME="${ORACLE_BASE}/product/10.2.0/db_1"
export AIXTHREAD_SCOPE=S
export PATH="${ORACLE_HOME}/OPatch:${ORACLE_HOME}/bin:${PATH}"
# export NLS_LANG=language_territory.characterset
export LIBPATH=$ORACLE_HOME/lib:$LIBPATH
export TNS_ADMIN=$ORACLE_HOME/network/admin
}}}
http://www.scribd.com/doc/2153747/AIX-EtherChannel-Load-Balancing-Options
http://gjilevski.wordpress.com/2009/12/13/hardware-solution-for-oracle-rac-11g-private-interconnect-aggregating/
http://www.freelists.org/post/oracle-l/Oracle-10g-R2-RAC-network-configuration
! show system configuration
<<<
* show overall system config
{{{
prtconf
}}}
* to give the highest installed maintenance level
{{{
$ oslevel -r
6100-05
}}}
* to give the known recommended ML
{{{
$ oslevel -rq
Known Recommended Maintenance Levels
------------------------------------
6100-06
6100-05
6100-04
6100-03
6100-02
6100-01
6100-00
}}}
* To show you Service Packs levels as well 
{{{
$ oslevel -s
6100-05-03-1036
}}}
* amount of real memory 
{{{
lsattr -El sys0 -a realmem
realmem 21757952 Amount of usable physical memory in Kbytes False
}}}
* Displays the system model name. For example, IBM, 9114-275
{{{
uname -M

-- on p6
IBM,8204-E8A

-- on p7
IBM,8205-E6C
}}}
<<<

! get CPU information
* get number of CPUs
{{{
lscfg | grep proc

-- on p6
+ proc0                                                      Processor
+ proc2                                                      Processor
+ proc4                                                      Processor
+ proc6                                                      Processor
+ proc8                                                      Processor
+ proc10                                                     Processor
+ proc12                                                     Processor
+ proc14                                                     Processor

-- on p7
+ proc0                                                                          Processor
+ proc4                                                                          Processor
}}}
* get CPU speed
{{{
lsattr -El proc0

-- on p6
frequency   4204000000     Processor Speed       False
smt_enabled true           Processor SMT enabled False
smt_threads 2              Processor SMT threads False
state       enable         Processor state       False
type        PowerPC_POWER6 Processor type        False

-- on p7
frequency   3550000000     Processor Speed       False
smt_enabled true           Processor SMT enabled False
smt_threads 4              Processor SMT threads False
state       enable         Processor state       False
type        PowerPC_POWER7 Processor type        False
}}}

{{{

# lsdev -Cc processor
proc0  Available 00-00 Processor
proc2  Available 00-02 Processor
proc4  Available 00-04 Processor
proc6  Available 00-06 Processor
proc8  Available 00-08 Processor
proc10 Available 00-10 Processor
Which says 6 processors but the following command shows it is only a single 6-way card:
lscfg -vp |grep -ip proc |grep "PROC"
    6 WAY PROC CUOD :
    
The problem seems to revolve around what a CPU is these days - is it a chip, a core, or a single piece of a silicon wafer, with whatever resides on it counted as 1 or many.
IBM deem a core to be a CPU, so they would say your system has 6 processors.
They are all on one card and may all be in one MCM / chip, or there may be several MCMs / chips on that card, but you have a 6 CPU system there.
lsdev shows 6 processors, so AIX has configured 6 processors.
lscfg shows it is a CUoD 6 processor system, and as AIX has configured all 6, it shows all 6 are activated by a suitable POD code.
The Oracle wiki at orafaq.com shows Oracle licence the Standard Edition by CPU (definition undefined) and Enterprise by core (again undefined).
http://www.orafaq.com/wiki/Oracle_Licensing
Whatever you call a CPU or a core, I would say you have a 6-way / 6-processor system there, and the fact that all 6 may or may not be on one bit of silicon wafer will not make any difference.

#############################################################################

get number of processors, its name, physical location, Lists all processors
odmget -q"PdDvLn LIKE processor/*" CuDv

list specific processor, but it is more about Physical location etc, nothing about single/dual core etc
odmget -q"PdDvLn LIKE processor/* AND name=proc0" CuDv

#############################################################################

I've checked is on LPARs on two servers - p55A and p570 - both servers 8 CPUs and seems that in p55A there are 2 4-core CPUs and in 570 4 2-core CPUs.

$ lsattr -El sys0 -a modelname
modelname IBM,9133-55A Machine name False
$ lparstat -i|grep ^Active\ Phys
Active Physical CPUs in system : 8
$ lscfg -vp|grep WAY
4-WAY PROC CUOD :
4-WAY PROC CUOD :
$ lscfg -vp|grep proc
proc0 Processor
proc2 Processor
proc4 Processor
proc6 Processor
$

$ lsattr -El sys0 -a modelname
modelname IBM,9117-570 Machine name False
$ lparstat -i|grep ^Active\ Phys
Active Physical CPUs in system : 8
$ lscfg -vp|grep WAY
2-WAY PROC CUOD :
2-WAY PROC CUOD :
2-WAY PROC CUOD :
2-WAY PROC CUOD :
$ lscfg -vp|grep proc
proc0 Processor
proc2 Processor
proc4 Processor
proc6 Processor
$

#############################################################################

p550 with 2 quad-core processors (no LPARs):

/ #>lsattr -El sys0 -a modelname
modelname IBM,9133-55A Machine name False

/ #>lparstat -i|grep Active\ Phys
Active Physical CPUs in system : 8

/ #>lscfg -vp | grep WAY
2-WAY PROC CUOD :
2-WAY PROC CUOD :

/ #>lscfg -vp |grep proc
proc0 Processor
proc2 Processor
proc4 Processor
proc6 Processor
proc8 Processor
proc10 Processor
proc12 Processor
proc14 Processor

And the further detailed lscfg -vp output shows:
2-WAY PROC CUOD :
Record Name.................VINI
Flag Field..................XXPF
Hardware Location Code......U787B.001.DNWC2F7-P1-C9
Customer Card ID Number.....8313
Serial Number...............YL10HA68E008
FRU Number..................10N6469
Part Number.................10N6469
As you can see, the part number is 10N6469, which clearly is a quad-core cpu:
http://www.searchlighttech.com/searchResults.cfm?part=10N6469

#############################################################################

Power5 and Power6 processors are both Dual Core - Dual Threads.
The next Power7 should have 8 cores and each core can execute 4 threads (comes 2010) but less frequency (3.2Ghz max instead of 5.0Ghz on the power6).

#############################################################################

To get the information about the partition, enter the following command:
lparstat -i

#############################################################################

 lparstat -i
 lparstat
 lscfg | grep proc
 lsattr -El proc0
 uname -M
 lsattr -El sys0 -a realmem
 lscfg | grep proc
 lsdev -Cc processor
 lscfg -vp |grep -ip proc |grep "PROC"
 odmget -q"PdDvLn LIKE processor/*" CuDv
 odmget -q"PdDvLn LIKE processor/* AND name=proc0" CuDv
 odmget -q"PdDvLn LIKE processor/* AND name=proc14" CuDv
 lsattr -El sys0 -a modelname
 lparstat -i|grep ^Active\ Phys
 lscfg -vp|grep WAY
 lscfg -vp|grep proc
 lsattr -El sys0 -a modelname
 lparstat -i|grep Active\ Phys
 lscfg -vp | grep WAY
 lscfg -vp |grep proc
 lscfg -vp

#############################################################################

So the AIX box has 8 physical CPUs… now it's a bit tricky to get the real CPU% in AIX.
First you have to determine the CPUs of the machine:

$ prtconf
System Model: IBM,8204-E8A
Machine Serial Number: 10F2441
Processor Type: PowerPC_POWER6
Processor Implementation Mode: POWER 6
Processor Version: PV_6_Compat
Number Of Processors: 8
Processor Clock Speed: 4204 MHz
CPU Type: 64-bit
Kernel Type: 64-bit
LPAR Info: 2 nad0019aixp21
Memory Size: 21248 MB
Good Memory Size: 21248 MB
Platform Firmware level: Not Available
Firmware Version: IBM,EL350_132
Console Login: enable
Auto Restart: true
Full Core: false

Then, execute lparstat…
•	ent 2.30 is the entitled CPU capacity
•	psize is the # of physical CPUs in the shared pool
•	physc 4.42 means the CPU usage went above the entitled capacity because the partition is "Uncapped", so to get the real CPU% just do 4.42/8 = 55% utilization
•	the 55% utilization can be read against either the 8 physical CPUs or the 16 logical CPUs… since that's just the percentage used, I put 60% on the provisioning worksheet

$ lparstat 1 10000

System configuration: type=Shared mode=Uncapped smt=On lcpu=16 mem=21247 psize=8 ent=2.30

%user  %sys  %wait  %idle physc %entc  lbusy  vcsw phint
----- ----- ------ ------ ----- ----- ------ ----- -----
 91.4   7.6    0.8    0.3  3.94 171.1   29.9  4968  1352
 92.0   6.9    0.7    0.4  3.76 163.4   26.2  4548  1054
 93.1   6.0    0.5    0.3  4.42 192.3   33.2  4606  1316
 91.3   7.5    0.7    0.5  3.74 162.6   25.6  5220  1191
 93.4   5.7    0.6    0.3  4.07 176.9   28.7  4423  1239
 93.1   6.0    0.6    0.4  4.05 176.0   29.4  4709  1164
 92.3   6.7    0.6    0.5  3.46 150.2   24.8  4299   718
 92.2   6.9    0.6    0.4  3.69 160.6   27.9  4169   973
 91.9   7.3    0.5    0.3  4.06 176.5   33.2  4248  1233
}}}



! install IYs
{{{
To list all IYs
# instfix -i | pg
To show the filesets on a given IY
# instfix -avik IY59135
To commit a fileset
# smitty maintain_software
To list the fileset of an executable
# lslpp -w <full path of the executable>
To install an IY
# uncompress <file>
# tar -xvf <file>
# inutoc .
# smitty installp
}}}


! iostat

{{{
> iostat -sl

System configuration: lcpu=4 drives=88 ent=0.20 paths=176 vdisks=8

tty:      tin         tout    avg-cpu: % user % sys % idle % iowait physc % entc
          0.3         29.5               64.5  28.6    5.1      1.9   0.9  435.5

System: 
                           Kbps      tps    Kb_read   Kb_wrtn
                         30969.7     429.9   937381114927  200661442300

Disks:        % tm_act     Kbps      tps    Kb_read   Kb_wrtn
hdisk0           1.3      61.9       7.6   1479300432  794583660
...

> iostat -st

System configuration: lcpu=4 drives=88 ent=0.20 paths=176 vdisks=8

tty:      tin         tout    avg-cpu: % user % sys % idle % iowait physc % entc
          0.3         29.5               64.5  28.6    5.1      1.9   0.9  435.5

System: 
                           Kbps      tps    Kb_read   Kb_wrtn
                         30969.7     429.9   937381298349  200661442605

}}}
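
For reference, the flags in the next run (AIX iostat): -D prints the extended per-disk report (service times and queue statistics), -R resets the min/max values at each interval, -T adds a timestamp to each sample, and -l prints one disk per line.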

{{{
$ iostat -DRTl 10 100

System configuration: lcpu=16 drives=80 paths=93 vdisks=2

Disks:                     xfers                                read                                write                                  queue                    time
-------------- -------------------------------- ------------------------------------ ------------------------------------ -------------------------------------- ---------
                 %tm    bps   tps  bread  bwrtn   rps    avg    min    max time fail   wps    avg    min    max time fail    avg    min    max   avg   avg  serv
                 act                                    serv   serv   serv outs              serv   serv   serv outs        time   time   time  wqsz  sqsz qfull
hdisk3           0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk13          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk15         61.5   3.3M 162.0   3.2M  90.2K 158.4   6.4    0.2   60.0     0    0   3.5   3.3    0.7    4.6     0    0   0.5    0.0   15.7    0.0   0.1  53.6  16:05:30
hdisk14         67.3   3.4M 166.2   3.3M  67.7K 162.3   7.2    0.2   71.8     0    0   3.9   2.8    0.8    5.7     0    0   1.0    0.0   36.0    0.0   0.1  63.0  16:05:30
hdisk8          58.9   3.0M 165.2   2.9M 112.8K 160.6   5.6    0.2   57.1     0    0   4.6   3.0    0.6    5.5     0    0   0.4    0.0   18.8    0.0   0.1  43.2  16:05:30
hdisk12         57.6   3.4M 151.3   3.3M  91.8K 147.4   6.0    0.2   54.7     0    0   3.9   3.1    0.6    4.7     0    0   0.5    0.0   23.4    0.0   0.1  43.6  16:05:30
hdisk11          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk10         86.0   2.9M 144.9   2.9M  58.0K 141.4  12.7    0.3  109.3     0    0   3.5   2.8    0.8    5.1     0    0   5.3    0.0   82.6    0.0   0.1  86.2  16:05:30
hdisk9           0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk16          0.1 402.8    0.1   0.0  402.8    0.0   0.0    0.0    0.0     0    0   0.1   8.8    8.8    8.8     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk5           0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk18          1.3 391.7K  17.1   0.0  391.7K   0.0   0.0    0.0    0.0     0    0  17.1   1.0    0.5    6.2     0    0   0.0    0.0    0.1    0.0   0.0   0.1  16:05:30
hdisk7           0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk4          43.7   3.2M 150.8   3.2M  67.7K 147.0   4.0    0.3   27.6     0    0   3.8   2.9    0.7    5.0     0    0   0.3    0.0   19.4    0.0   0.0  26.1  16:05:30
hdisk17          0.3   1.2K   0.3   0.0    1.2K   0.0   0.0    0.0    0.0     0    0   0.3   7.2    5.3    8.2     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk6          67.8   3.0M 151.8   2.9M  45.1K 149.1   7.6    0.2   58.4     0    0   2.8   2.8    0.7    4.6     0    0   0.5    0.0   27.1    0.0   0.1  51.6  16:05:30
hdisk21          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk27          0.4   1.2K   0.3   0.0    1.2K   0.0   0.0    0.0    0.0     0    0   0.3  16.7    7.7   34.3     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk23         61.3   3.3M 178.8   3.3M  59.6K 175.9   5.8    0.2   63.7     0    0   2.9   2.9    0.8    5.7     0    0   0.8    0.0   61.8    0.0   0.1  57.6  16:05:30
hdisk1          64.5   3.2M 149.7   3.2M  48.3K 146.8   7.0    0.3   45.0     0    0   2.9   2.5    0.9    4.5     0    0   0.7    0.0   46.4    0.0   0.1  42.0  16:05:30
hdisk20         64.8   3.3M 148.6   3.2M  90.2K 145.0   7.1    0.3   52.5     0    0   3.5   2.7    0.9    4.9     0    0   1.0    0.0   41.7    0.0   0.1  49.8  16:05:30
hdisk22          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk28          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk19         42.6   3.5M 162.6   3.4M  68.9K 160.0   3.6    0.2   22.2     0    0   2.7   1.6    0.5    4.3     0    0   0.1    0.0    8.2    0.0   0.0  27.2  16:05:30

Disks:                     xfers                                read                                write                                  queue                    time
-------------- -------------------------------- ------------------------------------ ------------------------------------ -------------------------------------- ---------
                 %tm    bps   tps  bread  bwrtn   rps    avg    min    max time fail   wps    avg    min    max time fail    avg    min    max   avg   avg  serv
                 act                                    serv   serv   serv outs              serv   serv   serv outs        time   time   time  wqsz  sqsz qfull
hdisk0          53.9   3.0M 153.7   3.0M  41.9K 151.1   5.1    0.2   38.4     0    0   2.6   3.0    1.1    4.6     0    0   0.2    0.0   14.7    0.0   0.0  31.7  16:05:30
hdisk26          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk2          63.6   3.2M 144.1   3.2M  64.4K 141.3   7.3    0.2   72.3     0    0   2.8   3.2    0.7    4.5     0    0   0.9    0.0   28.8    0.0   0.1  46.1  16:05:30
hdisk24         56.0   2.9M 139.6   2.8M  77.3K 135.3   6.2    0.2   56.6     0    0   4.3   3.0    1.0    4.7     0    0   0.5    0.0   19.0    0.0   0.1  34.9  16:05:30
hdisk30         65.5   3.3M 156.9   3.2M  70.9K 152.7   7.1    0.3   42.8     0    0   4.2   3.0    0.7    5.6     0    0   0.6    0.0   20.2    0.0   0.1  50.1  16:05:30
hdisk33          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk34          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk37          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk41          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk40         63.5   2.8M 148.2   2.7M 103.1K 143.9   7.0    0.2   42.0     0    0   4.3   2.9    1.0    5.2     0    0   0.8    0.0   19.2    0.0   0.1  49.7  16:05:30
hdisk38         60.6   3.0M 146.1   2.9M  70.9K 142.5   7.0    0.2   64.1     0    0   3.6   2.7    0.8    5.4     0    0   0.8    0.0   24.1    0.0   0.1  45.4  16:05:30
hdisk25          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk35         50.0   4.0M 197.6   3.9M 107.9K 193.2   3.7    0.2   37.7     0    0   4.3   3.0    0.6    5.4     0    0   0.3    0.0   15.2    0.0   0.0  41.9  16:05:30
hdisk32         41.9   3.0M 159.2   3.0M  54.8K 156.0   3.5    0.2   25.7     0    0   3.2   3.4    1.0    4.8     0    0   0.1    0.0   12.6    0.0   0.0  21.7  16:05:30
hdisk36          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk42         79.7   3.0M 159.3   2.9M  83.8K 155.5  10.1    0.2   92.3     0    0   3.8   2.6    0.9    5.3     0    0   2.2    0.0   50.5    0.0   0.1  79.7  16:05:30
hdisk31          3.6   2.1M  52.7   1.7M 391.7K  35.6   0.8    0.2    7.1     0    0  17.1   1.0    0.5    3.4     0    0   0.0    0.0    0.2    0.0   0.0   1.3  16:05:30
hdisk43         42.6   2.9M 144.2   2.8M  64.4K 140.9   4.0    0.2   34.3     0    0   3.2   3.0    1.3    5.4     0    0   0.1    0.0   10.9    0.0   0.0  21.2  16:05:30
hdisk52          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk48         51.2   3.7M 165.5   3.6M  69.3K 161.4   4.6    0.2   31.7     0    0   4.1   3.0    0.6    4.7     0    0   0.3    0.0   12.7    0.0   0.0  35.5  16:05:30
hdisk47          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk44          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk51         50.1   3.7M 187.6   3.6M  90.2K 183.5   3.7    0.2   40.0     0    0   4.1   3.2    1.1    5.0     0    0   0.4    0.0   37.8    0.0   0.0  44.4  16:05:30
hdisk39          0.1  37.7K   3.5  19.3K  18.3K   1.2   0.5    0.3    1.7     0    0   2.4   0.9    0.5    4.6     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30

Disks:                     xfers                                read                                write                                  queue                    time
-------------- -------------------------------- ------------------------------------ ------------------------------------ -------------------------------------- ---------
                 %tm    bps   tps  bread  bwrtn   rps    avg    min    max time fail   wps    avg    min    max time fail    avg    min    max   avg   avg  serv
                 act                                    serv   serv   serv outs              serv   serv   serv outs        time   time   time  wqsz  sqsz qfull
hdisk49          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk57          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk45         51.5   3.0M 154.3   3.0M  54.8K 151.5   4.7    0.2   31.6     0    0   2.8   3.2    1.3    5.1     0    0   0.2    0.0   12.5    0.0   0.0  28.1  16:05:30
hdisk50          7.9   2.1M  50.2   1.7M 391.7K  33.0   2.1    0.3   23.3     0    0  17.1   1.5    0.7   18.3     0    0   0.0    0.0    0.5    0.0   0.0   2.8  16:05:30
hdisk55         64.5   3.7M 169.6   3.6M  72.5K 166.0   6.1    0.2   55.9     0    0   3.6   3.4    0.8    5.2     0    0   0.4    0.0   17.6    0.0   0.1  47.0  16:05:30
hdisk54         66.9   3.6M 165.5   3.5M  80.6K 162.3   6.7    0.3   56.3     0    0   3.2   3.0    0.5    5.0     0    0   0.9    0.0   23.7    0.0   0.1  52.7  16:05:30
hdisk53          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk56         81.9   3.2M 142.5   3.1M  83.8K 138.8  11.6    0.3  117.6     0    0   3.6   3.3    1.1    5.3     0    0   1.9    0.0   42.9    0.0   0.1  72.4  16:05:30
hdisk58         82.2   3.6M 168.2   3.6M  77.3K 164.9   9.9    0.2   84.0     0    0   3.2   2.7    0.6    5.2     0    0   1.9    0.0   45.8    0.0   0.1  88.9  16:05:30
hdisk60          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk29         52.5   3.4M 172.4   3.4M  64.4K 170.1   4.3    0.2   51.9     0    0   2.3   2.6    1.0    5.5     0    0   0.2    0.0   12.5    0.0   0.0  37.1  16:05:30
hdisk59          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk61         46.6   3.0M 157.2   2.9M  58.0K 153.7   4.1    0.2   42.8     0    0   3.5   3.5    1.4    5.3     0    0   0.1    0.0    7.8    0.0   0.0  23.1  16:05:30
hdisk63          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk62         65.5   3.0M 152.3   2.9M  74.1K 148.7   7.4    0.3   66.8     0    0   3.6   2.6    0.8    5.4     0    0   1.0    0.0   43.2    0.0   0.1  56.1  16:05:30
hdisk68          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk65          0.3  19.6K   3.0   1.3K  18.3K   0.6   2.1    0.4    6.6     0    0   2.4   1.2    0.6    2.9     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk64         42.9   3.4M 145.4   3.4M  78.9K 141.5   4.1    0.2   25.1     0    0   3.9   3.0    0.7    5.6     0    0   0.3    0.0   14.5    0.0   0.0  23.7  16:05:30
hdisk67          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk46         66.8   3.4M 165.5   3.3M  93.4K 161.8   6.8    0.2   51.6     0    0   3.7   3.1    0.6    5.0     0    0   0.6    0.0   24.2    0.0   0.1  52.1  16:05:30
hdisk71          1.6 411.0K  18.3  19.3K 391.7K   1.2   0.6    0.3    3.2     0    0  17.1   1.1    0.5    3.1     0    0   0.0    0.0    0.1    0.0   0.0   0.1  16:05:30
hdisk70         61.5   2.7M 135.8   2.7M  62.4K 132.2   7.4    0.2  107.1     0    0   3.6   3.1    0.6    4.9     0    0   0.7    0.0   25.7    0.0   0.1  39.2  16:05:30
hdisk74         86.1   3.6M 182.2   3.5M  69.3K 178.9  10.7    0.2  108.8     0    0   3.3   3.2    0.8    5.3     0    0   4.2    0.0   98.7    0.0   0.1 119.1  16:05:30
hdisk72         58.2   2.5M 130.0   2.5M  80.6K 125.7   7.1    0.3   43.8     0    0   4.3   2.9    1.0    5.3     0    0   0.8    0.0   27.0    0.0   0.1  38.6  16:05:30

Disks:                     xfers                                read                                write                                  queue                    time
-------------- -------------------------------- ------------------------------------ ------------------------------------ -------------------------------------- ---------
                 %tm    bps   tps  bread  bwrtn   rps    avg    min    max time fail   wps    avg    min    max time fail    avg    min    max   avg   avg  serv
                 act                                    serv   serv   serv outs              serv   serv   serv outs        time   time   time  wqsz  sqsz qfull
hdisk75         47.3   3.3M 160.7   3.2M  69.3K 157.1   4.0    0.2   30.9     0    0   3.5   3.2    1.2    5.0     0    0   0.2    0.0   12.7    0.0   0.0  27.9  16:05:30
hdisk78         66.2   3.3M 168.3   3.2M  70.9K 165.5   6.7    0.2   48.5     0    0   2.9   3.8    2.0    5.1     0    0   0.9    0.0   31.5    0.0   0.1  56.3  16:05:30
hdisk69          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk77          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk73          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk76          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
hdisk66          0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
cd0              0.0   0.0    0.0   0.0    0.0    0.0   0.0    0.0    0.0     0    0   0.0   0.0    0.0    0.0     0    0   0.0    0.0    0.0    0.0   0.0   0.0  16:05:30
}}}


''AIX commands you should not leave home without'' http://www.ibm.com/developerworks/aix/library/au-dutta_cmds.html
''AIX system identification'' http://www.ibm.com/developerworks/aix/library/au-aix-systemid.html
''Determining CPU Speed in AIX'' http://www-01.ibm.com/support/docview.wss?uid=isg3T1000107
CPU monitoring and tuning http://www.ibm.com/developerworks/aix/library/au-aix5_cpu/
Too many Virtual Processors? https://www.ibm.com/developerworks/mydeveloperworks/blogs/AIXDownUnder/entry/too_many_virtual_processors365?lang=en
AIX Virtual Processor Folding is Misunderstood https://www.ibm.com/developerworks/mydeveloperworks/blogs/aixpert/entry/aix_virtual_processor_folding_in_misunderstood110?lang=en
How to find physical CPU socket count for IBM AIX http://www.tek-tips.com/viewthread.cfm?qid=1623771
Single/Dual Core Processor http://www.ibm.com/developerworks/forums/message.jspa?messageID=14270797
http://pic.dhe.ibm.com/infocenter/aix/v7r1/index.jsp?topic=%2Fcom.ibm.aix.cmds%2Fdoc%2Faixcmds3%2Flparstat.htm
lparstat command http://www.ibm.com/developerworks/forums/thread.jspa?messageID=14772565
Micropartitioning and Lparstat Output Virtual/Physical http://unix.ittoolbox.com/groups/technical-functional/ibm-aix-l/micropartitioning-and-lparstat-output-virtualphysical-4241112
Capped/Uncapped Partitions http://www.ibmsystemsmag.com/ibmi/trends/linux/See-Linux-Run/Sidebar--Capped-Uncapped-Partitions/
IBM PowerVM Virtualization Introduction and Configuration http://www.redbooks.ibm.com/abstracts/sg247940.html
iostat http://www.wmduszyk.com/wp-content/uploads/2011/01/PE23_Braden_Nasypany.pdf






















https://en.wikipedia.org/wiki/Application_lifecycle_management

''12c'' Getting Started with Oracle Application Management Pack (AMP) for Oracle E-Business Suite, Release 12.1.0.1 [ID 1434392.1]
''11g'' Getting Started with Oracle E-Business Suite Plug-in, Release 4.0 [ID 1224313.1]
''10g'' Getting Started with Oracle Application Management Pack and Oracle Application Change Management Pack for Oracle E-Business Suite, Release 3.1 [ID 982302.1]
''Application Management Suite for PeopleSoft (AMS4PSFT)'' http://www.oracle.com/technetwork/oem/app-mgmt/ds-apps-mgmt-suite-psft-166219.pdf
http://download.oracle.com/technology/products/oem/screenwatches/peoplesoft_amp/PeopleSoft_final.html
http://www.psoftsearch.com/managing-peoplesoft-with-application-management-suite/

http://www.oracle.com/technetwork/oem/em12c-screenwatches-512013.html#app_mgmt
https://apex.oracle.com/pls/apex/f?p=44785:24:9222314894074::NO:24:P24_CONTENT_ID,P24_PREV_PAGE:6415,2

<<<
''With AMS we bundled the licenses of AMP and RUEI together in a single SKU. AMP already had multiple features in it, of course.''
<<<
''11g'' 
http://gasparotto.blogspot.com/2011/04/manage-peoplesoft-with-oem-grid-control.html
http://gasparotto.blogspot.com/2011/04/manage-peoplesoft-with-oem-grid-control_08.html
http://gasparotto.blogspot.com/2011/04/manage-peoplesoft-with-oem-grid-control_09.html
peoplesoft plugin 8.52 install, peoplesoft plugin agent install,  http://oraclehowto.wordpress.com/category/oracle-enterprise-manager-11g-plugins/peoplesoft-plugin/

''10g'' http://www.oracle.com/us/products/enterprise-manager/mgmt-pack-for-psft-ds-068946.pdf?ssSourceSiteId=ocomcafr
http://modern-sql.com


http://gigaom.com/2012/10/30/meet-arms-two-newest-cores-for-faster-phones-and-greener-servers/
http://gigaom.com/cloud/facebook-amd-hp-and-others-team-up-to-plan-the-arm-data-center-takeover/
''the consortium'' http://www.linaro.org/linux-on-arm
http://www.arm.com/index.php

''ARM and Moore's law'' http://www.technologyreview.com/news/507116/moores-law-is-becoming-irrelevant/, http://www.technologyreview.com/news/428481/the-moores-law-moon-shot/
https://sites.google.com/site/embtdbo/wait-event-documentation/ash---active-session-history
ASH patent http://www.google.com/patents?id=cQWbAAAAEBAJ&pg=PA2&source=gbs_selected_pages&cad=3#v=onepage&q&f=false
Practical ASH http://www.scribd.com/rvenrdra/d/44100090-Practical-Advice-on-the-Use-of-Oracle-Database-s-Active-Session-History
magic metric? http://wenku.baidu.com/view/7d07b81b964bcf84b9d57b48.html?from=related
Sifting through the ASHes http://www.oracle.com/technetwork/database/focus-areas/manageability/ppt-active-session-history-129612.pdf
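
The two query variants below report the top sessions by ASH activity for a given PL/SQL package (&PACKAGE_NAME): the first reads v$active_session_history (in-memory samples, here the last day) and keeps the top 10; the second reads dba_hist_active_sess_history (AWR history, here a 99-day window) and keeps the top 50.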





{{{
col name for a12
col program for a25
col calling_code for a30
col CPU for 9999
col IO for 9999
col TOTAL for 99999
col WAIT for 9999
col user_id for 99999
col sid for 9999
col sql_text format a10

set linesize 300

select /* usercheck */
        decode(nvl(to_char(s.sid),-1),-1,'DISCONNECTED','CONNECTED')
                                                        "STATUS",
        topsession.sid             "SID",
        topsession.serial#,
        u.username  "NAME",
        topsession.program                  "PROGRAM",
        topsession.sql_plan_hash_value,
        topsession.sql_id,        
        st.sql_text sql_text,
        topsession."calling_code",
        max(topsession.CPU)              "CPU",
        max(topsession.WAIT)       "WAITING",
        max(topsession.IO)                  "IO",
        max(topsession.TOTAL)           "TOTAL", 
        round((s.LAST_CALL_ET/60),2) ELAP_MIN
from (
				select * 
				from (
								select
								     ash.session_id sid,
								     ash.session_serial# serial#,
								     ash.user_id user_id,
								     ash.program,
								     ash.sql_plan_hash_value,
								     ash.sql_id, 
								    procs1.object_name || decode(procs1.procedure_name,'','','.')||
								    procs1.procedure_name ||' '||
								    decode(procs2.object_name,procs1.object_name,'',
									 decode(procs2.object_name,'','',' => '||procs2.object_name)) 
								    ||
								    decode(procs2.procedure_name,procs1.procedure_name,'',
								        decode(procs2.procedure_name,'','',null,'','.')||procs2.procedure_name)
								    "calling_code",	     
								     sum(decode(ash.session_state,'ON CPU',1,0))     "CPU",
								     sum(decode(ash.session_state,'WAITING',1,0))    -
								     sum(decode(ash.session_state,'WAITING',
								        decode(wait_class,'User I/O',1, 0 ), 0))    "WAIT" ,
								     sum(decode(ash.session_state,'WAITING',
								        decode(wait_class,'User I/O',1, 0 ), 0))    "IO" ,
								     sum(decode(session_state,'ON CPU',1,1))     "TOTAL"
								from 
									v$active_session_history ash,
									all_procedures procs1,
	                                all_procedures procs2
								where 
							        ash.PLSQL_ENTRY_OBJECT_ID  = procs1.object_id (+) and 
							        ash.PLSQL_ENTRY_SUBPROGRAM_ID = procs1.SUBPROGRAM_ID (+) and 
							        ash.PLSQL_OBJECT_ID   = procs2.object_id (+) and 
							        ash.PLSQL_SUBPROGRAM_ID  = procs2.SUBPROGRAM_ID (+) 
                                        and ash.sample_time > sysdate - 1
								group by session_id,user_id,session_serial#,program,sql_id,sql_plan_hash_value, 
								         procs1.object_name, procs1.procedure_name, procs2.object_name, procs2.procedure_name
								order by sum(decode(session_state,'ON CPU',1,1)) desc
				     ) 
				 where rownum < 10
      ) topsession,
        v$session s,
        (select sql_id, dbid, nvl(b.name, a.command_type) sql_text from dba_hist_sqltext a, audit_actions b where a.command_type =  b.action(+)) st,
        all_users u
where
        u.user_id =topsession.user_id and
        /* outer join to v$session because the session might be disconnected */
        topsession.sid         = s.sid         (+) and
        topsession.serial# = s.serial#   (+)   and
		st.sql_id(+)             = s.sql_id
		and topsession."calling_code" like '%&PACKAGE_NAME%'
group by  topsession.sid, topsession.serial#,
             topsession.user_id, topsession.program, topsession.sql_plan_hash_value, topsession.sql_id,
                     topsession."calling_code",
             s.username, s.sid,s.paddr,u.username, st.sql_text, s.LAST_CALL_ET
order by max(topsession.TOTAL) desc
/
}}}
{{{
col name for a12
col program for a25
col calling_code for a30
col CPU for 9999
col IO for 9999
col TOTAL for 99999
col WAIT for 9999
col user_id for 99999
col sid for 9999
col sql_text format a10

set linesize 300

select /* usercheck */
        decode(nvl(to_char(s.sid),-1),-1,'DISCONNECTED','CONNECTED')
                                                        "STATUS",
        topsession.sid             "SID",
        topsession.serial#,
        u.username  "NAME",
        topsession.program                  "PROGRAM",
        topsession.sql_plan_hash_value,
        topsession.sql_id,
        st.sql_text sql_text,
        topsession."calling_code",
        max(topsession.CPU)              "CPU",
        max(topsession.WAIT)       "WAITING",
        max(topsession.IO)                  "IO",
        max(topsession.TOTAL)           "TOTAL",
        round((s.LAST_CALL_ET/60),2) ELAP_MIN
from (
        select *
        from (
                select
                     ash.session_id sid,
                     ash.session_serial# serial#,
                     ash.user_id user_id,
                     ash.program,
                     ash.sql_plan_hash_value,
                     ash.sql_id,
                     procs1.object_name || decode(procs1.procedure_name,'','','.')||
                     procs1.procedure_name ||' '||
                     decode(procs2.object_name,procs1.object_name,'',
                         decode(procs2.object_name,'','',' => '||procs2.object_name))
                     ||
                     decode(procs2.procedure_name,procs1.procedure_name,'',
                         decode(procs2.procedure_name,'','',null,'','.')||procs2.procedure_name)
                     "calling_code",
                     sum(decode(ash.session_state,'ON CPU',1,0))     "CPU",
                     sum(decode(ash.session_state,'WAITING',1,0))    -
                     sum(decode(ash.session_state,'WAITING',
                        decode(wait_class,'User I/O',1, 0 ), 0))    "WAIT" ,
                     sum(decode(ash.session_state,'WAITING',
                        decode(wait_class,'User I/O',1, 0 ), 0))    "IO" ,
                     sum(decode(session_state,'ON CPU',1,1))     "TOTAL"
                from
                        dba_hist_active_sess_history ash,
                        all_procedures procs1,
                        all_procedures procs2
                where
                        ash.PLSQL_ENTRY_OBJECT_ID  = procs1.object_id (+) and
                        ash.PLSQL_ENTRY_SUBPROGRAM_ID = procs1.SUBPROGRAM_ID (+) and
                        ash.PLSQL_OBJECT_ID   = procs2.object_id (+) and
                        ash.PLSQL_SUBPROGRAM_ID  = procs2.SUBPROGRAM_ID (+)
                        and ash.sample_time > sysdate - 99
                group by session_id,user_id,session_serial#,program,sql_id,sql_plan_hash_value,
                         procs1.object_name, procs1.procedure_name, procs2.object_name, procs2.procedure_name
                order by sum(decode(session_state,'ON CPU',1,1)) desc
             )
        where rownum < 50
      ) topsession,
        v$session s,
        (select sql_id, dbid, nvl(b.name, a.command_type) sql_text from dba_hist_sqltext a, audit_actions b where a.command_type =  b.action(+)) st,
        all_users u
where
        u.user_id =topsession.user_id and
        /* outer join to v$session because the session might be disconnected */
        topsession.sid         = s.sid         (+) and
        topsession.serial# = s.serial#   (+)   and
                st.sql_id(+)             = s.sql_id
                and topsession."calling_code" like '%&PACKAGE_NAME%'
group by  topsession.sid, topsession.serial#,
             topsession.user_id, topsession.program, topsession.sql_plan_hash_value, topsession.sql_id,
                     topsession."calling_code",
             s.username, s.sid,s.paddr,u.username, st.sql_text, s.LAST_CALL_ET
order by max(topsession.TOTAL) desc
/
}}}
{{{
col name for a12
col program for a25
col calling_code for a30
col CPU for 9999
col IO for 9999
col TOTAL for 99999
col WAIT for 9999
col user_id for 99999
col sid for 9999
col sql_text format a10

set linesize 300

select /* usercheck */
        decode(nvl(to_char(s.sid),-1),-1,'DISCONNECTED','CONNECTED')
                                                        "STATUS",
        topsession.sid             "SID",
        topsession.serial#,
        u.username  "NAME",
        topsession.program                  "PROGRAM",
        topsession.sql_plan_hash_value,
        topsession.sql_id,
        st.sql_text sql_text,
        topsession."calling_code",
        max(topsession.CPU)              "CPU",
        max(topsession.WAIT)       "WAITING",
        max(topsession.IO)                  "IO",
        max(topsession.TOTAL)           "TOTAL",
        round((s.LAST_CALL_ET/60),2) ELAP_MIN
from (
        select *
        from (
                select
                     ash.session_id sid,
                     ash.session_serial# serial#,
                     ash.user_id user_id,
                     ash.program,
                     ash.sql_plan_hash_value,
                     ash.sql_id,
                    procs1.object_name || decode(procs1.procedure_name,'','','.')||
                    procs1.procedure_name ||' '||
                    decode(procs2.object_name,procs1.object_name,'',
                         decode(procs2.object_name,'','',' => '||procs2.object_name))
                    ||
                    decode(procs2.procedure_name,procs1.procedure_name,'',
                        decode(procs2.procedure_name,'','',null,'','.')||procs2.procedure_name)
                    "calling_code",
                     sum(decode(ash.session_state,'ON CPU',1,0))     "CPU",
                     sum(decode(ash.session_state,'WAITING',1,0))    -
                     sum(decode(ash.session_state,'WAITING',
                        decode(wait_class,'User I/O',1, 0 ), 0))    "WAIT" ,
                     sum(decode(ash.session_state,'WAITING',
                        decode(wait_class,'User I/O',1, 0 ), 0))    "IO" ,
                     sum(decode(session_state,'ON CPU',1,1))     "TOTAL"
                from
                        v$active_session_history ash,
                        all_procedures procs1,
                        all_procedures procs2
                where
                        ash.PLSQL_ENTRY_OBJECT_ID  = procs1.object_id (+) and
                        ash.PLSQL_ENTRY_SUBPROGRAM_ID = procs1.SUBPROGRAM_ID (+) and
                        ash.PLSQL_OBJECT_ID   = procs2.object_id (+) and
                        ash.PLSQL_SUBPROGRAM_ID  = procs2.SUBPROGRAM_ID (+)
                        and ash.sample_time > sysdate - 1
                group by session_id,user_id,session_serial#,program,sql_id,sql_plan_hash_value,
                         procs1.object_name, procs1.procedure_name, procs2.object_name, procs2.procedure_name
                order by sum(decode(session_state,'ON CPU',1,1)) desc
             )
        where rownum < 50
      ) topsession,
        v$session s,
        (select sql_id, dbid, nvl(b.name, a.command_type) sql_text from dba_hist_sqltext a, audit_actions b where a.command_type =  b.action(+)) st,
        all_users u
where
        u.user_id =topsession.user_id and
        /* outer join to v$session because the session might be disconnected */
        topsession.sid         = s.sid         (+) and
        topsession.serial# = s.serial#   (+)   and
                st.sql_id(+)             = s.sql_id
       and topsession.sql_id = '&SQLID'
group by  topsession.sid, topsession.serial#,
             topsession.user_id, topsession.program, topsession.sql_plan_hash_value, topsession.sql_id,
                     topsession."calling_code",
             s.username, s.sid,s.paddr,u.username, st.sql_text, s.LAST_CALL_ET
order by max(topsession.TOTAL) desc
/

}}}
{{{
$ cat ashtop
#!/bin/bash

while :; do
sqlplus "/ as sysdba" <<-EOF
@ashtop.sql
EOF
sleep 5
echo
done
}}}


{{{
-- (c) Kyle Hailey 2007, edited by Karl Arao 20091217

col name for a12
col program for a25
col calling_code for a25
col CPU for 9999
col IO for 9999
col TOTAL for 99999
col WAIT for 9999
col user_id for 99999
col sid for 9999
col sql_text format a10

set linesize 300

select /* usercheck */
        decode(nvl(to_char(s.sid),-1),-1,'DISCONNECTED','CONNECTED')
                                                        "STATUS",
        topsession.sid             "SID",
        topsession.serial#,
        u.username  "NAME",
        topsession.program                  "PROGRAM",
        topsession.sql_plan_hash_value,
        topsession.sql_id,        
        st.sql_text sql_text,
        topsession."calling_code",
        max(topsession.CPU)              "CPU",
        max(topsession.WAIT)       "WAITING",
        max(topsession.IO)                  "IO",
        max(topsession.TOTAL)           "TOTAL", 
        round((s.LAST_CALL_ET/60),2) ELAP_MIN
from (
				select * 
				from (
								select
								     ash.session_id sid,
								     ash.session_serial# serial#,
								     ash.user_id user_id,
								     ash.program,
								     ash.sql_plan_hash_value,
								     ash.sql_id, 
								    procs1.object_name || decode(procs1.procedure_name,'','','.')||
								    procs1.procedure_name ||' '||
								    decode(procs2.object_name,procs1.object_name,'',
									 decode(procs2.object_name,'','',' => '||procs2.object_name)) 
								    ||
								    decode(procs2.procedure_name,procs1.procedure_name,'',
								        decode(procs2.procedure_name,'','',null,'','.')||procs2.procedure_name)
								    "calling_code",	     
								     sum(decode(ash.session_state,'ON CPU',1,0))     "CPU",
								     sum(decode(ash.session_state,'WAITING',1,0))    -
								     sum(decode(ash.session_state,'WAITING',
								        decode(wait_class,'User I/O',1, 0 ), 0))    "WAIT" ,
								     sum(decode(ash.session_state,'WAITING',
								        decode(wait_class,'User I/O',1, 0 ), 0))    "IO" ,
								     sum(decode(session_state,'ON CPU',1,1))     "TOTAL"
								from 
									v$active_session_history ash,
									all_procedures procs1,
	                                all_procedures procs2
								where 
							        ash.PLSQL_ENTRY_OBJECT_ID  = procs1.object_id (+) and 
							        ash.PLSQL_ENTRY_SUBPROGRAM_ID = procs1.SUBPROGRAM_ID (+) and 
							        ash.PLSQL_OBJECT_ID   = procs2.object_id (+) and 
							        ash.PLSQL_SUBPROGRAM_ID  = procs2.SUBPROGRAM_ID (+) 
                                        and ash.sample_time > sysdate - 1/(60*24)
								group by session_id,user_id,session_serial#,program,sql_id,sql_plan_hash_value, 
								         procs1.object_name, procs1.procedure_name, procs2.object_name, procs2.procedure_name
								order by sum(decode(session_state,'ON CPU',1,1)) desc
				     ) 
				 where rownum < 10
      ) topsession,
        v$session s,
        (select sql_id, dbid, nvl(b.name, a.command_type) sql_text from dba_hist_sqltext a, audit_actions b where a.command_type =  b.action(+)) st,
        all_users u
where
        u.user_id =topsession.user_id and
        /* outer join to v$session because the session might be disconnected */
        topsession.sid         = s.sid         (+) and
        topsession.serial# = s.serial#   (+)   and
		st.sql_id(+)             = s.sql_id
group by  topsession.sid, topsession.serial#,
             topsession.user_id, topsession.program, topsession.sql_plan_hash_value, topsession.sql_id,
                     topsession."calling_code",
             s.username, s.sid,s.paddr,u.username, st.sql_text, s.LAST_CALL_ET
order by max(topsession.TOTAL) desc
/

}}}

Grants needed to run the scripts:
{{{
grant CREATE SESSION to karlarao;
grant SELECT_CATALOG_ROLE to karlarao;
grant SELECT ANY DICTIONARY to karlarao;
}}}
usage:
{{{
./ash
or 
sh ash
}}}


Create the file and do ''chmod 755 ash''. This calls aveactn300.sql:
{{{
$ cat ~/dba/bin/ash
#!/bin/bash

while :; do
sqlplus "/ as sysdba" <<-EOF
@/home/oracle/dba/scripts/aveactn300.sql
EOF
sleep 5
echo
done
}}}


{{{
$ cat /home/oracle/dba/scripts/aveactn300.sql
-- (c) Kyle Hailey 2007

set lines 500
column f_days new_value v_days
select 1 f_days from dual;
column f_secs new_value v_secs
select 5 f_secs from dual;
--select &seconds f_secs from dual;
column f_bars new_value v_bars
select 5 f_bars from dual;
column aveact format 999.99
column graph format a50


column fpct format 99.99
column spct format 99.99
column tpct format 99.99
column fasl format 999.99
column sasl format 999.99
column first format a40
column second format a40


select to_char(start_time,'DD HH:MI:SS'),
       samples,
       --total,
       --waits,
       --cpu,
       round(fpct * (total/samples),2) fasl,
       decode(fpct,null,null,first) first,
       round(spct * (total/samples),2) sasl,
       decode(spct,null,null,second) second,
        substr(substr(rpad('+',round((cpu*&v_bars)/samples),'+') ||
        rpad('-',round((waits*&v_bars)/samples),'-')  ||
        rpad(' ',p.value * &v_bars,' '),0,(p.value * &v_bars)) ||
        p.value  ||
        substr(rpad('+',round((cpu*&v_bars)/samples),'+') ||
        rpad('-',round((waits*&v_bars)/samples),'-')  ||
        rpad(' ',p.value * &v_bars,' '),(p.value * &v_bars),10) ,0,50)
        graph
     --  spct,
     --  decode(spct,null,null,second) second,
     --  tpct,
     --  decode(tpct,null,null,third) third
from (
select start_time
     , max(samples) samples
     , sum(top.total) total
     , round(max(decode(top.seq,1,pct,null)),2) fpct
     , substr(max(decode(top.seq,1,decode(top.event,'ON CPU','CPU',event),null)),0,25) first
     , round(max(decode(top.seq,2,pct,null)),2) spct
     , substr(max(decode(top.seq,2,decode(top.event,'ON CPU','CPU',event),null)),0,25) second
     , round(max(decode(top.seq,3,pct,null)),2) tpct
     , substr(max(decode(top.seq,3,decode(top.event,'ON CPU','CPU',event),null)),0,25) third
     , sum(waits) waits
     , sum(cpu) cpu
from (
  select
       to_date(tday||' '||tmod*&v_secs,'YYMMDD SSSSS') start_time
     , event
     , total
     , row_number() over ( partition by id order by total desc ) seq
     , ratio_to_report( sum(total)) over ( partition by id ) pct
     , max(samples) samples
     , sum(decode(event,'ON CPU',total,0))    cpu
     , sum(decode(event,'ON CPU',0,total))    waits
  from (
    select
         to_char(sample_time,'YYMMDD')                      tday
       , trunc(to_char(sample_time,'SSSSS')/&v_secs)          tmod
       , to_char(sample_time,'YYMMDD')||trunc(to_char(sample_time,'SSSSS')/&v_secs) id
       , decode(ash.session_state,'ON CPU','ON CPU',ash.event)     event
       , sum(decode(session_state,'ON CPU',1,decode(session_type,'BACKGROUND',0,1))) total
       , (max(sample_id)-min(sample_id)+1)                    samples
     from
        v$active_session_history ash
     where
               sample_time > sysdate - &v_days
     group by  trunc(to_char(sample_time,'SSSSS')/&v_secs)
            ,  to_char(sample_time,'YYMMDD')
            ,  decode(ash.session_state,'ON CPU','ON CPU',ash.event)
     order by
               to_char(sample_time,'YYMMDD'),
               trunc(to_char(sample_time,'SSSSS')/&v_secs)
  )  chunks
  group by id, tday, tmod, event, total
) top
group by start_time
) aveact,
  v$parameter p
where p.name='cpu_count'
order by start_time
/
}}}
I got the job chain info of the IBM Curam batch from the dev team.

Here are the details of how the batch works 
<<<
        IBM Cúram Social Program Management 7.0.10 - 7.0.11

        Batch Streaming Architecture
        https://www.ibm.com/support/knowledgecenter/SS8S5A_7.0.11/com.ibm.curam.content.doc/BatchPerformanceMechanisms/c_BATCHPER_Architecture1BatchStreamingArchitecture1.html
        The Chunker
        https://www.ibm.com/support/knowledgecenter/SS8S5A_7.0.11/com.ibm.curam.content.doc/BatchPerformanceMechanisms/c_BATCHPER_Architecture1Chunker1.html
        The Stream
        https://www.ibm.com/support/knowledgecenter/SS8S5A_7.0.11/com.ibm.curam.content.doc/BatchPerformanceMechanisms/c_BATCHPER_Architecture1Stream1.html
<<<

Here's the SQL to pull data from the SCHEDULER_HISTORY table:

{{{
 SELECT *
FROM   (SELECT Substr(H.job_name, Instr(H.job_name, 'jobId-') + 6, 20)
               JOB_ID
                      ,
               Substr(H.job_name, Instr(H.job_name, 'job-') + 4,
               Instr(H.job_name, '-jobId-') - ( Instr(H.job_name, 'job-') + 4 ))
                      JOB_NAME,
               H.start_time,
               H.end_time,
               Regexp_substr(H.job_name, '[A-Za-z0-9\-]+',
               Instr(H.job_name, '/'))
                      FUNCTIONAL_AREA,
               Nvl(Regexp_substr(H.job_name, 'tier-[0-9]+'), 'N/A')
               TIER,
               Substr(H.job_name, 1, Instr(H.job_name, '/') - 1)
                      ORDERED_OR_STANDALONE
        FROM   scheduler_history H
        WHERE  ( ( H.start_time BETWEEN :startTime AND :endTime )
                  OR ( :startTime BETWEEN H.start_time AND H.end_time
                        OR ( :startTime >= H.start_time
                             AND H.end_time IS NULL ) ) )
               AND H.start_time > To_date(:startTime, 'YYYYMMDD HH24:MI:SS') - 2
               AND H.job_name LIKE '%jobId%'
               AND H.job_name NOT LIKE '%parallel%'
               AND H.job_name NOT LIKE '%snyc%'
               AND H.job_name NOT LIKE '%Reporting%'
               AND H.job_name NOT LIKE '%Stream%'
        ORDER  BY 5,
                  3) sub1
ORDER  BY 3 ASC  
}}}

This is the dependency diagram of the jobs. I defined levels 1 to 5 to clearly show the sequential dependencies in the data set.


[img(100%,100%)[ https://user-images.githubusercontent.com/3683046/116051201-53df5980-a646-11eb-8acc-85946c99a655.png]]



Here's the Tableau calculated field I used. The Tableau developer needs to complete this to match the diagram above; what I did covers just the jobs executed in the 20210317.xlsx data set.

{{{
IF contains(lower(trim([Functional Area])),'daytimebatch')=true then 'level 0'
ELSEIF contains(lower(trim([Functional Area])),'standalone-only')=true then 'level 0'
ELSEIF contains(lower(trim([Functional Area])),'post-start-of-batchjobs')=true then 'level 1'

ELSEIF contains(lower(trim([Functional Area])),'recipientfile')=true then 'level 2'
ELSEIF contains(lower(trim([Functional Area])),'pre-financials')=true then 'level 2'

ELSEIF contains(lower(trim([Functional Area])),'post-financials-reports')=true then 'level 4'
ELSEIF contains(lower(trim([Functional Area])),'post-financials')=true then 'level 4'
ELSEIF contains(lower(trim([Functional Area])),'post-financials2')=true then 'level 4'
ELSEIF contains(lower(trim([Functional Area])),'pre-bulkprint')=true then 'level 4'

ELSEIF contains(lower(trim([Functional Area])),'bulkprint')=true then 'level 5'
ELSEIF contains(lower(trim([Functional Area])),'ebt-2')=true then 'level 5'
ELSEIF contains(lower(trim([Functional Area])),'ebt-response')=true then 'level 5'

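// note: the plain 'financials' and 'ebt' checks stay below on purpose;
// otherwise contains() would match them before the more specific 'post-financials*' and 'ebt-*' checks above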
ELSEIF contains(lower(trim([Functional Area])),'financials')=true then 'level 3'
ELSEIF contains(lower(trim([Functional Area])),'ebt')=true then 'level 4'

ELSE 'OTHER' END
}}}



Here's the gantt chart.

From here we can be tactical and systematic when it comes to tuning. We can identify the blocking jobs and the longest-running jobs, and how they impact the overall batch elapsed time. We can isolate these jobs in the Dynatrace instrumentation, and even run an identified badly performing batch standalone and then profile/tune its top SQLs.


[img(100%,100%)[ https://user-images.githubusercontent.com/3683046/116051156-4924c480-a646-11eb-85bf-265efbe56fa4.png ]]




Here's how to create the gantt chart. Note that the Tableau developer needs to tap the SCHEDULER_HISTORY table directly instead of using a data dump from SQL.


[img(100%,100%)[ https://user-images.githubusercontent.com/3683046/116051200-5346c300-a646-11eb-88b1-6fbab7b01cb0.png ]]



https://www.linkedin.com/pulse/estimating-oltp-execution-latencies-using-ash-john-beresniewicz
{{{
WITH
    ash_summary
AS
(select
     ash.sql_id
    ,SUM(usecs_per_row)                     as DBtime_usecs
    ,1+MAX(sql_exec_id) - MIN(sql_exec_id)  as execs
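    -- sql_exec_id increments for each execution of a given sql_id,
    -- so 1 + max - min estimates the executions spanned by the sampled window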
    ,SUM(usecs_per_row)/(1+MAX(sql_exec_id) - MIN(sql_exec_id))/1000   
                                            as avg_latency_msec_ash
    ,SUM(elapsed_time)/SUM(executions)/1000               
                                            as avg_latency_msec_sqlstats
    ,MAX(substr(sql_text,1,150))            as sqltext
from
     v$active_session_history      ash
    ,v$sqlstats                    sql
where
    ash.sql_id is not null
and ash.sql_exec_id is not null
and sql.executions is not null
and sql.executions > 0
and ash.sql_id = sql.sql_id
group by
    ash.sql_id
)
select
    sql_id
    ,DBtime_usecs
    ,execs
    ,ROUND(avg_latency_msec_ash,6)      ash_latency_msec
    ,ROUND(avg_latency_msec_sqlstats,6) sqlstats_latency_msec
    ,sqltext
from
    ash_summary
order by 3 desc;

}}}
<<showtoc>>


! End to end picture of ORMB and OBIEE performance
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047450-17a9fa00-a642-11eb-83fc-06b6d9c2c482.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047452-17a9fa00-a642-11eb-939f-1ef82583b6c9.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047454-18429080-a642-11eb-9ccf-d81de4442dd3.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047455-18429080-a642-11eb-8b99-93520ddd7c29.png]]
[img[https://user-images.githubusercontent.com/3683046/116047456-18db2700-a642-11eb-8ca5-1c09c3ebc379.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047465-1a0c5400-a642-11eb-8906-e790b70a9195.png]]
[img[https://user-images.githubusercontent.com/3683046/116047466-1a0c5400-a642-11eb-900d-be731beb1eb6.png]]
[img[https://user-images.githubusercontent.com/3683046/116047473-1b3d8100-a642-11eb-9392-c3ef0e6e7334.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047475-1b3d8100-a642-11eb-9a0e-e8c8d24c126f.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047476-1b3d8100-a642-11eb-98db-f14e777cc084.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047479-1bd61780-a642-11eb-85c2-dd7a43bcad28.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047484-1bd61780-a642-11eb-8e24-c93d0ceea5e3.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047485-1c6eae00-a642-11eb-850d-f6f8b1ddd015.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047487-1c6eae00-a642-11eb-811d-099f8aeca152.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047491-1d9fdb00-a642-11eb-8eaf-5856619d69e8.png]]
[img[https://user-images.githubusercontent.com/3683046/116047492-1d9fdb00-a642-11eb-8938-1bc8d59b78e5.png]]
[img(100%,100%)[https://user-images.githubusercontent.com/3683046/116047494-1ed10800-a642-11eb-9ec0-af7f85d9ad0a.png]]



! Logic to separate the workload of OBIEE


Here's what I used on the ASH data to separate the workload of OBIEE.
* BIP reports (front end)
* ODI ETL jobs
* nqsserver (OBIEE processes)

{{{
Tableau calculated field: 
IF contains(lower(trim([Module])),'BIP')=true THEN 'BIP'
ELSEIF contains(lower(trim([Module])),'ODI')=true THEN 'ODI'
ELSEIF contains(lower(trim([Module])),'nqs')=true THEN 'nqsserver'
ELSE 'OTHER' END
}}}

[img[ https://user-images.githubusercontent.com/3683046/116047456-18db2700-a642-11eb-8ca5-1c09c3ebc379.png ]]

Some of the reports are instrumented enough that the ACTION column shows the report number, but separating the workload by module works best.

-- from http://www.perfvision.com/statspack/ash.txt

{{{
ASH Report For CDB10/cdb10
DB Name         DB Id    Instance     Inst Num Release     RAC Host
CPUs           SGA Size       Buffer Cache        Shared Pool    ASH Buffer Size
Top User Events
Top Background Events
Top Event P1/P2/P3 Values
Top Service/Module
Top Client IDs
Top SQL Command Types
Top SQL Statements
Top SQL using literals
Top Sessions
Top Blocking Sessions
Top DB Objects
Top DB Files
Top Latches
Activity Over Time
}}}
https://blog.tanelpoder.com/2011/10/24/what-the-heck-is-the-sql-execution-id-sql_exec_id/
-- from http://www.perfvision.com/statspack/ash.txt

{{{
ASH Report For CDB10/cdb10

DB Name         DB Id    Instance     Inst Num Release     RAC Host
------------ ----------- ------------ -------- ----------- --- ------------
CDB10         1193559071 cdb10               1 10.2.0.1.0  NO  tsukuba

CPUs           SGA Size       Buffer Cache        Shared Pool    ASH Buffer Size
---- ------------------ ------------------ ------------------ ------------------
   2        440M (100%)         28M (6.4%)       128M (29.1%)        4.0M (0.9%)


          Analysis Begin Time:   31-Jul-07 17:52:21
            Analysis End Time:   31-Jul-07 18:07:21
                 Elapsed Time:        15.0 (mins)
                 Sample Count:       2,647
      Average Active Sessions:        2.94
  Avg. Active Session per CPU:        1.47
                Report Target:   None specified

Top User Events                  DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                                                               Avg Active
Event                               Event Class     % Activity   Sessions
----------------------------------- --------------- ---------- ----------
db file sequential read             User I/O             26.60       0.78
CPU + Wait for CPU                  CPU                   8.88       0.26
db file scattered read              User I/O              7.25       0.21
log file sync                       Commit                5.44       0.16
log buffer space                    Configuration         4.53       0.13
          -------------------------------------------------------------

Top Background Events            DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                                                               Avg Active
Event                               Event Class     % Activity   Sessions
----------------------------------- --------------- ---------- ----------
db file parallel write              System I/O           21.61       0.64
log file parallel write             System I/O           18.21       0.54
          -------------------------------------------------------------

Top Event P1/P2/P3 Values        DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

Event                          % Event  P1 Value, P2 Value, P3 Value % Activity
------------------------------ ------- ----------------------------- ----------
Parameter 1                Parameter 2                Parameter 3
-------------------------- -------------------------- --------------------------
db file sequential read          26.97             "201","66953","1"       0.11
file#                      block#                     blocks

db file parallel write           21.61          "3","0","2147483647"       3.21
requests                   interrupt                  timeout

                                                "2","0","2147483647"       2.49


                                                "5","0","2147483647"       2.42


log file parallel write          18.21                "1","2022","1"       0.68
files                      blocks                     requests

db file scattered read            7.37             "201","72065","8"       0.23
file#                      block#                     blocks

log file sync                     5.48                "4114","0","0"       0.30
buffer#                    NOT DEFINED                NOT DEFINED

          -------------------------------------------------------------

Top Service/Module               DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

Service        Module                   % Activity Action               % Action
-------------- ------------------------ ---------- ------------------ ----------
SYS$USERS      UNNAMED                       50.70 UNNAMED                 50.70
SYS$BACKGROUND UNNAMED                       41.56 UNNAMED                 41.56
cdb10          OEM.SystemPool                 2.64 UNNAMED                  1.47
                                                   XMLLoader0               1.17
SYS$USERS      sqlplus@tsukuba (TNS V1-       1.55 UNNAMED                  1.55
cdb10          Lab128                         1.36 UNNAMED                  1.36
          -------------------------------------------------------------

Top Client IDs                   DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                  No data exists for this section of the report.
          -------------------------------------------------------------

Top SQL Command Types            DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> 'Distinct SQLIDs' is the count of the distinct number of SQLIDs
      with the given SQL Command Type found over all the ASH samples
      in the analysis period

                                           Distinct            Avg Active
SQL Command Type                             SQLIDs % Activity   Sessions
---------------------------------------- ---------- ---------- ----------
INSERT                                           28      27.81       0.82
SELECT                                           45      12.73       0.37
UPDATE                                           11       3.85       0.11
DELETE                                            4       3.70       0.11
          -------------------------------------------------------------

Top SQL Statements              DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

       SQL ID    Planhash % Activity Event                             % Event
------------- ----------- ---------- ------------------------------ ----------
fd6a0p6333g8z  2993408006       7.59 db file sequential read              3.06
 SELECT trunc(SYSDATE, 'HH24') HOUR_STAMP, CM_ID, MA
X(SUBSTR(CM_DESC, 1, 12)) CM_DESC, MAX(UP_ID) UP_ID, MA
X(DOWN_ID) DOWN_ID, MAX(MAC_ID) MAC_ID, MAX(CMTS_
ID) CMTS_ID, SUM(BYTES_UP) SUM_BYTES_UP, SUM(BY

                                     direct path write temp               1.74

                                     db file scattered read               1.32

298wmz1kxjs1m  4251515144       5.25 CPU + Wait for CPU                   2.68
INSERT INTO CM_QOS_PROF SELECT :B1 , R.TOPOLOGYID, :B1 - :B4 , P.NODE_PROFILE_ID
, R.DOCSIFCMTSSERVICEQOSPROFILE FROM CM_SID_RAWDATA R, ( SELECT DISTINCT T.CMID,
 P.QOS_PROF_IDX, P.NODE_PROFILE_ID FROM TMP_TOP_SLOW_CM T, CMTS_QOS_PROF P WHERE
 T.CMTSID = P.TOPOLOGYID AND P.SECONDID = :B1 ) P WHERE R.BATCHID = :B3 AND R.PR

                                     db file sequential read              1.78

fhawr20n0wy5x  1792062018       3.40 db file sequential read              2.91
INSERT INTO TMP_CALC_HFC_SLOW_CM_TMP SELECT T.CMTSID, T.DOWNID, T.CMID, 0, 0, 0,
 T.DOWN_SNR_CNR_A3, T.DOWN_SNR_CNR_A2, T.DOWN_SNR_CNR_A1, T.DOWN_SNR_CNR_A0, R.S
YSUPTIME, R.DOCSIFSIGQUNERROREDS, R.DOCSIFSIGQCORRECTEDS, R.DOCSIFSIGQUNCORRECTA
BLES, R.DOCSIFSIGQSIGNALNOISE, :B3 , L.PREV_SECONDID, L.PREV_DOCSIFSIGQUNERRORED

3a11s4c86wdu5  1366293986       3.21 db file sequential read              1.85
DELETE FROM CM_RAWDATA WHERE BATCHID = 0 AND PROFINDX = :B1

                                     log buffer space                     1.06

998t5bbdfm5rm  1914870171       3.21 db file sequential read              1.70
INSERT INTO CM_RAWDATA SELECT PROFINDX, 0 BATCHID, TOPOLOGYID, SAMPLETIME, SYSUP
TIME, DOCSIFCMTSCMSTATUSVALUE, DOCSIFCMTSSERVICEINOCTETS, DOCSIFCMTSSERVICEOUTOC
TETS, DOCSIFCMSTATUSTXPOWER, DOCSIFCMTSCMSTATUSRXPOWER, DOCSIFDOWNCHANNELPOWER,
DOCSIFSIGQUNERROREDS, DOCSIFSIGQCORRECTEDS, DOCSIFSIGQUNCORRECTABLES, DOCSIFSIGQ

          -------------------------------------------------------------

Top SQL using literals           DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                  No data exists for this section of the report.
          -------------------------------------------------------------

Top Sessions                    DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> '# Samples Active' shows the number of ASH samples in which the session
      was found waiting for that particular event. The percentage shown
      in this column is calculated with respect to wall clock time
      and not total database activity.
-> 'XIDs' shows the number of distinct transaction IDs sampled in ASH
      when the session was waiting for that particular event
-> For sessions running Parallel Queries, this section will NOT aggregate
      the PQ slave activity into the session issuing the PQ. Refer to
      the 'Top Sessions running PQs' section for such statistics.

   Sid, Serial# % Activity Event                             % Event
--------------- ---------- ------------------------------ ----------
User                 Program                          # Samples Active     XIDs
-------------------- ------------------------------ ------------------ --------
      126,    5      33.59 db file sequential read             18.62
STARGUS                                                 493/900 [ 55%]        4

                           CPU + Wait for CPU                   5.52
                                                        146/900 [ 16%]        2

                           db file scattered read               5.02
                                                        133/900 [ 15%]        2

      167,    1      21.80 db file parallel write              21.61
SYS                  oracle@tsukuba (DBW0)              572/900 [ 64%]        0

      166,    1      18.47 log file parallel write             18.21
SYS                  oracle@tsukuba (LGWR)              482/900 [ 54%]        0

      133,  763       9.67 db file sequential read              4.80
STARGUS                                                 127/900 [ 14%]        1

                           direct path write temp               1.74
                                                         46/900 [  5%]        0

                           db file scattered read               1.32
                                                         35/900 [  4%]        0

      152,  618       3.10 db file sequential read              1.10
STARGUS                                                  29/900 [  3%]        1

          -------------------------------------------------------------

Top Blocking Sessions            DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> Blocking session activity percentages are calculated with respect to
      waits on enqueues, latches and "buffer busy" only
-> '% Activity' represents the load on the database caused by
      a particular blocking session
-> '# Samples Active' shows the number of ASH samples in which the
      blocking session was found active.
-> 'XIDs' shows the number of distinct transaction IDs sampled in ASH
      when the blocking session was found active.

   Blocking Sid % Activity Event Caused                      % Event
--------------- ---------- ------------------------------ ----------
User                 Program                          # Samples Active     XIDs
-------------------- ------------------------------ ------------------ --------
      166,    1       5.48 log file sync                        5.48
SYS                  oracle@tsukuba (LGWR)              512/900 [ 57%]        0

          -------------------------------------------------------------

Top Sessions running PQs        DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                  No data exists for this section of the report.
          -------------------------------------------------------------

Top DB Objects                   DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> With respect to Application, Cluster, User I/O and buffer busy waits only.

      Object ID % Activity Event                             % Event
--------------- ---------- ------------------------------ ----------
Object Name (Type)                                    Tablespace
----------------------------------------------------- -------------------------
          52652       4.08 db file scattered read               4.08
STARGUS.TMP_CALC_HFC_SLOW_CM_TMP (TABLE)              SYSTEM

          52543       3.32 db file sequential read              3.32
STARGUS.PK_CM_RAWDATA (INDEX)                         TS_STARGUS

          52698       3.21 db file sequential read              2.98
STARGUS.TMP_TOP_SLOW_CM (TABLE)                       SYSTEM

          52542       2.98 db file sequential read              2.98
STARGUS.CM_RAWDATA (TABLE)                            TS_STARGUS

          52699       1.78 db file sequential read              1.78
STARGUS.PK_TMP_TOP_SLOW_CM (INDEX)                    SYSTEM

          -------------------------------------------------------------

Top DB Files                     DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> With respect to Cluster and User I/O events only.

        File ID % Activity Event                             % Event
--------------- ---------- ------------------------------ ----------
File Name                                             Tablespace
----------------------------------------------------- -------------------------
              6      23.31 db file sequential read             19.83
/export/home/oracle10/oradata/cdb10/ts_stargus_01.dbf TS_STARGUS

                           db file scattered read               1.59


                           direct path write temp               1.59


          -------------------------------------------------------------

Top Latches                      DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)

                  No data exists for this section of the report.
          -------------------------------------------------------------

Activity Over Time              DB/Inst: CDB10/cdb10  (Jul 31 17:52 to 18:07)
-> Analysis period is divided into smaller time slots
-> Top 3 events are reported in each of those slots
-> 'Slot Count' shows the number of ASH samples in that slot
-> 'Event Count' shows the number of ASH samples waiting for
   that event in that slot
-> '% Event' is 'Event Count' over all ASH samples in the analysis period

                         Slot                                   Event
Slot Time (Duration)    Count Event                             Count % Event
-------------------- -------- ------------------------------ -------- -------
17:52:21   (1.7 min)      354 log file parallel write              85    3.21
                              db file sequential read              82    3.10
                              db file parallel write               65    2.46
17:54:00   (2.0 min)      254 CPU + Wait for CPU                   73    2.76
                              db file sequential read              46    1.74
                              log file parallel write              44    1.66
17:56:00   (2.0 min)      323 log file parallel write              94    3.55
                              db file parallel write               85    3.21
                              db file sequential read              85    3.21
17:58:00   (2.0 min)      385 log file parallel write             109    4.12
                              db file parallel write               95    3.59
                              db file sequential read              71    2.68
18:00:00   (2.0 min)      470 db file sequential read             169    6.38
                              db file parallel write               66    2.49
                              log file parallel write              61    2.30
18:02:00   (2.0 min)      277 db file sequential read             139    5.25
                              db file parallel write               58    2.19
                              CPU + Wait for CPU                   39    1.47
18:04:00   (2.0 min)      364 db file parallel write              105    3.97
                              db file scattered read               90    3.40
                              db file sequential read              80    3.02
18:06:00   (1.4 min)      220 db file parallel write               67    2.53
                              db file scattered read               44    1.66
                              db file sequential read              42    1.59
          -------------------------------------------------------------

End of Report
}}}
<<<

Active Session History (ASH) performed an emergency flush. This may mean that ASH is undersized. If emergency flushes are a recurring issue, you may consider increasing ASH size by setting the value of _ASH_SIZE to a sufficiently large value. Currently, ASH size is 16777216 bytes. Both ASH size and the total number of emergency flushes since instance startup can be monitored by running the following query:
{{{
select total_size,awr_flush_emergency_count from v$ash_info;
}}}

<<<
''RE: Finding Sessions using AWR Report - ASH'' http://www.evernote.com/shard/s48/sh/733fa2e6-4feb-45cf-ac1a-18a679d9bce5/d6f5a6382d71007a633bc30d0a225db6
When slicing and dicing ASH data, having the correct sample math and granularity matters!

<<showtoc>>

! 1st example - CPU usage across container databases (CDB)
!! second granularity 
change to second granularity and apply the formula below
{{{
count(1)
}}}
[img(100%,100%)[https://i.imgur.com/Awjjz6o.png]]

!! minute granularity
change to minute granularity and apply the formula below
{{{
(count(1)*10)/60
}}}
[img(100%,100%)[https://i.imgur.com/KZ1IImy.png]]
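
For context on the sample math (a minimal sketch, assuming the standard ASH sampling rates): V$ACTIVE_SESSION_HISTORY samples every 1 second, while DBA_HIST_ACTIVE_SESS_HISTORY keeps every 10th sample, so each AWR ASH row represents roughly 10 seconds of DB time. That is where the *10 comes from, and /60 converts the per-minute sample count to average active sessions.
{{{
-- a minimal sketch: AAS per minute from the AWR ASH view,
-- assuming each DBA_HIST ASH row represents ~10s of DB time
select to_char(trunc(sample_time,'MI'),'MM/DD HH24:MI') time_min,
       round(count(*) * 10 / 60, 2) aas
from   dba_hist_active_sess_history
where  sample_time > sysdate - 1/24
group by trunc(sample_time,'MI')
order by 1;
}}}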


! 2nd example - CPU usage across instances
* This is the consolidated view of CPU and Scheduler wait class of all instances
[img(100%,100%)[https://i.imgur.com/UTWySm3.png]]
* The data is filtered by CPU and Scheduler
[img(40%,40%)[ https://i.imgur.com/nVKkG1P.png]]
* Filtering on the peak July 29 period: if we change to second granularity, you can see that the aggregation is incorrect when the minute-granularity math is applied
[img(100%,100%)[https://i.imgur.com/qLQa8Zs.png]]
* Changing it back to count(1) with Second granularity shows the correct range of AAS CPU usage
[img(100%,100%)[https://i.imgur.com/xPP1kgh.png]]


<<showtoc>> 


! ASH granularity, SQL_EXEC_START - peoplesoft job troubleshooting
<<<
* SQL trace would be more granular and definitive for chasing outlier elap/exec performance (particularly the < 1 sec elapsed times)
* SQL Monitoring is another way but with limitations (space, threshold, etc.) https://sqlmaria.com/2017/08/01/getting-the-most-out-of-oracle-sql-monitor/
* ASH is another way but you lose the granularity (especially the < 1 sec elapsed times), although sample_time and sql_exec_start can give you the general wall clock info on when a particular SQL started and ended (more on this below)
<<<

!! 1) ASH granularity
An example is SQL_ID 0fhpmaba4znqy, which is executed thousands of times with .000x sec response time per execution (PHV 2970305186)
{{{
SYS@FMSSTG:PS122STG1 AS SYSDBA> @sql_id
Enter value for sql_id: 0fhpmaba4znqy
SQL_TEXT
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
UPDATE PS_RECV_LOAD_T15 SET RECEIVER_ID = ( SELECT DISTINCT RECEIVER_ID FROM PS_
1 row selected.
BEGINTIM INSTANCE    PLANHASH    EXECDELTA    ROWSDELTA  BUFFERGETSDELTA DISKREADSDELTA        IOWAITDELTA     CPUTIMEDELTA ELAPSEDTIMEDELTA ELAPSEDEXECDELTA    SNAP_ID
-------- -------- ----------- ------------ ------------ ---------------- -------------- ------------------ ---------------- ---------------- ---------------- ----------
02-07 15        1   986626382          110          110      235,624,455              7              3,674    1,367,154,117    1,370,452,852        12.458662       5632   W
.
.
02-08 00        1   986626382          188          188      400,969,410             16              8,354    2,292,949,823    2,298,467,546        12.225891       5641   W
 
02-08 15        1  3862886561       13,946       13,946       42,043,961            949            354,164      322,228,251      332,017,906          .023807       5656
.
.
>>02-11 22        3  2970305186       15,999       15,999        1,055,855            761            371,071        8,703,310        9,964,815          .000286       5691    B   
 
}}}


The ash_elap.sql output using the dba_hist_active_sess_history view shows 1 exec and 0 for the avg, min, max elapsed
{{{
DBA_HIST_ACTIVE_SESSION_HISTORY - ash_elap exec (start to end) avg min max
------------------------------------------------------------------------
SQL_ID          SQL_PLAN_HASH_VALUE        COUNT(*)        AVG        MIN        MAX
--------------- ------------------------ ---------- ---------- ---------- ----------
0fhpmaba4znqy   2970305186                        1          0          0          0
}}}

Then using v$active_session_history, the exec count goes from 1 to 9, showing .56 avg, 0 min, 1 max elapsed
{{{
ACTIVE_SESSION_HISTORY - ash_elap exec avg min max
------------------------------------------------------------------------
0fhpmaba4znqy   2970305186                        9        .56          0          1
}}}
SQL tracing this would show far more than 9 execs and even lower per-execution elapsed times.
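
For reference, here's a minimal sketch of the wall-clock-per-execution idea (my assumption of what the ash_elap scripts compute, not the exact scripts): group the ASH samples by SQL_EXEC_ID and measure from SQL_EXEC_START to the last sample of each execution.
{{{
-- a minimal sketch: approximate elapsed seconds per execution from ASH
-- (executions shorter than the 1-second sample interval will mostly be missed)
select sql_id, sql_plan_hash_value, count(*) execs,
       round(avg(elap),2) avg_elap, min(elap) min_elap, max(elap) max_elap
from (
  select sql_id, sql_plan_hash_value, sql_exec_id,
         round((cast(max(sample_time) as date) - min(sql_exec_start))*86400) elap
  from   v$active_session_history
  where  sql_id = '&sql_id'
  and    sql_exec_id is not null
  group by sql_id, sql_plan_hash_value, sql_exec_id, sql_exec_start
)
group by sql_id, sql_plan_hash_value;
}}}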
 

!! 2) ASH SAMPLE_TIME and SQL_EXEC_START visualized

* sql_exec_start can give you the general wall clock info on when a particular SQL started and ended; below is the same Peoplesoft workload. Here we are looking at the job-level view of performance.
* So the main job that they say executes 21K times with millisecond-level per-execution response time actually runs for 2 hours overall. The job is called RSTR_RCVLOAD.

[img(60%,60%)[https://i.imgur.com/eNi3znY.png]]

* Below is the same highlighted 2 hours, only here the time period spans 1 month. David Kurtz’s PS360 (https://github.com/davidkurtz/ps360) generated this graph (Process Scheduler Process Map).

[img(100%,100%)[https://i.imgur.com/zNPyygJ.png]]

* As for PHV 2970305186 above (ASH granularity): SQL_ID 0fhpmaba4znqy, compared to the 2-hour end-to-end job run time, is the tiny graph on its own axis (2nd row; it looks like it runs for just a few seconds end to end).
* The others highlighted below it are the rest of the SQL_IDs of RSTR_RCVLOAD

click here for full size image https://i.imgur.com/pJOs8DR.png
[img(100%,100%)[https://i.imgur.com/pJOs8DR.png]]

From the same ASH data, the graph below is the breakdown of the 2-hour time series of RSTR_RCVLOAD above (Process Scheduler Process Map section of PS360).
* The process started 10-FEB-19 10.06.05.000000 PM and ended 10-FEB-19 11.54.55.000000 PM based on sample_time and sql_exec_start.
* The graph is sliced by SQL TYPE and Plan Hash Value, then colored by SQL_ID.

<<<
* The red annotated font marks the Plan Hash Values with multiple SQL_IDs. There are 9 of them that run for at least 30 minutes.
* The black annotated font marks the Plan Hash Values with single SQL_IDs. There are 5 of them.
<<<

click here for full size image https://i.imgur.com/QU47vcy.png
[img(100%,100%)[https://i.imgur.com/QU47vcy.png]]

* All these SQLs are on the CPU event (all green), and the 2 hours execute in a serial manner, using 1 CPU on 1 node.

click here for full size image https://i.imgur.com/foKJJOb.png
[img(100%,100%)[https://i.imgur.com/foKJJOb.png]]

* Below are the red and black plan hash values mentioned above
* The green color is the PHV 2970305186 (0fhpmaba4znqy mentioned in ASH granularity) which is also the tiny blip on the time series graph above 

[img(40%,40%)[https://i.imgur.com/thYqZcA.png]]
[img(40%,40%)[https://i.imgur.com/zCBgHbq.png]]

* In summary, when we looked at it from the job level we uncovered more tuning opportunities, because we can clearly see which SQL_IDs and plan_hash_values are eating up the 2-hour end-to-end elapsed time. This workload is a batch job, so this approach works well.
* The wall clock mattered more than exact millisecond per-execution granularity.
 

! the scripts used - ash dump and ash_elap 

* This ASH dump script was used to generate the time series breakdown of the 2-hour end-to-end elapsed time:
https://raw.githubusercontent.com/karlarao/pull_dump_and_explore_ash/master/ash/0_gvash_to_csv_12c.sql

* ash_elap scripts are used to generate the avg,min,max elapsed/exec
<<<
* ash_elap.sql  - get wall clock time, the filter is SQL_ID 
** https://raw.githubusercontent.com/karlarao/scripts/master/performance/ash_elap.sql
* ash_elap2.sql  - get wall clock time, the filter is “where run_time_sec < &run_time_sec”, so you can just pass 0 and it will output everything
** https://raw.githubusercontent.com/karlarao/scripts/master/performance/ash_elap2.sql
* ash_elap_user.sql  - get wall clock time, the filter is user_id from dba_users. Here you can change the user_id filter to ACTION, MODULE, or PROGRAM
** https://raw.githubusercontent.com/karlarao/scripts/master/performance/ash_elap_user.sql
<<<

If you have multiple MODULEs or PROGRAMs and you want to expose them in the group by, you can do that just like I did below (a sketch follows the image):
[img(100%,100%)[https://i.imgur.com/dZHZOGz.png]]
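
Here's a minimal sketch of that idea (an assumed shape, not the exact ash_elap_user.sql): add MODULE and PROGRAM to the select list and the group by so the rollup is split per module/program.
{{{
-- a minimal sketch: expose MODULE and PROGRAM in the ASH rollup
select module, program, sql_id,
       count(distinct sql_exec_id) execs,
       count(*) samples
from   dba_hist_active_sess_history
where  sample_time > sysdate - 1
group by module, program, sql_id
order by samples desc;
}}}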

<<<
Then if you want to detail on that SQL_ID, use  planx Y <sql_id>
https://raw.githubusercontent.com/karlarao/scripts/master/performance/planx.sql
<<<



{{{
https://mail.google.com/mail/u/0/#search/tanel+ash+tpt-oracle/FMfcgxwLtGsWjXhhlwQrvQDJVXGkqPMQ
https://github.com/tanelpoder/tpt-oracle/blob/master/ash/devent_hist.sql
https://raw.githubusercontent.com/tanelpoder/tpt-oracle/master/ash/devent_hist.sql


--parameter1:
direct*
cell*
^(direct|cell|log|db)

--parameter2:
1=1

edit the date filters accordingly 


}}}
{{{
If you see the time waited for IOs go up, but you're not trying to do more I/O (same amount of data & workload and exec plans haven't changed), you can report the individual I/O latencies to see if your I/O is just slower this time (due to other activity in the storage subsystem).

You can even estimate wait event counts in different latency buckets using ASH data (more granularity and flexibility compared to AWR).

https://github.com/tanelpoder/tpt-oracle/blob/master/ash/devent_hist.sql

SQL> @ash/devent_hist db.file.*read 1=1 "TIMESTAMP'2020-12-10 00:00:00'" "TIMESTAMP'2020-12-10 23:00:00'"

                                   Wait time    Num ASH   Estimated    Estimated    % Event  Estimated
Wait Event                        bucket ms+    Samples Total Waits    Total Sec       Time  Time Graph  
---------------------------- --------------- ---------- ----------- ------------ ---------- ------------ 
db file parallel read                    < 1          7     31592.4        315.9        8.1 |#         | 
                                         < 2          6      4044.5         80.9        2.1 |          | 
                                         < 4          5      1878.6         75.1        1.9 |          | 
                                         < 8          9      1407.2        112.6        2.9 |          | 
                                        < 16         19      1572.1        251.5        6.5 |#         | 
                                        < 32         36      1607.3        514.3       13.2 |#         | 
                                        < 64         35       809.8        518.3       13.3 |#         | 
                                       < 128         52       530.8        679.5       17.5 |##        | 
                                       < 256         44       284.6        728.7       18.7 |##        | 
                                       < 512         28          88        450.7       11.6 |#         | 
                                      < 1024          2         3.7         38.1          1 |          | 
                                      < 4096          1           1         41.0        1.1 |          | 
                                      < 8192          1           1         81.9        2.1 |          | 

db file scattered read                   < 1          4     17209.3        172.1       71.1 |#######   | 
                                         < 2          1       935.5         18.7        7.7 |#         | 
                                         < 4          3        1021         40.8       16.9 |##        | 
                                         < 8          1       131.7         10.5        4.3 |          | 

db file sequential read                  < 1        276   1354178.7     13,541.8        7.7 |#         | 
                                         < 2        221    150962.7      3,019.3        1.7 |          | 
                                         < 4        515    174345.3      6,973.8          4 |          | 
                                         < 8       1453    250309.8     20,024.8       11.4 |#         | 
                                        < 16       1974    181327.4     29,012.4       16.6 |##        | 
                                        < 32       2302    101718.4     32,549.9       18.6 |##        | 
                                        < 64       2122     49502.4     31,681.5       18.1 |##        | 
                                       < 128       1068     12998.8     16,638.4        9.5 |#         | 
                                       < 256        312      1855.9      4,751.1        2.7 |          | 
                                       < 512        260       763.7      3,909.9        2.2 |          | 
                                      < 1024         13        24.7        253.2         .1 |          | 
                                      < 4096         59          59      2,416.6        1.4 |          | 
                                      < 8192        127         127     10,403.8        5.9 |#         | 


This way, any potential latency outliers won't get hidden in averages.


}}}
I use the following scripts for quick troubleshooting:
{{{
sqlmon.sql
snapper.sql

report_sql_monitor_html.sql
report_sql_monitor.sql

find_sql_awr.sql
dplan.sql
dplan_awr.sql
awr_plan_change.sql

px.sql
}}}




http://oracledoug.com/serendipity/index.php?/archives/1614-Network-Events-in-ASH.html

Other articles by Doug about ASH:

Alternative Pictures Demo
That Pictures demo in full
Time Matters: Throughput vs. Response Time - Part 2
Diagnosing Locking Problems using ASH/LogMiner – The End
Diagnosing Locking Problems using ASH/LogMiner – Part 9
Diagnosing Locking Problems using ASH/LogMiner – Part 8
Diagnosing Locking Problems using ASH/LogMiner – Part 7
Diagnosing Locking Problems using ASH – Part 6
Diagnosing Locking Problems using ASH – Part 5
Diagnosing Locking Problems using ASH – Part 4
http://www.oaktable.net/content/ukoug-2011-ash-outliers
http://oracledoug.com/serendipity/index.php?/archives/1669-UKOUG-2011-Ash-Outliers.html#comments
http://oracledoug.com/ASHoutliers3c.sql
http://oracledoug.com/adaptive_thresholds_faq.pdf
http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:1525205200346930663   <-- JB and Graham comments



{{{
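-- SQL_IDs that allocated more than 50 GB of temp in the last 2 days,
-- reporting each one's peak temp usage in GB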
select sql_id,max(TEMP_SPACE_ALLOCATED)/(1024*1024*1024) gig 
from DBA_HIST_ACTIVE_SESS_HISTORY 
where 
sample_time > sysdate-2 and 
TEMP_SPACE_ALLOCATED > (50*1024*1024*1024) 
group by sql_id order by sql_id;
}}}


http://www.bobbydurrettdba.com/2012/05/10/finding-query-with-high-temp-space-usage-using-ash-views/






Visualizing Active Session History (ASH) Data With R http://structureddata.org/2011/12/20/visualizing-active-session-history-ash-data-with-r/
It also talks about TIME_WAITED being in microseconds, and that only the last sample of a wait is fixed up; the other samples will have TIME_WAITED=0.
thanks to John Beresniewicz for this info. http://dboptimizer.com/2011/07/20/oracle-time-units-in-v-views/
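
A minimal sketch of the practical implication (my usage assumption, not from the linked posts): when aggregating TIME_WAITED from ASH, use only the fixed-up samples.
{{{
-- only the final sample of a wait carries the fixed-up TIME_WAITED (in microseconds);
-- the in-flight samples have TIME_WAITED = 0, so exclude them from the average
select event, count(*) samples,
       round(avg(time_waited)/1000,3) avg_wait_ms
from   v$active_session_history
where  session_state = 'WAITING'
and    time_waited > 0
group by event
order by samples desc;
}}}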
Dave Abercrombie's research on AAS and ASH:
http://aberdave.blogspot.com/search?updated-max=2011-04-02T08:09:00-07:00&max-results=7
http://dboptimizer.com/2011/10/20/tuning-blog-entries/
{{{
ASH

SQL execution times from ASH – using ASH to see SQL execution times and execution time variations
AAS on AWR – my favorite ASH query that shows AAS wait classes as an ASCII graph
CPU Wait vs CPU Usage
Simulated ASH 2.1
AWR

Wait  Metrics vs v$system_event
Statistic Metrics versus v$sysstat
I/O latency fluctuations
I/O wait histograms
Redo over weeks
AWR mining
Diff’ing AWR reports
Importing AWR repositories
Redo

LGWR redo write times (log file parallel write)
Ratio of Redo bytes to Datablocks writes
Etc

V$ view time units S,CS,MS,US
Parsing 10046 traces
SQL

Display Cursor Explained – what are all those display_cursor options and what exactly is the data
VST – visual SQL tuning

VST in DB Optimizer 3.0
VST with 100 Tables !
SQL Joins using sets
Visualizing SQL Queries
VST – product design
View expansion with VST
Outer Joins Graphically
}}}
* ASM Mind Map
http://jarneil.wordpress.com/2008/08/26/the-asm-mind-map/

* v$asm_disk
http://www.rachelp.nl/index_kb.php?menu=articles&actie=show&id=10



http://www.freelists.org/post/oracle-l/ASM-on-SAN,5
http://www.freelists.org/post/oracle-l/ASM-and-EMC-PowerPath
ASM and shared pool sizing - http://www.evernote.com/shard/s48/sh/c3535415-30fd-42fa-885a-85df36616e6e/288c13d20095240c8882594afed99e8b

Bug 11684854 : ASM ORA-4031 IN LARGE POOL FROM CREATE DISKGROUP
14292825: DEFAULT MEMORY PARAMETER VALUES FOR 11.2 ASM INSTANCES LOW
https://twiki.cern.ch/twiki/bin/view/PDBService/ASM_Internals <-- GOOD STUFF
https://twiki.cern.ch/twiki/bin/view/PDBService/HAandPerf
{{{
ASM considerations on SinglePath and MultiPath across versions (OCR,VD,DATA)

In general you gotta have a facility/mechanism for:

	* multipathing -> persistent naming -> ASM


on 10gR2 and 11gR1, for your OCR and VD you must use one of the following:

	* clustered filesystem (OCFS2) or NFS
	* raw devices (RHEL4) or udev (RHEL5)


on 11gR2, for your OCR and VD you must use one of the following:

	* clustered filesystem or NFS
	* ASM (mirrored across at least 3 disks)

-----------------------
Single Path 
-----------------------

If you have ASMlib you will go with this setup:

	* "ASMlib -> ASM"


If you have neither ASMlib nor PowerPath you will go with this setup:

	* 10gR2 and 11g:
		* raw devices -> ASM
		* udev -> ASM

	* 11gR2:
		* udev -> ASM

-----------------------
Multi Path
-----------------------

If you have ASMlib and PowerPath you will go with this setup:

	* 10gR2, 11g, 11gR2:
		* "PowerPath -> ASMlib -> ASM"


If you have neither ASMlib nor PowerPath you will go with this setup:

	* 10gR2:
		* "dm multipath (dev mapper) -> raw devices -> ASM"

	* 11g and 11gR2:
		* "dm multipath (dev mapper) -> ASM"


you can also be flexible and go with:

	* "dm multipath (dev mapper) -> ASMlib -> ASM"

-----------------------
Notes
-----------------------

kpartx confuses me..just do this.. 
- assign and share luns on all nodes.
- fdisk the luns and update partition table on all nodes
- configure multipath
- use /dev/mapper/<mpath_alias>
- create asm storage using above devices
https://forums.oracle.com/forums/thread.jspa?threadID=2288213
}}}
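
To make those steps concrete, a rough sketch of the dm-multipath route; the WWID, alias, and disk names below are made up, and the oracleasm step applies only if you layer ASMlib on top:
{{{
# /etc/multipath.conf -- give the LUN a stable alias on all nodes
multipaths {
    multipath {
        wwid   360a98000486e2f34645a2f34567a4d30
        alias  asmdata01
    }
}

# partition once from one node, then make the other nodes reread it
fdisk /dev/mapper/asmdata01
partprobe /dev/mapper/asmdata01

# either stamp the partition for ASMlib...
/etc/init.d/oracleasm createdisk DATA01 /dev/mapper/asmdata01p1
# ...or point asm_diskstring straight at /dev/mapper/asmdata01p1
}}}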

http://www.evernote.com/shard/s48/sh/0012dbf5-6648-4792-84ff-825a363f68d3/a744de57fdb99349388e21cdd9c6059a
http://www.pythian.com/news/1078/oracle-11g-asm-diskgroup-compatibility/

http://www.freelists.org/post/oracle-l/Does-ocssdbin-started-from-11gASM-home-support-diskgroups-mounted-by-10g-ASM-instance,5
{{{
Hi Sanjeev,

I'd like to clear some info first.

1st)... the ocssd.bin

the CSS daemon is created when:
- you use ASM as storage
- you install Clusterware (RAC; Clusterware has its own separate
home already)

  For Oracle Real Application Clusters installations, the CSS daemon
is installed with Oracle Clusterware in a separate Oracle home
directory (also called the Clusterware home directory). For
single-node installations, the CSS daemon is installed in and runs
from the same Oracle home as Oracle Database.

You could identify the Oracle home directory being used to run the CSS daemon:

# cat /etc/oracle/ocr.loc

The output from this command is similar to the following:

[oracle@dbrocaix01 bin]$ cat /etc/oracle/ocr.loc
ocrconfig_loc=/oracle/app/oracle/product/10.2.0/asm_1/cdata/localhost/local.ocr
local_only=TRUE

The ocrconfig_loc parameter specifies the location of the Oracle
Cluster Registry (OCR) used by the CSS daemon. The path up to the
cdata directory is the Oracle home directory where the CSS daemon is
running (/oracle/app/oracle/product/10.2.0/asm_1 in this example). To
confirm, you can grep for the CSS daemon and see that it's running from
that home:

[oracle@dbrocaix01 bin]$ ps -ef | grep -i css
oracle    4950     1  0 04:23 ?        00:00:00
/oracle/app/oracle/product/10.2.0/asm_1/bin/ocssd.bin
oracle    5806  5609  0 04:26 pts/1    00:00:00 grep -i css

Note:
If the value of the local_only parameter is FALSE, Oracle Clusterware
is installed on this system.


2nd)... ASM and Database compatibility

I'll supply you with some references..

Note 337737.1 Oracle Clusterware - ASM - Database Version Compatibility
Note 363254.1 Applying one-off Oracle Clusterware patches in a mixed
version home environment

and Chapter 4, page 116-120 of Oracle ASM (under the hood & practical
deployment guide) 10g & 11g

In the book it says that there are two types of compatibility settings
between ASM and the RDBMS:
  1) instance-level software compatibility settings
        - the COMPATIBLE parameter (mine is 10.2.0); this defines what
software features are available to the instance. Setting the
COMPATIBLE parameter in the ASM instance to 10.1 will not let you
use the 11g ASM new features (variable extents, etc.)

  2) diskgroup-specific settings
        - COMPATIBLE.ASM and COMPATIBLE.RDBMS which are persistently stored
in the ASM diskgroup metadata..these compatibility settings are
specific to a diskgroup and control which
          attributes are available to the ASM diskgroup and which are
available to the database.
        - COMPATIBLE.RDBMS, which defaults to 10.1 in 11g, is the minimum
COMPATIBLE version setting of a database that can mount the
diskgroup. Once you advance it, it cannot be reversed.
        - COMPATIBLE.ASM controls the persistent format of the on-disk
ASM metadata structures. The ASM compatibility defaults to 10.1 in 11g
and must always be greater than or equal to the RDBMS compatibility
level. Once you advance it, it cannot be reversed.

    The combination of the compatibility parameter setting of the
database, the software version of the database, and the RDBMS
compatibility setting of a diskgroup determines whether a database
instance is permitted to mount a given diskgroup. The compatibility
setting also determines which ASM features are available for a
diskgroup.

    An ASM instance can support different RDBMS clients with different
compatibility settings, as long as the database COMPATIBLE init.ora
parameter setting of each database instance is greater than or equal
to the RDBMS compatibility of all diskgroups.

    You could also read more here...
http://download.oracle.com/docs/cd/B28359_01/server.111/b31107/asmdiskgrps.htm#CHDDIGBJ




So the following info will give us some background on your environment

cat /etc/oracle/ocr.loc
ps -ef | grep -i css
cat /etc/oratab
select name, group_number, value from v$asm_attribute order by 2;
select db_name, status,software_version,compatible_version from v$asm_client;
select name,compatibility, database_compatibility from v$asm_diskgroup;



I hope I did not confuse you with all of this info.





- Karl Arao
http://karlarao.wordpress.com
}}}
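
As a follow-up to the diskgroup-specific settings described above, they can be checked and advanced with plain SQL. A minimal sketch, assuming a diskgroup named DATA; remember that advancing compatibility is irreversible:
{{{
-- current settings
select name, compatibility, database_compatibility from v$asm_diskgroup;

-- advance them (11g syntax)
alter diskgroup data set attribute 'compatible.asm'   = '11.2';
alter diskgroup data set attribute 'compatible.rdbms' = '11.2';
}}}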
http://blog.ronnyegner-consulting.de/2009/10/27/asm-resilvering-or-how-to-recovery-your-asm-in-crash-scenarios/
http://www.ardentperf.com/2010/07/15/asm-mirroring-no-hot-spare-disk/
http://asmsupportguy.blogspot.com/2010/05/how-to-map-asmlib-disk-to-device-name.html
http://uhesse.wordpress.com/2010/12/01/database-migration-to-asm-with-short-downtime/
{{{
backup as copy database format '+DATA';
switch database to copy;
}}}
''Migrating Databases from non-ASM to ASM and Vice-Versa'' http://www.idevelopment.info/data/Oracle/DBA_tips/Automatic_Storage_Management/ASM_33.shtml
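
The two RMAN commands above are the heart of it; a minimal sketch of the whole flow, assuming the target diskgroup is +DATA (controlfile, redo, and temp relocation are covered in the linked articles):
{{{
# copy the datafiles into ASM while the database is still open
RMAN> backup as copy database format '+DATA';
# short outage: point the controlfile at the copies and catch up
RMAN> shutdown immediate;
RMAN> startup mount;
RMAN> switch database to copy;
RMAN> recover database;
RMAN> alter database open;
}}}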


-- ''OCFS to ASM''
''How to Migrate an Existing RAC database to ASM'' http://www.colestock.com/blogs/2008/05/how-to-migrate-existing-rac-database-to.html
http://oss.oracle.com/pipermail/oracleasm-users/2009-June/000094.html
{{{

[root@uscdcmix30 ~]# time dd if=/dev/VgCDCMIX30_App/app_new bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    0m39.045s
user    0m0.083s
sys     0m6.467s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX03 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m1.784s
user    0m0.084s
sys     0m14.914s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX04 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m17.748s
user    0m0.069s
sys     0m13.409s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX03 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m2.702s
user    0m0.090s
sys     0m16.682s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX04 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m19.698s
user    0m0.079s
sys     0m16.774s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX03 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m2.037s
user    0m0.085s
sys     0m14.386s

[root@uscdcmix30 ~]# time dd if=/dev/oracleasm/disks/DGMIX03 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m2.822s
user    0m0.052s
sys     0m11.703s

[root@uscdcmix30 ~]# oracleasm listdisks
DGCRM01
DGCRM02
DGCRM03
DGCRM04
DGCRM05
DGCRM06
DGMIX01
DGMIX02
DGMIX03
DGMIX04

[root@uscdcmix30 ~]# oracleasm deletedisk DGMIX03
Clearing disk header: done
Dropping disk: done

[root@uscdcmix30 ~]# time dd if=/dev/emcpowers1 bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    1m0.955s
user    0m0.044s
sys     0m11.446s

[root@uscdcmix30 ~]# pvcreate /dev/emcpowers1
  Physical volume "/dev/emcpowers1" successfully created

[root@uscdcmix30 ~]# vgcreate VgTemp /dev/emcpowers1
  /dev/emcpowero: open failed: No such device
  /dev/emcpowero1: open failed: No such device
  Volume group "VgTemp" successfully created

[root@uscdcmix30 ~]# vgs
  VG              #PV #LV #SN Attr   VSize   VFree
  VgCDCCRM30_App    1   1   0 wz--n- 101.14G      0
  VgCDCCRM30_Arch   1   1   0 wz--n- 101.14G      0
  VgCDCMIX30_App    1   1   0 wz--n- 100.00G      0
  VgTemp            1   0   0 wz--n- 100.00G 100.00G
  vg00              1   7   0 wz--n- 136.50G  66.19G
  vg01              1   1   0 wz--n- 101.14G      0
  vg03              2   1   0 wz--n- 505.74G 101.14G

[root@uscdcmix30 ~]# lvcreate -L 102396 -n TestLV VgTemp
  Logical volume "TestLV" created

[root@uscdcmix30 ~]# time dd if=/dev/VgTemp/TestLV bs=8192 count=655360 of=/dev/null
655360+0 records in
655360+0 records out
real    0m34.027s
user    0m0.056s
sys     0m4.698s
}}}
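
One caveat worth adding to this dd comparison (my note, not from the thread): repeated reads through a block device can be served from the page cache, which may flatter some of the numbers. GNU dd can bypass the cache for a fairer disk-to-disk test; the device name below is a placeholder:
{{{
# iflag=direct bypasses the page cache on reads
time dd if=/dev/mapper/asmdata01 bs=8192 count=655360 of=/dev/null iflag=direct
}}}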
How to create an ASM filesystem in Oracle 11gR2
http://translate.google.com/translate?sl=auto&tl=en&u=http://www.dbform.com/html/2010/1255.html
OTN ASM
http://www.oracle.com/technology/tech/linux/asmlib/index.html
http://www.oracle.com/technology/tech/linux/asmlib/raw_migration.html
http://www.oracle.com/technology/tech/linux/asmlib/multipath.html
http://www.oracle.com/technology/tech/linux/asmlib/persistence.html

ASM using ASMLib and Raw Devices
http://www.oracle-base.com/articles/10g/ASMUsingASMLibAndRawDevices.php
Raw devices with release 11: Note ID 754305.1
# However, the Unbreakable Enterprise Kernel is optional,
and Oracle Linux continues to include a Red Hat compatible kernel, compiled directly from Red Hat
Enterprise Linux source code, for customers who require strict RHEL compatibility. Oracle also
recommends the Unbreakable Enterprise Kernel when running third party software and third party
hardware.


# Performance improvements

latencytop?


# ASMlib and virtualization modules in the kernel

Updated Kernel Modules
The Unbreakable Enterprise Kernel includes both OCFS2 1.6 and Oracle ASMLib, the kernel
driver for Oracle's Automatic Storage Management feature.  There is no need to install separate RPMs
to implement these kernel features.  Also, the Unbreakable Enterprise Kernel can be run directly on 
bare metal or as a virtual guest on Oracle VM, both in hardware virtualized (HVM) and paravirtualized (PV) mode, as it implements the paravirt_ops instruction set and includes the xen_netfront and 
xen_blkfront drivers.

# Unbreakable Enterprise Kernel itself already includes ocfs2 and oracleasm



Questions:
1) Since it will be a new kernel, what if I have a third-party module like EMC PowerPath? I'm sure I'll have to reinstall it once I use the new
kernel. But, once reinstalled, will it be certified with EMC (or vice versa)?
2) Also, Oracle says, if you have to maintain compatibility with a third-party module, you can use the old vanilla kernel. Question is, since the
ASMlib module is already integrated in the Unbreakable Kernel, once I use the non-Unbreakable kernel do they still have the old-style RPM
(oracleasm-`uname -r` - kernel driver) for the ASMlib module?
OR 
if it's not supported at all and I'm 

ASMLIB has three components.
1. oracleasm-support - user space shell scripts
2. oracleasmlib - user space library (closed source)	
3. oracleasm-`uname -r` - kernel driver		<-- kernel dependent

###############################################################################################

-- from this thread http://www.freelists.org/post/oracle-l/ASM-and-EMC-PowerPath

! 
! The Storage Report (ASM -> Linux -> EMC)
Below is a sample of the storage info that you should have; it clearly shows the relationship between the Oracle layer (ASM), Linux, and the SAN storage. This info is very useful for you and the storage engineer, so you would know which is which in case of catastrophic problems.

Very useful for storage activities like: 
* SAN Migration
* Add/Remove disk
* Powerpath upgrade
* Kernel upgrade

//(Note: The images below might be too big at your current screen resolution; to get a better view, just right-click and download the images, or ''double click'' on this page to see the full paths of the images.)//

[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TaFARe_nqfI/AAAAAAAABOI/jXAshWxpfw8/powerpath1.png]]
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TaFARSJDiLI/AAAAAAAABOE/SoDU7jrddUQ/powerpath2.png]]
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TaFAUat0HcI/AAAAAAAABOM/qe5qoeF3wTw/powerpath3.png]]

! 
! What info do you need to produce the report? 
''You need the following:''
* AWR time series output (my scripts http://karlarao.wordpress.com/scripts-resources)
* output of the command ''powermt display dev=all'' (run as root)
* RDA
* SAR (because I just love looking at the performance data)
* sysreport (run as root)

''You have to collect'' this on each server / instance and properly arrange the files per folder, so you won't have a hard time documenting the bits of info you need in the Excel sheet.
[img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TaFK4F_SexI/AAAAAAAABPQ/VjSQm0_uUUM/powerdevices4.png]]

''Below is the drill down on each folder''; the data you'll see is from two separate RAC clusters, each with its own SAN storage. The project I'm working on here is to migrate/consolidate them into a single, newly purchased SAN storage, so I need to collect all this data to help plan the activity and mitigate the risks/issues. Collecting the performance data is also a must, to verify that the I/O requirements of the databases can be handled by the new SAN. On this project I have verified that the capacity exceeds the current requirements.
* AWR
<<<
per server
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TaFDmcOnhBI/AAAAAAAABOk/lxo8_tbLqX4/powerdevices5-awr.png]]
> per instance
> [img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TaFKbFENV1I/AAAAAAAABPE/nUCFo_HOjHY/powerdevices5-awr2.png]]
>> awr output on each instance
>> [img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TaFKbZB6nnI/AAAAAAAABPI/8MVhDN5Q_rI/powerdevices5-awr3.png]]
<<<
* powermt display dev=all
<<<
[img[picturename| https://lh5.googleusercontent.com/_F2x5WXOJ6Q8/TaFDmG2XayI/AAAAAAAABOg/0Lo8QoDbm_A/powerdevices6-powermt.png]]
<<<
* RDA
<<<
[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TaFDmsNLT2I/AAAAAAAABOs/sHa-KUryYFo/powerdevices7-rda.png]]
<<<
* SAR
<<<
[img[picturename| https://lh6.googleusercontent.com/_F2x5WXOJ6Q8/TaFDmcOnhBI/AAAAAAAABOk/lxo8_tbLqX4/powerdevices5-awr.png]]
> sample output
> [img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TaFDmyLyKAI/AAAAAAAABOw/f5UgyqVu09I/powerdevices8-sar.png]]
<<<
* sysreport 
<<<
[img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TaFDm08TuYI/AAAAAAAABO0/bZzSwZX6Vqc/powerdevices8-sysreport.png]]
> sample output
> [img[picturename| https://lh4.googleusercontent.com/_F2x5WXOJ6Q8/TaFDnQpFRHI/AAAAAAAABO4/SiDKdZE9kOY/powerdevices8-sysreport2.png]]
<<<

! 
! Putting it all together 
On the Excel sheet, you have to fill in the following sections 
* From RDA
** ASM Library Information
** ASM Library Disk Information
** Disk Partitions
** Operating System Setup->Operating System Packages
** Operating System Setup->Disk Drives->Disk Mounts
** Oracle Cluster Registry (Cluster -> Cluster Information -> ocrcheck)
* From ''powermt'' command
** Logical Device IDs and names
* From sysreport
** raw devices (possible for OCR and Voting Disk)
** fstab (check for OCFS2 mounts)
* Double check from OS commands
** Voting Disk (''crsctl query css votedisk'')
** ls -l /dev/	
** /etc/init.d/oracleasm querydisk <device_name>

''Below is the output from the various sources...'' this shows how to map an ''ASM disk'' to a particular ''EMC power device'' (follow the ''RED ARROWS''). You have to do this for all ASM disks, and the method is the same for accounting for the ''raw devices'', ''OCFS2'', and ''OCR'' mappings to their respective EMC power devices.

To produce the correlated report of ASM, Linux, and the SAN storage, follow the ''BLUE ARROWS''.

You will also see below that with this proper accounting, correlated across the ASM, Linux, and EMC storage levels, you will never go wrong: you have definitive information that you can share with the EMC storage engineer, which they can also ''double check''. That way both the ''DBAs and the storage guys will be on the same page''.

[img[picturename| https://lh3.googleusercontent.com/_F2x5WXOJ6Q8/TaFvF-b2-tI/AAAAAAAABPk/cdnpkq6yLeg/emcreport10.png]]

Notice above that the ''emcpowerr'' and ''emcpowers'' devices have no allocations. So what does that mean? Can we allocate these devices now? ... mm ''no!'' ... ''stop''... ''move back''... ''think''...

I will do the following: 
* Run this query to check if it's recognized as ''FOREIGN'' or ''CANDIDATE''
{{{
set lines 400
col name format a20
col label format a20
col path format a20
col redundancy format a20
select a.group_number, a.name, a.header_status, a.mount_status, a.state, a.total_mb, a.free_mb, a.label, path, a.redundancy
from v$asm_disk a
order by 1,2;

GROUP_NUMBER NAME                 HEADER_STATU STATE      TOTAL_MB    FREE_MB LABEL                PATH                 REDUNDANCY
------------ -------------------- ------------ -------- ---------- ---------- -------------------- -------------------- --------------------
}}}
* I've taken some precautions in my data gathering by checking the ''fstab'' and the ''raw devices config'', and found out that ''there are no pointers to the two devices''.
** I have obsessive-compulsive tendencies here, just to make sure these devices are not being used by some service. If these EMC power devices were accidentally used for something else, let's say as a filesystem, Oracle would still allow you to do the ADD/DROP operation on them, wiping out all the data on those devices!
* Another thing I would do is validate with my storage engineer or the in-house DBA whether these disks were provisioned for the purpose of expanding the disk group.

If everything is okay, I can safely say they are candidate disks for expanding the space of my current disk group and go ahead with the activity.


! 
! From Matt Zito (former EMC solutions architect)
<<<
Hey guys,

I haven't gotten this email address straightened out on Oracle-L yet, but I figured I'd drop you a note, and you could forward it on to the list if you cared to.

The doc you read is correct: PowerPath will cheerfully work with any of the devices you send IOs to, because the kernel driver intercepts requests for all devices and routes them through itself before dishing them down the appropriate path.

However, setting scandisks to the emcpower devices has the administrative benefit of making sure the disks don't show up twice. Even if ASM picks the first of the two devices, it will still be load-balanced successfully.

Thanks,
Matt Zito
(former EMC solutions architect)
<<<

https://blogs.oracle.com/XPSONHA/entry/asr_snmp_on_exadata

Oracle Auto Service Request (ASR) [ID 1185493.1]
''ASR Documentation'' http://www.oracle.com/technetwork/server-storage/asr/documentation/index.html?ssSourceSiteId=ocomen


What DBAs Need to Know - Data Guard 11g ASYNC Redo Transport
http://www.oracle.com/technetwork/database/features/availability/316925-maa-otn-173423.pdf

http://www.oracle.com/technetwork/database/availability/maa-gg-performance-1969630.pdf
http://www.oracle.com/technetwork/database/availability/sync-2437177.pdf
http://www.oracle.com/au/products/database/maa-wp-10gr2-dataguardnetworkbestpr-134557.pdf
''10-minute AWR snap interval: 144 samples in a day, 1008 samples in 7 days, 4032 samples in 4 weeks, 52560 samples in 1 year''
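
The arithmetic, if you want to sanity-check it for other intervals:
{{{
select 24*60/10 samples_per_day,        -- 144
       7*24*60/10 samples_per_week,     -- 1008
       28*24*60/10 samples_per_4weeks,  -- 4032
       365*24*60/10 samples_per_year    -- 52560
from dual;
}}}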

''Good chapter on HOW to read AWR reports'' http://filezone.orapub.com/FF_Book/v4Chap9.pdf

{{{
Understand each field of AWR (Doc ID 884046.1)
The AWR report is broken into multiple parts.

1) Instance information: This provides the instance name and number, the snapshot IDs, the total time the report covers, and the database time during that elapsed time.

Elapsed time = end snapshot time - start snapshot time
Database time = work done by the database during the elapsed time (both CPU and I/O add to database time). If this is much less than the elapsed time, the database is mostly idle. Database time does not include time spent by the background processes.

2) Cache Sizes: This shows the size of each SGA region after AMM has changed them. This information can be compared to the original init.ora parameters at the end of the AWR report.

3) Load Profile: This important section shows key rates expressed in units of per second and per transaction. This is very important for understanding how the instance is behaving. It has to be compared to a baseline report to understand the expected load on the machine and the delta during bad times.

4) Instance Efficiency Percentages (Target 100%): This section shows how close the vital ratios are: buffer cache hit, library cache hit, parses, etc. These can be taken as indicators, but should not be a cause of worry if they are low, as the ratios could be low or high because of database activity and not because of a real performance problem. Hence these are not standalone statistics and should be read only for a high-level view.

5) Shared Pool Statistics: This summarizes changes to the shared pool during the snapshot period.

6) Top 5 Timed Events: This is the section most relevant for analysis. It shows what percentage of database time each wait event accounted for. Up to 9i, this was the way to backtrack the total database time for the report, as there was no database time column in 9i.

7) RAC Statistics: This part is seen only in the case of a cluster instance. It provides important indications of the average time taken for block transfers, block receives, messages, etc., which can point to performance problems in the cluster instead of the database.

8) Wait Class: This depicts which wait class was the area of contention and where we need to focus: network, concurrency, cluster, I/O, application, configuration, etc.

9) Wait Events Statistics Section: This section shows a breakdown of the main wait events in the database, including foreground and background database wait events, as well as time model, operating system, service, and wait class statistics.

10) Wait Events: This AWR report section provides more detailed wait event information for foreground user processes, which includes the Top 5 wait events and many other wait events that occurred during the snapshot interval.

11) Background Wait Events: This section covers the background process wait events.

12) Time Model Statistics: Time model statistics report how database processing time is spent. This section contains detailed timing information on particular components participating in database processing. It also gives timing information about the background processes, which is not included in database time.

13) Operating System Statistics: This section is important from the OS server contention point of view. It shows the main external resources including I/O, CPU, memory, and network usage.

14) Service Statistics: The service statistics section gives information about services and their load in terms of CPU seconds, I/O seconds, number of buffer reads, etc.

15) SQL Section: This section displays top SQL, ordered by important SQL execution metrics.

a) SQL Ordered by Elapsed Time: Includes SQL statements that took significant execution time during processing.

b) SQL Ordered by CPU Time: Includes SQL statements that consumed significant CPU time during their processing.

c) SQL Ordered by Gets: These SQLs performed a high number of logical reads while retrieving data.

d) SQL Ordered by Reads: These SQLs performed a high number of physical disk reads while retrieving data.

e) SQL Ordered by Parse Calls: These SQLs experienced a high number of reparsing operations.

f) SQL Ordered by Sharable Memory: Includes SQL statement cursors that consumed a large amount of SGA shared pool memory.

g) SQL Ordered by Version Count: These SQLs have a large number of versions in the shared pool for some reason.

16) Instance Activity Stats: This section contains statistical information describing how the database operated during the snapshot period.

17) I/O Section: This section shows the all-important I/O activity. It provides the time it took to make one I/O (Av Rd(ms)) and the I/O rate per second (Av Rd/s). This should be compared to the baseline to see whether the I/O rate has always been like this or there is a deviation now.

18) Advisory Section: This section shows details of the advisories for the buffer, shared pool, PGA and Java pool.

19) Buffer Wait Statistics: This important section shows buffer cache wait statistics.

20) Enqueue Activity: This important section shows how enqueues operate in the database. Enqueues are special internal structures which provide concurrent access to various database resources.

21) Undo Segment Summary: This section gives a summary of how undo segments are used by the database.
Undo Segment Stats: This section shows detailed history information about undo segment activity.

22) Latch Activity: This section shows details about latch statistics. Latches are a lightweight serialization mechanism used to single-thread access to internal Oracle structures. A latch should be judged by its sleeps: the sleepiest latch is the latch under contention, not the latch with the most requests. Hence run through the sleep breakdown part of this section to find the latch under the highest contention.

23) Segment Section: This portion is important for guessing in which segment, and which segment type, the contention could be. Tally this with the Top 5 wait events.

Segments by Logical Reads: Includes top segments which experienced a high number of logical reads.

Segments by Physical Reads: Includes top segments which experienced a high number of physical disk reads.

Segments by Buffer Busy Waits: These segments have the largest number of buffer waits caused by their data blocks.

Segments by Row Lock Waits: Includes segments that had a large number of row locks on their data.

Segments by ITL Waits: Includes segments that had large contention for Interested Transaction List (ITL) slots. ITL contention can be reduced by increasing the INITRANS storage parameter of the table.

24) Dictionary Cache Stats: This section exposes details about how the data dictionary cache is operating.

25) Library Cache Activity: Includes library cache statistics, which are needed in case you see library cache waits in the Top 5 wait events. You might want to see whether reloads/invalidations are causing the contention or there is some other issue with the library cache.

26) SGA Memory Summary: This tells us the difference in the respective pools between the start and end of the report. This could be an indicator for setting a minimum value for each pool when sga_target is being used.

27) init.ora Parameters: This section shows the original init.ora parameters for the instance during the snapshot period.

There would be more sections in the case of RAC setups to provide details.
}}}


''A SQL Performance History from AWR''
http://www.toadworld.com/BLOGS/tabid/67/EntryId/125/A-SQL-Performance-History-from-AWR.aspx  <-- it should also be possible to graph this using my awr_topsqlx.sql

''miTrend AWR Report / StatsPack Gathering Procedures Instructions'' https://community.emc.com/docs/DOC-13949 <-- EMC's tool with a nice PPT and paper; also talks about "burst" periods for IO sizing, RAID-adjusted IOPS, and EFD IOPS

http://pavandba.files.wordpress.com/2009/11/owp_awr_historical_analysis.pdf







{{{
set arraysize 5000

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

ttitle center 'AWR Top SQL Report' skip 2
set pagesize 50000
set linesize 300

col snap_id     format 99999            heading "Snap|ID"
col tm          format a15              heading "Snap|Start|Time"
col inst        format 90               heading "i|n|s|t|#"
col dur         format 990.00          heading "Snap|Dur|(m)"
col sql_id      format a15              heading "SQL|ID"
col phv         format 99999999999      heading "Plan|Hash|Value"
col module      format a20              heading "Module"
col elap        format 999990.00        heading "Elapsed|Time|(s)"
col elapexec    format 999990.00        heading "Elapsed|Time|per exec|(s)"
col cput        format 999990.00        heading "CPU|Time|(s)"
col iowait      format 999990.00        heading "IO|Wait|(s)"
col bget        format 99999999990      heading "LIO"
col dskr        format 99999999990      heading "PIO"
col rowp        format 99999999990      heading "Rows"
col exec        format 9999990          heading "Exec"
col prsc        format 999999990        heading "Parse|Count"
col pxexec      format 9999990          heading "PX|Exec"
col pctdbt      format 990              heading "DB Time|%"
col aas         format 990.00           heading "A|A|S"
col time_rank   format 90               heading "Time|Rank"
col sql_text    format a40              heading "SQL|Text"

     select *
       from (
             select
                  sqt.snap_id snap_id,
                  TO_CHAR(sqt.tm,'MM/DD/YY HH24:MI') tm,
                  sqt.inst inst,
                  sqt.dur dur,
                  sqt.sql_id sql_id,   
                  sqt.phv phv,                
                  to_clob(decode(sqt.module, null, null, sqt.module)) module,
                  nvl((sqt.elap), to_number(null)) elap,
                  nvl((sqt.elapexec), to_number(null)) elapexec,
                  nvl((sqt.cput), to_number(null)) cput,
                  sqt.iowait iowait,
                  sqt.bget bget, 
                  sqt.dskr dskr, 
                  sqt.rowp rowp,
                  sqt.exec exec, 
                  sqt.prsc prsc, 
                  sqt.pxexec pxexec,
                  sqt.aas aas,
                  sqt.time_rank time_rank
                  , nvl(st.sql_text, to_clob('** SQL Text Not Available **')) sql_text     -- PUT/REMOVE COMMENT TO HIDE/SHOW THE SQL_TEXT
             from        (
                          select snap_id, tm, inst, dur, sql_id, phv, module, elap, elapexec, cput, iowait, bget, dskr, rowp, exec, prsc, pxexec, aas, time_rank
                          from
                                             (
                                               select 
                                                      s0.snap_id snap_id,
                                                      s0.END_INTERVAL_TIME tm,
                                                      s0.instance_number inst,
                                                      round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                                      e.sql_id sql_id, 
                                                      e.plan_hash_value phv, 
                                                      max(e.module) module,
                                                      sum(e.elapsed_time_delta)/1000000 elap,
                                                      decode((sum(e.executions_delta)), 0, to_number(null), ((sum(e.elapsed_time_delta)) / (sum(e.executions_delta)) / 1000000)) elapexec,
                                                      sum(e.cpu_time_delta)/1000000     cput, 
                                                      sum(e.iowait_delta)/1000000 iowait,
                                                      sum(e.buffer_gets_delta) bget,
                                                      sum(e.disk_reads_delta) dskr, 
                                                      sum(e.rows_processed_delta) rowp,
                                                      sum(e.executions_delta)   exec,
                                                      sum(e.parse_calls_delta) prsc,
                                                      sum(px_servers_execs_delta) pxexec,
                                                      (sum(e.elapsed_time_delta)/1000000) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                            + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                            + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                            + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60) aas,
                                                      DENSE_RANK() OVER (
                                                      PARTITION BY s0.snap_id ORDER BY e.elapsed_time_delta DESC) time_rank
                                               from 
                                                   dba_hist_snapshot s0,
                                                   dba_hist_snapshot s1,
                                                   dba_hist_sqlstat e
                                                   where 
                                                    s0.dbid                   = &_dbid                -- CHANGE THE DBID HERE!
                                                    AND s1.dbid               = s0.dbid
                                                    and e.dbid                = s0.dbid                                                
                                                    AND s0.instance_number    = &_instancenumber      -- CHANGE THE INSTANCE_NUMBER HERE!
                                                    AND s1.instance_number    = s0.instance_number
                                                    and e.instance_number     = s0.instance_number                                                 
                                                    AND s1.snap_id            = s0.snap_id + 1
                                                    and e.snap_id             = s0.snap_id + 1                                              
                                               group by 
                                                    s0.snap_id, s0.END_INTERVAL_TIME, s0.instance_number, e.sql_id, e.plan_hash_value, e.elapsed_time_delta, s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME
                                             )
                          where 
                          time_rank <= 5                                     -- GET TOP 5 SQL ACROSS SNAP_IDs... YOU CAN ALTER THIS TO HAVE MORE DATA POINTS
                         ) 
                        sqt,
                        dba_hist_sqltext st 
             where st.sql_id(+)             = sqt.sql_id
             and st.dbid(+)                 = &_dbid
-- AND TO_CHAR(tm,'D') >= 1                                                  -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(tm,'D') <= 7
-- AND TO_CHAR(tm,'HH24MI') >= 0900                                          -- Hour
-- AND TO_CHAR(tm,'HH24MI') <= 1800
-- AND tm >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND tm <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
-- AND snap_id in (338,339)
-- AND snap_id >= 335 and snap_id <= 339
-- AND snap_id = 3172
-- and sqt.sql_id = 'dj3n91vxsyaq5'
-- AND lower(st.sql_text) like 'select%'
-- AND lower(st.sql_text) like 'insert%'
-- AND lower(st.sql_text) like 'update%'
-- AND lower(st.sql_text) like 'merge%'
-- AND pxexec > 0
-- AND aas > .5
             order by 
             -- snap_id                             -- TO GET SQL OUTPUT ACROSS SNAP_IDs SEQUENTIALLY AND ASC
             nvl(sqt.elap, -1) desc, sqt.sql_id     -- TO GET SQL OUTPUT BY ELAPSED TIME
             )
where rownum <= 20
;
}}}
{{{
set arraysize 5000

COLUMN blocksize NEW_VALUE _blocksize NOPRINT
select distinct block_size blocksize from v$datafile;

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR CPU and IO Workload Report' skip 2
set pagesize 50000
set linesize 550

col instname    format a15              heading instname        -- instname
col hostname    format a30              heading hostname        -- hostname
col tm          format a17              heading tm              -- "tm"
col id          format 99999            heading id              -- "snapid"
col inst        format 90               heading inst            -- "inst"
col dur         format 999990.00        heading dur             -- "dur"
col cpu         format 90               heading cpu             -- "cpu"
col cap         format 9999990.00       heading cap             -- "capacity"
col dbt         format 999990.00        heading dbt             -- "DBTime"
col dbc         format 99990.00         heading dbc             -- "DBcpu"
col bgc         format 99990.00         heading bgc             -- "BGcpu"
col rman        format 9990.00          heading rman            -- "RMANcpu"
col aas         format 990.0            heading aas             -- "AAS"
col totora      format 9999990.00       heading totora          -- "TotalOracleCPU"
col busy        format 9999990.00       heading busy            -- "BusyTime"
col load        format 990.00           heading load            -- "OSLoad"
col totos       format 9999990.00       heading totos           -- "TotalOSCPU"
col mem         format 999990.00        heading mem             -- "PhysicalMemorymb"
col IORs        format 9990.000         heading IORs            -- "IOPsr"
col IOWs        format 9990.000         heading IOWs            -- "IOPsw"
col IORedo      format 9990.000         heading IORedo          -- "IOPsredo"
col IORmbs      format 9990.000         heading IORmbs          -- "IOrmbs"
col IOWmbs      format 9990.000         heading IOWmbs          -- "IOwmbs"
col redosizesec format 9990.000         heading redosizesec     -- "Redombs"
col logons      format 990              heading logons          -- "Sess"
col logone      format 990              heading logone          -- "SessEnd"
col exsraw      format 99990.000        heading exsraw          -- "Execrawdelta"
col exs         format 9990.000         heading exs             -- "Execs"
col ucs         format 9990.000         heading ucs             -- "UserCalls"
col ucoms       format 9990.000         heading ucoms           -- "Commit"
col urs         format 9990.000         heading urs             -- "Rollback"
col oracpupct   format 990              heading oracpupct       -- "OracleCPUPct"
col rmancpupct  format 990              heading rmancpupct      -- "RMANCPUPct"
col oscpupct    format 990              heading oscpupct        -- "OSCPUPct"
col oscpuusr    format 990              heading oscpuusr        -- "USRPct"
col oscpusys    format 990              heading oscpusys        -- "SYSPct"
col oscpuio     format 990              heading oscpuio         -- "IOPct"

SELECT * FROM
( 
  SELECT trim('&_instname') instname, 
         trim('&_dbid') db_id, 
         trim('&_hostname') hostname, 
          s0.snap_id id,
         TO_CHAR(s0.END_INTERVAL_TIME,'MM/DD/YY HH24:MI:SS') tm,
         s0.instance_number inst,
  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
  s3t1.value AS cpu,
  (round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value cap,
  (s5t1.value - s5t0.value) / 1000000 as dbt,
  (s6t1.value - s6t0.value) / 1000000 as dbc,
  (s7t1.value - s7t0.value) / 1000000 as bgc,
  round(DECODE(s8t1.value,null,'null',(s8t1.value - s8t0.value) / 1000000),2) as rman,
  ((s5t1.value - s5t0.value) / 1000000)/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,
  round(((s6t1.value - s6t0.value) / 1000000) + ((s7t1.value - s7t0.value) / 1000000),2) totora,
  -- s1t1.value - s1t0.value AS busy,  -- this is osstat BUSY_TIME
  round(s2t1.value,2) AS load,
  (s1t1.value - s1t0.value)/100 AS totos,
  ((round(((s6t1.value - s6t0.value) / 1000000) + ((s7t1.value - s7t0.value) / 1000000),2)) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as oracpupct,
  ((round(DECODE(s8t1.value,null,'null',(s8t1.value - s8t0.value) / 1000000),2)) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as rmancpupct,
  (((s1t1.value - s1t0.value)/100) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as oscpupct,
  (((s17t1.value - s17t0.value)/100) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as oscpuusr,
  (((s18t1.value - s18t0.value)/100) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as oscpusys,
  (((s19t1.value - s19t0.value)/100) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2)*60)*s3t1.value))*100 as oscpuio
FROM dba_hist_snapshot s0,
  dba_hist_snapshot s1,
  dba_hist_osstat s1t0,         -- BUSY_TIME
  dba_hist_osstat s1t1,
  dba_hist_osstat s17t0,        -- USER_TIME
  dba_hist_osstat s17t1,
  dba_hist_osstat s18t0,        -- SYS_TIME
  dba_hist_osstat s18t1,
  dba_hist_osstat s19t0,        -- IOWAIT_TIME
  dba_hist_osstat s19t1,
  dba_hist_osstat s2t1,         -- osstat just get the end value
  dba_hist_osstat s3t1,         -- osstat just get the end value
  dba_hist_sys_time_model s5t0,
  dba_hist_sys_time_model s5t1,
  dba_hist_sys_time_model s6t0,
  dba_hist_sys_time_model s6t1,
  dba_hist_sys_time_model s7t0,
  dba_hist_sys_time_model s7t1,
  dba_hist_sys_time_model s8t0,
  dba_hist_sys_time_model s8t1
WHERE s0.dbid            = &_dbid    -- CHANGE THE DBID HERE!
AND s1.dbid              = s0.dbid
AND s1t0.dbid            = s0.dbid
AND s1t1.dbid            = s0.dbid
AND s2t1.dbid            = s0.dbid
AND s3t1.dbid            = s0.dbid
AND s5t0.dbid            = s0.dbid
AND s5t1.dbid            = s0.dbid
AND s6t0.dbid            = s0.dbid
AND s6t1.dbid            = s0.dbid
AND s7t0.dbid            = s0.dbid
AND s7t1.dbid            = s0.dbid
AND s8t0.dbid            = s0.dbid
AND s8t1.dbid            = s0.dbid
AND s17t0.dbid            = s0.dbid
AND s17t1.dbid            = s0.dbid
AND s18t0.dbid            = s0.dbid
AND s18t1.dbid            = s0.dbid
AND s19t0.dbid            = s0.dbid
AND s19t1.dbid            = s0.dbid
AND s0.instance_number   = &_instancenumber   -- CHANGE THE INSTANCE_NUMBER HERE!
AND s1.instance_number   = s0.instance_number
AND s1t0.instance_number = s0.instance_number
AND s1t1.instance_number = s0.instance_number
AND s2t1.instance_number = s0.instance_number
AND s3t1.instance_number = s0.instance_number
AND s5t0.instance_number = s0.instance_number
AND s5t1.instance_number = s0.instance_number
AND s6t0.instance_number = s0.instance_number
AND s6t1.instance_number = s0.instance_number
AND s7t0.instance_number = s0.instance_number
AND s7t1.instance_number = s0.instance_number
AND s8t0.instance_number = s0.instance_number
AND s8t1.instance_number = s0.instance_number
AND s17t0.instance_number = s0.instance_number
AND s17t1.instance_number = s0.instance_number
AND s18t0.instance_number = s0.instance_number
AND s18t1.instance_number = s0.instance_number
AND s19t0.instance_number = s0.instance_number
AND s19t1.instance_number = s0.instance_number
AND s1.snap_id           = s0.snap_id + 1
AND s1t0.snap_id         = s0.snap_id
AND s1t1.snap_id         = s0.snap_id + 1
AND s2t1.snap_id         = s0.snap_id + 1
AND s3t1.snap_id         = s0.snap_id + 1
AND s5t0.snap_id         = s0.snap_id
AND s5t1.snap_id         = s0.snap_id + 1
AND s6t0.snap_id         = s0.snap_id
AND s6t1.snap_id         = s0.snap_id + 1
AND s7t0.snap_id         = s0.snap_id
AND s7t1.snap_id         = s0.snap_id + 1
AND s8t0.snap_id         = s0.snap_id
AND s8t1.snap_id         = s0.snap_id + 1
AND s17t0.snap_id         = s0.snap_id
AND s17t1.snap_id         = s0.snap_id + 1
AND s18t0.snap_id         = s0.snap_id
AND s18t1.snap_id         = s0.snap_id + 1
AND s19t0.snap_id         = s0.snap_id
AND s19t1.snap_id         = s0.snap_id + 1
AND s1t0.stat_name       = 'BUSY_TIME'
AND s1t1.stat_name       = s1t0.stat_name
AND s17t0.stat_name       = 'USER_TIME'
AND s17t1.stat_name       = s17t0.stat_name
AND s18t0.stat_name       = 'SYS_TIME'
AND s18t1.stat_name       = s18t0.stat_name
AND s19t0.stat_name       = 'IOWAIT_TIME'
AND s19t1.stat_name       = s19t0.stat_name
AND s2t1.stat_name       = 'LOAD'
AND s3t1.stat_name       = 'NUM_CPUS'
AND s5t0.stat_name       = 'DB time'
AND s5t1.stat_name       = s5t0.stat_name
AND s6t0.stat_name       = 'DB CPU'
AND s6t1.stat_name       = s6t0.stat_name
AND s7t0.stat_name       = 'background cpu time'
AND s7t1.stat_name       = s7t0.stat_name
AND s8t0.stat_name       = 'RMAN cpu time (backup/restore)'
AND s8t1.stat_name       = s8t0.stat_name
)
-- WHERE 
-- tm > to_char(sysdate - 30, 'MM/DD/YY HH24:MI')
-- id  in (select snap_id from (select * from r2toolkit.r2_regression_data union all select * from r2toolkit.r2_outlier_data))
-- id in (336)
-- aas > 1
-- oracpupct > 50
-- oscpupct > 50
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') >= 1     -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') <= 7
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') >= 0900     -- Hour
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') <= 1800
-- AND s0.END_INTERVAL_TIME >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND s0.END_INTERVAL_TIME <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
ORDER BY id ASC;
}}}
{{{

-- TO VIEW DB INFO
set lines 300
select dbid,instance_number,version,db_name,instance_name, host_name 
from dba_hist_database_instance 
where instance_number = (select instance_number from v$instance)
and rownum < 2;

-- TO VIEW RETENTION INFORMATION
select * from dba_hist_wr_control;
set lines 300
select b.name, a.DBID,
   ((TRUNC(SYSDATE) + a.SNAP_INTERVAL - TRUNC(SYSDATE)) * 86400)/60 AS SNAP_INTERVAL_MINS,
   ((TRUNC(SYSDATE) + a.RETENTION - TRUNC(SYSDATE)) * 86400)/60 AS RETENTION_MINS,
   ((TRUNC(SYSDATE) + a.RETENTION - TRUNC(SYSDATE)) * 86400)/60/60/24 AS RETENTION_DAYS,
   TOPNSQL
from dba_hist_wr_control a, v$database b
where a.dbid = b.dbid;

/*
-- SET RETENTION PERIOD TO 30 DAYS (UNIT IS MINUTES)
execute dbms_workload_repository.modify_snapshot_settings (interval => 30, retention => 43200);
-- SET RETENTION PERIOD TO 365 DAYS (UNIT IS MINUTES)
exec dbms_workload_repository.modify_snapshot_settings (interval => 30, retention => 525600);

-- Create Snapshot
BEGIN
  DBMS_WORKLOAD_REPOSITORY.CREATE_SNAPSHOT ();
END;
/
*/

-- AWR get recent snapshot
set lines 300
select * from 
(SELECT s0.instance_number, s0.snap_id, 
  to_char(s0.startup_time,'yyyy-mon-dd hh24:mi:ss') startup_time,
  TO_CHAR(s0.END_INTERVAL_TIME,'yyyy-mon-dd hh24:mi:ss') snap_start,
  TO_CHAR(s1.END_INTERVAL_TIME,'yyyy-mon-dd hh24:mi:ss') snap_end,
  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) ela_min
FROM dba_hist_snapshot s0,
  dba_hist_snapshot s1
WHERE s1.snap_id           = s0.snap_id + 1
ORDER BY snap_id DESC)
where rownum < 11;

-- MIN/MAX for dba_hist tables
select count(*) snap_count from dba_hist_snapshot;
select min(snap_id) min_snap, max(snap_id) max_snap from dba_hist_snapshot;
select to_char(min(end_interval_time),'yyyy-mon-dd hh24:mi:ss') min_date, to_char(max(end_interval_time),'yyyy-mon-dd hh24:mi:ss') max_date from dba_hist_snapshot;


/*
-- STATSPACK get recent snapshot
	  set lines 300
	  col what format a30
	  set numformat 999999999999999
	  alter session set NLS_DATE_FORMAT='DD-MON-YYYY HH24:MI:SS';
	  select sysdate from dual;
	  select instance, what, job, next_date, next_sec from user_jobs;
	  select * from 
	      (select 
		    s0.instance_number, s0.snap_id snap_id, s0.startup_time,
		    to_char(s0.snap_time,'YYYY-Mon-DD HH24:MI:SS') snap_start,
		    to_char(s1.snap_time,'YYYY-Mon-DD HH24:MI:SS') snap_end,
		    (s1.snap_time-s0.snap_time)*24*60 ela_min,
		    s0.dbid, s0.snap_level, s0.snapshot_exec_time_s 
	      from	stats$snapshot s0,
		      stats$snapshot s1
	      where s1.snap_id  = s0.snap_id + 1
	      ORDER BY s0.snap_id DESC)
	      where rownum < 11;


-- MIN/MAX for statspack tables
col min_dt format a14
col max_dt format a14
col host_name format a12
select	
	t1.dbid, 
	t1.instance_number,
        t2.version,
        t2.db_name,
	t2.instance_name,
        t2.host_name,
	min(to_char(t1.snap_time,'YYYY-Mon-DD HH24')) min_dt,
	max(to_char(t1.snap_time,'YYYY-Mon-DD HH24')) max_dt
from	stats$snapshot t1,
        stats$database_instance t2
where   t1.dbid = t2.dbid
  and   t1.snap_id = t2.snap_id
group by
	t1.dbid, 
	t1.instance_number,
        t2.version,
        t2.db_name,
	t2.instance_name,
        t2.host_name
/
*/


/*
AWR reports:

Running Workload Repository Reports Using Enterprise Manager
Running Workload Repository Compare Period Report Using Enterprise Manager
Running Workload Repository Reports Using SQL Scripts



Running Workload Repository Reports Using SQL Scripts
-----------------------------------------------------

You can view AWR reports by running the following SQL scripts:

The @?/rdbms/admin/awrrpt.sql SQL script generates an HTML or text report that displays statistics for a range of snapshot IDs.

The awrrpti.sql SQL script generates an HTML or text report that displays statistics for a range of snapshot IDs on 
a specified database and instance.

The awrsqrpt.sql SQL script generates an HTML or text report that displays statistics of a particular SQL statement for a 
range of snapshot IDs. Run this report to inspect or debug the performance of a SQL statement.

The awrsqrpi.sql SQL script generates an HTML or text report that displays statistics of a particular SQL statement for a 
range of snapshot IDs on a specified database and instance. Run this report to inspect or debug the performance of a SQL statement on a specific database and instance.

The awrddrpt.sql SQL script generates an HTML or text report that compares detailed performance attributes and configuration 
settings between two selected time periods.

The awrddrpi.sql SQL script generates an HTML or text report that compares detailed performance attributes and configuration 
settings between two selected time periods on a specific database and instance.

awrsqrpt.sql -- SQL performance report
*/
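
-- As an alternative to the interactive awrrpt.sql prompts, the same report can be
-- pulled straight from SQL via the DBMS_WORKLOAD_REPOSITORY report functions.
-- A minimal sketch; the dbid/instance/snap_id values are placeholders - change them:
select output from table(
  dbms_workload_repository.awr_report_text(
    1111111111,   -- dbid             (select dbid from v$database)
    1,            -- instance_number  (select instance_number from v$instance)
    100,          -- begin snap_id, placeholder
    101));        -- end snap_id, placeholder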

}}}
{{{
set arraysize 5000

COLUMN blocksize NEW_VALUE _blocksize NOPRINT
select distinct block_size blocksize from v$datafile;

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR IO Workload Report' skip 2
set pagesize 50000
set linesize 550

col instname       format a15              heading instname            -- instname
col hostname       format a30              heading hostname            -- hostname
col tm             format a17              heading tm                  -- "tm"
col id             format 99999            heading id                  -- "snapid"
col inst           format 90               heading inst                -- "inst"
col dur            format 999990.00        heading dur                 -- "dur"
col cpu            format 90               heading cpu                 -- "cpu"
col cap            format 9999990.00       heading cap                 -- "capacity"
col dbt            format 999990.00        heading dbt                 -- "DBTime"
col dbc            format 99990.00         heading dbc                 -- "DBcpu"
col bgc            format 99990.00         heading bgc                 -- "BGcpu"
col rman           format 9990.00          heading rman                -- "RMANcpu"
col aas            format 990.0            heading aas                 -- "AAS"
col totora         format 9999990.00       heading totora              -- "TotalOracleCPU"
col busy           format 9999990.00       heading busy                -- "BusyTime"
col load           format 990.00           heading load                -- "OSLoad"
col totos          format 9999990.00       heading totos               -- "TotalOSCPU"
col mem            format 999990.00        heading mem                 -- "PhysicalMemorymb"
col IORs           format 99990.000        heading IORs                -- "IOPsr"
col IOWs           format 99990.000        heading IOWs                -- "IOPsw"
col IORedo         format 99990.000        heading IORedo              -- "IOPsredo"
col IORmbs         format 99990.000        heading IORmbs              -- "IOrmbs"
col IOWmbs         format 99990.000        heading IOWmbs              -- "IOwmbs"
col redosizesec    format 99990.000        heading redosizesec         -- "Redombs"
col logons         format 990              heading logons              -- "Sess"
col logone         format 990              heading logone              -- "SessEnd"
col exsraw         format 99990.000        heading exsraw              -- "Execrawdelta"
col exs            format 9990.000         heading exs                 -- "Execs"
col oracpupct      format 990              heading oracpupct           -- "OracleCPUPct"
col rmancpupct     format 990              heading rmancpupct          -- "RMANCPUPct"
col oscpupct       format 990              heading oscpupct            -- "OSCPUPct"
col oscpuusr       format 990              heading oscpuusr            -- "USRPct"
col oscpusys       format 990              heading oscpusys            -- "SYSPct"
col oscpuio        format 990              heading oscpuio             -- "IOPct"
col SIORs          format 99990.000        heading SIORs               -- "IOPsSingleBlockr"
col MIORs          format 99990.000        heading MIORs               -- "IOPsMultiBlockr"
col TIORmbs        format 99990.000        heading TIORmbs             -- "Readmbs"
col SIOWs          format 99990.000        heading SIOWs               -- "IOPsSingleBlockw"
col MIOWs          format 99990.000        heading MIOWs               -- "IOPsMultiBlockw"
col TIOWmbs        format 99990.000        heading TIOWmbs             -- "Writembs"
col TIOR           format 99990.000        heading TIOR                -- "TotalIOPsr"
col TIOW           format 99990.000        heading TIOW                -- "TotalIOPsw"
col TIOALL         format 99990.000        heading TIOALL              -- "TotalIOPsALL"
col ALLRmbs        format 99990.000        heading ALLRmbs             -- "TotalReadmbs"
col ALLWmbs        format 99990.000        heading ALLWmbs             -- "TotalWritembs"
col GRANDmbs       format 99990.000        heading GRANDmbs            -- "TotalmbsALL"
col readratio      format 990              heading readratio           -- "ReadRatio"
col writeratio     format 990              heading writeratio          -- "WriteRatio"
col diskiops       format 99990.000        heading diskiops            -- "HWDiskIOPs"
col numdisks       format 99990.000        heading numdisks            -- "HWNumofDisks"
col flashcache     format 990              heading flashcache          -- "FlashCacheHitsPct"
col cellpiob       format 99990.000        heading cellpiob            -- "CellPIOICmbs"
col cellpiobss     format 99990.000        heading cellpiobss          -- "CellPIOICSmartScanmbs"
col cellpiobpreoff format 99990.000        heading cellpiobpreoff      -- "CellPIOpredoffloadmbs"
col cellpiobsi     format 99990.000        heading cellpiobsi          -- "CellPIOstorageindexmbs"
col celliouncomb   format 99990.000        heading celliouncomb        -- "CellIOuncompmbs"
col cellpiobs      format 99990.000        heading cellpiobs           -- "CellPIOsavedfilecreationmbs"
col cellpiobsrman  format 99990.000        heading cellpiobsrman       -- "CellPIOsavedRMANfilerestorembs"

SELECT * FROM
( 
  SELECT trim('&_instname') instname, 
         trim('&_dbid') db_id, 
         trim('&_hostname') hostname, 
         s0.snap_id id,
         TO_CHAR(s0.END_INTERVAL_TIME,'MM/DD/YY HH24:MI:SS') tm,
         s0.instance_number inst,
  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
   (((s20t1.value - s20t0.value) - (s21t1.value - s21t0.value)) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as SIORs,
   ((s21t1.value - s21t0.value) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as MIORs,
   (((s22t1.value - s22t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as TIORmbs,
   (((s23t1.value - s23t0.value) - (s24t1.value - s24t0.value)) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as SIOWs,
   ((s24t1.value - s24t0.value) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as MIOWs,
   (((s25t1.value - s25t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as TIOWmbs,
   ((s13t1.value - s13t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as IORedo, 
   (((s14t1.value - s14t0.value)/1024/1024)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as redosizesec,
    ((s33t1.value - s33t0.value) / NULLIF((s20t1.value - s20t0.value),0))*100 as flashcache,     -- NULLIF guards against a zero-read interval
   (((s26t1.value - s26t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiob,
   (((s31t1.value - s31t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiobss,
   (((s29t1.value - s29t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiobpreoff,
   (((s30t1.value - s30t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiobsi,
   (((s32t1.value - s32t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as celliouncomb,
   (((s27t1.value - s27t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiobs,
   (((s28t1.value - s28t0.value)/1024/1024) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as cellpiobsrman
FROM dba_hist_snapshot s0,
  dba_hist_snapshot s1,
  dba_hist_sysstat s13t0,       -- redo writes, diffed
  dba_hist_sysstat s13t1,
  dba_hist_sysstat s14t0,       -- redo size, diffed
  dba_hist_sysstat s14t1,
  dba_hist_sysstat s20t0,       -- physical read total IO requests, diffed
  dba_hist_sysstat s20t1,
  dba_hist_sysstat s21t0,       -- physical read total multi block requests, diffed
  dba_hist_sysstat s21t1,  
  dba_hist_sysstat s22t0,       -- physical read total bytes, diffed
  dba_hist_sysstat s22t1,  
  dba_hist_sysstat s23t0,       -- physical write total IO requests, diffed
  dba_hist_sysstat s23t1,
  dba_hist_sysstat s24t0,       -- physical write total multi block requests, diffed
  dba_hist_sysstat s24t1,
  dba_hist_sysstat s25t0,       -- physical write total bytes, diffed
  dba_hist_sysstat s25t1,
  dba_hist_sysstat s26t0,       -- cell physical IO interconnect bytes, diffed, cellpiob
  dba_hist_sysstat s26t1,
  dba_hist_sysstat s27t0,       -- cell physical IO bytes saved during optimized file creation, diffed, cellpiobs
  dba_hist_sysstat s27t1,
  dba_hist_sysstat s28t0,       -- cell physical IO bytes saved during optimized RMAN file restore, diffed, cellpiobsrman
  dba_hist_sysstat s28t1,
  dba_hist_sysstat s29t0,       -- cell physical IO bytes eligible for predicate offload, diffed, cellpiobpreoff
  dba_hist_sysstat s29t1,
  dba_hist_sysstat s30t0,       -- cell physical IO bytes saved by storage index, diffed, cellpiobsi
  dba_hist_sysstat s30t1,
  dba_hist_sysstat s31t0,       -- cell physical IO interconnect bytes returned by smart scan, diffed, cellpiobss
  dba_hist_sysstat s31t1,
  dba_hist_sysstat s32t0,       -- cell IO uncompressed bytes, diffed, celliouncomb
  dba_hist_sysstat s32t1,
  dba_hist_sysstat s33t0,       -- cell flash cache read hits
  dba_hist_sysstat s33t1
WHERE s0.dbid            = &_dbid    -- CHANGE THE DBID HERE!
AND s1.dbid              = s0.dbid
AND s13t0.dbid            = s0.dbid
AND s13t1.dbid            = s0.dbid
AND s14t0.dbid            = s0.dbid
AND s14t1.dbid            = s0.dbid
AND s20t0.dbid            = s0.dbid
AND s20t1.dbid            = s0.dbid
AND s21t0.dbid            = s0.dbid
AND s21t1.dbid            = s0.dbid
AND s22t0.dbid            = s0.dbid
AND s22t1.dbid            = s0.dbid
AND s23t0.dbid            = s0.dbid
AND s23t1.dbid            = s0.dbid
AND s24t0.dbid            = s0.dbid
AND s24t1.dbid            = s0.dbid
AND s25t0.dbid            = s0.dbid
AND s25t1.dbid            = s0.dbid
AND s26t0.dbid            = s0.dbid
AND s26t1.dbid            = s0.dbid
AND s27t0.dbid            = s0.dbid
AND s27t1.dbid            = s0.dbid
AND s28t0.dbid            = s0.dbid
AND s28t1.dbid            = s0.dbid
AND s29t0.dbid            = s0.dbid
AND s29t1.dbid            = s0.dbid
AND s30t0.dbid            = s0.dbid
AND s30t1.dbid            = s0.dbid
AND s31t0.dbid            = s0.dbid
AND s31t1.dbid            = s0.dbid
AND s32t0.dbid            = s0.dbid
AND s32t1.dbid            = s0.dbid
AND s33t0.dbid            = s0.dbid
AND s33t1.dbid            = s0.dbid
AND s0.instance_number   = &_instancenumber   -- CHANGE THE INSTANCE_NUMBER HERE!
AND s1.instance_number   = s0.instance_number
AND s13t0.instance_number = s0.instance_number
AND s13t1.instance_number = s0.instance_number
AND s14t0.instance_number = s0.instance_number
AND s14t1.instance_number = s0.instance_number
AND s20t0.instance_number = s0.instance_number
AND s20t1.instance_number = s0.instance_number
AND s21t0.instance_number = s0.instance_number
AND s21t1.instance_number = s0.instance_number
AND s22t0.instance_number = s0.instance_number
AND s22t1.instance_number = s0.instance_number
AND s23t0.instance_number = s0.instance_number
AND s23t1.instance_number = s0.instance_number
AND s24t0.instance_number = s0.instance_number
AND s24t1.instance_number = s0.instance_number
AND s25t0.instance_number = s0.instance_number
AND s25t1.instance_number = s0.instance_number
AND s26t0.instance_number = s0.instance_number
AND s26t1.instance_number = s0.instance_number
AND s27t0.instance_number = s0.instance_number
AND s27t1.instance_number = s0.instance_number
AND s28t0.instance_number = s0.instance_number
AND s28t1.instance_number = s0.instance_number
AND s29t0.instance_number = s0.instance_number
AND s29t1.instance_number = s0.instance_number
AND s30t0.instance_number = s0.instance_number
AND s30t1.instance_number = s0.instance_number
AND s31t0.instance_number = s0.instance_number
AND s31t1.instance_number = s0.instance_number
AND s32t0.instance_number = s0.instance_number
AND s32t1.instance_number = s0.instance_number
AND s33t0.instance_number = s0.instance_number
AND s33t1.instance_number = s0.instance_number
AND s1.snap_id            = s0.snap_id + 1
AND s13t0.snap_id         = s0.snap_id
AND s13t1.snap_id         = s0.snap_id + 1
AND s14t0.snap_id         = s0.snap_id
AND s14t1.snap_id         = s0.snap_id + 1
AND s20t0.snap_id         = s0.snap_id
AND s20t1.snap_id         = s0.snap_id + 1
AND s21t0.snap_id         = s0.snap_id
AND s21t1.snap_id         = s0.snap_id + 1
AND s22t0.snap_id         = s0.snap_id
AND s22t1.snap_id         = s0.snap_id + 1
AND s23t0.snap_id         = s0.snap_id
AND s23t1.snap_id         = s0.snap_id + 1
AND s24t0.snap_id         = s0.snap_id
AND s24t1.snap_id         = s0.snap_id + 1
AND s25t0.snap_id         = s0.snap_id
AND s25t1.snap_id         = s0.snap_id + 1
AND s26t0.snap_id         = s0.snap_id
AND s26t1.snap_id         = s0.snap_id + 1
AND s27t0.snap_id         = s0.snap_id
AND s27t1.snap_id         = s0.snap_id + 1
AND s28t0.snap_id         = s0.snap_id
AND s28t1.snap_id         = s0.snap_id + 1
AND s29t0.snap_id         = s0.snap_id
AND s29t1.snap_id         = s0.snap_id + 1
AND s30t0.snap_id         = s0.snap_id
AND s30t1.snap_id         = s0.snap_id + 1
AND s31t0.snap_id         = s0.snap_id
AND s31t1.snap_id         = s0.snap_id + 1
AND s32t0.snap_id         = s0.snap_id
AND s32t1.snap_id         = s0.snap_id + 1
AND s33t0.snap_id         = s0.snap_id
AND s33t1.snap_id         = s0.snap_id + 1
AND s13t0.stat_name       = 'redo writes'
AND s13t1.stat_name       = s13t0.stat_name
AND s14t0.stat_name       = 'redo size'
AND s14t1.stat_name       = s14t0.stat_name
AND s20t0.stat_name       = 'physical read total IO requests'
AND s20t1.stat_name       = s20t0.stat_name
AND s21t0.stat_name       = 'physical read total multi block requests'
AND s21t1.stat_name       = s21t0.stat_name
AND s22t0.stat_name       = 'physical read total bytes'
AND s22t1.stat_name       = s22t0.stat_name
AND s23t0.stat_name       = 'physical write total IO requests'
AND s23t1.stat_name       = s23t0.stat_name
AND s24t0.stat_name       = 'physical write total multi block requests'
AND s24t1.stat_name       = s24t0.stat_name
AND s25t0.stat_name       = 'physical write total bytes'
AND s25t1.stat_name       = s25t0.stat_name
AND s26t0.stat_name       = 'cell physical IO interconnect bytes'
AND s26t1.stat_name       = s26t0.stat_name
AND s27t0.stat_name       = 'cell physical IO bytes saved during optimized file creation'
AND s27t1.stat_name       = s27t0.stat_name
AND s28t0.stat_name       = 'cell physical IO bytes saved during optimized RMAN file restore'
AND s28t1.stat_name       = s28t0.stat_name
AND s29t0.stat_name       = 'cell physical IO bytes eligible for predicate offload'
AND s29t1.stat_name       = s29t0.stat_name
AND s30t0.stat_name       = 'cell physical IO bytes saved by storage index'
AND s30t1.stat_name       = s30t0.stat_name
AND s31t0.stat_name       = 'cell physical IO interconnect bytes returned by smart scan'
AND s31t1.stat_name       = s31t0.stat_name
AND s32t0.stat_name       = 'cell IO uncompressed bytes'
AND s32t1.stat_name       = s32t0.stat_name
AND s33t0.stat_name       = 'cell flash cache read hits'
AND s33t1.stat_name       = s33t0.stat_name
)
-- WHERE 
-- tm > to_char(sysdate - 30, 'MM/DD/YY HH24:MI')
-- id  in (select snap_id from (select * from r2toolkit.r2_regression_data union all select * from r2toolkit.r2_outlier_data))
-- id in (338)
-- aas > 1
-- oscpuio > 50
-- rmancpupct > 0
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') >= 1     -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') <= 7
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') >= 0900     -- Hour
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') <= 1800
-- AND s0.END_INTERVAL_TIME >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND s0.END_INTERVAL_TIME <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
ORDER BY id ASC;
}}}
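The query above is one long self-join: every statistic gets a begin (t0) and end (t1) alias of dba_hist_sysstat, the two values are diffed across consecutive snapshots, and the delta is divided by the snapshot duration in seconds. A minimal sketch of that pattern for a single statistic (read IOPs), using only the same dba_hist views:
{{{
-- t0/t1 diff pattern for one sysstat counter, expressed as a per-second rate
select s0.snap_id,
       round((t1.value - t0.value) /
             ((cast(s1.end_interval_time as date) - cast(s0.end_interval_time as date)) * 86400), 3) read_iops
from   dba_hist_snapshot s0,
       dba_hist_snapshot s1,
       dba_hist_sysstat  t0,
       dba_hist_sysstat  t1
where  s1.dbid            = s0.dbid
and    s1.instance_number = s0.instance_number
and    s1.snap_id         = s0.snap_id + 1
and    t0.dbid            = s0.dbid
and    t0.instance_number = s0.instance_number
and    t0.snap_id         = s0.snap_id
and    t1.dbid            = s1.dbid
and    t1.instance_number = s1.instance_number
and    t1.snap_id         = s1.snap_id
and    t0.stat_name       = 'physical read total IO requests'
and    t1.stat_name       = t0.stat_name
order by s0.snap_id;
}}}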
{{{

Network requirements below. TX is transmitted (sent) bytes, RX is received bytes.
https://github.com/carlos-sierra/esp_collect/blob/master/sql/esp_collect_requirements_awr.sql


SUM(CASE WHEN h.stat_name = 'bytes sent via SQL*Net to client'                   THEN h.value ELSE 0 END) tx_cl,
       SUM(CASE WHEN h.stat_name = 'bytes received via SQL*Net from client'             THEN h.value ELSE 0 END) rx_cl,
       SUM(CASE WHEN h.stat_name = 'bytes sent via SQL*Net to dblink'                   THEN h.value ELSE 0 END) tx_dl,
       SUM(CASE WHEN h.stat_name = 'bytes received via SQL*Net from dblink'             THEN h.value ELSE 0 END) rx_dl


ROUND(MAX((tx_cl + rx_cl + tx_dl + rx_dl) / elapsed_sec)) nw_peak_bytes,
       ROUND(MAX((tx_cl + tx_dl) / elapsed_sec)) nw_tx_peak_bytes,
       ROUND(MAX((rx_cl + rx_dl) / elapsed_sec)) nw_rx_peak_bytes,       


Interconnect below

SUM(CASE WHEN h.stat_name = 'gc cr blocks received'                         THEN h.value ELSE 0 END) gc_cr_bl_rx,
       SUM(CASE WHEN h.stat_name = 'gc current blocks received'             THEN h.value ELSE 0 END) gc_cur_bl_rx,
       SUM(CASE WHEN h.stat_name = 'gc cr blocks served'                THEN h.value ELSE 0 END) gc_cr_bl_serv,
       SUM(CASE WHEN h.stat_name = 'gc current blocks served'               THEN h.value ELSE 0 END) gc_cur_bl_serv, 
       SUM(CASE WHEN h.stat_name = 'gcs messages sent'                  THEN h.value ELSE 0 END) gcs_msg_sent, 
       SUM(CASE WHEN h.stat_name = 'ges messages sent'                  THEN h.value ELSE 0 END) ges_msg_sent, 
       SUM(CASE WHEN d.name      = 'gcs msgs received'                  THEN d.value ELSE 0 END) gcs_msg_rcv, 
       SUM(CASE WHEN d.name      = 'ges msgs received'                  THEN d.value ELSE 0 END) ges_msg_rcv, 
       SUM(CASE WHEN p.parameter_name = 'db_block_size'                 THEN to_number(p.value) ELSE 0 END) block_size        

ROUND(MAX((((gc_cr_bl_rx + gc_cur_bl_rx + gc_cr_bl_serv + gc_cur_bl_serv)*block_size)+((gcs_msg_sent + ges_msg_sent + gcs_msg_rcv + ges_msg_rcv)*200)) / elapsed_sec)) ic_peak_bytes,     -- whole byte count (blocks plus ~200-byte messages) divided by elapsed_sec
              


}}}
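To turn the SQL*Net counters above into per-snapshot throughput, the same t0/t1 diff pattern applies. A minimal sketch (stat names taken from the snippet above, reported as bytes/sec per direction):
{{{
-- per-snapshot SQL*Net throughput in bytes/sec (client + dblink traffic)
select s0.snap_id,
       round(sum(case when t1.stat_name like 'bytes sent%'     then t1.value - t0.value else 0 end)
           / ((cast(s1.end_interval_time as date) - cast(s0.end_interval_time as date)) * 86400)) tx_bytes_sec,
       round(sum(case when t1.stat_name like 'bytes received%' then t1.value - t0.value else 0 end)
           / ((cast(s1.end_interval_time as date) - cast(s0.end_interval_time as date)) * 86400)) rx_bytes_sec
from   dba_hist_snapshot s0,
       dba_hist_snapshot s1,
       dba_hist_sysstat  t0,
       dba_hist_sysstat  t1
where  s1.dbid            = s0.dbid
and    s1.instance_number = s0.instance_number
and    s1.snap_id         = s0.snap_id + 1
and    t0.dbid            = s0.dbid
and    t0.instance_number = s0.instance_number
and    t0.snap_id         = s0.snap_id
and    t1.dbid            = s1.dbid
and    t1.instance_number = s1.instance_number
and    t1.snap_id         = s1.snap_id
and    t1.stat_name       = t0.stat_name
and    t1.stat_name in ('bytes sent via SQL*Net to client',
                        'bytes received via SQL*Net from client',
                        'bytes sent via SQL*Net to dblink',
                        'bytes received via SQL*Net from dblink')
group by s0.snap_id, s0.end_interval_time, s1.end_interval_time
order by s0.snap_id;
}}}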
{{{
set arraysize 5000

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR Services Statistics Report' skip 2
set pagesize 50000
set linesize 550

col instname    format a15
col hostname    format a30
col tm          format a15              heading tm           --"Snap|Start|Time"
col id          format 99999            heading id           --"Snap|ID"
col inst        format 90               heading inst         --"i|n|s|t|#"
col dur         format 999990.00        heading dur          --"Snap|Dur|(m)"
col cpu         format 90               heading cpu          --"C|P|U"
col cap         format 9999990.00       heading cap          --"***|Total|CPU|Time|(s)"
col dbt         format 999990.00        heading dbt          --"DB|Time"
col dbc         format 99990.00         heading dbc          --"DB|CPU"
col bgc         format 99990.00         heading bgc          --"Bg|CPU"
col rman        format 9990.00          heading rman         --"RMAN|CPU"
col aas         format 990.0            heading aas          --"A|A|S"
col totora      format 9999990.00       heading totora       --"***|Total|Oracle|CPU|(s)"
col busy        format 9999990.00       heading busy         --"Busy|Time"
col load        format 990.00           heading load         --"OS|Load"
col totos       format 9999990.00       heading totos        --"***|Total|OS|CPU|(s)"
col mem         format 999990.00        heading mem          --"Physical|Memory|(mb)"
col IORs        format 9990.000         heading IORs         --"IOPs|r"
col IOWs        format 9990.000         heading IOWs         --"IOPs|w"
col IORedo      format 9990.000         heading IORedo       --"IOPs|redo"
col IORmbs      format 9990.000         heading IORmbs       --"IO r|(mb)/s"
col IOWmbs      format 9990.000         heading IOWmbs       --"IO w|(mb)/s"
col redosizesec format 9990.000         heading redosizesec  --"Redo|(mb)/s"
col logons      format 990              heading logons       --"Sess"
col logone      format 990              heading logone       --"Sess|End"
col exsraw      format 99990.000        heading exsraw       --"Exec|raw|delta"
col exs         format 9990.000         heading exs          --"Exec|/s"
col oracpupct   format 990              heading oracpupct    --"Oracle|CPU|%"
col rmancpupct  format 990              heading rmancpupct   --"RMAN|CPU|%"
col oscpupct    format 990              heading oscpupct     --"OS|CPU|%"
col oscpuusr    format 990              heading oscpuusr     --"U|S|R|%"
col oscpusys    format 990              heading oscpusys     --"S|Y|S|%"
col oscpuio     format 990              heading oscpuio      --"I|O|%"
col phy_reads   format 99999990.00      heading phy_reads    --"physical|reads"
col log_reads   format 99999990.00      heading log_reads    --"logical|reads"

select  trim('&_instname') instname, trim('&_dbid') db_id, trim('&_hostname') hostname, snap_id,
        TO_CHAR(tm,'MM/DD/YY HH24:MI:SS') tm, 
        inst,
        dur,
        service_name, 
        round(db_time / 1000000, 1) as dbt, 
        round(db_cpu  / 1000000, 1) as dbc,
        phy_reads, 
        log_reads,
        aas
 from (select 
          s1.snap_id,
          s1.tm,
          s1.inst,
          s1.dur,
          s1.service_name, 
          sum(decode(s1.stat_name, 'DB time', s1.diff, 0)) db_time,
          sum(decode(s1.stat_name, 'DB CPU',  s1.diff, 0)) db_cpu,
          sum(decode(s1.stat_name, 'physical reads', s1.diff, 0)) phy_reads,
          sum(decode(s1.stat_name, 'session logical reads', s1.diff, 0)) log_reads,
          round(sum(decode(s1.stat_name, 'DB time', s1.diff, 0))/1000000,1)/60 / s1.dur as aas
   from
     (select s0.snap_id snap_id,
             s0.END_INTERVAL_TIME tm,
             s0.instance_number inst,
            round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
             e.service_name     service_name, 
             e.stat_name        stat_name, 
             e.value - b.value  diff
       from dba_hist_snapshot s0,
            dba_hist_snapshot s1,
            dba_hist_service_stat b,
            dba_hist_service_stat e
       where 
         s0.dbid                  = &_dbid            -- CHANGE THE DBID HERE!
         and s1.dbid              = s0.dbid
         and b.dbid               = s0.dbid
         and e.dbid               = s0.dbid
         and s0.instance_number   = &_instancenumber  -- CHANGE THE INSTANCE_NUMBER HERE!
         and s1.instance_number   = s0.instance_number
         and b.instance_number    = s0.instance_number
         and e.instance_number    = s0.instance_number
         and s1.snap_id           = s0.snap_id + 1
         and b.snap_id            = s0.snap_id
         and e.snap_id            = s0.snap_id + 1
         and b.stat_id            = e.stat_id
         and b.service_name_hash  = e.service_name_hash) s1
   group by 
     s1.snap_id, s1.tm, s1.inst, s1.dur, s1.service_name
   order by 
     snap_id asc, aas desc, service_name)
-- where 
-- AND TO_CHAR(tm,'D') >= 1     -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(tm,'D') <= 7
-- AND TO_CHAR(tm,'HH24MI') >= 0900     -- Hour
-- AND TO_CHAR(tm,'HH24MI') <= 1800
-- AND tm >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND tm <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
-- snap_id = 338
-- and snap_id >= 335 and snap_id <= 339
-- aas > .5
;
}}}
{{{
trx/sec = [UCOMS]+[URS]     -- transactions per second = user commits/sec + user rollbacks/sec (the ucoms and urs columns in the script below)
}}}
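A minimal sketch of that formula taken straight from dba_hist_sysstat, using the same diff pattern as the other scripts here (the full script below also breaks ucoms and urs out separately):
{{{
-- transactions/sec = (user commits delta + user rollbacks delta) / elapsed seconds
select s0.snap_id,
       round(sum(t1.value - t0.value)
           / ((cast(s1.end_interval_time as date) - cast(s0.end_interval_time as date)) * 86400), 3) trx_per_sec
from   dba_hist_snapshot s0,
       dba_hist_snapshot s1,
       dba_hist_sysstat  t0,
       dba_hist_sysstat  t1
where  s1.dbid            = s0.dbid
and    s1.instance_number = s0.instance_number
and    s1.snap_id         = s0.snap_id + 1
and    t0.dbid            = s0.dbid
and    t0.instance_number = s0.instance_number
and    t0.snap_id         = s0.snap_id
and    t1.dbid            = s1.dbid
and    t1.instance_number = s1.instance_number
and    t1.snap_id         = s1.snap_id
and    t1.stat_name       = t0.stat_name
and    t1.stat_name in ('user commits', 'user rollbacks')
group by s0.snap_id, s0.end_interval_time, s1.end_interval_time
order by s0.snap_id;
}}}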

{{{
set arraysize 5000

COLUMN blocksize NEW_VALUE _blocksize NOPRINT
select distinct block_size blocksize from v$datafile;

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR CPU and IO Workload Report' skip 2
set pagesize 50000
set linesize 550

col instname    format a15              heading instname        -- instname
col hostname    format a30              heading hostname        -- hostname
col tm          format a17              heading tm              -- "tm"
col id          format 99999            heading id              -- "snapid"
col inst        format 90               heading inst            -- "inst"
col dur         format 999990.00        heading dur             -- "dur"
col cpu         format 90               heading cpu             -- "cpu"
col cap         format 9999990.00       heading cap             -- "capacity"
col dbt         format 999990.00        heading dbt             -- "DBTime"
col dbc         format 99990.00         heading dbc             -- "DBcpu"
col bgc         format 99990.00         heading bgc             -- "BGcpu"
col rman        format 9990.00          heading rman            -- "RMANcpu"
col aas         format 990.0            heading aas             -- "AAS"
col totora      format 9999990.00       heading totora          -- "TotalOracleCPU"
col busy        format 9999990.00       heading busy            -- "BusyTime"
col load        format 990.00           heading load            -- "OSLoad"
col totos       format 9999990.00       heading totos           -- "TotalOSCPU"
col mem         format 999990.00        heading mem             -- "PhysicalMemorymb"
col IORs        format 9990.000         heading IORs            -- "IOPsr"
col IOWs        format 9990.000         heading IOWs            -- "IOPsw"
col IORedo      format 9990.000         heading IORedo          -- "IOPsredo"
col IORmbs      format 9990.000         heading IORmbs          -- "IOrmbs"
col IOWmbs      format 9990.000         heading IOWmbs          -- "IOwmbs"
col redosizesec format 9990.000         heading redosizesec     -- "Redombs"
col logons      format 990              heading logons          -- "Sess"
col logone      format 990              heading logone          -- "SessEnd"
col exsraw      format 99990.000        heading exsraw          -- "Execrawdelta"
col exs         format 9990.000         heading exs             -- "Execs"
col ucs         format 9990.000         heading ucs             -- "UserCalls"
col ucoms       format 9990.000         heading ucoms           -- "Commit"
col urs         format 9990.000         heading urs             -- "Rollback"
col lios        format 9999990.00       heading lios            -- "LIOs"
col oracpupct   format 990              heading oracpupct       -- "OracleCPUPct"
col rmancpupct  format 990              heading rmancpupct      -- "RMANCPUPct"
col oscpupct    format 990              heading oscpupct        -- "OSCPUPct"
col oscpuusr    format 990              heading oscpuusr        -- "USRPct"
col oscpusys    format 990              heading oscpusys        -- "SYSPct"
col oscpuio     format 990              heading oscpuio         -- "IOPct"

SELECT * FROM
( 
  SELECT trim('&_instname') instname, 
         trim('&_dbid') db_id, 
         trim('&_hostname') hostname, 
          s0.snap_id id,
         TO_CHAR(s0.END_INTERVAL_TIME,'MM/DD/YY HH24:MI:SS') tm,
         s0.instance_number inst,
  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
  round(s4t1.value/1024/1024/1024,2) AS memgb,
  round(s37t1.value/1024/1024/1024,2) AS sgagb,
  round(s36t1.value/1024/1024/1024,2) AS pgagb,
     s9t0.value logons, 
   ((s10t1.value - s10t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as exs, 
   ((s40t1.value - s40t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as ucs, 
   ((s38t1.value - s38t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as ucoms, 
   ((s39t1.value - s39t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as urs,
   ((s41t1.value - s41t0.value)  / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                  + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                  + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                  + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60)
    ) as lios
FROM dba_hist_snapshot s0,
  dba_hist_snapshot s1,
  dba_hist_osstat s4t1,         -- osstat just get the end value 
  (select snap_id, dbid, instance_number, sum(value) value from dba_hist_sga group by snap_id, dbid, instance_number) s37t1, -- total SGA allocated, just get the end value
  dba_hist_pgastat s36t1,		-- total PGA allocated, just get the end value 
  dba_hist_sysstat s9t0,        -- logons current, sysstat absolute value should not be diffed
  dba_hist_sysstat s10t0,       -- execute count, diffed
  dba_hist_sysstat s10t1,
  dba_hist_sysstat s38t0,       -- user commits, diffed
  dba_hist_sysstat s38t1,
  dba_hist_sysstat s39t0,       -- user rollbacks, diffed
  dba_hist_sysstat s39t1,
  dba_hist_sysstat s40t0,       -- user calls, diffed
  dba_hist_sysstat s40t1,
  dba_hist_sysstat s41t0,       -- session logical reads, diffed
  dba_hist_sysstat s41t1
WHERE s0.dbid            = &_dbid    -- CHANGE THE DBID HERE!
AND s1.dbid              = s0.dbid
AND s4t1.dbid            = s0.dbid
AND s9t0.dbid            = s0.dbid
AND s10t0.dbid            = s0.dbid
AND s10t1.dbid            = s0.dbid
AND s36t1.dbid            = s0.dbid
AND s37t1.dbid            = s0.dbid
AND s38t0.dbid            = s0.dbid
AND s38t1.dbid            = s0.dbid
AND s39t0.dbid            = s0.dbid
AND s39t1.dbid            = s0.dbid
AND s40t0.dbid            = s0.dbid
AND s40t1.dbid            = s0.dbid
AND s41t0.dbid            = s0.dbid
AND s41t1.dbid            = s0.dbid
AND s0.instance_number   = &_instancenumber   -- CHANGE THE INSTANCE_NUMBER HERE!
AND s1.instance_number   = s0.instance_number
AND s4t1.instance_number = s0.instance_number
AND s9t0.instance_number = s0.instance_number
AND s10t0.instance_number = s0.instance_number
AND s10t1.instance_number = s0.instance_number
AND s36t1.instance_number = s0.instance_number
AND s37t1.instance_number = s0.instance_number
AND s38t0.instance_number = s0.instance_number
AND s38t1.instance_number = s0.instance_number
AND s39t0.instance_number = s0.instance_number
AND s39t1.instance_number = s0.instance_number
AND s40t0.instance_number = s0.instance_number
AND s40t1.instance_number = s0.instance_number
AND s41t0.instance_number = s0.instance_number
AND s41t1.instance_number = s0.instance_number
AND s1.snap_id           = s0.snap_id + 1
AND s4t1.snap_id         = s0.snap_id + 1
AND s36t1.snap_id        = s0.snap_id + 1
AND s37t1.snap_id        = s0.snap_id + 1
AND s9t0.snap_id         = s0.snap_id
AND s10t0.snap_id         = s0.snap_id
AND s10t1.snap_id         = s0.snap_id + 1
AND s38t0.snap_id         = s0.snap_id
AND s38t1.snap_id         = s0.snap_id + 1
AND s39t0.snap_id         = s0.snap_id
AND s39t1.snap_id         = s0.snap_id + 1
AND s40t0.snap_id         = s0.snap_id
AND s40t1.snap_id         = s0.snap_id + 1
AND s41t0.snap_id         = s0.snap_id
AND s41t1.snap_id         = s0.snap_id + 1
AND s4t1.stat_name       = 'PHYSICAL_MEMORY_BYTES'
AND s36t1.name           = 'total PGA allocated'
AND s9t0.stat_name       = 'logons current'
AND s10t0.stat_name       = 'execute count'
AND s10t1.stat_name       = s10t0.stat_name
AND s38t0.stat_name       = 'user commits'
AND s38t1.stat_name       = s38t0.stat_name
AND s39t0.stat_name       = 'user rollbacks'
AND s39t1.stat_name       = s39t0.stat_name
AND s40t0.stat_name       = 'user calls'
AND s40t1.stat_name       = s40t0.stat_name
AND s41t0.stat_name       = 'session logical reads'
AND s41t1.stat_name       = s41t0.stat_name
)
-- WHERE 
-- tm > to_char(sysdate - 30, 'MM/DD/YY HH24:MI')
-- id  in (select snap_id from (select * from r2toolkit.r2_regression_data union all select * from r2toolkit.r2_outlier_data))
-- id in (336)
-- aas > 1
-- oracpupct > 50
-- oscpupct > 50
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') >= 1     -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'D') <= 7
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') >= 0900     -- Hour
-- AND TO_CHAR(s0.END_INTERVAL_TIME,'HH24MI') <= 1800
-- AND s0.END_INTERVAL_TIME >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND s0.END_INTERVAL_TIME <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
ORDER BY id ASC;
}}}
{{{
set arraysize 5000

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR Top Events Report' skip 2
set pagesize 50000
set linesize 550

col instname    format a15              
col hostname    format a30              
col snap_id     format 99999            heading snap_id       -- "snapid"   
col tm          format a17              heading tm            -- "tm"       
col inst        format 90               heading inst          -- "inst"     
col dur         format 999990.00        heading dur           -- "dur"      
col event       format a55              heading event         -- "Event"    
col event_rank  format 90               heading event_rank    -- "EventRank"
col waits       format 9999999990.00    heading waits         -- "Waits"    
col time        format 9999999990.00    heading time          -- "Timesec"  
col avgwt       format 99990.00         heading avgwt         -- "Avgwtms"  
col pctdbt      format 9990.0           heading pctdbt        -- "DBTimepct"
col aas         format 990.0            heading aas           -- "Aas"      
col wait_class  format a15              heading wait_class    -- "WaitClass"

spool awr_topevents-tableau-&_instname-&_hostname..csv
select trim('&_instname') instname, trim('&_dbid') db_id, trim('&_hostname') hostname, snap_id, tm, inst, dur, event, event_rank, waits, time, avgwt, pctdbt, aas, wait_class
from 
      (select snap_id, TO_CHAR(tm,'MM/DD/YY HH24:MI:SS') tm, inst, dur, event, waits, time, avgwt, pctdbt, aas, wait_class, 
            DENSE_RANK() OVER (
          PARTITION BY snap_id ORDER BY time DESC) event_rank
      from 
              (
              select * from 
                    (select * from 
                          (select 
                            s0.snap_id snap_id,
                            s0.END_INTERVAL_TIME tm,
                            s0.instance_number inst,
                            round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                    + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                    + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                    + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                            e.event_name event,
                            e.total_waits - nvl(b.total_waits,0)       waits,
                            round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2)  time,     -- THIS IS EVENT (sec)
                            round (decode ((e.total_waits - nvl(b.total_waits, 0)), 0, to_number(NULL), ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000) / (e.total_waits - nvl(b.total_waits,0))), 2) avgwt,
                            ((round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2)) / NULLIF(((s5t1.value - nvl(s5t0.value,0)) / 1000000),0))*100 as pctdbt,     -- THIS IS EVENT (sec) / DB TIME (sec)
                            (round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2))/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                            + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                            + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                            + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,     -- THIS IS EVENT (min) / SnapDur (min) TO GET THE % DB CPU ON AAS
                            e.wait_class wait_class
                            from 
                                 dba_hist_snapshot s0,
                                 dba_hist_snapshot s1,
                                 dba_hist_system_event b,
                                 dba_hist_system_event e,
                                 dba_hist_sys_time_model s5t0,
                                 dba_hist_sys_time_model s5t1
                            where 
                              s0.dbid                   = &_dbid            -- CHANGE THE DBID HERE!
                              AND s1.dbid               = s0.dbid
                              and b.dbid(+)             = s0.dbid
                              and e.dbid                = s0.dbid
                              AND s5t0.dbid             = s0.dbid
                              AND s5t1.dbid             = s0.dbid
                              AND s0.instance_number    = &_instancenumber  -- CHANGE THE INSTANCE_NUMBER HERE!
                              AND s1.instance_number    = s0.instance_number
                              and b.instance_number(+)  = s0.instance_number
                              and e.instance_number     = s0.instance_number
                              AND s5t0.instance_number = s0.instance_number
                              AND s5t1.instance_number = s0.instance_number
                              AND s1.snap_id            = s0.snap_id + 1
                              AND b.snap_id(+)          = s0.snap_id
                              and e.snap_id             = s0.snap_id + 1
                              AND s5t0.snap_id         = s0.snap_id
                              AND s5t1.snap_id         = s0.snap_id + 1
                              AND s5t0.stat_name       = 'DB time'
                              AND s5t1.stat_name       = s5t0.stat_name
                                    and b.event_id            = e.event_id
                                    and e.wait_class          != 'Idle'
                                    and e.total_waits         > nvl(b.total_waits,0)
                                    and e.event_name not in ('smon timer', 
                                                             'pmon timer', 
                                                             'dispatcher timer',
                                                             'dispatcher listen timer',
                                                             'rdbms ipc message')
                                  order by snap_id, time desc, waits desc, event)
                    union all
                              select 
                                       s0.snap_id snap_id,
                                       s0.END_INTERVAL_TIME tm,
                                       s0.instance_number inst,
                                       round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                            + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                            + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                            + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                        'CPU time',
                                        0,
                                        round ((s6t1.value - s6t0.value) / 1000000, 2) as time,     -- THIS IS DB CPU (sec)
                                        0,
                                        ((round ((s6t1.value - s6t0.value) / 1000000, 2)) / NULLIF(((s5t1.value - nvl(s5t0.value,0)) / 1000000),0))*100 as pctdbt,     -- THIS IS DB CPU (sec) / DB TIME (sec)..TO GET % OF DB CPU ON DB TIME FOR TOP 5 TIMED EVENTS SECTION
                                        (round ((s6t1.value - s6t0.value) / 1000000, 2))/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                    + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                    + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                    + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,  -- THIS IS DB CPU (min) / SnapDur (min) TO GET THE % DB CPU ON AAS
                                        'CPU'
                                      from 
                                        dba_hist_snapshot s0,
                                        dba_hist_snapshot s1,
                                        dba_hist_sys_time_model s6t0,
                                        dba_hist_sys_time_model s6t1,
                                        dba_hist_sys_time_model s5t0,
                                        dba_hist_sys_time_model s5t1
                                      WHERE 
                                      s0.dbid                   = &_dbid              -- CHANGE THE DBID HERE!
                                      AND s1.dbid               = s0.dbid
                                      AND s6t0.dbid            = s0.dbid
                                      AND s6t1.dbid            = s0.dbid
                                      AND s5t0.dbid            = s0.dbid
                                      AND s5t1.dbid            = s0.dbid
                                      AND s0.instance_number    = &_instancenumber    -- CHANGE THE INSTANCE_NUMBER HERE!
                                      AND s1.instance_number    = s0.instance_number
                                      AND s6t0.instance_number = s0.instance_number
                                      AND s6t1.instance_number = s0.instance_number
                                      AND s5t0.instance_number = s0.instance_number
                                      AND s5t1.instance_number = s0.instance_number
                                      AND s1.snap_id            = s0.snap_id + 1
                                      AND s6t0.snap_id         = s0.snap_id
                                      AND s6t1.snap_id         = s0.snap_id + 1
                                      AND s5t0.snap_id         = s0.snap_id
                                      AND s5t1.snap_id         = s0.snap_id + 1
                                      AND s6t0.stat_name       = 'DB CPU'
                                      AND s6t1.stat_name       = s6t0.stat_name
                                      AND s5t0.stat_name       = 'DB time'
                                      AND s5t1.stat_name       = s5t0.stat_name
                    union all
                                      (select 
                                               dbtime.snap_id,
                                               dbtime.tm,
                                               dbtime.inst,
                                               dbtime.dur,
                                               'CPU wait',
                                                0,
                                                round(dbtime.time - accounted_dbtime.time, 2) time,     -- THIS IS UNACCOUNTED FOR DB TIME (sec)
                                                0,
                                                ((dbtime.aas - accounted_dbtime.aas)/ NULLIF(nvl(dbtime.aas,0),0))*100 as pctdbt,     -- THIS IS UNACCOUNTED FOR DB TIME (sec) / DB TIME (sec)
                                                round(dbtime.aas - accounted_dbtime.aas, 2) aas,     -- AAS OF UNACCOUNTED FOR DB TIME
                                                'CPU wait'
                                      from
                                                  (select  
                                                     s0.snap_id, 
                                                     s0.END_INTERVAL_TIME tm,
                                                     s0.instance_number inst,
                                                    round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                    + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                    + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                    + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                                    'DB time',
                                                    0,
                                                    round ((s5t1.value - s5t0.value) / 1000000, 2) as time,     -- THIS IS DB time (sec)
                                                    0,
                                                    0,
                                                     (round ((s5t1.value - s5t0.value) / 1000000, 2))/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                    + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                    + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                    + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,
                                                    'DB time'
                                                  from 
                                                                    dba_hist_snapshot s0,
                                                                    dba_hist_snapshot s1,
                                                                    dba_hist_sys_time_model s5t0,
                                                                    dba_hist_sys_time_model s5t1
                                                                  WHERE 
                                                                  s0.dbid                   = &_dbid              -- CHANGE THE DBID HERE!
                                                                  AND s1.dbid               = s0.dbid
                                                                  AND s5t0.dbid            = s0.dbid
                                                                  AND s5t1.dbid            = s0.dbid
                                                                  AND s0.instance_number    = &_instancenumber    -- CHANGE THE INSTANCE_NUMBER HERE!
                                                                  AND s1.instance_number    = s0.instance_number
                                                                  AND s5t0.instance_number = s0.instance_number
                                                                  AND s5t1.instance_number = s0.instance_number
                                                                  AND s1.snap_id            = s0.snap_id + 1
                                                                  AND s5t0.snap_id         = s0.snap_id
                                                                  AND s5t1.snap_id         = s0.snap_id + 1
                                                                  AND s5t0.stat_name       = 'DB time'
                                                                  AND s5t1.stat_name       = s5t0.stat_name) dbtime, 
                                                  (select snap_id, sum(time) time, sum(AAS) aas from 
                                                          (select * from (select 
                                                                s0.snap_id snap_id,
                                                                s0.END_INTERVAL_TIME tm,
                                                                s0.instance_number inst,
                                                                round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                        + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                        + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                        + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                                                e.event_name event,
                                                                e.total_waits - nvl(b.total_waits,0)       waits,
                                                                round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2)  time,     -- THIS IS EVENT (sec)
                                                                round (decode ((e.total_waits - nvl(b.total_waits, 0)), 0, to_number(NULL), ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000) / (e.total_waits - nvl(b.total_waits,0))), 2) avgwt,
                                                                ((round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2)) / NULLIF(((s5t1.value - nvl(s5t0.value,0)) / 1000000),0))*100 as pctdbt,     -- THIS IS EVENT (sec) / DB TIME (sec)
                                                                (round ((e.time_waited_micro - nvl(b.time_waited_micro,0))/1000000, 2))/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                                + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                                + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                                + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,     -- THIS IS EVENT (min) / SnapDur (min) TO GET THE EVENT AAS
                                                                e.wait_class wait_class
                                                          from 
                                                               dba_hist_snapshot s0,
                                                               dba_hist_snapshot s1,
                                                               dba_hist_system_event b,
                                                               dba_hist_system_event e,
                                                               dba_hist_sys_time_model s5t0,
                                                               dba_hist_sys_time_model s5t1
                                                          where 
                                                            s0.dbid                   = &_dbid            -- CHANGE THE DBID HERE!
                                                            AND s1.dbid               = s0.dbid
                                                            and b.dbid(+)             = s0.dbid
                                                            and e.dbid                = s0.dbid
                                                            AND s5t0.dbid             = s0.dbid
                                                            AND s5t1.dbid             = s0.dbid
                                                            AND s0.instance_number    = &_instancenumber  -- CHANGE THE INSTANCE_NUMBER HERE!
                                                            AND s1.instance_number    = s0.instance_number
                                                            and b.instance_number(+)  = s0.instance_number
                                                            and e.instance_number     = s0.instance_number
                                                            AND s5t0.instance_number = s0.instance_number
                                                            AND s5t1.instance_number = s0.instance_number
                                                            AND s1.snap_id            = s0.snap_id + 1
                                                            AND b.snap_id(+)          = s0.snap_id
                                                            and e.snap_id             = s0.snap_id + 1
                                                            AND s5t0.snap_id         = s0.snap_id
                                                            AND s5t1.snap_id         = s0.snap_id + 1
                                                      AND s5t0.stat_name       = 'DB time'
                                                      AND s5t1.stat_name       = s5t0.stat_name
                                                            and b.event_id            = e.event_id
                                                            and e.wait_class          != 'Idle'
                                                            and e.total_waits         > nvl(b.total_waits,0)
                                                            and e.event_name not in ('smon timer', 
                                                                                     'pmon timer', 
                                                                                     'dispatcher timer',
                                                                                     'dispatcher listen timer',
                                                                                     'rdbms ipc message')
                                                          order by snap_id, time desc, waits desc, event)
                                                    union all
                                                          select 
                                                                   s0.snap_id snap_id,
                                                                   s0.END_INTERVAL_TIME tm,
                                                                   s0.instance_number inst,
                                                                   round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                        + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                        + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                        + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                                                    'CPU time',
                                                                    0,
                                                                    round ((s6t1.value - s6t0.value) / 1000000, 2) as time,     -- THIS IS DB CPU (sec)
                                                                    0,
                                                                    ((round ((s6t1.value - s6t0.value) / 1000000, 2)) / NULLIF(((s5t1.value - nvl(s5t0.value,0)) / 1000000),0))*100 as pctdbt,     -- THIS IS DB CPU (sec) / DB TIME (sec)..TO GET % OF DB CPU ON DB TIME FOR TOP 5 TIMED EVENTS SECTION
                                                                    (round ((s6t1.value - s6t0.value) / 1000000, 2))/60 /  round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                                + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                                + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                                + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) aas,  -- THIS IS DB CPU (min) / SnapDur (min) TO GET THE DB CPU AAS
                                                                    'CPU'
                                                                  from 
                                                                    dba_hist_snapshot s0,
                                                                    dba_hist_snapshot s1,
                                                                    dba_hist_sys_time_model s6t0,
                                                                    dba_hist_sys_time_model s6t1,
                                                                    dba_hist_sys_time_model s5t0,
                                                                    dba_hist_sys_time_model s5t1
                                                                  WHERE 
                                                                  s0.dbid                   = &_dbid              -- CHANGE THE DBID HERE!
                                                                  AND s1.dbid               = s0.dbid
                                                                  AND s6t0.dbid            = s0.dbid
                                                                  AND s6t1.dbid            = s0.dbid
                                                                  AND s5t0.dbid            = s0.dbid
                                                                  AND s5t1.dbid            = s0.dbid
                                                                  AND s0.instance_number    = &_instancenumber    -- CHANGE THE INSTANCE_NUMBER HERE!
                                                                  AND s1.instance_number    = s0.instance_number
                                                                  AND s6t0.instance_number = s0.instance_number
                                                                  AND s6t1.instance_number = s0.instance_number
                                                                  AND s5t0.instance_number = s0.instance_number
                                                                  AND s5t1.instance_number = s0.instance_number
                                                                  AND s1.snap_id            = s0.snap_id + 1
                                                                  AND s6t0.snap_id         = s0.snap_id
                                                                  AND s6t1.snap_id         = s0.snap_id + 1
                                                                  AND s5t0.snap_id         = s0.snap_id
                                                                  AND s5t1.snap_id         = s0.snap_id + 1
                                                                  AND s6t0.stat_name       = 'DB CPU'
                                                                  AND s6t1.stat_name       = s6t0.stat_name
                                                                  AND s5t0.stat_name       = 'DB time'
                                                                  AND s5t1.stat_name       = s5t0.stat_name
                                                          ) group by snap_id) accounted_dbtime
                                                            where dbtime.snap_id = accounted_dbtime.snap_id 
                                        )
                    )
              )
      )
WHERE event_rank <= 5
-- AND tm > to_char(sysdate - 30, 'MM/DD/YY HH24:MI')
-- AND TO_CHAR(tm,'D') >= '1'     -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(tm,'D') <= '7'
-- AND TO_CHAR(tm,'HH24MI') >= '0900'     -- Hour
-- AND TO_CHAR(tm,'HH24MI') <= '1800'
-- AND tm >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND tm <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
-- and snap_id = 495
-- and snap_id >= 495 and snap_id <= 496
-- and event = 'db file sequential read'
-- and event like 'CPU%'
-- and avgwt > 5
-- and aas > .5
-- and wait_class = 'CPU'
-- and wait_class like '%I/O%'
-- and event_rank in (1,2,3)
ORDER BY snap_id;
}}}
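The AAS arithmetic in the query above reduces to time-in-state divided by wall-clock time: seconds of DB time (or event time) divided by 60 gives minutes, and dividing that by the snapshot duration in minutes gives average active sessions. A minimal standalone sketch of the same computation for every snapshot pair (no DBID/instance substitution variables, just the bare formula):
{{{
-- minimal sketch: AAS of DB time per snapshot pair
-- AAS = (DB time sec / 60) / snapshot duration in minutes
select snap_id, dbtime_sec, dur_min,
       round(dbtime_sec/60/nullif(dur_min,0), 2) aas
from (
  select s1.snap_id,
         round((t1.value - t0.value)/1000000, 2) dbtime_sec,
         round(extract(day    from s1.end_interval_time - s0.end_interval_time)*1440
             + extract(hour   from s1.end_interval_time - s0.end_interval_time)*60
             + extract(minute from s1.end_interval_time - s0.end_interval_time)
             + extract(second from s1.end_interval_time - s0.end_interval_time)/60, 2) dur_min
  from   dba_hist_snapshot s0,
         dba_hist_snapshot s1,
         dba_hist_sys_time_model t0,
         dba_hist_sys_time_model t1
  where  s1.dbid            = s0.dbid
  and    s1.instance_number = s0.instance_number
  and    s1.snap_id         = s0.snap_id + 1
  and    t0.dbid = s0.dbid and t0.instance_number = s0.instance_number and t0.snap_id = s0.snap_id
  and    t1.dbid = s1.dbid and t1.instance_number = s1.instance_number and t1.snap_id = s1.snap_id
  and    t0.stat_name = 'DB time'
  and    t1.stat_name = t0.stat_name
)
order by snap_id;
}}}
As a worked number: in the sample AWR report further down, 89.57 minutes of DB Time over a 60.26-minute snapshot works out to roughly 1.5 average active sessions.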
If you'd like the full detail rather than only the top 5 per snap_id, comment out the following lines:
<<<
where 
                          time_rank <= 5
<<<
then add your own filters (e.g. on sql_id or aas) after the line
<<<
-- where rownum <= 20
<<<
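For example, with those two lines commented out, the tail of the script below could end like this to pull one statement's full history instead (a sketch; the sql_id value is just a placeholder):
{{{
             )
-- where rownum <= 20
where sql_id = '0cjsxw5ndqdbc'     -- placeholder sql_id
  and aas > .5
;
}}}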


{{{
set arraysize 5000

COLUMN blocksize NEW_VALUE _blocksize NOPRINT
select distinct block_size blocksize from v$datafile;

COLUMN dbid NEW_VALUE _dbid NOPRINT
select dbid from v$database;

COLUMN name NEW_VALUE _instname NOPRINT
select lower(instance_name) name from v$instance;

COLUMN name NEW_VALUE _hostname NOPRINT
select lower(host_name) name from v$instance;

COLUMN instancenumber NEW_VALUE _instancenumber NOPRINT
select instance_number instancenumber from v$instance;

-- ttitle center 'AWR Top SQL Report' skip 2
set pagesize 50000
set linesize 550

col snap_id             format 99999            heading -- "Snap|ID"
col tm                  format a15              heading -- "Snap|Start|Time"
col inst                format 90               heading -- "i|n|s|t|#"
col dur                 format 990.00           heading -- "Snap|Dur|(m)"
col sql_id              format a15              heading -- "SQL|ID"
col phv                 format 99999999999      heading -- "Plan|Hash|Value"
col module              format a50
col elap                format 999990.00        heading -- "Ela|Time|(s)"
col elapexec            format 999990.00        heading -- "Ela|Time|per|exec|(s)"
col cput                format 999990.00        heading -- "CPU|Time|(s)"
col iowait              format 999990.00        heading -- "IO|Wait|(s)"
col appwait             format 999990.00        heading -- "App|Wait|(s)"
col concurwait          format 999990.00        heading -- "Ccr|Wait|(s)"
col clwait              format 999990.00        heading -- "Cluster|Wait|(s)"
col bget                format 99999999990      heading -- "LIO"
col dskr                format 99999999990      heading -- "PIO"
col dpath               format 99999999990      heading -- "Direct|Writes"
col rowp                format 99999999990      heading -- "Rows"
col exec                format 9999990          heading -- "Exec"
col prsc                format 999999990        heading -- "Parse|Count"
col pxexec              format 9999990          heading -- "PX|Server|Exec"
col icbytes             format 99999990         heading -- "IC|MB"           
col offloadbytes        format 99999990         heading -- "Offload|MB"
col offloadreturnbytes  format 99999990         heading -- "Offload|return|MB"
col flashcachereads     format 99999990         heading -- "Flash|Cache|MB"   
col uncompbytes         format 99999990         heading -- "Uncomp|MB"       
col pctdbt              format 990              heading -- "DB Time|%"
col aas                 format 990.00           heading -- "A|A|S"
col time_rank           format 90               heading -- "Time|Rank"
col sql_text            format a6               heading -- "SQL|Text"

     select *
       from (
             select
                  trim('&_instname') instname, 
                  trim('&_dbid') db_id, 
                  trim('&_hostname') hostname, 
                  sqt.snap_id snap_id,
                  TO_CHAR(sqt.tm,'MM/DD/YY HH24:MI:SS') tm,
                  sqt.inst inst,
                  sqt.dur dur,
                  sqt.aas aas,
                  nvl((sqt.elap), to_number(null)) elap,
                  nvl((sqt.elapexec), 0) elapexec,
                  nvl((sqt.cput), to_number(null)) cput,
                  sqt.iowait iowait,
                  sqt.appwait appwait,
                  sqt.concurwait concurwait,
                  sqt.clwait clwait,
                  sqt.bget bget, 
                  sqt.dskr dskr, 
                  sqt.dpath dpath,
                  sqt.rowp rowp,
                  sqt.exec exec, 
                  sqt.prsc prsc, 
                  sqt.pxexec pxexec,
                  sqt.icbytes, 
                  sqt.offloadbytes, 
                  sqt.offloadreturnbytes, 
                  sqt.flashcachereads, 
                  sqt.uncompbytes,
                  sqt.time_rank time_rank,
                  sqt.sql_id sql_id,   
                  sqt.phv phv,                
                  substr(to_clob(decode(sqt.module, null, null, sqt.module)),1,50) module, 
                  st.sql_text sql_text     -- PUT/REMOVE COMMENT TO HIDE/SHOW THE SQL_TEXT
             from        (
                          select snap_id, tm, inst, dur, sql_id, phv, module, elap, elapexec, cput, iowait, appwait, concurwait, clwait, bget, dskr, dpath, rowp, exec, prsc, pxexec, icbytes, offloadbytes, offloadreturnbytes, flashcachereads, uncompbytes, aas, time_rank
                          from
                                             (
                                               select 
                                                      s0.snap_id snap_id,
                                                      s0.END_INTERVAL_TIME tm,
                                                      s0.instance_number inst,
                                                      round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                              + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                              + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                              + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2) dur,
                                                      e.sql_id sql_id, 
                                                      e.plan_hash_value phv, 
                                                      max(e.module) module,
                                                      sum(e.elapsed_time_delta)/1000000 elap,
                                                      decode((sum(e.executions_delta)), 0, to_number(null), ((sum(e.elapsed_time_delta)) / (sum(e.executions_delta)) / 1000000)) elapexec,
                                                      sum(e.cpu_time_delta)/1000000     cput, 
                                                      sum(e.iowait_delta)/1000000 iowait,
                                                      sum(e.apwait_delta)/1000000 appwait,
                                                      sum(e.ccwait_delta)/1000000 concurwait,
                                                      sum(e.clwait_delta)/1000000 clwait,
                                                      sum(e.buffer_gets_delta) bget,
                                                      sum(e.disk_reads_delta) dskr, 
                                                      sum(e.direct_writes_delta) dpath,
                                                      sum(e.rows_processed_delta) rowp,
                                                      sum(e.executions_delta)   exec,
                                                      sum(e.parse_calls_delta) prsc,
                                                      sum(e.px_servers_execs_delta) pxexec,
                                                      sum(e.io_interconnect_bytes_delta)/1024/1024 icbytes,  
                                                      sum(e.io_offload_elig_bytes_delta)/1024/1024 offloadbytes,  
                                                      sum(e.io_offload_return_bytes_delta)/1024/1024 offloadreturnbytes,   
                                                      (sum(e.optimized_physical_reads_delta)* &_blocksize)/1024/1024 flashcachereads,   
                                                      sum(e.cell_uncompressed_bytes_delta)/1024/1024 uncompbytes, 
                                                      (sum(e.elapsed_time_delta)/1000000) / ((round(EXTRACT(DAY FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 1440 
                                                                                            + EXTRACT(HOUR FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) * 60 
                                                                                            + EXTRACT(MINUTE FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) 
                                                                                            + EXTRACT(SECOND FROM s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME) / 60, 2))*60) aas,
                                                      DENSE_RANK() OVER (
                                                      PARTITION BY s0.snap_id ORDER BY e.elapsed_time_delta DESC) time_rank
                                               from 
                                                   dba_hist_snapshot s0,
                                                   dba_hist_snapshot s1,
                                                   dba_hist_sqlstat e
                                                   where 
                                                    s0.dbid                   = &_dbid                -- CHANGE THE DBID HERE!
                                                    AND s1.dbid               = s0.dbid
                                                    and e.dbid                = s0.dbid                                                
                                                    AND s0.instance_number    = &_instancenumber      -- CHANGE THE INSTANCE_NUMBER HERE!
                                                    AND s1.instance_number    = s0.instance_number
                                                    and e.instance_number     = s0.instance_number                                                 
                                                    AND s1.snap_id            = s0.snap_id + 1
                                                    and e.snap_id             = s0.snap_id + 1                                              
                                               group by 
                                                    s0.snap_id, s0.END_INTERVAL_TIME, s0.instance_number, e.sql_id, e.plan_hash_value, e.elapsed_time_delta, s1.END_INTERVAL_TIME - s0.END_INTERVAL_TIME
                                             )
                          where 
                          time_rank <= 5                                     -- GET TOP 5 SQL ACROSS SNAP_IDs... YOU CAN ALTER THIS TO HAVE MORE DATA POINTS
                         ) 
                        sqt,
                        (select sql_id, dbid, nvl(b.name, a.command_type) sql_text from dba_hist_sqltext a, audit_actions b where a.command_type =  b.action(+)) st
             where st.sql_id(+)             = sqt.sql_id
             and st.dbid(+)                 = &_dbid
-- AND TO_CHAR(tm,'D') >= '1'                                                -- Day of week: 1=Sunday 7=Saturday
-- AND TO_CHAR(tm,'D') <= '7'
-- AND TO_CHAR(tm,'HH24MI') >= '0900'                                        -- Hour
-- AND TO_CHAR(tm,'HH24MI') <= '1800'
-- AND tm >= TO_DATE('2010-jan-17 00:00:00','yyyy-mon-dd hh24:mi:ss')     -- Data range
-- AND tm <= TO_DATE('2010-aug-22 23:59:59','yyyy-mon-dd hh24:mi:ss')
-- AND snap_id in (338,339)
-- AND snap_id = 338
-- AND snap_id >= 335 and snap_id <= 339
-- AND lower(st.sql_text) like 'select%'
-- AND lower(st.sql_text) like 'insert%'
-- AND lower(st.sql_text) like 'update%'
-- AND lower(st.sql_text) like 'merge%'
-- AND pxexec > 0
-- AND aas > .5
             order by 
             snap_id                             -- TO GET SQL OUTPUT ACROSS SNAP_IDs SEQUENTIALLY AND ASC
             -- nvl(sqt.elap, -1) desc, sqt.sql_id     -- TO GET SQL OUTPUT BY ELAPSED TIME
             )
-- where rownum <= 20
;

}}}
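Both the top events and top SQL scripts hinge on the same top-N-per-snapshot idiom: DENSE_RANK() OVER (PARTITION BY snap_id ORDER BY metric DESC) inside an inline view, filtered on the rank outside. Stripped of the formatting and extra columns, the skeleton looks like this (a sketch, ranking on the aggregated elapsed seconds):
{{{
-- minimal top-N-per-snapshot skeleton (sketch)
select snap_id, sql_id, elap, time_rank
from (
      select snap_id, sql_id, elap,
             dense_rank() over (partition by snap_id order by elap desc) time_rank
      from (select e.snap_id, e.sql_id,
                   sum(e.elapsed_time_delta)/1000000 elap     -- elapsed seconds in the interval
            from dba_hist_sqlstat e
            group by e.snap_id, e.sql_id)
     )
where time_rank <= 5
order by snap_id, time_rank;
}}}
Using DENSE_RANK rather than ROW_NUMBER means ties share a rank, so a snapshot can return more than five rows when statements tie on the metric.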
http://gavinsoorma.com/2009/07/exporting-and-importing-awr-snapshot-data/

http://dboptimizer.com/2011/11/08/importing-awr-repositories-from-cloned-databases/  <-- this is to change the DBIDs
https://sites.google.com/site/oraclemonitor/dba_hist_active_sess_history#TOC-Force-importing-a-in-AWR   <-- this is to ''FORCE'' import ASH data 


How to Export and Import the AWR Repository From One Database to Another (Doc ID 785730.1)

Transporting Automatic Workload Repository Data to Another System https://docs.oracle.com/en/database/oracle/oracle-database/19/tgdba/gathering-database-statistics.html#GUID-F25470A0-C236-46DE-84F7-D68FBE1B0F12
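The awrextr.sql / awrload.sql dialogs below are thin wrappers over DBMS_SWRF_INTERNAL; if you need to script the transfer, the flow is roughly the following (a sketch: the procedure and parameter names are taken from the 10g/11g packaged scripts, so verify them against your release's copies of awrextr.sql / awrload.sql):
{{{
-- hedged sketch of what awrextr.sql / awrload.sql do under the hood

-- source: extract snapshots 235..3333 into AWR_DATA/awrexp.dmp
begin
  dbms_swrf_internal.awr_extract(dmpfile => 'awrexp',
                                 dmpdir  => 'AWR_DATA',
                                 bid     => 235,
                                 eid     => 3333);
end;
/

-- target: load into a staging schema, then move into the SYS AWR tables
begin
  dbms_swrf_internal.awr_load(schname => 'AWR_STAGE',
                              dmpfile => 'awrexp',
                              dmpdir  => 'AWR_DATA');
  dbms_swrf_internal.move_to_awr(schname => 'AWR_STAGE');
end;
/
}}}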



{{{


###################################
on the source env
###################################

CREATE DIRECTORY AWR_DATA AS '/oracle/app/oracle/awrdata';

@?/rdbms/admin/awrextr.sql


~~~~~~~~~~~~~
AWR EXTRACT
~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~  This script will extract the AWR data for a range of snapshots  ~
~  into a dump file.  The script will prompt users for the         ~
~  following information:                                          ~
~     (1) database id                                              ~
~     (2) snapshot range to extract                                ~
~     (3) name of directory object                                 ~
~     (4) name of dump file                                        ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


Databases in this Workload Repository schema
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   DB Id     DB Name      Host
------------ ------------ ------------
* 2607950532 IVRS         dbrocaix01.b
                          ayantel.com


The default database id is the local one: '2607950532'.  To use this
database id, press <return> to continue, otherwise enter an alternative.

Enter value for dbid: 2607950532


Specify the Begin and End Snapshot Ids
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Enter value for begin_snap: 235
Begin Snapshot Id specified: 235

Enter value for end_snap: 3333


Specify the Directory Name
~~~~~~~~~~~~~~~~~~~~~~~~~~

Directory Name                 Directory Path
------------------------------ -------------------------------------------------
ADMIN_DIR                      /oracle/app/oracle/product/10.2.0/db_1/md/admin
AWR_DATA                       /oracle/app/oracle/awrdata
DATA_PUMP_DIR                  /flash_reco/flash_recovery_area/IVRS/expdp
DATA_PUMP_LOG                  /home/oracle/logs
SQLT$STAGE                     /oracle/app/oracle/admin/ivrs/udump
SQLT$UDUMP                     /oracle/app/oracle/admin/ivrs/udump
WORK_DIR                       /oracle/app/oracle/product/10.2.0/db_1/work

Choose a Directory Name from the above list (case-sensitive).

Enter value for directory_name: AWR_DATA

Using the dump directory: AWR_DATA

Specify the Name of the Extract Dump File
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The prefix for the default dump file name is awrdat_235_3333.
To use this name, press <return> to continue, otherwise enter
an alternative.

Enter value for file_name: awrexp



###################################
on the target env
###################################

CREATE DIRECTORY AWR_DATA AS '/oracle/app/oracle/awrdata';

@?/rdbms/admin/awrload.sql

-- on target before the load 
-- MIN/MAX for dba_hist tables
select min(snap_id) min_snap_id, max(snap_id) max_snap_id from dba_hist_snapshot;
select to_char(min(end_interval_time),'yyyy-mon-dd hh24:mi:ss') min_date, to_char(max(end_interval_time),'yyyy-mon-dd hh24:mi:ss') max_date from dba_hist_snapshot;

INSTANCE_NUMBER    SNAP_ID STARTUP_TIME         SNAP_START           SNAP_END                ELA_MIN
--------------- ---------- -------------------- -------------------- -------------------- ----------
              1        238 2011-jan-27 08:52:09 2011-jan-27 09:30:31 2011-jan-27 09:40:34      10.05
              1        237 2011-jan-27 08:52:09 2011-jan-27 09:20:28 2011-jan-27 09:30:31      10.04
              1        236 2011-jan-27 08:52:09 2011-jan-27 09:10:26 2011-jan-27 09:20:28      10.04
              1        235 2011-jan-27 08:52:09 2011-jan-27 09:03:24 2011-jan-27 09:10:26       7.03
              1        234 2009-dec-15 13:41:20 2009-dec-15 14:00:32 2011-jan-27 09:03:24  587222.87
              1        233 2009-dec-15 12:08:35 2009-dec-15 13:00:49 2009-dec-15 14:00:32      59.72
              1        232 2009-dec-15 12:08:35 2009-dec-15 12:19:42 2009-dec-15 13:00:49      41.12
              1        231 2009-dec-15 07:58:35 2009-dec-15 08:09:41 2009-dec-15 12:19:42     250.01
              1        230 2009-dec-14 23:35:11 2009-dec-14 23:46:20 2009-dec-15 08:09:41     503.35
              1        229 2009-dec-10 11:27:30 2009-dec-11 04:00:38 2009-dec-14 23:46:20     5505.7

10 rows selected.

sys@IVRS> sys@IVRS> sys@IVRS>
MIN_SNAP_ID MAX_SNAP_ID
----------- -----------
        213         239

sys@IVRS>
MIN_DATE             MAX_DATE
-------------------- --------------------
2009-dec-10 11:38:56 2011-jan-27 09:40:34



~~~~~~~~~~
AWR LOAD
~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~  This script will load the AWR data from a dump file. The   ~
~  script will prompt users for the following information:    ~
~     (1) name of directory object                            ~
~     (2) name of dump file                                   ~
~     (3) staging schema name to load AWR data into           ~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Specify the Directory Name
~~~~~~~~~~~~~~~~~~~~~~~~~~

Directory Name                 Directory Path
------------------------------ -------------------------------------------------
ADMIN_DIR                      /oracle/app/oracle/product/10.2.0/db_1/md/admin
AWR_DATA                       /oracle/app/oracle/awrdata
DATA_PUMP_DIR                  /flash_reco/flash_recovery_area/IVRS/expdp
DATA_PUMP_LOG                  /home/oracle/logs
SQLT$STAGE                     /oracle/app/oracle/admin/ivrs/udump
SQLT$UDUMP                     /oracle/app/oracle/admin/ivrs/udump
WORK_DIR                       /oracle/app/oracle/product/10.2.0/db_1/work

Choose a Directory Name from the list above (case-sensitive).

Enter value for directory_name: AWR_DATA

Using the dump directory: AWR_DATA

Specify the Name of the Dump File to Load
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Please specify the prefix of the dump file (.dmp) to load:

Enter value for file_name: awrexp


Enter value for schema_name:

Using the staging schema name: AWR_STAGE

Choose the Default tablespace for the AWR_STAGE user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Choose the AWR_STAGE user's default tablespace.  This is the
tablespace in which the AWR data will be staged.

TABLESPACE_NAME                CONTENTS  DEFAULT TABLESPACE
------------------------------ --------- ------------------
CCDATA                         PERMANENT
CCINDEX                        PERMANENT
PSE                            PERMANENT
SOE                            PERMANENT
SOEINDEX                       PERMANENT
SYSAUX                         PERMANENT *
TPCCTAB                        PERMANENT
TPCHTAB                        PERMANENT
USERS                          PERMANENT

Pressing <return> will result in the recommended default
tablespace (identified by *) being used.

Enter value for default_tablespace:


Using tablespace SYSAUX as the default tablespace for the AWR_STAGE


Choose the Temporary tablespace for the AWR_STAGE user
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Choose the AWR_STAGE user's temporary tablespace.

TABLESPACE_NAME                CONTENTS  DEFAULT TEMP TABLESPACE
------------------------------ --------- -----------------------
TEMP                           TEMPORARY *

Pressing <return> will result in the database's default temporary
tablespace (identified by *) being used.

Enter value for temporary_tablespace:




Processing object type TABLE_EXPORT/TABLE/INDEX/INDEX
Processing object type TABLE_EXPORT/TABLE/CONSTRAINT/CONSTRAINT
Processing object type TABLE_EXPORT/TABLE/CONSTRAINT/REF_CONSTRAINT
Job "SYS"."SYS_IMPORT_FULL_01" successfully completed at 12:46:07
begin
*
ERROR at line 1:
ORA-20105: unable to move AWR data to SYS
ORA-06512: at "SYS.DBMS_SWRF_INTERNAL", line 1760
ORA-20107: not allowed to move AWR data for local dbid
ORA-06512: at line 3


... Dropping AWR_STAGE user

End of AWR Load
}}}
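The ORA-20107 at the end of the load above is the expected failure when the dump carries the same DBID as the target database: MOVE_TO_AWR refuses to overwrite the local DBID's own AWR data (hence the dboptimizer link above on changing DBIDs). A quick pre-flight check before running awrload.sql (sketch):
{{{
-- compare the target's own DBID with the DBIDs already in the repository;
-- loading a dump whose DBID equals the local DBID raises ORA-20107
select dbid from v$database;
select distinct dbid from dba_hist_database_instance;
}}}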
-- from http://www.perfvision.com/statspack/awr.txt

{{{
WORKLOAD REPOSITORY report for
DB Name         DB Id    Instance     Inst Num Release     RAC Host
              Snap Id      Snap Time      Sessions Curs/Sess
Cache Sizes
Load Profile
Instance Efficiency Percentages (Target 100%)
Top 5 Timed Events                                         Avg %Total
Time Model Statistics
Wait Class
Wait Events
Background Wait Events
Operating System Statistics
Service Statistics
Service Wait Class Stats
SQL ordered by Elapsed Time
SQL ordered by CPU Time
SQL ordered by Gets
SQL ordered by Reads
SQL ordered by Executions
SQL ordered by Parse Calls
SQL ordered by Sharable Memory
SQL ordered by Version Count
Instance Activity Stats
Instance Activity Stats - Absolute Values
Instance Activity Stats - Thread Activity
Tablespace IO Stats
File IO Stats
Buffer Pool Statistics
Instance Recovery Stats
Buffer Pool Advisory
PGA Aggr Summary
PGA Aggr Target Histogram
PGA Memory Advisory
Shared Pool Advisory
SGA Target Advisory
Streams Pool Advisory
Java Pool Advisory
Buffer Wait Statistics
Enqueue Activity
Undo Segment Summary
Latch Activity
Latch Sleep Breakdown
Latch Miss Sources
Parent Latch Statistics
Segments by Logical Reads
Segments by Physical Reads
Segments by Row Lock Waits
Segments by ITL Waits
Segments by Buffer Busy Waits
Dictionary Cache Stats
Library Cache Activity
Process Memory Summary
SGA Memory Summary
SGA regions                     Begin Size (Bytes)      (if different)
SGA breakdown difference
Streams CPU/IO Usage
Streams Capture
Streams Apply
Buffered Queues
Buffered Subscribers
Rule Set
Resource Limit Stats
init.ora Parameters

}}}
{{{
WORKLOAD REPOSITORY report for
DB Name         DB Id    Instance     Inst Num Release     RAC Host
              Snap Id      Snap Time      Sessions Curs/Sess
Cache Sizes
Load Profile
Instance Efficiency Percentages (Target 100%)
Top 5 Timed Events     Avg wait %Total Call
Time Model Statistics
Wait Class
Wait Events
Background Wait Events
Operating System Statistics
Service Statistics
Service Wait Class Stats
SQL ordered by Elapsed Time
SQL ordered by CPU Time
SQL ordered by Gets
SQL ordered by Reads
SQL ordered by Executions
SQL ordered by Parse Calls
SQL ordered by Sharable Memory
SQL ordered by Version Count
Instance Activity Stats
Instance Activity Stats - Absolute Values
Instance Activity Stats - Thread Activity
Tablespace IO Stats
File IO Stats
Buffer Pool Statistics
Instance Recovery Stats
Buffer Pool Advisory
PGA Aggr Summary
PGA Aggr Target Stats     <-- new in 10.2.0.3
PGA Aggr Target Histogram
PGA Memory Advisory
Shared Pool Advisory
SGA Target Advisory
Streams Pool Advisory
Java Pool Advisory
Buffer Wait Statistics
Enqueue Activity
Undo Segment Summary
Undo Segment Stats     <-- new in 10.2.0.3
Latch Activity
Latch Sleep Breakdown
Latch Miss Sources
Parent Latch Statistics
Child Latch Statistics     <-- new in 10.2.0.3
Segments by Logical Reads
Segments by Physical Reads
Segments by Row Lock Waits
Segments by ITL Waits
Segments by Buffer Busy Waits
Dictionary Cache Stats
Library Cache Activity
Process Memory Summary
SGA Memory Summary
SGA breakdown difference
Streams CPU/IO Usage
Streams Capture
Streams Apply
Buffered Queues
Buffered Subscribers
Rule Set
Resource Limit Stats
init.ora Parameters
}}}
-- from http://www.perfvision.com/statspack/awrrpt_1_122_123.txt


{{{
WORKLOAD REPOSITORY report for

DB Name         DB Id    Instance     Inst Num Release     RAC Host
------------ ----------- ------------ -------- ----------- --- ------------
CDB10         1193559071 cdb10               1 10.2.0.1.0  NO  tsukuba

              Snap Id      Snap Time      Sessions Curs/Sess
            --------- ------------------- -------- ---------
Begin Snap:       122 31-Jul-07 17:00:40        36      24.9
  End Snap:       123 31-Jul-07 18:00:56        37      25.0
   Elapsed:               60.26 (mins)
   DB Time:               89.57 (mins)

Cache Sizes
~~~~~~~~~~~                       Begin        End
                             ---------- ----------
               Buffer Cache:        28M        28M  Std Block Size:         8K
           Shared Pool Size:       128M       128M      Log Buffer:     6,256K

Load Profile
~~~~~~~~~~~~                            Per Second       Per Transaction
                                   ---------------       ---------------
                  Redo size:            404,585.37            714,975.12
              Logical reads:              8,318.76             14,700.74
              Block changes:              2,744.42              4,849.89
             Physical reads:                111.18                196.48
            Physical writes:                 48.07                 84.96
                 User calls:                154.96                273.84
                     Parses:                  3.17                  5.60
                Hard parses:                  0.07                  0.13
                      Sorts:                  9.07                 16.04
                     Logons:                  0.05                  0.09
                   Executes:                150.07                265.20
               Transactions:                  0.57

  % Blocks changed per Read:   32.99    Recursive Call %:    16.44
 Rollback per transaction %:   21.11       Rows per Sort:    57.60

Instance Efficiency Percentages (Target 100%)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            Buffer Nowait %:  100.00       Redo NoWait %:   99.98
            Buffer  Hit   %:   98.70    In-memory Sort %:  100.00
            Library Hit   %:   99.94        Soft Parse %:   97.71
         Execute to Parse %:   97.89         Latch Hit %:  100.00
Parse CPU to Parse Elapsd %:    3.60     % Non-Parse CPU:   99.62

 Shared Pool Statistics        Begin    End
                              ------  ------
             Memory Usage %:   91.89   91.86
    % SQL with executions>1:   75.28   73.08
  % Memory for SQL w/exec>1:   73.58   70.06

Top 5 Timed Events                                         Avg %Total
~~~~~~~~~~~~~~~~~~                                        wait   Call
Event                                 Waits    Time (s)   (ms)   Time Wait Class
------------------------------ ------------ ----------- ------ ------ ----------
log file parallel write               2,819       2,037    723   37.9 System I/O
db file parallel write               32,625       1,949     60   36.3 System I/O
db file sequential read             268,447       1,761      7   32.8   User I/O
log file sync                         1,850       1,117    604   20.8     Commit
log buffer space                      1,189         866    728   16.1 Configurat
          -------------------------------------------------------------
Time Model Statistics                    DB/Inst: CDB10/cdb10  Snaps: 122-123
-> Total time in database user-calls (DB Time): 5374.1s
-> Statistics including the word "background" measure background process
   time, and so do not contribute to the DB time statistic
-> Ordered by % or DB time desc, Statistic name

Statistic Name                                       Time (s) % of DB Time
------------------------------------------ ------------------ ------------
sql execute elapsed time                              4,409.2         82.0
DB CPU                                                  488.2          9.1
parse time elapsed                                       48.5           .9
hard parse elapsed time                                  45.8           .9
PL/SQL execution elapsed time                            24.0           .4
sequence load elapsed time                                6.1           .1
connection management call elapsed time                   3.6           .1
failed parse elapsed time                                 0.8           .0
hard parse (sharing criteria) elapsed time                0.1           .0
repeated bind elapsed time                                0.0           .0
DB time                                               5,374.1          N/A
background elapsed time                               4,199.3          N/A
background cpu time                                      76.0          N/A
          -------------------------------------------------------------

Wait Class                                DB/Inst: CDB10/cdb10  Snaps: 122-123
-> s  - second
-> cs - centisecond -     100th of a second
-> ms - millisecond -    1000th of a second
-> us - microsecond - 1000000th of a second
-> ordered by wait time desc, waits desc

                                                                  Avg
                                       %Time       Total Wait    wait     Waits
Wait Class                      Waits  -outs         Time (s)    (ms)      /txn
-------------------- ---------------- ------ ---------------- ------- ---------
System I/O                     63,959     .0            4,080      64      31.3
User I/O                      286,652     .0            2,337       8     140.1
Commit                          1,850   47.2            1,117     604       0.9
Configuration                   4,319   79.1            1,081     250       2.1
Concurrency                       211   14.7               64     301       0.1
Application                     1,432     .3               29      21       0.7
Network                       566,962     .0               20       0     277.1
Other                             499    1.2                9      19       0.2
          -------------------------------------------------------------

Wait Events                              DB/Inst: CDB10/cdb10  Snaps: 122-123
-> s  - second
-> cs - centisecond -     100th of a second
-> ms - millisecond -    1000th of a second
-> us - microsecond - 1000000th of a second
-> ordered by wait time desc, waits desc (idle events last)

                                                                   Avg
                                             %Time  Total Wait    wait     Waits
Event                                 Waits  -outs    Time (s)    (ms)      /txn
---------------------------- -------------- ------ ----------- ------- ---------
log file parallel write               2,819     .0       2,037     723       1.4
db file parallel write               32,625     .0       1,949      60      15.9
db file sequential read             268,447     .0       1,761       7     131.2
log file sync                         1,850   47.2       1,117     604       0.9
log buffer space                      1,189   51.9         866     728       0.6
db file scattered read               16,589     .0         449      27       8.1
log file switch completion              182   35.2         109     597       0.1
control file parallel write           2,134     .0          87      41       1.0
direct path write temp                  415     .0          78     188       0.2
log file switch (checkpoint             120   24.2          53     444       0.1
buffer busy waits                       155   18.1          49     315       0.1
free buffer waits                     2,387   95.0          43      18       1.2
enq: RO - fast object reuse              60    6.7          23     379       0.0
SQL*Net more data to dblink           1,723     .0          19      11       0.8
direct path read temp                   350     .0          16      46       0.2
local write wait                        164    1.8          15      90       0.1
direct path write                       304     .0          13      42       0.1
write complete waits                     11   90.9          10     923       0.0
latch: In memory undo latch               5     .0           8    1592       0.0
os thread startup                        40    7.5           7     171       0.0
enq: CF - contention                     25     .0           7     272       0.0
SQL*Net break/reset to clien          1,372     .0           7       5       0.7
control file sequential read         26,253     .0           5       0      12.8
db file parallel read                   149     .0           4      29       0.1
direct path read                        233     .0           1       6       0.1
latch: cache buffers lru cha             10     .0           1     132       0.0
latch: object queue header o              2     .0           1     460       0.0
SQL*Net message to client           557,769     .0           1       0     272.6
log file single write                    64     .0           1      13       0.0
SQL*Net more data to client           1,806     .0           0       0       0.9
LGWR wait for redo copy                 125    4.8           0       1       0.1
rdbms ipc reply                         298     .0           0       0       0.1
SQL*Net more data from clien             93     .0           0       1       0.0
latch free                                2     .0           0      17       0.0
latch: redo allocation                    1     .0           0      21       0.0
latch: shared pool                        2     .0           0      10       0.0
log file sequential read                 64     .0           0       0       0.0
reliable message                         36     .0           0       1       0.0
read by other session                     1     .0           0      15       0.0
SQL*Net message to dblink             5,565     .0           0       0       2.7
latch: library cache                      4     .0           0       1       0.0
undo segment extension                  430   99.3           0       0       0.2
latch: cache buffers chains               4     .0           0       0       0.0
latch: library cache pin                  1     .0           0       0       0.0
SQL*Net more data from dblin              6     .0           0       0       0.0
SQL*Net message from client         557,767     .0      51,335      92     272.6
Streams AQ: waiting for time             50   40.0       3,796   75924       0.0
wait for unread message on b          3,588   99.5       3,522     982       1.8
Streams AQ: qmn slave idle w            128     .0       3,520   27498       0.1
Streams AQ: qmn coordinator             275   53.5       3,520   12799       0.1
virtual circuit status                  120  100.0       3,503   29191       0.1
Streams AQ: waiting for mess            725   97.7       3,498    4825       0.4
jobq slave wait                       1,133   97.5       3,284    2898       0.6
PL/SQL lock timer                       977   99.9       2,862    2929       0.5
SQL*Net message from dblink           5,566     .0         540      97       2.7
class slave wait                          2  100.0          10    4892       0.0
single-task message                       2     .0           0     103       0.0
          -------------------------------------------------------------

Background Wait Events                   DB/Inst: CDB10/cdb10  Snaps: 122-123
-> ordered by wait time desc, waits desc (idle events last)

                                                                   Avg
                                             %Time  Total Wait    wait     Waits
Event                                 Waits  -outs    Time (s)    (ms)      /txn
---------------------------- -------------- ------ ----------- ------- ---------
log file parallel write               2,820     .0       2,037     722       1.4
db file parallel write               32,625     .0       1,949      60      15.9
control file parallel write           2,134     .0          87      41       1.0
direct path write                       231     .0          13      55       0.1
db file sequential read                 935     .0          12      13       0.5
log buffer space                         13   53.8          10     791       0.0
events in waitclass Other               415    1.4           8      19       0.2
os thread startup                        40    7.5           7     171       0.0
db file scattered read                  115     .0           3      27       0.1
log file sync                             3   66.7           2     828       0.0
direct path read                        231     .0           1       6       0.1
buffer busy waits                        21     .0           1      63       0.0
control file sequential read          2,550     .0           1       0       1.2
log file single write                    64     .0           1      13       0.0
log file sequential read                 64     .0           0       0       0.0
latch: shared pool                        1     .0           0       7       0.0
latch: library cache                      2     .0           0       1       0.0
latch: cache buffers chains               1     .0           0       0       0.0
rdbms ipc message                    13,865   72.8      27,604    1991       6.8
Streams AQ: waiting for time             50   40.0       3,796   75924       0.0
pmon timer                            1,272   98.6       3,526    2772       0.6
Streams AQ: qmn slave idle w            128     .0       3,520   27498       0.1
Streams AQ: qmn coordinator             275   53.5       3,520   12799       0.1
smon timer                              178    3.4       3,360   18875       0.1
          -------------------------------------------------------------

Operating System Statistics               DB/Inst: CDB10/cdb10  Snaps: 122-123

Statistic                                       Total
-------------------------------- --------------------
AVG_BUSY_TIME                                 204,954
AVG_IDLE_TIME                                 155,940
AVG_IOWAIT_TIME                                     0
AVG_SYS_TIME                                   15,979
AVG_USER_TIME                                 188,638
BUSY_TIME                                     410,601
IDLE_TIME                                     312,370
IOWAIT_TIME                                         0
SYS_TIME                                       32,591
USER_TIME                                     378,010
LOAD                                                1
OS_CPU_WAIT_TIME                              228,200
RSRC_MGR_CPU_WAIT_TIME                              0
VM_IN_BYTES                               338,665,472
VM_OUT_BYTES                              397,410,304
PHYSICAL_MEMORY_BYTES                   6,388,301,824
NUM_CPUS                                            2
          -------------------------------------------------------------

Service Statistics                       DB/Inst: CDB10/cdb10  Snaps: 122-123
-> ordered by DB Time

                                                             Physical    Logical
Service Name                      DB Time (s)   DB CPU (s)      Reads      Reads
-------------------------------- ------------ ------------ ---------- ----------
SYS$USERS                             4,666.5        429.9    348,141 ##########
cdb10                                   701.4         58.1     51,046    224,419
SYS$BACKGROUND                            0.0          0.0      2,830     18,255
cdb10XDB                                  0.0          0.0          0          0
          -------------------------------------------------------------

Service Wait Class Stats                  DB/Inst: CDB10/cdb10  Snaps: 122-123
-> Wait Class info for services in the Service Statistics section.
-> Total Waits and Time Waited displayed for the following wait
   classes:  User I/O, Concurrency, Administrative, Network
-> Time Waited (Wt Time) in centisecond (100th of a second)

Service Name
----------------------------------------------------------------
 User I/O  User I/O  Concurcy  Concurcy     Admin     Admin   Network   Network
Total Wts   Wt Time Total Wts   Wt Time Total Wts   Wt Time Total Wts   Wt Time
--------- --------- --------- --------- --------- --------- --------- ---------
SYS$USERS
   271425    210890        65       602         0         0    532492      1979
cdb10
    12969     18550        81      4945         0         0     34068        15
SYS$BACKGROUND
     2261      4306        65       815         0         0         0         0
          -------------------------------------------------------------

SQL ordered by Elapsed Time              DB/Inst: CDB10/cdb10  Snaps: 122-123
-> Resources reported for PL/SQL code includes the resources used by all SQL
   statements called by the code.
-> % Total DB Time is the Elapsed Time of the SQL statement divided
   into the Total Database Time multiplied by 100

  Elapsed      CPU                  Elap per  % Total
  Time (s)   Time (s)  Executions   Exec (s)  DB Time    SQL Id
---------- ---------- ------------ ---------- ------- -------------
       797        134            1      796.6    14.8 f1qcyh20550cf
Call CALC_QOS_SLOW(:1, :2, :3, :4)

       773         58            1      773.2    14.4 fj6gjgsshtxyx
Call CALC_DELETE_OLD_DATA(:1)

       354         25            1      354.3     6.6 0cjsxw5ndqdbc
Call CALC_HFC_SLOW(:1, :2, :3, :4)

       275         29            1      275.3     5.1 8t8as9usk11qw
Call CALC_TOPOLOGY_SLOW(:1, :2, :3, :4)

       202          4            4       50.5     3.8 dr1rkrznhh95b
Call CALC_TOPOLOGY_MEDIUM(:1, :2, :3, :4)

       158         16            0        N/A     2.9 10dkqv3kr8xa5
 SELECT trunc(SYSDATE, 'HH24') HOUR_STAMP, CM_ID, MA
X(SUBSTR(CM_DESC, 1, 12)) CM_DESC, MAX(UP_ID) UP_ID, MA
X(DOWN_ID) DOWN_ID, MAX(MAC_ID) MAC_ID, MAX(CMTS_
ID) CMTS_ID, SUM(BYTES_UP) SUM_BYTES_UP, SUM(BY

       139          7            1      139.2     2.6 38zhkf4jdyff4
DECLARE job BINARY_INTEGER := :job; next_date DATE := :mydate; broken BOOLEAN :
= FALSE; BEGIN ash.collect(3,1200); :mydate := next_date; IF broken THEN :b := 1
; ELSE :b := 0; END IF; END;

       137         72            1      136.8     2.5 298wmz1kxjs1m
INSERT INTO CM_QOS_PROF SELECT :B1 , R.TOPOLOGYID, :B1 - :B4 , P.NODE_PROFILE_ID
, R.DOCSIFCMTSSERVICEQOSPROFILE FROM CM_SID_RAWDATA R, ( SELECT DISTINCT T.CMID,
 P.QOS_PROF_IDX, P.NODE_PROFILE_ID FROM TMP_TOP_SLOW_CM T, CMTS_QOS_PROF P WHERE
 T.CMTSID = P.TOPOLOGYID AND P.SECONDID = :B1 ) P WHERE R.BATCHID = :B3 AND R.PR

       130          9            1      130.5     2.4 6n0d6cv6w6krs
DELETE FROM CM_VA WHERE SECONDID <= :B1

       130          9            1      130.0     2.4 86m0m9q8fw9bj
DELETE FROM CM_QOS_PROF WHERE SECONDID <= :B1

       126          3            1      125.6     2.3 33bpz9dh1w5jk
Module: Lab128
--lab128 select /*+rule*/ owner, segment_name||decode(partition_name,null,nul
l,' ('||partition_name||')') name, segment_type,tablespace_name, extent_id,f
ile_id,block_id, blocks,bytes/1048576 bytes from dba_extents

       124          9            1      124.5     2.3 gyqv6h5pft4mj
DELETE FROM CM_BYTES WHERE SECONDID <= :B1

       121          2           56        2.2     2.3 6gvch1xu9ca3g
DECLARE job BINARY_INTEGER := :job; next_date DATE := :mydate;
  broken BOOLEAN := FALSE;
BEGIN EMD_MAINTENANCE.EXECUTE_EM_DBMS_JOB_PROCS(); :mydate := next_date;
  IF broken THEN :b := 1; ELSE :b := 0; END IF; END;

       120          2            4       30.0     2.2 4zjg6w4mwu0wv
INSERT INTO TMP_TOP_MED_DN SELECT M.CMTSID, M.VENDOR_DESC, M.MODEL_DESC,
  MAC_L.TOPOLOGYID, DOWN_L.TOPOLOGYID, M.UP_SNR_CNR_A3, M.UP_SNR_CNR_A2,
  M.UP_SNR_CNR_A1, M.UP_SNR_CNR_A0, M.MAC_SLOTS_OPEN, M.MAC_SLOTS_USED,
  M.CMTS_REBOOT, 0 FROM TMP_TOP_MED_CMTS M, TOPOLOGY_LINK DOWN_L,
  TOPOLOGY_NODE DOWN_N, TOPOLOGY_LINK MAC_L

       119          9            1      119.1     2.2 aywfs0n7wwwhn
DELETE FROM CM_POWER_2 WHERE SECONDID <= :B1

       117          9            1      117.4     2.2 0fnnktt50m86h
DELETE FROM CM_ERRORS WHERE SECONDID <= :B1

       116          1          977        0.1     2.1 5jh6zfmvpu77f
UPDATE ASH.DBIDS@REPO SET ASHSEQ = :B2 WHERE DBID = :B1

       108          9            1      107.5     2.0 21jqxqyf80cn8
DELETE FROM CM_POWER_1 WHERE SECONDID <= :B1

       107         11            1      107.0     2.0 87gy6mxtk7f3z
DELETE FROM CM_POLL_STATUS WHERE TOPOLOGYID IN
  ( SELECT DISTINCT TOPOLOGYID FROM CM_RAWDATA WHERE BATCHID = :B1 )

        96          6            1       95.9     1.8 2r6jnnf1hzb4z
select power.TOPOLOGYID, power.SAMPLE_LENGTH, UNIQUE_CMS, ACTIVE_CMS,
  BITSPERSYMBOL, TXPOWER_UP FROM CM_POWER_2 power, TOPOLOGY_LINK link,
  UPSTREAM_CHANNEL channel WHERE power.SECONDID = :1
  AND link.TOPOLOGYID = power.TOPOLOGYID AND link.PARENTLEN = 1
  AND link.STATEID = 1 AND link.LINKTYPEID = 1 AND link.PARENTID = cha

        95          1            1       95.1     1.8 1qp1yn30gajjw
SELECT trunc(SYSDATE, 'HH24') HOUR_STAMP, M.TOPOLOGYID UP_ID,
  T.UP_DESC UP_DESC, T.MAC_ID MAC_ID, T.CMTS_ID CMTS_ID, M.MAX_PERCENT_UTIL,
  M.MAX_PACKETS_PER_SEC, M.AVG_PACKET_SIZE,

        94          5            1       93.9     1.7 fxvdq915s3qpt
DELETE FROM TMP_CALC_HFC_SLOW_CM_LAST

        87          4            1       86.9     1.6 axyukfdx12pu4
Call CALC_DELETE_SLOW_RAWDATA(:1, :2)

        85          9            1       84.6     1.6 998t5bbdfm5rm
INSERT INTO CM_RAWDATA SELECT PROFINDX, 0 BATCHID, TOPOLOGYID, SAMPLETIME,
  SYSUPTIME, DOCSIFCMTSCMSTATUSVALUE, DOCSIFCMTSSERVICEINOCTETS,
  DOCSIFCMTSSERVICEOUTOCTETS, DOCSIFCMSTATUSTXPOWER, DOCSIFCMTSCMSTATUSRXPOWER,
  DOCSIFDOWNCHANNELPOWER, DOCSIFSIGQUNERROREDS, DOCSIFSIGQCORRECTEDS,
  DOCSIFSIGQUNCORRECTABLES, DOCSIFSIGQ

        84          5            1       83.8     1.6 3a11s4c86wdu5
DELETE FROM CM_RAWDATA WHERE BATCHID = 0 AND PROFINDX = :B1

        77         22      150,832        0.0     1.4 5zm9acqtd51h7
insert into cm_sid_rawdata (profindx, batchid, topologyid, sid, sampletime,
  docsIfCmtsServiceQosProfile) values (:1, :2, :3, :4, :5, :6)

        74          9            1       73.6     1.4 3whpusvtv0qq1
INSERT INTO TMP_CALC_QOS_SLOW_CM_TMP SELECT T.CMTSID, T.DOWNID, T.UPID, T.CMID,
  GREATEST(T.CMTS_REBOOT, T.UP_REBOOT), GREATEST(T.CMTS_REBOOT, T.UP_REBOOT),
  R.DOCSIFCMTSSERVICEINOCTETS, R.DOCSIFCMTSSERVICEOUTOCTETS, S.SID,
  L.PREV_SECONDID, L.PREV_IFINOCTETS, L.PREV_IFOUTOCTETS, L.PREV_SID
  FROM TMP_TOP_SLOW_CM T, CM_RAWD

        74          8            1       73.5     1.4 9h99br1t3qq3a
INSERT INTO TMP_CALC_HFC_SLOW_CM_LAST SELECT * FROM TMP_CALC_HFC_SLOW_CM_LAST_TMP

        72          7            1       72.0     1.3 4qunm1qbf8cyk
select power.TOPOLOGYID, power.SAMPLE_LENGTH, UNIQUE_CMS, ACTIVE_CMS,
  CHANNELWIDTH, RXPOWER_UP, RXPOWER UPSTREAM_AVG_RX FROM CM_POWER_1 power,
  TOPOLOGY_LINK link, UPSTREAM_CHANNEL channel, UPSTREAM_POWER_1 upstream_rx
  WHERE power.SECONDID = :1 and power.SECONDID = upstream_rx.secondid
  AND link.TOPOLOGYID = power.TOPOLO

        68          3            1       68.4     1.3 bzmccctnyjb3z
INSERT INTO DOWNSTREAM_ERRORS SELECT T2.SECONDID, T1.DOWNID,
  ROUND(AVG(T2.SAMPLE_LENGTH), 0),
  ROUND(AVG(DECODE(T2.UNERROREDS + T2.CORRECTEDS + T2.UNCORRECTABLES, 0, 0,
    T2.UNCORRECTABLES / ( T2.UNERROREDS + T2.CORRECTEDS + T2.UNCORRECTABLES )
    * 100)), 2) AVG_CER,
  ROUND(AVG(DECODE(T2.UNERROREDS + T2.CORRECTEDS + T2.UNCORRE

        64          7            1       63.6     1.2 fqcwt6uak8x3w
INSERT INTO TMP_CALC_QOS_SLOW_CM_LAST SELECT * FROM TMP_CALC_QOS_SLOW_CM_LAST_TMP

        59          6            1       58.8     1.1 fd6a0p6333g8z
SELECT trunc(SYSDATE, 'HH24') HOUR_STAMP, CM_ID,
  MAX(SUBSTR(CM_DESC, 1, 12)) CM_DESC, MAX(UP_ID) UP_ID, MAX(DOWN_ID) DOWN_ID,
  MAX(MAC_ID) MAC_ID, MAX(CMTS_ID) CMTS_ID, SUM(BYTES_UP) SUM_BYTES_UP, SUM(BY

          -------------------------------------------------------------
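
As a cross-check on the percentages: the top statement's 797 s at 14.8% of
DB time implies roughly 797 / 0.148 = ~5,385 s of total database time in the
interval, which the other rows agree with (e.g. 773 / 5385 = ~14.4%). The
ranking itself can be rebuilt from the AWR history; the sketch below assumes
the standard DBA_HIST_SQLSTAT and DBA_HIST_SQLTEXT views, where the *_DELTA
columns hold per-interval figures and the times are in microseconds:

  -- Delta rows tagged snap_id = 123 cover the 122 -> 123 interval.
  select s.sql_id,
         round(s.elapsed_time_delta / 1e6)  as elapsed_s,
         round(s.cpu_time_delta / 1e6)      as cpu_s,
         s.executions_delta                 as execs,
         dbms_lob.substr(t.sql_text, 80, 1) as sql_text_head
  from   dba_hist_sqlstat s,
         dba_hist_sqltext t
  where  s.snap_id = 123
  and    s.sql_id  = t.sql_id
  and    s.dbid    = t.dbid
  order  by s.elapsed_time_delta desc;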

SQL ordered by CPU Time                  DB/Inst: CDB10/cdb10  Snaps: 122-123
-> Resources reported for PL/SQL code include the resources used by all SQL
   statements called by the code.
-> % Total DB Time is the Elapsed Time of the SQL statement divided
   by the Total Database Time, multiplied by 100

    CPU      Elapsed                  CPU per  % Total
  Time (s)   Time (s)  Executions     Exec (s) DB Time    SQL Id
---------- ---------- ------------ ----------- ------- -------------
       134        797            1      133.81    14.8 f1qcyh20550cf
Call CALC_QOS_SLOW(:1, :2, :3, :4)

        72        137            1       71.96     2.5 298wmz1kxjs1m
INSERT INTO CM_QOS_PROF SELECT :B1 , R.TOPOLOGYID, :B1 - :B4 ,
  P.NODE_PROFILE_ID, R.DOCSIFCMTSSERVICEQOSPROFILE FROM CM_SID_RAWDATA R,
  ( SELECT DISTINCT T.CMID, P.QOS_PROF_IDX, P.NODE_PROFILE_ID
    FROM TMP_TOP_SLOW_CM T, CMTS_QOS_PROF P
    WHERE T.CMTSID = P.TOPOLOGYID AND P.SECONDID = :B1 ) P
  WHERE R.BATCHID = :B3 AND R.PR

        58        773            1       57.60    14.4 fj6gjgsshtxyx
Call CALC_DELETE_OLD_DATA(:1)

        29        275            1