<!--{{{-->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:alpha(opacity=60);}
/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0 0 0.5em;}
.tab {margin:0 0 0 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0 0.25em; padding:0 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Works around behavior in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
<!--{{{-->
<div class='header' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser

Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])

<<option txtUserName>>
<<option chkSaveBackups>> [[SaveBackups]]
<<option chkAutoSave>> [[AutoSave]]
<<option chkRegExpSearch>> [[RegExpSearch]]
<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
<<option chkAnimate>> [[EnableAnimations]]

----
Also see [[AdvancedOptions]]
<<importTiddlers>>
Background: #fff
Foreground: #000
PrimaryPale: #eee
PrimaryLight: #ccc
PrimaryMid: #600
PrimaryDark: #600
SecondaryPale: #eee
SecondaryLight: #ccc
SecondaryMid: #999
SecondaryDark: #666
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
This is the executable that manages metadata for OpenDDS.  This is where associations are made between publications and subscriptions.  Builtin Topics are distributed from here.  One process of this type is required for the operation of OpenDDS.  It implements CORBA interfaces that allow the individual DDS Participants to register for discovery and update internal state as necessary.
Test execution will result in data being generated and stored.  This data can then be post-processed to reduce it to understandable summaries and to visualize the results.  OpenDDS-Bench produces output to the console while operating and will create latency data files if directed to do so as part of the test configuration.  A data reduction script for bash shells is available that performs data reduction on the pre-configured latency and throughput test results:
{{indent{
;[[$BENCH_ROOT/bin/generate-test-results.sh|GenerateTestResultsManPage]]
:This script will reduce data from the pre-configured tests for [[latency|Latency Tests]] and [[throughput|Throughput Tests]].  The results will be located in a new 'data' subdirectory of the test execution base directory.
}}}
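For example (a minimal sketch, assuming ~BENCH_ROOT is set and the command is issued from the test execution base directory):
{{indent{
{{{ $BENCH_ROOT/bin/generate-test-results.sh $BENCH_ROOT }}}
}}}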
The directory structure assumed for data reduction and visualization is the following, relative to a working directory acting as the root directory for the test execution.
{{centeredTable{
| ''Data Reduction and Visualization Directory Structure'' |c
| Directory | Use |
| data|plot data files converted from the raw data files;<br>extracted summary data;<br>plot label definition files |
| images|data visualization results |
}}}
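If the //data// and //images// directories do not already exist, they can be created from the test execution root before reducing or visualizing any data.  For example, in a bash shell:
{{indent{
{{{ mkdir -p data images }}}
}}}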
The data reduction steps for the existing tests are summarized here:
{{chunk{
!Latency Test data reduction

Latency tests will rely on the data stored to the [[test output files|LatencyDataFileFormat]] specified in the test configurations.  These files contain individual sample latency data as well as summary data.

Scripts are available to convert the data into plottable format.  These include the following:

{{indent{
;[[$BENCH_ROOT/bin/reduce-latency-data.pl|ReduceLatencyManPage]]
:This script will convert individual latency test output files into a format suitable for use by GNUPlot for [[visualization|Data Visualization]].
}}}
{{indent{
;[[$BENCH_ROOT/bin/extract-latency.pl|ExtractLatencyManPage]]
:This script will extract the data from the previously converted plot data and create a summary table for plotting overall trend data.
}}}
{{indent{
;[[$BENCH_ROOT/bin/gen-latency-stats.pl|GenLatencyStatsManPage]]
:This script will extract latency statistical data from the previously converted plot data and create GNUPlot variable assignments suitable for use in displaying the data in plots.
}}}

In addition to these scripts, a helper script showing how the other scripts can be used to reduce the data produced by the pre-defined [[existing tests|Existing Tests]] is available at //$~BENCH_ROOT/tools/convert-latency//.  The commands in this file assume the directory structure in which the tests were executed and in which the analysis will be performed.  You will need to tailor a similar set of commands to your test environment in order to use the data reduction scripts effectively.

The data reduction steps performed include the following:
1) Convert the raw data files into plottable files:
{{indent{
{{{ reduce-latency-data.pl <testdir>/tcp/latency-<size>.data > data/latency-tcp-<size>.gpd }}}
{{{ reduce-latency-data.pl <testdir>/udp/latency-<size>.data > data/latency-udp-<size>.gpd }}}
{{{ reduce-latency-data.pl <testdir>/mbe/latency-<size>.data > data/latency-mbe-<size>.gpd }}}
{{{ reduce-latency-data.pl <testdir>/mrel/latency-<size>.data > data/latency-mrel-<size>.gpd }}}
}}}
2) Extract summary data from the plottable data files:
{{indent{
{{{ extract-latency.pl data/latency-*.gpd > data/latency.csv }}}
}}}
3) Generate GNUPlot-suitable label strings for charts:
{{indent{
{{{ gen-latency-stats.pl data/latency.csv }}}
}}}
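For reference, a minimal bash sketch stringing the three steps together (the //<testdir>// placeholder stands for the directory in which the tests were executed; the message sizes are those of the pre-configured latency tests):
{{{
TESTDIR=<testdir>    # directory in which the tests were executed
for transport in tcp udp mbe mrel; do
  for size in 50 100 250 500 1000 2500 5000 8000 16000 32000; do
    reduce-latency-data.pl $TESTDIR/$transport/latency-$size.data \
      > data/latency-$transport-$size.gpd
  done
done
extract-latency.pl data/latency-*.gpd > data/latency.csv
gen-latency-stats.pl data/latency.csv
}}}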
}}}

{{chunk{
!Throughput Test data reduction

Throughput test analysis will rely on the number of samples received at the test subscriptions.  Typically no latency data will be gathered during these tests, so they will not be slowed by data collection.  The summary number of samples received can be gathered from the output of the test execution.

The following script is available to convert the data into plottable format:
{{indent{
;[[$BENCH_ROOT/bin/extract-throughput.pl|ExtractThroughputManPage]]
:This script will extract throughput data from throughput test results files and create a summary table for plotting overall trend data.
}}}
The output of this data reduction script can be used directly to plot the throughput summary results.

The data reduction steps performed for throughput testing include the following:
1) Extract the throughput data:
{{indent{
{{{ extract-throughput.pl <testdir>/*/*.results > data/throughput.csv }}}
}}}
}}}
Once performance tests have been executed and the data reduced, the results can be visualized by plotting the data.  Several plotting scripts are available for the reduced data.  These are based on using the GNUPlot tool and create various different views of the data.

A summary of the available plots and how to interpret them is available as a Middleware News Brief: [[Interpreting OpenDDS Performance Testing Results|http://mnb.objectcomputing.com/mnb/MiddlewareNewsBrief-201003.html]]

A plotting script for bash shells is available that performs plotting of the reduced data for the pre-configured latency and throughput tests:
{{indent{
;[[$BENCH_ROOT/bin/plot-test-results.sh|PlotTestResultsManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] and [[throughput|Throughput Tests]].  The resulting images will be located in a subdirectory specified on the command line.
}}}
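For example (an illustrative sketch only; the exact arguments, including the output subdirectory for the generated images, are described on the [[man page|PlotTestResultsManPage]]):
{{indent{
{{{ $BENCH_ROOT/bin/plot-test-results.sh $BENCH_ROOT images }}}
}}}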
The directory structure assumed for data reduction and visualization is the following, relative to a working directory acting as the root directory for the test execution.
{{centeredTable{
| ''Data Reduction and Visualization Directory Structure'' |c
| Directory | Use |
| data|plot data files converted from the raw data files;<br>extracted summary data;<br>plot label definition files<br>This is used as plot input data for visualization. |
| images|data visualization results.  Plotting output will be placed here. |
}}}
The visualization steps to plot results for the existing tests are summarized here:
{{chunk{
!Latency Test Plots
There are several plots that can be made from the latency test data.  These include latency and jitter summary and detailed data.
!!!Latency Summary Plots
These plots use the //'latency.csv'// summary data generated by the [[extract-latency.pl|ExtractLatencyManPage]] data reduction script.  The plotting script is:
{{indent{
;[[$BENCH_ROOT/bin/plot-transports.gpi|PlotTransportsManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] to produce a summary plot of latency for the entire set of test data.
}}}
Plotting with this script will produce a single chart with a line for each transport type, the message size on the X-axis and the latency on the Y-axis.  A typical command for this script (from within GNUPlot) would be:
{{indent{
{{{ gnuplot> call plot-transports.gpi data/latency.csv images/transport-latency }}}
}}}
with the output chart located at //'images/transport-latency.png'//.
!!!Jitter Summary Plots
These plots use the //'latency.csv'// summary data generated by the [[extract-latency.pl|ExtractLatencyManPage]] data reduction script.  The plotting script is:
{{indent{
;[[$BENCH_ROOT/bin/plot-jitter.gpi|PlotJitterManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] to produce a summary plot of jitter for the entire set of test data.
}}}
Plotting with this script will produce a single chart with a line for each transport type, the message size on the X-axis and the jitter on the Y-axis.  A typical command for this script (from within GNUPlot) would be:
{{indent{
{{{ gnuplot> call plot-jitter.gpi data/latency.csv images/transport-jitter }}}
}}}
with the output chart located at //'images/transport-jitter.png'//.
!!!Quantile Summary Plots
These plots use the //'latency-<transport>-<size>.gpd'// data generated by the [[reduce-latency-data.pl|ReduceLatencyManPage]] data reduction script.  The plotting script is:
{{indent{
;[[$BENCH_ROOT/bin/plot-quantiles.gpi|PlotQuantilesManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] to produce summary plots of latency in quantile format for the entire set of test data.
}}}
Plotting with this script will produce a chart for each transport type, with a line for each message size.  The quantile (portion of samples with smaller latency) is plotted on the X-axis and the latency on the Y-axis.  A typical command for this script (from within GNUPlot) would be:
{{indent{
{{{ gnuplot> call plot-quantiles.gpi data images }}}
}}}
with the output charts located at //'images/<transport>-quantiles.png'//.
!!!Kernel Density Estimate Summary Plots
These plots use the //'latency-<transport>-<size>.gpd'// data generated by the [[reduce-latency-data.pl|ReduceLatencyManPage]] data reduction script.  The plotting script is:
{{indent{
;[[$BENCH_ROOT/bin/plot-density.gpi|PlotDensityManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] to produce summary plots of latency data as a kernel density estimate for the entire set of test data.
}}}
Plotting with this script will produce a chart for each transport type, with a line for each message size.  The latency is plotted on the X-axis and the frequency on the Y-axis.  A typical command for this script (from within GNUPlot) would be:
{{indent{
{{{ gnuplot> call plot-density.gpi data images }}}
}}}
with the output charts located at //'images/<transport>-density.png'//.
!!!Detailed Plots
The detailed data can be charted as a quad plot for each transport type / message size combination.  These plots use the //'latency-<transport>-<size>.gpd'// data generated by the [[reduce-latency-data.pl|ReduceLatencyManPage]] data reduction script.  The plotting script is:
{{indent{
;[[$BENCH_ROOT/bin/lj-plots.gpi|LJPlotsManPage]]
:This script will plot the reduced data from the pre-configured tests for [[latency|Latency Tests]] to produce detailed plots of latency and jitter data in four different charts.  These charts are created as a multiplot in the same image.  They include latency and jitter timeline charts, and histogram charts for latency and jitter.  Summary statistics generated by the [[gen-latency-stats.pl|GenLatencyStatsManPage]] script are added as labels to the histogram charts.
}}}
Plotting with this script will produce a multiplot with four charts for each transport type / message size combination.  A typical command for this script (from within GNUPlot) would be:
{{indent{
{{{ gnuplot> call lj-plots.gpi data/latency-<transport>-<size>.gpd data/latency-<transport>-<size>.stats images/latency-<transport>-<size>.png 'chart title' }}}
}}}
with the output charts located at //'images/latency-<transport>-<size>.png'//.
}}}
{{chunk{
!Throughput Test Plots
Throughput test plots all require the reduced throughput test data produced by the [[extract-throughput.pl|ExtractThroughputManPage]] script.  This data is typically located at //'data/throughput.csv'//.  Plotting can then be done to produce charts containing the realized throughput versus the requested throughput.

The plots are organized with the Message Size as the bottom X-axis, the Message Rate as the top X-axis and the measured throughput on the Y-axis.  For the pre-configured tests, either the size or rate is held constant, leaving the other variable as the one plotted, or both are varied in a way that the plotted position corresponds to a value on both the top and bottom X-axes.  This results in a plot of the requested throughput (size times rate) versus the measured throughput.  Plots of the absolute network capacity and the measured network //ftp// performance are included on the charts for reference.

The available scripts are:
!!![[$BENCH_ROOT/bin/plot-throughput-transports.gpi|PlotThroughputTransportsManPage]]
Plotting with this script will produce a summary chart containing all available throughput data, and a separate chart for each transport type containing all available data for that transport.
{{indent{
{{{ gnuplot> call plot-throughput-transports.gpi data/throughput.csv images }}}
}}}
with the output charts being //'images/thru-lines.png'// and //'images/thru-<transport>.png'//.
!!![[$BENCH_ROOT/bin/plot-throughput-testformats.gpi|PlotThroughputFormatsManPage]]
Plotting with this script will produce a chart for each test topology, including all available data for that test type.
{{indent{
{{{ gnuplot> call plot-throughput-testformats.gpi data/throughput.csv images }}}
}}}
with the output charts being //'images/thru-<type>.png'//.
}}}
[[OpenDDS-Bench]]
There are existing test configurations for common test scenarios provided with the framework.  These are:
;[[latency|Latency Tests]]
:measures latency from publication to reception of each sample
;[[throughput|Throughput Tests]]
:measures the maximum bandwidth that can be utilized by OpenDDS
;[[scaling|Scaling Tests]]
:measures the incremental performance as hosts are added to a distribution

In addition to the test specification files, the transport specification files may need to be updated for your test environment.  Currently, only the //$~BENCH_ROOT/etc/transport-udp.ini// file will need to be updated.  Replace the <%HOSTNAME%> placeholders with IP addresses or resolvable hostnames in your environment.
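As an illustrative sketch (assuming a GNU sed with in-place editing, and substituting a hypothetical hostname), the placeholders can be replaced with a command such as:
{{indent{
{{{ sed -i.bak 's/<%HOSTNAME%>/host1.example.com/g' $BENCH_ROOT/etc/transport-udp.ini }}}
}}}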

Included in the test specification directories is a file "//test-commands.txt//" which contains a set of bash script commands that can be used to execute the full set of tests in a Linux environment.  These can be used as an example and tailored to your specific test environment.  Beyond translating to the native shell type, the IP addresses used should be changed to those that are valid in your environment.
<html>
<head>
<title>expandColors.pl - Convert the TiddlyWiki ColorPalette slices to actual CSS colors</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>expandColors.pl - Convert the TiddlyWiki ColorPalette slices to actual CSS colors</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  expandColors.pl &lt;stylesheet&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script will replace the TiddlyWiki ColorPalette slice specifications
in the CSS stylesheet(s).  The map is currently statically defined in this
script and needs to be synchronized manually with the TiddlyWiki
ColorPalette.</p>
<p>Processing is performed in place.  That is, the input file is written
back with the updated information.  The original file is saved with an
appended '.bak' extension.</p>
<p>This script is intended to be run on the publish/style.css file created
when publishing a TiddlyWiki file as a series of static HTML documents.
The export plugin does not do this conversion for us.</p>

</body>

</html>
<html>
<head>
<title>extract-latency.pl - extract summary statistics from plot datafiles.</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>extract-latency.pl - extract summary statistics from plot datafiles.</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  extract-latency.pl &lt;infile&gt; ...</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script processes the input files and prints a summary from all files
in comma separated values (CSV) format to standard output.</p>
<p>The input file is expected to be in the format produced by the
reduce-latency-data.pl data reduction script.  This file type has
statistical summary data in the Index 1 and Index 2 section header
comments that are parsed by this script and gathered from all input files.</p>
<p>This input file name is expected to be in a format that includes '-'
separated fields and a fixed extension of &quot;.gpd&quot;.</p>
<pre>
  latency-&lt;transport&gt;-&lt;size&gt;.gpd</pre>
<p>The &lt;transport&gt; and &lt;size&gt; fields are used to populate two columns in the
output data.</p>
<p>This output consists of a single CSV file with a single record (line)
generated from each input file.  The output lines include:</p>
<table border>
  <tr><th>Field</th><th>Description</th></tr>
  <tr><td>1</td><td>transport type (derived from input filename)</td></tr>
  <tr><td>2</td><td>test message size (derived from input filename)</td></tr>
  <tr><td>3</td><td>latency mean statistic</td></tr>
  <tr><td>4</td><td>latency standard deviation statistic</td></tr>
  <tr><td>5</td><td>latency median statistic</td></tr>
  <tr><td>6</td><td>latency median absolute deviation statistic</td></tr>
  <tr><td>7</td><td>latency maximum statistic</td></tr>
  <tr><td>8</td><td>latency minimum statistic</td></tr>
  <tr><td>9</td><td>jitter mean statistic</td></tr>
  <tr><td>10</td><td>jitter standard deviation statistic</td></tr>
  <tr><td>11</td><td>jitter median statistic</td></tr>
  <tr><td>12</td><td>jitter median absolute deviation statistic</td></tr>
  <tr><td>13</td><td>jitter maximum statistic</td></tr>
  <tr><td>14</td><td>jitter minimum statistic</td></tr>
</table><p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  extract-latency.pl data/*.gpd &gt; data/latency.csv</pre>

</body>
</html>
<html>
<head>
<title>extract-throughput.pl - extract summary statistics from test results.</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>extract-throughput.pl - extract summary statistics from test results.</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  extract-throughput.pl &lt;infile&gt; ...</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script processes the logfiles created by executing the throughput
tests and generates a series of indexed data sets suitable for plotting
by GNUPlot.</p>
<p>All input files named on the command line are processed and the transport
used, the message sizes and rates, as well as the elapsed test time and
actual number of messages received are extracted.</p>
<p>The input files are expected to be in logfile format as produced by the
testprocess <code>-v</code> option.  Logfiles produced by the subscription end of
the testing contain the data required to produce plottable data sets.</p>
<p>The supported tests that can be reduced from the pre-configured throughput
tests are:</p>
<table border>
  <tr><th>Test Type</th></tr>
  <tr><td>Bidirectional Throughput</td></tr>
  <tr><td>Publication Bound</td></tr>
  <tr><td>Subscription Bound</td></tr>
</table><p>Each of these test types has a pre-configured set of test conditions that
can be executed.  The sets of test conditions are grouped into three
different categories:</p>
<table border>
  <tr><th>Test Conditions</th><th>Description</th></tr>
  <tr><td>Steepest Ascent</td><td>A group of tests that simultaneously
  increase both the message size and message rate resulting in a large
  nominal throughput increase between tests.</td></tr>
  <tr><td>Fixed Rate</td><td>A group of tests where the message rate is
  held constant for all of the tests.</td></tr>
  <tr><td>Fixed Size</td><td>A group of tests where the message size is
  held constant for all of the tests.</td></tr>
</table><p>The transport types included in the pre-configured tests are:</p>
<table border>
  <tr><th>Transport</th><th>Description</th></tr>
  <tr><td>UDP</td><td>best effort datagram transport.</td></tr>
  <tr><td>TCP</td><td>reliable stream transport.</td></tr>
  <tr><td>Best Effort Multicast</td><td>best effort multicast datagram transport.</td></tr>
  <tr><td>Reliable Multicast</td><td>reliable multicast datagram transport.</td></tr>
</table><p>Each index set in the output represents the combination of a test type,
a set of test conditions, and a transport type, with each entry within
the index representing a single size/rate test result.</p>
<p>The output data within an index is formatted as a comma separated value
file (CSV), with the following fields:</p>
<table border>
  <tr><th>Field</th><th>Description</th></tr>
  <tr><td>1</td><td>test message size</td></tr>
  <tr><td>2</td><td>test message rate</td></tr>
  <tr><td>3</td><td>actual (measured) bandwidth</td></tr>
  <tr><td>4</td><td>test type</td></tr>
  <tr><td>5</td><td>transport type</td></tr>
</table><p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  extract-throughput.pl */*.results &gt; data/throughput.csv</pre>

</body>
</html>
#''What kind of document is this?''<br>{{indent{This is a TiddlyWiki document.}}}
#''What is the latency data output file format?''<br>{{indent{The output files generated when gathering latency data are described by the [[latency file format|LatencyDataFileFormat]].}}}
#''What are all these charts?''<br>{{indent{They are described in a Middleware News Brief: [[Interpreting OpenDDS Performance Testing Results|http://mnb.objectcomputing.com/mnb/MiddlewareNewsBrief-201003.html]].}}}
#''What are all these scripts for?''<br>{{indent{The script usage is spread throughout this document, with a [[synopsis|Script Summary]] included.}}}
GNUPlot is a data visualization tool available from [[gnuplot.info|http://gnuplot.info/]].
<html>
<head>
<title>gen-latency-stats.pl - create GNUPlot statistical summary data string variables</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>gen-latency-stats.pl - create GNUPlot statistical summary data string variables</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  gen-latency-stats.pl &lt;infile&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script processes a comma separated value (CSV) input file and
creates an output file for each record of the input.  Each output file is
placed in the same directory that the input file was located in.  The
output file names are constructed using fields from the input record as:</p>
<pre>
  latency-&lt;transport&gt;-&lt;size&gt;.stats</pre>
<p>The input file is expected to be in the format produced by the extract-latency.pl
data reduction script.  Each record (line) of the input file contains the
following fields:</p>
<table border>
  <tr><th>Field</th><th>Description</th></tr>
  <tr><td>1</td><td>transport type (derived from input filename)</td></tr>
  <tr><td>2</td><td>test message size (derived from input filename)</td></tr>
  <tr><td>3</td><td>latency mean statistic</td></tr>
  <tr><td>4</td><td>latency standard deviation statistic</td></tr>
  <tr><td>5</td><td>latency median statistic</td></tr>
  <tr><td>6</td><td>latency median absolute deviation statistic</td></tr>
  <tr><td>7</td><td>latency maximum statistic</td></tr>
  <tr><td>8</td><td>latency minimum statistic</td></tr>
  <tr><td>9</td><td>jitter mean statistic</td></tr>
  <tr><td>10</td><td>jitter standard deviation statistic</td></tr>
  <tr><td>11</td><td>jitter median statistic</td></tr>
  <tr><td>12</td><td>jitter median absolute deviation statistic</td></tr>
  <tr><td>13</td><td>jitter maximum statistic</td></tr>
  <tr><td>14</td><td>jitter minimum statistic</td></tr>
</table><p>The output includes two GNUPlot string variable definitions suitable for
'load' or 'call' operations within GNUPlot.  Provided GNUPlot data
visualization scripts use these variables to generate label information
to place on plots.  The variables are:</p>
<pre>
  latency_stats
  jitter_stats</pre>
<p>Each variable contains the median, median absolute deviation, maximum,
and minimum data values for the output file (transport/size) in a newline
separated single string suitable for use as a label within GNUPlot.</p>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  gen-latency-stats.pl data/latency.csv</pre>

</body>
</html>
This is a bash script that invokes the data reduction scripts on the test results from the pre-configured tests.

{{chunk{
Usage: generate-test-results.sh <bench_directory>

bench_directory         This is the location of the Bench performance tests directory.

Examples:
generate-test-results.sh /performance-tests/Bench
generate-test-results.sh /home/tester/perf-tests

}}}
<html>
<head>
<title>gen-latency-stats.pl - create GNUPlot statistical summary data string variables</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>gen-latency-stats.pl - create GNUPlot statistical summary data string variables</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  gen-latency-stats.pl &lt;infile&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script processes a comma separated value (CSV) input file and
creates an output file for each record of the input.  Each output file is
placed in the same directory that the input file was located in.  The
output file names are constructed using fields from the input record as:</p>
<pre>
  latency-&lt;transport&gt;-&lt;size&gt;.stats</pre>
<p>The input file is expected to be in the format produced by the extract-latency.pl
data reduction script.  Each record (line) of the input file contains the
following fields:</p>
<table border>
  <tr><th>Field</th><th>Description</th></tr>
  <tr><td>1</td><td>transport type</td></tr>
  <tr><td>2</td><td>test message size</td></tr>
  <tr><td>3</td><td>latency mean statistic</td></tr>
  <tr><td>4</td><td>latency standard deviation statistic</td></tr>
  <tr><td>5</td><td>latency maximum statistic</td></tr>
  <tr><td>6</td><td>latency minimum statistic</td></tr>
  <tr><td>7</td><td>jitter mean statistic</td></tr>
  <tr><td>8</td><td>jitter standard deviation statistic</td></tr>
  <tr><td>9</td><td>jitter maximum statistic</td></tr>
  <tr><td>10</td><td>jitter minimum statistic</td></tr>
</table><p>The output includes two GNUPlot string variable definitions suitable for
'load' or 'call' operations within GNUPlot.  Some GNUPlot data
visualization scripts use these variables to generate label information
to place on some plots.  The variables are:</p>
<pre>
  latency_stats
  jitter_stats</pre>
<p>Each variable contains the mean, standard deviation, maximum, and minimum
data values for the output file (transport/size) in a newline separated
single string suitable for use as a label within GNUPlot.</p>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  gen-latency-stats.pl data/latency.csv</pre>

</body>

</html>

To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
* [[StyleSheet]]: the ''.headerForeground'' class includes a relative path to the opendds.png logo file.  Ensure that this will be reachable from the final document.  As supplied, this is expected to be located internal to the //./images// directory
You'll also need to enter your username for signing your edits: <<option txtUserName>>

If you wish to publish the contents of this document as static HTML web pages, then:
* add a ''publish'' tag to each tiddler to be included in the published document
* [[PublishIndexTemplate]], [[PublishTemplateBody]], [[PublishTemplateBodySingle]]: edit to the style desired in the published document
* [[StyleSheet]]: modify the reference to the logo file in the //.publishLogo// class to be in a correct relative directory path
* use the ''publish'' selection on the right sidebar to publish all tiddlers with the ''publish'' tag to individual HTML files.  They will all be located in the //./publish// directory
Once the OpenDDS core is built, the OpenDDS-Bench software can be built and installed.  This software is located relative to the OpenDDS root directory and is part of the OpenDDS download.  For purposes of this documentation, we assume that you have set the ~BENCH_ROOT environment variable to a value of //$~DDS_ROOT/performance-tests/Bench//.  In a bash shell this can be done as:
<<<
 shell> export ~BENCH_ROOT=$~DDS_ROOT/performance-tests/Bench
<<<

There is one additional executable that needs to be built as part of the framework.  This is the {{{testprocess}}} command.  Makefiles and solution files are provided as part of the OpenDDS download, so this can be built the same way as the core OpenDDS code.  Using the GNU toolchain on Linux this could be done simply as:
<<<
 shell> make -C $~BENCH_ROOT
<<<
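Once the build completes, the {{{testprocess}}} binary should be present in the //$~BENCH_ROOT/bin// directory; a quick check from a bash shell:
<<<
 shell> ls $~BENCH_ROOT/bin/testprocess
<<<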

Once the software has been built, it can be used for test execution.  It must be made available on all hosts which will be participating in the tests.  If the software has been built on each host participating, or if it is available via a network file system and executable on each participating host, then no further actions need to be taken.

If there are hosts which are participating in the test and can execute the OpenDDS software built on one of the other hosts, but which do not have direct access to those build directories, then the libraries and commands for testing can be transferred to these hosts.  The libraries used by the test processing include those from ACE, TAO, and OpenDDS.  The commands include the test execution script as well as the {{{testprocess}}} command and [[repository|DCPSInfoRepo]] command.  These are summarized in the table below.

{{centeredTable{
| ''Required Software for Test Execution'' |c
| //Item// | //Location// | //Description// |
|versioned ~DCPSInfoRepoServ library |$~DDS_ROOT/lib |OpenDDS repository implementation libraries |
|versioned Federator library |$~DDS_ROOT/lib |~|
|versioned ~InfoRepoLib library |$~DDS_ROOT/lib |~|
|versioned ~OpenDDS_Dcps library |$~DDS_ROOT/lib |core OpenDDS library |
|versioned ~SimpleTcp library |$~DDS_ROOT/lib |OpenDDS transport implementations |
|versioned ~OpenDDS_Udp library |$~DDS_ROOT/lib |~|
|versioned ~OpenDDS_Multicast library |$~DDS_ROOT/lib |~|
|unversioned<br>~SimpleTcp library |link to versioned<br>~OpenDDS_SimpleTcp library |~|
|unversioned<br>~OpenDDS_Udp library |link to versioned<br>~OpenDDS_Udp library |~|
|unversioned<br>~OpenDDS_Multicast library |link to versioned<br>~OpenDDS_Multicast library |~|
|versioned ACE library |$~ACE_ROOT/lib |core ACE library |
|versioned ~TAO_ImR_Client library |$~ACE_ROOT/lib |core TAO libraries |
|versioned ~TAO_IORTable library |$~ACE_ROOT/lib |~|
|versioned ~TAO_PortableServer library |$~ACE_ROOT/lib |~|
|versioned TAO library |$~ACE_ROOT/lib |~|
|versioned ~TAO_Svc_Utils library |$~ACE_ROOT/lib |~|
|versioned ~TAO_CodecFactory library |$~ACE_ROOT/lib |~|
|versioned ~TAO_AnyTypeCode library |$~ACE_ROOT/lib |~|
|versioned ~TAO_Codeset library |$~ACE_ROOT/lib |~|
|versioned ~TAO_PI library |$~ACE_ROOT/lib |~|
|unversioned<br>~TAO_CodecFactory library |link to versioned<br>~TAO_CodecFactory library |~|
|unversioned<br>~TAO_AnyTypeCode library |link to versioned<br>~TAO_AnyTypeCode library |~|
|unversioned<br>~TAO_Codeset library |link to versioned<br>~TAO_Codeset library |~|
|unversioned<br>~TAO_PI library |link to versioned<br>~TAO_PI library |~|
|~Run_Test.pm |$~DDS_ROOT/bin/~PerlDDS |core DDS perl modules |
|~ProcessFactory.pm |$~DDS_ROOT/bin/~PerlDDS |~|
|Process.pm |$~DDS_ROOT/bin/~PerlDDS |~|
|~ConfigList.pm |$~ACE_ROOT/bin/~PerlACE |core ACE perl modules |
|~Process_Unix.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~ProcessVX_Win32.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~TestTarget.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~MSProject.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~Process_VMS.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~Process_Win32.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~ProcessLVRT.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~ProcessVX.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~Run_Test.pm |$~ACE_ROOT/bin/~PerlACE |~|
|Process.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~ProcessVX_Unix.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~TestTarget_LVRT.pm |$~ACE_ROOT/bin/~PerlACE |~|
|~DCPSInfoRepo |$~DDS_ROOT/bin |OpenDDS repository command |
|testprocess |$~BENCH_ROOT/bin |OpenDDS-Bench test process |
|run_test |$~BENCH_ROOT/bin |OpenDDS-Bench test management script |
}}}
In addition to the software, there are configuration files that are available for pre-configured tests.  These should be available on all hosts participating in these tests.  These configuration files are listed in the table below.

{{centeredTable{
| ''Test Configuration Files'' |c
| //Name// | //Location// | //Description// |
|svc.conf |$~BENCH_ROOT/etc |Service configurator specifications for dynamically loaded libraries.  Typically this is restricted to transport libraries and the monitor service library. |
|transport*.ini |$~BENCH_ROOT/etc |Transport configuration specifications used by all tests. |
|*.ini |$~BENCH_ROOT/tests/scenarios |Preconfigured scenario tests.  These are old and should be considered deprecated. |
|~|$~BENCH_ROOT/tests/latency |Preconfigured latency tests.|
|~|$~BENCH_ROOT/tests/scaling |Preconfigured scaling tests.|
|~|$~BENCH_ROOT/tests/thru |Preconfigured throughput tests.|
|~|$~BENCH_ROOT/tests/spray |Preconfigured multicast multipoint tests.|
}}}

As an aid to packaging these files into a distributable form, the [[$BENCH_ROOT/tools/mkpkg|MkpkgManPage]] script is available.  This is a bash shell script and as such can only be used directly on a host that supports the bash shell.  It can be used as a guide for other shells or host types to create a localized version if desired.  See [[Quick Start]] for an example of how this script can be used.

The script makes copies and links in a named (default //pkg//) target directory that can then be tarred or zipped into a distributable file.  The {{{run_test}}} test management script will configure the environment of processes that it starts to find libraries and commands in the subdirectories created by this script, making use of the distribution straightforward.
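For example, a bash sketch of creating a distributable package (assuming the default //pkg// target directory and a tar-based distribution file name of your choosing):
<<<
 shell> cd $~BENCH_ROOT
 shell> tools/mkpkg
 shell> tar czf bench-pkg.tar.gz pkg
<<<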
This is a GNUPlot script for creating charts displaying latency and jitter data.

This script is intended to be called with the following parameters:
{{indent{
$0: datafilename
$1: statsfilename
$2: outputfilename
$3: multiplot title
}}}

The output of this script will be a single output file in PNG format named by the //'$2'// parameter that contains a quad plot of the data read from the input file named by the //'$0'// parameter.  The statistics information located in the file named by the //'$1'// parameter is used as labels on the charts.  The //'$3'// parameter is used as a label for the whole multiplot chart.

The sub-plots are:
{{centeredTable{
| ''sub-plot'' | ''type'' | ''X-axis'' | ''Y-axis'' |
|top-left |Latency timeline data plot |Sample number |Measured latency |
|top-right |Jitter timeline data plot |Sample number |Measured jitter |
|bottom-left |Latency histogram plot |Measured latency |Frequency |
|bottom-right |Jitter histogram plot |Measured jitter |Frequency |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call lj-plot.gpi 'data/latency-tcp-1000.gpd' 'data/latency-tcp-1000.stats' 'images/latency-tcp-1000.png' 'TCP Message Size 1000 bytes' }}}
}}}
The OpenDDS-Bench framework includes configuration files for latency testing.  These files are located in the //$~BENCH_ROOT/tests/latency// sub-directory.  The latency testing covers several different conditions.  The configurations for these tests are illustrated in the following diagram.
[img[Latency Test|images/latencytest.jpg]]
The message size is varied for each test execution run, and latency data for the full loopback path is gathered and analyzed.  Latency data is measured by extracting a timestamp at the time of the write() call and inserting it into the data sample.  Once the sample has been received at the terminus of the path, another timestamp is extracted and the difference logged as the latency.  Since the clocks on different hosts tend to drift at different rates, it is best to have both the write() and read() calls executed on the same host so the latency measurements are made using the same system clock.  This is ensured for these tests by placing the publication and subscription in the same process.  Typically the two processes of this test will be executed on different hosts to include network effects in the measurements.  While executing them on the same host will exclude network effects from the results, it is possible to encounter processor effects depending on the capacity of the host on which the test is executing.

There is a separate configuration file supplied for each message size test to be executed.  These files configure the test to use a RELIABLE transport type.  They can be modified to select ~BEST_EFFORT for testing using the UDP and basic Multicast transports; a sketch of such a change follows the table below.  Users are encouraged to extend the testing to include cases specific to their environment as well, using the supplied configuration files as a guide.  The test files and the message sizes they specify include the following:
{{centeredTable{
| ''Test File Message Sizes'' |c
| //Message Size//<br>(bytes) | //Configuration File//<br>(in //$~BENCH_ROOT/tests/latency//) |
| 50|p1-50.ini |
| 100|p1-100.ini |
| 250|p1-250.ini |
| 500|p1-500.ini |
| 1000|p1-1000.ini |
| 2500|p1-2500.ini |
| 5000|p1-5000.ini |
| 8000|p1-8000.ini |
| 16000|p1-16000.ini |
| 32000|p1-32000.ini |
}}}
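Switching one of these tests to best effort delivery amounts to changing the reliability key in the configuration file.  A minimal sketch of the edit follows; the section name //p1// is illustrative, and {{{ReliabilityKind}}} is the key described in [[Test Specification]]:
{{{
[publication/p1]
ReliabilityKind = BEST_EFFORT
}}}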
{{itemBlock{
Commands that can be used to structure the test execution for these tests are included in the file //$~BENCH_ROOT/tests/latency/test-commands.txt//.  This file contains a set of commands for each test configuration described above.  They can be used to structure tests for your specific environment.  Executing tests in a Linux environment can be done as follows:
!Start the repository
<<<
repohost> $~BENCH_ROOT/bin/run_test -S -h iiop:"""//"""<repohost>:2809
<<<
!Start the forwarding (loopback) process
<<<
host2> cd <testdir>
host2> $~BENCH_ROOT/bin/run_test -P -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/latency/p2.ini
<<<
!Start the originating and receiving process
<<<
host1> cd <testdir>
host1> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/latency/p1-<size>.ini
<<<
}}}
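As a concrete example, assume a repository host named //perf1// and a TCP transport configuration file named //transport-tcp.ini// (both names are illustrative); the 1000 byte test would then be run as:
<<<
repohost> $~BENCH_ROOT/bin/run_test -S -h iiop:"""//"""perf1:2809
host2> $~BENCH_ROOT/bin/run_test -P -t 120 -h perf1:2809 -i $~BENCH_ROOT/etc/transport-tcp.ini -s $~BENCH_ROOT/tests/latency/p2.ini
host1> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h perf1:2809 -i $~BENCH_ROOT/etc/transport-tcp.ini -s $~BENCH_ROOT/tests/latency/p1-1000.ini
<<<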
{{itemBlock{
A script that can be used to execute these tests is available as:
{{indent{
;[[$BENCH_ROOT/tests/latency/run_test.pl|LatencyRunTestManPage]]
:This script will execute one side (or host) of a latency test for a specific message size and transport using the available test configuration files for that message size.
}}}
}}}
Latency data files include last hop and full path information in both summary and detailed forms.  In the output files specified as part of the test configuration for a subscription, the summary data appears first, followed by the detailed data.  The summary data is organized by last hop then full path, and within each category the information is broken down and summarized by the publication that wrote the data.

Latency data is produced by collecting a timestamp at the originating point and sending it along with the sample to be compared with a timestamp collected at the terminating point.  Since the actual collection of timestamps can affect the operation of the test, only timestamps that are required are generated.  This typically means that a test timestamp will be generated and collected only at the path origination and termination.  This is where the full-path latency data is gathered.
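Expressed as a simple sketch (//i// is the sample number; jitter, as computed by the data reduction scripts, is the difference between successive latency values):
{{{
latency[i] = receive_timestamp[i] - send_timestamp[i]
jitter[i]  = latency[i] - latency[i-1]
}}}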

OpenDDS internally has a requirement to generate and send timestamps with each data sample, and those timestamps are used to extract the actual per-hop information.

Since the latency data consists of two timestamps, it is important to only compare the timestamps where the comparison has meaning.  This means knowledge of skew between different clocks is necessary if the two timestamps are collected from different sources.  Since the inherent clock drift and skew between hosts is large compared to the time intervals being measured, it is best to compare data only between measurements from the same clock unless dedicated hardware of known performance is used to ensure comparison fidelity.

This typically results in only the full-path data being useful in the latency data collection.  The skew and drift between hosts results in the per-hop information being of less value unless the publication and subscription are on the same host and access the same underlying system clock.

An example of the summary data is shown below:
{{{
SUBSCRIPTION s5 SUMMARY DATA
Total Messages Received: 11913
Valid Messages Received: 11912

 --- last hop statistical summary for subscription s5 ---
  Writer[ 00030000.00000000.00000030.00000102(abf2c9c7)]
     samples: 11912
        mean: 0.00011231
     minimum: 0.000106
     maximum: 0.000365
    variance: 2.20289e-11
 --- full path statistical summary for subscription s5 ---
  Writer[ 0xffffffff9692e077]
     samples: 11912
        mean: 0.000278045
     minimum: 0.000266
     maximum: 0.001049
    variance: 1.58282e-10
}}}
;Messages Received
:The "Total Messages Recieved" will in general vary from the "Valid Messages Received" value due to control messages that are considered invalid at the application API interfaces.  Typically this is accounted for by the terminating  "unregister" message that is received during test shutdown.  The "Valid Messages Received" indicates how many samples of the expected data type were received by the application interface during the test.
;last hop statistical summary
:this is collected and organized by the ~DataWriter Id value of the publication that was last to write during the data's traversal of the test path.  For tests with more than one hop this will typically be a different set of ~DataWriters than those for the full path statistics.  For the last hop, the Publication Id of the ~DataWriter is available and is included in the Writer information.
;full path statistical summary
:this is collected and organized by the ~DataWriter Id value of the publication originating the data within the test.  For test paths with more than one hop this will be a different set of ~DataWriters than the last hop ~DataWriters.  For the full path information the Publication Id value of the originating writer is not available, so only the 32 bit hash value is included in the Writer information.  On 64 bit systems this value might be sign-extended, as in the example data above.
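:For example, the full path hash 0x9692e077 in the data above has its high bit set, which is why it appears sign-extended to 0xffffffff9692e077 on a 64 bit system.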
The statistical data consists of the following information:
;samples
:this is the number of samples that are included in the statistical calculations.
;mean
:this is the average latency value for all of the included samples.
;minimum
:this is the lowest latency value of all the samples.
;maximum
:this is the largest latency value of all the samples.
;variance
:this is the variance of the entire data set.  It is the square of the standard deviation.
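:For example, the full path summary above reports a variance of 1.58282e-10, corresponding to a standard deviation of sqrt(1.58282e-10) ≈ 1.26e-5 seconds, about 4.5% of the 2.78e-4 second mean latency.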
The detailed data includes one data point per sample, representing the latency of that sample, for the samples selected by the collection parameters (the last 5,000 samples in the predefined [[latency tests|Latency Tests]]).  Each data point is the number of seconds between the sample write time at the originating publication and the sample read time at the terminating subscription.

An example of the detailed data is shown below:
{{{
SUBSCRIPTION s5 DETAILED DATA
 --- last hop statistical data for subscription s5 ---

  Writer[ 00030000.00000000.00000030.00000102(abf2c9c7)]
5000 samples out of 11912
1.13000e-04
1.12000e-04
1.12000e-04
...
1.13000e-04
1.23000e-04
1.20000e-04
 --- full path statistical data for subscription s5 ---
  Writer[ 0xffffffff9692e077]
5000 samples out of 11912
2.80000e-04
2.76000e-04
2.80000e-04
...
2.78000e-04
3.22000e-04
3.12000e-04
}}}
<html>
<head>
<title>run_test.pl - run one side of a latency cross host test</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>run_test.pl - run one side of a latency cross host test</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  run_test.pl &lt;transport&gt; &lt;message size&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script runs one side of the latency test for cross host testing.  The
script needs to be run on each of the two hosts involved in the test using
the same parameters on each host.</p>
<p>The test consists of two halves, an originating (server) side and a reflecting
(client) side. The hosts involved in the test are stored in the
test_list.txt file in test-host groupings.  Each grouping consists of an
ID, client host, and server host.  The script identifies the host's behavior
by identifying the test group ID and the local host's name.  The test group
ID is identified using the environment variable CROSS_GRP.</p>
<p>The server (originating) side starts the DCPSInfoRepo for the test.</p>
<p>The transport has to be one of the following values:</p>
<dl>
<dt><strong><a name="tcp" class="item">tcp</a></strong></dt>

<dd>
<p>SimpleTCP</p>
</dd>
<dt><strong><a name="udp" class="item">udp</a></strong></dt>

<dd>
<p>SimpleUDP</p>
</dd>
<dt><strong><a name="multi_be" class="item">multi-be</a></strong></dt>

<dd>
<p>multicast (Best Effort)</p>
</dd>
<dt><strong><a name="multi_rel" class="item">multi-rel</a></strong></dt>

<dd>
<p>multicast (Reliable)</p>
</dd>
</dl>
<p>Supported message sizes are <strong>50</strong> <strong>100</strong> <strong>250</strong> <strong>500</strong> <strong>1000</strong>
 <strong>2500</strong> <strong>5000</strong> <strong>8000</strong> <strong>16000</strong> <strong>32000</strong></p>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  run the same command on both hosts:</pre>
<pre>
  run_test.pl tcp 1000</pre>
<pre>
  run_test.pl multi-be 50</pre>

</body>
</html>
[[Introduction|OpenDDS-Bench]]
[[Quick Start]]
[[Installation]]
[[Test Specification]]
[[Existing Tests]]
[[Test Execution]]
[[Data Reduction]]
[[Data Visualization]]
[[FAQ]]
This is a bash script that creates a directory containing a set of executables, libraries, and configuration files suitable for transfer to other machines of the same system type, with the same libraries, for test execution.
{{chunk{
Usage: mkpkg [destination_directory]

destination_directory:  This is the location of the directory for the packaged files.

If not supplied, a destination directory of 'pkg' will be used.

Examples:
mkpkg dts-dirname
}}}
<html>
<head>
<title>mktable.pl - create a TiddlyWiki or HTML table with latency test data</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#options">OPTIONS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>mktable.pl - create a TiddlyWiki or HTML table with latency test data</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  mktable.pl [ html ] &lt;infile&gt;</pre>
<p>
</p>
<hr />
<h1><a name="options">OPTIONS</a></h1>
<dl>
<dt><strong><a name="html" class="item"><strong>html</strong></a></strong></dt>

<dd>
<p>Select HTML output format instead of the default TiddlyWiki tiddler
table format.  <strong>This is not currently supported</strong>.</p>
</dd>
</dl>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script reads a data file with statistical information and creates
output suitable for inclusion within a tiddler in a TiddlyWiki document
to define a table with the data included.  If the optional 'html'
argument is supplied the output format will be as a static HTML table.</p>
<p>The input file is expected to be in the format produced by the extract.pl
data reduction script.  Each record (line) of the input file contains the
following fields:</p>
<table border>
  <tr><th>Field</th><th>Description</th></tr>
  <tr><td>1</td><td>transport type</td></tr>
  <tr><td>2</td><td>test message size</td></tr>
  <tr><td>3</td><td>latency mean statistic</td></tr>
  <tr><td>4</td><td>latency standard deviation statistic</td></tr>
  <tr><td>5</td><td>latency maximum statistic</td></tr>
  <tr><td>6</td><td>latency minimum statistic</td></tr>
  <tr><td>7</td><td>jitter mean statistic</td></tr>
  <tr><td>8</td><td>jitter standard deviation statistic</td></tr>
  <tr><td>9</td><td>jitter maximum statistic</td></tr>
  <tr><td>10</td><td>jitter minimum statistic</td></tr>
</table><p>This script will only successfully create table data for input files that
contain the same set of message sizes for each transport included.</p>
<p>The tiddler table output consists of TiddlyWiki table format definitions
to create a table containing the statistical information and links to
tiddlers that should contain the quad-plots representing the summarized
data.</p>
<p>The first two rows of the table include a row and column title for the
leftmost column identifying the transport along with the data columns -
one pair for each data size.</p>
<p>Subsequent rows include sets of 5 rows each representing summary data for
a specific transport type.  These rows contain the following columns:</p>
<table border>
  <tr><th>Row</th><th>Column(s)</th><th>Tiddler Format Contents</th><th>HTML Format Contents</th></tr>
  <tr><td>n</td><td>1</td><td>transport identification (name)</td><td>unsupported</td></tr>
  <tr><td>n</td><td>even</td><td>"Latency" label</td><td>unsupported</td></tr>
  <tr><td>n</td><td>odd, after first</td><td>"Mean&lt;br>" . $data</td><td>unsupported</td></tr>
  <tr><td>1 + n</td><td>1</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>1 + n</td><td>even</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>1 + n</td><td>odd, after first</td><td>"Dev&lt;br>" . $data</td><td>unsupported</td></tr>
  <tr><td>2 + n</td><td>1</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>2 + n</td><td>even</td><td>"Jitter" label</td><td>unsupported</td></tr>
  <tr><td>2 + n</td><td>odd, after first</td><td>"Mean&lt;br>" . $data</td><td>unsupported</td></tr>
  <tr><td>3 + n</td><td>1</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>3 + n</td><td>even</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>3 + n</td><td>odd, after first</td><td>"Dev&lt;br>" . $data</td><td>unsupported</td></tr>
  <tr><td>4 + n</td><td>1</td><td>"~" ROWSPAN specification</td><td>unsupported</td></tr>
  <tr><td>4 + n</td><td>even</td><td>">" COLSPAN specification</td><td>unsupported</td></tr>
  <tr><td>4 + n</td><td>odd, after first</td><td>"[[plot]|" . $data . "]]"</td><td>unsupported</td></tr>
</table><p>Where the mean and deviation data are taken from the input file, and the
plot data is formed from the transport type and message size obtained
from the input file.</p>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  mktable.pl data/latency.csv &gt; doc/results-tiddler</pre>
<pre>
  mktable.pl html data/latency.csv &gt; table-frag.html</pre>

</body>

</html>
OpenDDS is an open source C++ implementation of the Object Management Group (OMG) Data Distribution Service (DDS). OpenDDS also supports Java bindings through JNI and can be included with JBoss (ESB) frameworks by means of a JMS wrapper. OpenDDS leverages the ADAPTIVE Communication Environment (ACE) to provide a cross platform environment.

OpenDDS is supported by [[Object Computing, Inc.|http://www.objectcomputing.com/]]
OpenDDS-Bench is a testing framework for specifying and executing performance tests of OpenDDS.  It supports testing on one or more hosts, each with one or more processes.  Any test process may contain any number of DDS service publications or subscriptions.

The framework consists of:
*executable [[test processes|Test Execution]]
*[[supporting scripts|Script Summary]] for starting these test processes
*[[test specification|Test Specification]] configuration files
*[[data reduction|Data Reduction]] tools
*[[data visualization|Data Visualization]] tools
OpenDDS-Bench is included in the OpenDDS download files and is located in the //$~DDS_ROOT/performance-tests/Bench// directory.  It links against the OpenDDS service libraries, so it should be built after OpenDDS has been successfully built.

Several pre-configured tests are available for testing without needing to develop them.  These include tests for [[latency|Latency Tests]] and [[throughput|Throughput Tests]].

This user guide is written assuming a minimum environment setup, which includes establishing the framework root, adding the framework bin directory to the command search path, and adding the framework library directory to the library runtime search path.  This can be done easily by setting the following environment variables:

{{centeredTable{
| OpenDDS-Bench environment used in this document |c
| //Variable// | //Value// |
|''~BENCH_ROOT'' |$~DDS_ROOT/performance-tests/Bench |
|''PATH'' |$~BENCH_ROOT/bin:$PATH |
|''~LD_LIBRARY_PATH'' |$~BENCH_ROOT/lib:$~LD_LIBRARY_PATH |
}}}
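For a bash shell this setup corresponds to the following (a sketch; adjust the paths to your installation):
<<<
 shell> export ~BENCH_ROOT=$~DDS_ROOT/performance-tests/Bench
 shell> export PATH=$~BENCH_ROOT/bin:$PATH
 shell> export ~LD_LIBRARY_PATH=$~BENCH_ROOT/lib:$~LD_LIBRARY_PATH
<<<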
<!--{{{-->
<div class='header'>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
<div class='footer'>$Id: userguide.html 103 2010-10-05 18:45:41Z martinezm $</div>
</div>
<!--}}}-->
!Definable Parameters
{{centeredTable{
| ''Participant Section Keys'' |c
| //Key// | //Value// | //Notes// |
|>|>| """---""" Participant Qos Policy values """---""" |
|~UserData |<string> |&nbsp; |
|~EntityFactory |<bool> |Booleans are represented with a numeric 0 or 1 |
|>|>| """---""" Test Execution Parameters """---""" |
|~DomainId |<number> |&nbsp; |
}}}
!Test Execution Parameters
[participant] specifications only have {{{DomainId}}} as a test execution parameter.  This determines the DDS Domain in which the participant resides.
!Example
An example participant section like the following would result in a DDS ~DomainParticipant with a name of //player// being created in the process with default ~QoS parameter values and a Domain Id value of 2112.
{{{
[participant/player]
DomainId = 2112
}}}
This is a GNUPlot script for creating charts displaying latency distribution density data.

This script is intended to be called with the following parameters:
{{indent{
$0: data directory
$1: output directory
}}}

The output of this script will be PNG files located in the directory named by the //'$1'// parameter.  The charts plot the data in the directory specified by the //'$0'// parameter.  A separate output for each transport type will be produced.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X-axis'' | ''Y-axis'' | ''lines'' |
|//$1///<transport>-density.png |Latency density data plot |Measured latency |Frequency |One per message size |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-density.gpi 'data' 'images' }}}
}}}
This is a GNUPlot script for creating charts displaying a jitter data summary.

This script is intended to be called with the following parameters:
{{indent{
$0: datafilename
$1: outputfilebase
}}}

The output of this script will be two output files in PNG format named using the //'$1'// parameter as a base.  The charts plot the data read from the input file named by the //'$0'// parameter.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X-axis'' | ''Y-axis'' | ''lines'' | ''remarks'' |
|//$1//.png |Jitter summary data plot |Message size |Measured jitter |One per transport type | |
|//$1//-zoom.png |Jitter summary data plot |Message size |Measured jitter |One per transport type |X-axis limited to [0:2500] range |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-jitter.gpi 'data/latency.csv' 'images/transport-jitter' }}}
}}}
This is a GNUPlot script for creating charts displaying a latency quantile data summary.

This script is intended to be called with the following parameters:
{{indent{
$0: data directory
$1: output directory
}}}

The output of this script will be PNG files located in the directory named by the //'$1'// parameter.  The charts plot the data in the directory specified by the //'$0'// parameter.  A separate output for each transport type will be produced.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X-axis'' | ''Y-axis'' | ''lines'' |
|//$1///<transport>-quantiles.png |Latency quantile data plot |Percentile |Measured latency |One per message size |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-quantiles.gpi 'data' 'images' }}}
}}}
This is a bash script that invokes the plotting scripts on the reduced test data from the pre-configured tests.
{{chunk{
Usage: plot-test-results.sh <bench_directory> <destination_directory>

bench_directory         This is the location of the Bench performance tests directory.

destination_directory   This is the location of the directory for the generated images.

Options must be specified in the order shown above.

Examples:
plot-test-results.sh /performance-tests/Bench /var/www/html/perf/images
plot-test-results.sh /home/tester/perf-tests /home/tester/perf-results/images
}}}
This is a GNUPlot script for creating charts displaying a throughput data summary.

This script is intended to be called with the following parameters:
{{indent{
$0: datafilename
$1: output directory
}}}

The output of this script will be PNG files located in the directory named by the //'$1'// parameter.  The charts plot the data read from the input file named by the //'$0'// parameter.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X1-axis(bottom)'' | ''X2-axis(top)'' | ''Y-axis'' | ''lines'' |
|//$1///thru-bidir.png |Throughput bidirectional topology summary data plot |Message size |Message rate |Measured throughput |One per test condition |
|//$1///thru-pub.png |Throughput publication bound topology summary data plot |Message size |Message rate |Measured throughput |One per test condition |
|//$1///thru-sub.png |Throughput subscription bound topology summary data plot |Message size |Message rate |Measured throughput |One per test condition |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-throughput-testformats.gpi 'data/throughput.csv' 'images' }}}
}}}
This is a GNUPlot script for creating charts displaying a summary of throughput data taken for the bidirectional test topology.

This script is intended to be called with the following parameters:
{{indent{
$0: datafilename
$1: output directory
$2: [optional] smoothing specification
}}}
Smoothing is not recommended.

The output of this script will be PNG files located in the directory named by the //'$1'// parameter.  The charts plot the data read from the input file named by the //'$0'// parameter.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X1-axis(bottom)'' | ''X2-axis(top)'' | ''Y-axis'' | ''lines'' |
|//$1///thru-lines.png |Throughput bidirectional topology summary data plot |Message size |Message rate |Measured throughput |One per test condition |
|//$1///thru-<transport>.png |Throughput bidirectional topology summary data plot |Message size |Message rate |Measured throughput |One per test condition |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-throughput-transports.gpi 'data/throughput.csv' 'images' }}}
{{{ gnuplot> call plot-throughput-transports.gpi 'data/throughput.csv' 'images' 'smooth acspline' }}}
}}}
This is a GNUPlot script for creating charts displaying a latency data summary.

This script is intended to be called with the following parameters:
{{indent{
$0: datafilename
$1: outputfilebase
}}}

The output of this script will be two output files in PNG format named using the //'$1'// parameter as a base.  The charts plot the data read from the input file named by the //'$0'// parameter.

The charts are:
{{centeredTable{
| ''chart'' | ''type'' | ''X-axis'' | ''Y-axis'' | ''lines'' | ''remarks'' |
|//$1//.png |Latency summary data plot |Message size |Measured latency |One per transport type | |
|//$1//-zoom.png |Latency summary data plot |Message size |Measured latency |One per transport type |X-axis limited to [0:2500] range |
}}}

!!!Examples
{{indent{
{{{ gnuplot> call plot-latency.gpi 'data/latency.csv' 'images/transport-latency' }}}
}}}
The OpenDDS-Bench framework specifies publications, which consist of individual DDS Publisher and ~DataWriter Entities.  The combination of these Entities makes up a complete publication.  A consequence of this is that any single DDS Publisher will contain only a single DDS ~DataWriter.  This should not affect any test results or capabilities.

!Definable Parameters
{{centeredTable{
| ''Publication Section Keys'' |c
| //Key// | //Value// | //Notes// |
|>|>| """---""" Publisher Qos Policy values """---""" |
|Presentation |<string> |Enumeration value of {{{INSTANCE}}}, {{{TOPIC}}}, or {{{GROUP}}} |
|~PresentationCoherent |<bool> |Booleans are represented with a numeric 0 or 1 |
|~PresentationOrdered |<bool> |Booleans are represented with a numeric 0 or 1 |
|Partition |<string> |Only single string value is currently supported |
|~GroupData |<string> |&nbsp; |
|~EntityFactory |<bool> |Booleans are represented with a numeric 0 or 1 |
|>|>| """---""" ~DataWriter Qos Policy values """---""" |
|Durability |<string> |Enumeration value of {{{VOLATILE}}}, {{{LOCAL}}}, {{{TRANSIENT}}}, or {{{PERSISTENT}}} |
|~DurabilityServiceDuration |<number> |&nbsp; |
|~DurabilityServiceHistoryKind |<string> |Enumeration value of {{{ALL}}} or {{{LAST}}} |
|~DurabilityServiceHistoryDepth |<number> |&nbsp; |
|~DurabilityServiceSamples |<number> |&nbsp; |
|~DurabilityServiceInstances |<number> |&nbsp; |
|~DurabilityServiceSamplesPerInstance |<number> |&nbsp; |
|Deadline |<number> |&nbsp; |
|~LatencyBudget |<number>  |&nbsp; |
|~LivelinessKind |<string> |Enumeration value of {{{AUTOMATIC}}}, {{{PARTICIPANT}}}, or {{{TOPIC}}} |
|~LivelinessDuration |<number> |&nbsp; |
|~ReliabilityKind |<string> |Enumeration value of {{{BEST_EFFORT}}} or {{{RELIABLE}}} |
|~ReliabilityMaxBlocking |<number> |&nbsp; |
|~DestinationOrder |<string> |Enumeration value of {{{SOURCE}}} or {{{RECEPTION}}} |
|~HistoryKind |<string> |Enumeration value of {{{ALL}}} or {{{LAST}}} |
|~HistoryDepth |<number> |&nbsp; |
|~ResourceMaxSamples |<number> |&nbsp; |
|~ResourceMaxInstances |<number> |&nbsp; |
|~ResourceMaxSamplesPerInstance |<number> |&nbsp; |
|~TransportPriority |<number> |&nbsp; |
|Lifespan |<number> |&nbsp; |
|~UserData |<string> |&nbsp; |
|~OwnershipKind |<string> |Enumeration value of {{{SHARED}}} or {{{EXCLUSIVE}}} |
|~OwnershipStrength |<number> |&nbsp; |
|~WriterDataLifecycle |<bool> |Booleans are represented with a numeric 0 or 1 |
|>|>| """---""" Test Execution Parameters """---""" |
|Topic |<string> |Must reference by name the identifier of a [topic] subsection within the file. |
|~TransportIndex |<number> |Index into transport configurations |
|~MessageSource |<string> |If present, this must reference by name the identifier of a [subscription] subsection within the file. |
|~MessageRateType |<string> |Enumeration value of {{{FIXED}}} or {{{POISSON}}} |
|~MessageRate |<number> |Samples per second, average for POISSON arrival times |
|~MessageSizeType |<string> |Enumeration value of {{{FIXED}}}, {{{UNIFORM}}}, or {{{GAUSSIAN}}} |
|~MessageSize |<number> |bytes per sample |
|~MessageMax |<number> |upper bound for size |
|~MessageMin |<number> |lower bound for size |
|~MessageDeviation |<number> |standard deviation for size |
|~InstanceType |<string> |Enumeration value of {{{FIXED}}}, {{{UNIFORM}}}, or {{{GAUSSIAN}}} |
|~InstanceMean |<number> |average value of instance key for sending |
|~InstanceMax |<number> |upper bound for instance key |
|~InstanceMin |<number> |lower bound for instance key |
|~InstanceDeviation |<number> |standard deviation of instance key for sending |
|Associations |<number> |Number of subscriptions to match before starting |
|~StartAfterDelay |<number> |Delay before writes start after matching |
|~AckDelay |<number> |""">"""0 passed to wait_for_acknowledgments() |
}}}
!Test Execution Parameters
The test execution parameters allow specification of several aspects of the publication during testing.  These include:
;how the publication starts
:the publication will not start writing until it is associated with the number of subscriptions specified by the {{{Associations}}} parameter.  After the associations have been matched, if the {{{StartAfterDelay}}} parameter has been specified and is greater than zero, the publication will wait that many seconds before starting to write data.
;how the publication terminates
:if the {{{AckDelay}}} parameter is specified and non-zero, the ~DataWriter::wait_for_acknowledgments() API method is called as the publication is terminating to wait until delivery of all data has been acknowledged.
;the transport used
:this is specified as an index into the transport definitions.  The definitions are in the OpenDDS service configuration file read at startup.
;the data published
:this is specified either as data forwarded from a subscription named by the {{{MessageSource}}} parameter or as generated data samples with a specified distribution.  If forwarding from a subscription, the publication is entirely reactive and forwards all data received by that subscription without modification.  If the {{{MessageSource}}} parameter has been specified, it is used exclusively and any message generation parameters are ignored; see the forwarding example at the end of this section.
;generated data sample size
:if the publication is to generate samples, they are formed using the {{{MessageSizeType}}} and related distribution parameters.
;generated data sample rate
:if the publication is to generate samples, they are sent at a rate specified by the {{{MessageRateType}}} and related distribution parameters.
;instance values used
:each write is sent with an instance key value determined by the {{{InstanceType}}} and related distribution parameter values.
!Distributions
The sample message size and rate as well as the instance key value to be used when publishing data can be specified as a statistical distribution.  These distributions are described here.
{{itemBlock{
!!Fixed Distribution
If the {{{MessageSizeType}}}, {{{MessageRateType}}}, or {{{InstanceType}}} have a value of ''FIXED'', then the corresponding {{{MessageSize}}}, {{{MessageRate}}}, or {{{InstanceMean}}} parameter is used as the only value for that parameter.
!!Uniform Distribution
If {{{MessageSizeType}}} or {{{InstanceType}}} has a value of ''UNIFORM'', then the corresponding *//Min// and *//Max// parameters are used as the lower and upper bounds for the value, and as each sample is sent a new value is drawn from a uniform probability distribution between those limits.
!!Exponential Distribution
If the {{{MessageRateType}}} has a value of ''POISSON'', then the {{{MessageRate}}} parameter is used as the //lambda// parameter for an exponential probability distribution to determine the interval between writing samples.  This interval will result in a Poisson arrival time distribution of the samples as they are published.
!!Gaussian Distribution
If {{{MessageSizeType}}} or {{{InstanceType}}} has a value of ''GAUSSIAN'', then the corresponding *//Mean// and *//Deviation// parameters define a Gaussian probability distribution from which the actual value for each sample is obtained as each sample is written.  The *//Min// and *//Max// parameters are used as the lower and upper bounds for the value; values from the probability distribution that fall outside this range are replaced with the nearest extreme value.
}}}
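As an illustration, a publication fragment using a Poisson arrival rate and a Gaussian message size might look like the following.  This is a sketch only: the section name //burst// is illustrative, and it assumes {{{MessageRate}}} supplies the Poisson mean and {{{MessageSize}}} the Gaussian mean, as described in the table above.
{{{
[publication/burst]
Topic = joystick
TransportIndex = 1
MessageRateType = POISSON
MessageRate = 500
MessageSizeType = GAUSSIAN
MessageSize = 1000
MessageDeviation = 250
MessageMin = 100
MessageMax = 2000
Associations = 1
}}}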
!Example
As an example, a publication section like the following would result in a publication that starts sending after associating with a single subscription, and sends 1,000 byte samples at 1,000 samples per second during the test.
{{{
[publication/move]
Topic = joystick
TransportIndex = 1
MessageSizeType = FIXED
MessageSize = 1000
MessageRateType = FIXED
MessageRate = 1000
HistoryKind = LAST
HistoryDepth = 10
ResourceMaxSamplesPerInstance = 10
ReliabilityKind = RELIABLE
Partition = link1
Associations = 1
}}}
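A forwarding publication, by contrast, republishes whatever its paired subscription receives.  A minimal sketch, assuming a [subscription/listener] subsection is defined elsewhere in the same file (the names //echo// and //listener// are illustrative):
{{{
[publication/echo]
Topic = joystick
TransportIndex = 1
MessageSource = listener
Associations = 1
}}}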
<div class='viewer' id='contentWrapper'><small><a href="index.html">Home</a> > %1</small><h1 class='publishLogo'>%0</h1>
<div id='publishContentWrapper'>
<h2>%1</h2>
<ul>%2
</ul>
<small>Published: %6</small>
</div></div>
/***
|''Name:''|Publish Macro|
|''Version:''|2.4.1 (2 July 2008)|
|''Source''|http://jackparke.googlepages.com/jtw.html#PublishMacro ([[del.icio.us|http://del.icio.us/post?url=http://jackparke.googlepages.com/jtw.html%23PublishMacro]])|
|''Author:''|[[Jack]]|
|''Type:''|Macro|
!Description
<<doPublish>> tiddlers tagged with these tags <<option txtPublishTags>> (comma separated) as HTML pages to the subfolder 'publish' (you must create this). Use the [[PublishTemplateHead]] and [[PublishTemplateBody]] templates to style your pages and the [[PublishIndexTemplate]] to define an index page. For publishing individual tiddlers the [[PublishTemplateBodySingle]] template is used.
!Usage
To publish all tagged tiddlers:
{{{<<doPublish>>}}} <<doPublish>>
To publish a single tiddler, use the {{{<<publishTiddler>>}}} macro or add the "publishTiddler" command to your ViewTemplate
!Template placeholders
|!Placeholder|!Meaning|
|%0|Your SiteTitle "<<tiddler SiteTitle>>"|
|%1|The current tiddler title|
|%2|The rendered tiddler HTML|
|%3|CSV list of tags|
|%4|Tiddler modifier|
|%5|Tiddler modified date|
|%6|Tiddler creation date|
|%7|Tiddler wiki text|
!Revision History
* Original by [[Jack]] 24 May 2006
* Updated 2 Jan 2007
* Refactored 4 Jan 2007
* Small improvements
* Publish single tiddlers
* Template placeholder %7 for tiddler's wiki text

!Code
***/
//{{{
version.extensions.doPublish = {
 major: 2,
 minor: 4,
 revision: 1,
 date: new Date("July 2, 2008")
};
config.macros.doPublish = {
 label: "publish",
 prompt: "Publish Tiddlers as HTML files"
};
if (config.options.txtPublishTags == undefined) config.options.txtPublishTags = "Publish";
config.shadowTiddlers.PublishTemplateHead = '<title>%0 - %1</title>\n<link rel="stylesheet" type="text/css" href="style.css"/>\n<meta name="keywords" content="%3"/>'
config.shadowTiddlers.PublishTemplateBody = '<div class=\'viewer\' id=\'contentWrapper\'><small><a href=\"./publish/index.html\">Home</a> > %1</small><h1>%0</h1>\n<h2>%1</h2>\n%2\n<hr>Tags: %3\n<hr>%4, %5&nbsp;(created %6)\n</div>\n'
config.shadowTiddlers.PublishTemplateBodySingle = '<h1>%0</h1>\n<h2>%1</h2>\n%2\n<hr>Tags: %3\n<hr>%4, %5&nbsp;(created %6)\n</div>\n'
config.shadowTiddlers.PublishIndexTemplate = '<div class=\'viewer\' id=\'contentWrapper\'><small><a href="./publish/index.html">Home</a> > %1</small><h1>%0</h1><h2>%1</h2>\n<ul>%2\n</ul>\n<small>Published: %6</small>\n</div>\n';
config.macros.doPublish.handler = function(place)
 {
 if (!readOnly)
 createTiddlyButton(place, this.label, this.prompt,
 function() {
  doPublish();
  return false;
 },
 null, null, this.accessKey);

}
config.macros.publishTiddler = {
 label : 'publish',
 prompt : 'Publish this tiddler as an HTML file.',
 handler : function(place,macroName,params,wikifier,paramString,tiddler)
 {
  var btn = createTiddlyButton(place, this.label, this.prompt,
  function(e) {
   if(!e) var e = window.event;
   publishTiddler(this.getAttribute('tiddler'))
   return false;
  },
  null, null, this.accessKey);
  btn.setAttribute('tiddler', tiddler.title);
}}
config.commands.publishTiddler = {handler : function(event,src,title) {publishTiddler(title);},text: "publish", tooltip: "Publish this tiddler as HTML"};
function publishTiddler(title) {
 //debugger

 var PublishFolder = getWikiPath('publish');
 var place = document.getElementById(story.container)
 var HTMLTemplateHead = store.getTiddlerText("PublishTemplateHead");
 var HTMLTemplateBody = store.getTiddlerText("PublishTemplateBodySingle") || store.getTiddlerText("PublishTemplateBody");
 HTMLTemplateBody = renderTemplate(HTMLTemplateBody)
 HTMLTemplateBody = wiki2Web(HTMLTemplateBody);

 var tiddler = store.getTiddler(title);
 var tiddlerText = store.getValue(tiddler, 'text');
 var tiddlerHTML = wikifyStatic(tiddlerText);
 var HTML = '<html>\n\<head>\n' + HTMLTemplateHead + '\n</head>\n<body>\n' + HTMLTemplateBody + '\n</body>\n</html>';
 HTML = HTML.format([
  wikifyPlain("SiteTitle").htmlEncode(),
  tiddler.title.htmlEncode(),
  wiki2Web(tiddlerHTML),
  tiddler.tags.join(", "),
  tiddler.modifier,
  tiddler.modified.toLocaleString(),
  tiddler.created.toLocaleString(),
  tiddlerText
 ]);
 saveFile(PublishFolder + tiddler.title.filenameEncode() + ".html", HTML);
 //story.closeTiddler(tiddler.title);
 var indexWin = window.open((PublishFolder + title.filenameEncode() + ".html").replace(/\\/g, "/"), null);
 indexWin.focus();
}
function doPublish() {
 var savedTiddlers = [];
 var tiddlers = store.getTiddlers("title");
 var place = document.getElementById(story.container)
 var HTMLTemplateHead = store.getTiddlerText("PublishTemplateHead");
 var HTMLTemplateBody = store.getTiddlerText("PublishTemplateBody");
 HTMLTemplateBody = renderTemplate(HTMLTemplateBody)
 HTMLTemplateBody = wiki2Web(HTMLTemplateBody);

 var PublishTags = config.options.txtPublishTags || "publish";
 PublishTags = PublishTags.split(",")
 var PublishFolder = getWikiPath('publish');
 if (!PublishFolder) return;
 var indexFile = "";

 var indexFileTemplate = store.getTiddlerText("PublishIndexTemplate");
 // This does not allow <<myMacro>> but wants <div macro="myMacro">
 indexFileTemplate = renderTemplate(indexFileTemplate)
 // This option allows WIKI-syntax but is limited in its HTML capabilities
 //indexFileTemplate = wikifyStatic(indexFileTemplate)

 for (var t = 0; t < tiddlers.length; t++) {
  var tiddler = tiddlers[t];
  if (tiddler.tags.containsAny(PublishTags)) {
   var tiddlerText = store.getValue(tiddler, 'text');
   var tiddlerHTML = wikifyStatic(tiddlerText);
   var HTML = '<html>\n\<head>\n' + HTMLTemplateHead + '\n</head>\n<body>\n' + HTMLTemplateBody + '\n</body>\n</html>';
   HTML = HTML.format([
   wikifyPlain("SiteTitle").htmlEncode(),
   tiddler.title.htmlEncode(),
   wiki2Web(tiddlerHTML),
   tiddler.tags.join(", "),
   tiddler.modifier,
   tiddler.modified.toLocaleString(),
   tiddler.created.toLocaleString(),
  tiddlerText
   ]);
   //saveFile(PublishFolder + tiddler.created.formatString("YYYY0MM0DD") + ".html", HTML);
   saveFile(PublishFolder + tiddler.title.filenameEncode() + ".html", HTML);
   indexFile += "<li><a href=\"" + tiddler.title.filenameEncode() + ".html" + "\" class=\"tiddlyLink tiddlyLinkExisting\">" + tiddler.title + "</a></li>\n";
   story.closeTiddler(tiddler.title);

  }

 }
 indexFileTemplate = '<html>\n\<head>\n' + HTMLTemplateHead + '\n</head>\n<body>\n' + indexFileTemplate + '\n</body>\n</html>';
 indexFileTemplate = indexFileTemplate.format([wikifyPlain("SiteTitle").htmlEncode(), wikifyPlain("SiteSubtitle").htmlEncode(), "%2", "", "", "", (new Date()).toLocaleString()])

 indexFile = indexFileTemplate.replace("%2", indexFile)
 indexFile = wiki2Web(indexFile);
 saveFile(PublishFolder + "index.html", indexFile)
 saveFile(PublishFolder + "style.css", store.getTiddlerText("StyleSheet") + store.getTiddlerText("StyleSheetLayout") + store.getTiddlerText("StyleSheetColors"))
 var indexWin = window.open("file://" + PublishFolder.replace(/\\/g, "/") + "index.html", null);
 indexWin.focus();

}

function renderTemplate(html) {
 var result = document.createElement("div");
 result.innerHTML = html;
 applyHtmlMacros(result, null);
 var temp = result.innerHTML;
 //result.parentNode.removeChild(result);
 return temp;

}

// Convert wikified text to html
function wiki2Web(wikiHTML) {
 //var regexpLinks = new RegExp("<a tiddlylink=.*?</a>", "img");
 var regexpLinks = /<a[^>]+tiddlylink\s*=\s*["']?\s*?([^ "'>]*)\s*["']?[^>]*>[^<]+<\/a>/img;
 var result = wikiHTML.match(regexpLinks);
 if (result) {
  for (i = 0; i < result.length; i++) {
   var className = result[i].match(/ class="(.*?)"/i) ? result[i].match(/ class="(.*?)"/i)[1] : "";
   var tiddlerName = result[i].match(/ tiddlylink="(.*?)"/i)[1];
   var url = tiddlerName.htmlDecode().filenameEncode() + ".html";
   var tiddlerLabel = result[i].match(/">(.*?)<\/a>/i)[1];
   if (!className.match(/tiddlyLinkNonExisting/i))
   wikiHTML = wikiHTML.myReplace(result[i], "<a class=\"" + className + "\" href=\"" + url + "\">" + tiddlerLabel + "</a>");
   else
   wikiHTML = wikiHTML.myReplace(result[i], "<a class=\"" + className + "\" title=\"Page does not exist\" href=\"#\">" + tiddlerName + "</a>");

  }
  wikiHTML = wikiHTML.replace(/ href="http:\/\//gi, " target=\"_blank\" href=\"http://");

 }
 return wikiHTML

}
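// Determine the directory containing the current wiki file, with the optional folderName appended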
function getWikiPath(folderName) {
 var originalPath = document.location.toString();
 if (originalPath.substr(0, 5) != 'file:') {
  alert(config.messages.notFileUrlError);
  if (store.tiddlerExists(config.messages.saveInstructions))
  story.displayTiddler(null, config.messages.saveInstructions);
  return;

 }
 var localPath = getLocalPath(originalPath);
 var backSlash = localPath.lastIndexOf('\\') == -1 ? '/': '\\';
 var dirPathPos = localPath.lastIndexOf(backSlash);
 var subPath = localPath.substr(0, dirPathPos) + backSlash + (folderName ? folderName + backSlash: '');
 return subPath;

}

// Replace without regex
String.prototype.myReplace = function(sea, rep) {
 var t1 = this.indexOf(sea);
 var t2 = parseInt(this.indexOf(sea)) + parseInt(sea.length);
 var t3 = this.length;
 return this.substring(0, t1) + rep + this.substring(t2, t3)

}
// Convert illegal characters to underscores
String.prototype.filenameEncode = function()
 {
 return (this.toLowerCase().replace(/[^a-z0-9_-]/g, "_"));

}
//}}}
<div class='viewer' id='contentWrapper'><small><a href="./publish/index.html">Home</a> > %1</small><h1>%0</h1>
<h2>%1</h2>
%2
<hr>Tags: %3
<hr>%4, %5&nbsp;(created %6)
</div>
<h1>%0</h1>
<h2>%1</h2>
%2
<hr>Tags: %3
<hr>%4, %5&nbsp;(created %6)
</div>
The steps to run a test using this framework are described below.
The description is based on a Linux platform using the GNU toolchain.  If you are using a different platform or tools, the instructions will need to be modified to suit your environment.
{{itemBlock{
!Obtain and build ~OpenDDS and the ~OpenDDS-Bench software.
Find the OpenDDS software build and install instructions at [[opendds.org|http://www.opendds.org/]].  Each host participating in a test will need to have the software available.  For homogeneous hosts that can execute software built on the same kind of host, the installation steps below show a way to distribute the software.  The software will need to be built for each host type.  For example, if both Linux and Windows machines will be participating in the testing, then a build for each host type will need to be performed and installed on hosts of that type.

Once OpenDDS and the libraries it depends on have been built, the OpenDDS-Bench software needs to be built.  Do this by:
<<<
 shell> cd $~DDS_ROOT/performance-tests/Bench
 shell> mwc.pl -type gnuace
 shell> make
<<<
}}}
{{itemBlock{
!Locate the ~OpenDDS-Bench performance testing framework software.

Do this by:
<<<
shell> export ~BENCH_ROOT=$~DDS_ROOT/performance-tests/Bench
<<<
}}}
{{itemBlock{
!Install the ~OpenDDS-Bench performance testing framework on all hosts involved with the test.
Once the OpenDDS and OpenDDS-Bench software has been built, it needs to be made available on each host participating in the testing.  There is a helper script located in //$~BENCH_ROOT/tools// to simplify this process for tests that execute on homogeneous systems.  This can be done by following these steps:
<<<
 shell> cd $~BENCH_ROOT
 shell> tools/mkpkg bench
 shell> tar jcf bench.tar.bz2 bench
 shell> cp bench.tar.bz2 <dest>
<<<
Where ''<dest>'' is the destination directory where the tarfile will be uncompressed and from which the tests will be executed.  Use the ''scp'' command to transfer the tarfile between hosts if the disks are not directly accessible.  It is recommended, though not required, to have a separate destination directory for each host in the test.  It is best if the directories where the test processes will execute are local to the machine executing the processes.
Once the tarfile has been distributed, unpack it and save the base directory of the distributed software in a shell variable to simplify test execution later.
<<<
 shell> cd <dest>
 shell> tar xf bench.tar.bz2
 shell> export BASEDIR=`pwd`/bench
<<<
}}}
{{itemBlock{
!Start the testing on each host.
Once the test software is available on each host participating in the test, the test can be executed.  A separate directory for each host is recommended to avoid inadvertently colliding output.
The [[repository|DCPSInfoRepo]] needs to be started on one host prior to starting any {{{testprocess}}} executables.  The ''<host>'' and ''<port>'' are where the repository will listen for CORBA connection requests from the participating processes.
<<<
 shell> $BASEDIR/bin/run_test -S -h <host>:<port>
<<<
The following steps need to be performed on each host for each test.  The ''<host>'' and ''<port>'' should refer to the location where the repository is listening and should be reachable from the current host.  The ''<transportconfig>'' file specifies the transport configuration information, and the ''<testconfig>'' file contains the [[test specification|Test Specification]] information.
<<<
 shell> mkdir -p <testdir>
 shell> cd <testdir>
 shell> $BASEDIR/bin/run_test -P -h <host>:<port> -i <transportconfig> -s <testconfig>
<<<
Note that it is important to start the tests within 60 seconds of each other.  This is due to the script timeout value, which is set to 60 seconds after the specified duration.  The test execution does not start until the entire test is connected, so if any host is not started within the 60 second interval, processes that were started first will time out before the test is complete.
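For example, with {{{-t 120}}} each process will give up 120 + 60 = 180 seconds after it starts; if the last host joins more than 60 seconds after the first, the earliest processes will time out before the test completes.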
}}}
<html>
<head>
<title>reduce-latency-data.pl - reduce test results into plottable data</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>reduce-latency-data.pl - reduce test results into plottable data</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  reduce-latency-data.pl &lt;infile&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script processes the input file and prints converted data to
standard output.</p>
<p>The input file is expected to be in the format produced by the OpenDDS
performance test bench latency data summary.</p>
<p>The output consists of data suitable for plotting using GNUPlot.  There
are 5 indexed sections with the following data in the columns of each
section:</p>
<dl>
<dt><strong><a name="index_0" class="item"><strong>Index 0</strong></a></strong></dt>

<dd>
<p>Latency and jitter data.</p>
<p>This index does not include a sample number.  This can be derived by
using $0 for the x axis in the GNUPlot plot command.</p>
<dl>
<dt><strong><a name="column_1" class="item"><strong>Column 1</strong></a></strong></dt>

<dd>
<p>Individual data points for 1/HOPS full path latency from the input file.
HOPS is set internally to a value of 2 corresponding to the
pre-configured OpenDDS-Bench latency tests.</p>
</dd>
<dt><strong><a name="column_2" class="item"><strong>Column 2</strong></a></strong></dt>

<dd>
<p>Individual data points for jitter between successive latency data points.</p>
</dd>
</dl>
</dd>
<dt><strong><a name="index_1" class="item"><strong>Index 1</strong></a></strong></dt>

<dd>
<p>Latency histogram data.  This index has binned data derived from the
index 0 / column 1 data points.  There are currently 25 bins into which
the data is placed.</p>
<dl>
<dt><strong><a name="column_12" class="item"><strong>Column 1</strong></a></strong></dt>

<dd>
<p>The center of each bin.</p>
</dd>
<dt><strong><a name="column_22" class="item"><strong>Column 2</strong></a></strong></dt>

<dd>
<p>The frequency (number of samples) in the bin.</p>
</dd>
</dl>
</dd>
<dt><strong><a name="index_2" class="item"><strong>Index 2</strong></a></strong></dt>

<dd>
<p>Jitter histogram data.  This index has binned data derived from the
index 0 / column 2 data points.  There are currently 25 bins into which
the data is placed.</p>
<dl>
<dt><strong><a name="column_13" class="item"><strong>Column 1</strong></a></strong></dt>

<dd>
<p>The center of each bin.</p>
</dd>
<dt><strong><a name="column_23" class="item"><strong>Column 2</strong></a></strong></dt>

<dd>
<p>The frequency (number of samples) in the bin.</p>
</dd>
</dl>
</dd>
<dt><strong><a name="index_3" class="item"><strong>Index 3</strong></a></strong></dt>

<dd>
<p>Latency quantile data.  This index has sorted latency data derived from
the index 0 / column 1 data points.</p>
<dl>
<dt><strong><a name="column_14" class="item"><strong>Column 1</strong></a></strong></dt>

<dd>
<p>Latency data from Index 0, sorted.</p>
</dd>
</dl>
</dd>
<dt><strong><a name="index_4" class="item"><strong>Index 4</strong></a></strong></dt>

<dd>
<p>Jitter quantile data.  This index has sorted jitter data derived from
the index 0 / column 2 data points.</p>
<dl>
<dt><strong><a name="column_15" class="item"><strong>Column 1</strong></a></strong></dt>

<dd>
<p>Jitter data from Index 0, sorted.</p>
</dd>
</dl>
</dd>
</dl>
<p>Each index section has a header comment.  The histogram sections (Index 1
and Index 2) have statistical summary data included as well.  This
statistical summary data is usable by the 'extract.pl' script to provide
plottable summary data.</p>
<p>If the data produced by this script is to be used by the 'extract.pl'
script, then the output file needs to be named such that it consists of
three '-' separated fields followed by the extension &quot;.gpd&quot; (representing
GNUPlot data file).  The fields represent the test type, the transport
type, and the message size of the test data.</p>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  reduce-latency-data.pl tcp/latency-1000.data &gt; data/latency-tcp-1000.gpd</pre>

</body>
</html>
<html>
<head>
<title>run_test - Execute test processes for distributed testing</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#options">OPTIONS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#examples">EXAMPLES</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>run_test - Execute test processes for distributed testing</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
 run_test [options]</pre>
<p>
</p>
<hr />
<h1><a name="options">OPTIONS</a></h1>
<dl>
<dt><strong><a name="help" class="item"><strong>-?</strong> | <strong>--help</strong></a></strong></dt>

<dd>
<p>Print a brief help message and exits.</p>
</dd>
<dt><strong><a name="man" class="item"><strong>--man</strong></a></strong></dt>

<dd>
<p>Prints this manual page and exits.</p>
</dd>
<dt><strong><a name="x_noaction" class="item"><strong>-x</strong> | <strong>--noaction</strong></a></strong></dt>

<dd>
<p>Print the commands that would be executed with the current set of command
line options and exit without starting any processes.</p>
</dd>
<dt><strong><a name="v_verbose" class="item"><strong>-v</strong> | <strong>--verbose</strong></a></strong></dt>

<dd>
<p>Print additional information while executing.</p>
</dd>
<dt><strong><a name="v_orbverboselogging_number" class="item"><strong>-V</strong> | <strong>--ORBVerboseLogging=NUMBER</strong></a></strong></dt>

<dd>
<p>Sets the -ORBVerboseLogging option to NUMBER.</p>
<p>The default value is 0.</p>
<p>The value is set to 1 if any non-zero value is specified.  A value of
zero will omit the ORBVerboseLogging specification from the process
command line.</p>
</dd>
<dt><strong><a name="d_number_debug_number" class="item"><strong>-d NUMBER</strong> | <strong>--debug=NUMBER</strong></a></strong></dt>

<dd>
<p>Sets the -DCPSDebugLevel option value.  A value of 0 will omit the
DCPSDebugLevel specification from the process command line.</p>
<p>The default value is 0.</p>
</dd>
<dt><strong><a name="t_number_tdebug_number" class="item"><strong>-T NUMBER</strong> | <strong>--tdebug=NUMBER</strong></a></strong></dt>

<dd>
<p>Sets the -DCPSTransportDebugLevel option value.  A value of 0 will omit
the DCPSTransportDebugLevel specification from the process command line.</p>
<p>The default value is 0.</p>
</dd>
<dt><strong><a name="r_number_rdebug_number" class="item"><strong>-R NUMBER</strong> | <strong>--rdebug=NUMBER</strong></a></strong></dt>

<dd>
<p>Sets the -DCPSDebugLevel option value for the repository process.  A
value of 0 will omit the DCPSDebugLevel specification from the repository
process command line.</p>
<p>The default value is 0.</p>
</dd>
<dt><strong><a name="t_number_duration_number" class="item"><strong>-t NUMBER</strong> | <strong>--duration=NUMBER</strong></a></strong></dt>

<dd>
<p>Limits the execution time of the test.  If not specified, then any test
or repository process that is started will execute until the script is
interrupted.</p>
<p>The default value is unspecified.</p>
</dd>
<dt><strong><a name="s_startrepo" class="item"><strong>-S</strong> | <strong>--startrepo</strong></a></strong></dt>

<dd>
<p>Causes a repository process to be started.  Only a single repository
process will be started per invocation.</p>
<p>The default value is to not start a repository process.</p>
</dd>
<dt><strong><a name="p_starttest" class="item"><strong>-P</strong> | <strong>--starttest</strong></a></strong></dt>

<dd>
<p>Causes one or more test processes to be started.  One process will be
started for each test file specified by the <strong>-s</strong> option.</p>
<p>The default value is to not start a test process.</p>
</dd>
<dt><strong><a name="f_file_dfile_file" class="item"><strong>-f FILE</strong> | <strong>--dfile=FILE</strong></a></strong></dt>

<dd>
<p>Sets the -ORBLogFile option value.  If left unspecified, the ORBLogFile
specification is omitted from the process command line.</p>
<p>The default value is unspecified.</p>
</dd>
<dt><strong><a name="c_types_collect_types" class="item"><strong>-C TYPES</strong> | <strong>--collect=TYPES</strong></a></strong></dt>

<dd>
<p>Starts statistics collection for the specified TYPES.  TYPES may contain
one or more of the values: <a href="#n"><code>n</code></a>, <a href="#s"><code>s</code></a>, and <a href="#p"><code>p</code></a>, or the collective
specification <a href="#all"><code>all</code></a>.
The output for any given statistic is placed in a file named as specified
by the <strong>-O FORMAT</strong> command line option.  The default is a file with a
basename of the TYPE and a decorator of <code>-&lt;pid&gt;.log</code>.</p>
<dl>
<dt><strong><a name="n" class="item"><strong>n</strong></a></strong></dt>

<dd>
<p>Causes network statistics to be gathered.  A <code>netstat -ntpc</code> command is
started for each test process and the output filtered by its process ID.</p>
</dd>
<dt><strong><a name="s" class="item"><strong>s</strong></a></strong></dt>

<dd>
<p>Causes system statistics to be gathered.  A single <code>vmstat 1</code> command is
started.  If the PID value is specified as part of the statistic
filename, the PID value for the test script process is used.</p>
</dd>
<dt><strong><a name="p" class="item"><strong>p</strong></a></strong></dt>

<dd>
<p>Causes process statistics to be gathered.  A <code>top -bd 1 -p {pid}</code>
command is started for each test process and the output filtered by its
process ID.</p>
</dd>
<dt><strong><a name="all" class="item"><strong>all</strong></a></strong></dt>

<dd>
<p>Causes all statistics described above to be gathered.  This is a synonym
for <em>nsp</em>.</p>
</dd>
</dl>
<p>The default is unspecified.  This results in no statistics being
collected.</p>
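<p>For example, <code>-C ns</code> would collect both network and system
statistics, while <code>-C all</code> collects all three types.</p>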
</dd>
<dt><strong><a name="o_format_outputdecorator_format" class="item"><strong>-O FORMAT</strong> | <strong>--outputdecorator=FORMAT</strong></a></strong></dt>

<dd>
<p>Establishes the format of the statistics gathering output file names.
The format is specified as a string with simple substitution performed to
replace some characters with other information.  This is a simple
substitution and no escapes are allowed.</p>
<p>Statistics output filenames are formed starting with the type of
statistic being gathered concatenated with the decorator defined here,
expanded with the formatting information at the time of execution.  The
types of statistics currently include <code>system</code>, <code>process</code>, and
<code>network</code>.</p>
<p>The characters replaced in the format specification are:</p>
<dl>
<dt><strong><a name="p" class="item"><strong>P</strong></a></strong></dt>

<dd>
<p>replaced with the PID of the process being monitored.</p>
</dd>
<dt><strong><a name="t" class="item"><strong>T</strong></a></strong></dt>

<dd>
<p>replaced with a timestamp that includes a 4 digit year, 2 digit month, 2
digit day of month, 2 digit hour, 2 digit minute, and 2 digit second.
The timestamp is taken near the time the statistics collection is started.</p>
</dd>
<dt><strong><a name="h" class="item"><strong>H</strong></a></strong></dt>

<dd>
<p>replaced by the hostname of the machine executing the script.</p>
</dd>
</dl>
<p>The default is &quot;-P.log&quot;, which results in output files with names similar
to <code>system-5436.log</code>.</p>
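<p>As an illustration (the host, timestamp, and PID values here are
hypothetical), a format of <code>-H-T-P.log</code> could produce a system
statistics file named <code>system-perf1-20110324143000-5436.log</code>.</p>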
</dd>
<dt><strong><a name="h_fqdn_repohost_fqdn" class="item"><strong>-h FQDN</strong> | <strong>--repohost=FQDN</strong></a></strong></dt>

<dd>
<p>This is the fully qualified domain name and port where the OpenDDS
repository may be found.</p>
<p>The default value is 'localhost:2112'.</p>
</dd>
<dt><strong><a name="i_file_inifile_file" class="item"><strong>-i FILE</strong> | <strong>--inifile=FILE</strong></a></strong></dt>

<dd>
<p>OpenDDS configuration filename.  This defines the configuration
information for the OpenDDS service.</p>
<p>The default is to use the file named 'transport.ini' located in the 'etc'
directory relative to the project root (the parent directory of the
directory from which the command was executed).</p>
</dd>
<dt><strong><a name="s_filelist_scenario_filelist" class="item"><strong>-s FILELIST</strong> | <strong>--scenario=FILELIST</strong></a></strong></dt>

<dd>
<p>Test scenario definition filenames.  This defines the scenarios to execute
for the test.  Each names an 'ini' style file that contains information
about all publications and subscriptions to execute for this test
execution.  This can be one or more filenames, separated by commas.  One
test process will be started (if requested) to process each named
scenario configuration file.</p>
<p>The default is unspecified.  This results in no test processes
being started even if the <strong>-P</strong> argument is given.</p>
</dd>
<dt><strong><a name="r_file_rawdatafile_file" class="item"><strong>-r FILE</strong> | <strong>--rawdatafile=FILE</strong></a>  [DEPRECATED]</strong></dt>

<dd>
<p>Raw data output filename.  This file is where any raw latency data
collected during the test will be written.</p>
<p>The default is unspecified.</p>
<p><em>This is deprecated in favor of the test configuration specification for
this filename.</em></p>
</dd>
</dl>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script manages execution of the processes needed for distributed
testing of OpenDDS using the OpenDDS-Bench performance testing framework.
The processes used by the framework include the OpenDDS-specific
repository process and the test-specific process.  The repository
executable is the standard OpenDDS <code>DCPSInfoRepo</code> program.  The framework
<code>testprocess</code> program is specific to this testing and provides the
ability to start multiple publications and/or subscriptions within a
single process.</p>
<p>It is possible to start any number of <code>testprocess</code> programs at once.
A separate process will be started for each configuration file supplied.
The same configuration file can be included more than once to start
separate processes with the same configuration.</p>
<p>Processes started by this script will either execute until terminated by
the user (no duration specified) or until a specified duration has
elapsed.  For the <code>testprocess</code> commands, the duration is passed to the
program to allow it to terminate cleanly.  The script will wait 60
seconds beyond this time and then terminate the process by force.</p>
<p>This script will establish the environment for the executable processes
by adding the test library to the runtime library search path.</p>
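<p>On Linux, for example, this typically means prepending the framework
library directory to the <code>LD_LIBRARY_PATH</code> environment variable;
the exact directory depends on the installation layout.</p>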
<p>
</p>
<hr />
<h1><a name="examples">EXAMPLES</a></h1>
<dl>
<dt><strong><a name="bin_run_test_vx_p_s_s1_ini" class="item"><strong>bin/run_test -vx -P -s s1.ini</strong></a></strong></dt>

<dd>
<p>prints the commands that would be invoked when starting a single test
process with the s1.ini test specification.  This will also be verbose
during setup processing.</p>
</dd>
<dt><strong><a name="bin_run_test_d_10_c_n_t_4_p_s_test_ini_f_test_log_h_localhost_2038" class="item"><strong>bin/run_test -d 10 -C n -T 4 -P -s test.ini -f test.log -h localhost:2038</strong></a></strong></dt>

<dd>
<p>starts a test process using the <code>test.ini</code> specification file and expecting
to find the repository process at localhost:2038.  It will run with
<code>DCPSDebugLevel</code> of 10, <code>DCPSTransportDebugLevel</code> of 4, and direct the logging
output to the <code>test.log</code> file.  Network statistics will be gathered for
connections to the test process.</p>
</dd>
<dt><strong><a name="bin_run_test_x_t_120_s_scenario1_ini_scenario1_ini_other_ini" class="item"><strong>bin/run_test -x -t 120 -s scenario1.ini,scenario1.ini,other.ini</strong></a></strong></dt>

<dd>
<p>prints the commands that would be invoked when starting 3 test processes:
two using the scenario1.ini specification file and one using the other.ini
specification file.  The test would be scheduled to last for 2 minutes.</p>
</dd>
<dt><strong><a name="bin_run_test_vd10t4vcall_o_h_t_p_log_s_test_ini_h_machine_domain_com_2112" class="item"><strong>bin/run_test -vd10T4VCall -O-H-T-P.log -s test.ini -h machine.domain.com:2112</strong></a></strong></dt>

<dd>
<p>starts a test process using the <code>test.ini</code> specification file and
expecting to connect to the repository at <code>machine.domain.com:2112</code>.  It
will run with <code>DCPSDebugLevel</code> of 10, <code>DCPSTransportDebugLevel</code> of 4,
<code>ORBVerboseLogging</code> enabled and both the script and test process will
execute in verbose mode.  The log output will be directed to the standard
output.</p>
<p>All statistics - system, network, and process - will be gathered.
Statistics will be placed in files named using the statistic type, the
hostname, a timestamp, and the PID of the monitored process.  These
filenames will appear as: <code>system-&lt;host&gt;-&lt;time&gt;-&lt;pid&gt;.log</code>,
<code>process-&lt;host&gt;-&lt;time&gt;-&lt;pid&gt;.log</code>, and
<code>network-&lt;host&gt;-&lt;time&gt;-&lt;pid&gt;.log</code>.</p>
</dd>
</dl>

</body>

</html>
Scripts are available for building, executing, analyzing, and visualizing performance tests for OpenDDS.  These scripts can be organized into broad categories of:
;Execution
:used to execute or help test execution of performance tests.
;Analysis
:used to perform data reduction on raw test results.
;Visualization
:used to plot or help plot results from performance test execution.
;Commands
:used to list or describe commands or sets of commands to perform specific tasks related to the performance testing.
;Documentation
:used to provide or format information for documentation purposes.
;Utility
:used for miscellaneous purposes to assist the performance testing process.
| Scripts (relative to $~BENCH_ROOT) |c
| ''Script'' | ''Category'' | ''Type'' | ''Description'' |
|[[$BENCH_ROOT/bin/run_test|RunTestManPage]] |Execution |Perl |script to execute performance tests. |
|[[$BENCH_ROOT/tests/latency/run_test.pl|LatencyRunTestManPage]] |Execution |Perl |script to execute latency performance tests. |
|[[$BENCH_ROOT/tests/thru/run_test.pl|ThroughputRunTestManPage]] |Execution |Perl |script to execute throughput performance tests. |
|[[$BENCH_ROOT/bin/reduce_latency.pl|ReduceLatencyManPage]] |Analysis |Perl |script to perform data reduction on latency test data results. |
|[[$BENCH_ROOT/bin/extract_latency.pl|ExtractLatencyManPage]] |Analysis |Perl |script to perform data reduction on latency test data results. |
|[[$BENCH_ROOT/bin/extract_throughput.pl|ExtractThroughputManPage]] |Analysis |Perl |script to perform data reduction on throughput test data results. |
|[[$BENCH_ROOT/bin/gen-latency-stats.pl|GenstatsManPage]] |Analysis |Perl |script to create file with GNUPlot label string definitions containing summary statistics information to be added as labels to histogram charts. |
|[[$BENCH_ROOT/bin/lj-plots.gpi|LJPlotsManPage]] |Visualization |GNUPlot |script to create Latency and Jitter timeline charts and histograms in a quad-plot format. |
|[[$BENCH_ROOT/bin/plot-jitter.gpi|PlotJitterManPage]] |Visualization |GNUPlot |script to create a plot of jitter summary data for all tests. |
|[[$BENCH_ROOT/bin/plot-density.gpi|PlotDensityManPage]] |Visualization |GNUPlot |script to create a plot of density data for all tests. |
|[[$BENCH_ROOT/bin/plot-quantiles.gpi|PlotQuantilesManPage]] |Visualization |GNUPlot |script to create a plot of quantile data for all tests. |
|[[$BENCH_ROOT/bin/plot-transports.gpi|PlotTransportsManPage]] |Visualization |GNUPlot |script to create a plot of summary latency data for all tests. |
|[[$BENCH_ROOT/bin/plot-throughput-testformats.gpi|PlotThroughputFormatsManPage]] |Visualization |GNUPlot |script to create a plot of summary throughput data organized by test type. |
|[[$BENCH_ROOT/bin/plot-throughput-transports.gpi|PlotThroughputTransportsManPage]] |Visualization |GNUPlot |script to create a plot of summary throughput data organized by transport type. |
|[[$BENCH_ROOT/tools/plot-latency.gpi|PlotLatencyManPage]] |Commands |GNUPlot |script to create plots for gathered latency data. |
|[[$BENCH_ROOT/tools/plot-throughput.gpi|PlotThroughputManPage]] |Commands |GNUPlot |script to create plots for gathered throughput data. |
|[[$BENCH_ROOT/tools/plot-test-results.sh|PlotTestResultsManPage]] |Commands |bash |commands to plot the reduced data from the pre-configured tests. |
|[[$BENCH_ROOT/tools/generate-test-results.sh|GenerateTestResultsManPage]] |Commands |bash |commands to reduce data from the pre-configured tests. |
|[[$BENCH_ROOT/tools/mkpkg|MkpkgManPage]] |Utility |bash |commands to package a working test suite for transfer to a similar machine for test execution. |
|[[$BENCH_ROOT/bin/mktable.pl|MktableManPage]] |Documentation |Perl |script to create a table of results.  This currently creates only a TiddlyWiki format table. |
|[[$BENCH_ROOT/bin/expandColors.pl|ExpandcolorsManPage]] |Documentation |Perl |script to replace color table lookups from TiddlyWiki in the data table from //mktable.pl// with actual color references. |
a:hover {
    text-decoration: none;
}

.publishLogo {
    position: relative;
    background-color:[[ColorPalette::Background]];
    background-image: url(images/opendds.png);
    background-position: left;
    background-repeat: no-repeat;
    color:[[ColorPalette::SecondaryDark]];
    padding: 20px 0 5px 72px;
    border-bottom: 1px solid [[ColorPalette::SecondaryLight]];
}


.headerForeground {
    position: relative;
    background-color:[[ColorPalette::Background]];
    background-image: url(images/opendds.png);
    background-position: left;
    background-repeat: no-repeat;
    color:[[ColorPalette::SecondaryDark]];
    padding: 20px 0 5px 72px;
    border-bottom: 1px solid [[ColorPalette::SecondaryLight]];
}

.headerForeground a {
    color:[[ColorPalette::SecondaryDark]];
    font-weight: bold;
}

.footer {
    border-top: 1px solid [[ColorPalette::SecondaryLight]];
    color:[[ColorPalette::SecondaryLight]];
    margin-top: 20px;
    font-size: 1.1em;
}

.siteTitle {
    font-weight: bold;
}

.itemBlock {
  display: block;
  text-align: left;
  margin-left: 30px;
  margin-right: auto;
  padding: 5px;
  border: solid 1px;
}

.chunk {background:[[ColorPalette::SecondaryPale]]; border:2px; margin-left:5%; margin-right:5%;}

.viewer div.centeredTable {
	text-align: center;
}

.viewer div.centeredTable table {
	margin: 0 auto;
	text-align: left;
}

.imgcenter {
  display: block;
  text-align: center;
  margin-left: auto;
  margin-right: auto;
}

#mainMenu {
    padding: 25px 0 0 20px;
}

.toolbar {
    float: right;
}

#publishContentWrapper {margin: 0 5em 0 5em; padding: 0;}

th { border-style: inset;}
td { border-style: inset;}
!Definable Parameters
{{centeredTable{
| ''Subscription Section Keys'' |c
| //Key// | //Value// | //Notes// |
|>|>| """---""" Subscriber Qos Policy values """---""" |
|Presentation |<string> |Enumeration value of {{{INSTANCE}}}, {{{TOPIC}}}, or {{{GROUP}}} |
|~PresentationCoherent |<bool> |Booleans are represented with a numeric 0 or 1 |
|~PresentationOrdered |<bool> |Booleans are represented with a numeric 0 or 1 |
|Partition |<string> |Only single string value currently supported |
|~GroupData |<string> |&nbsp; |
|~EntityFactory |<bool> |Booleans are represented with a numeric 0 or 1 |
|>|>| """---""" ~DataReader Qos Policy values """---""" |
|Durability |<string> |Enumeration value of {{{VOLATILE}}}, {{{LOCAL}}}, {{{TRANSIENT}}}, or {{{PERSISTENT}}} |
|Deadline |<number> |&nbsp; |
|~LatencyBudget |<number> |&nbsp; |
|~LivelinessKind |<string> |Enumeration value of {{{AUTOMATIC}}}, {{{PARTICIPANT}}}, or {{{TOPIC}}} |
|~LivelinessDuration |<number> |&nbsp; |
|~ReliabilityKind |<string> |Enumeration value of {{{BEST_EFFORT}}} or {{{RELIABLE}}} |
|~ReliabilityMaxBlocking |<number> |&nbsp; |
|~DestinationOrder |<string> |Enumeration value of {{{SOURCE}}} or {{{RECEPTION}}} |
|~HistoryKind |<string> |Enumeration value of {{{ALL}}} or {{{LAST}}} |
|~HistoryDepth |<number> |&nbsp; |
|~ResourceMaxSamples |<number> |&nbsp; |
|~ResourceMaxInstances |<number> |&nbsp; |
|~ResourceMaxSamplesPerInstance |<number> |&nbsp; |
|~UserData |<string> |&nbsp; |
|~TimeBasedFilter |<number> |&nbsp; |
|~ReaderDataLifecycle |<bool> |Booleans are represented with a numeric 0 or 1 |
|>|>| """---""" Test Execution Parameters """---""" |
|Topic |<string> |Must reference by name the identifier of a [topic] subsection within the file. |
|~TransportIndex |<number> |Index into transport configurations |
|~DataCollectionFile |<string> |Filename for collected data |
|~DataCollectionBound |<number> |&nbsp; |
|~DataCollectionRetention |<string> |Enumeration value of {{{ALL}}}, {{{OLDEST}}}, or {{{NEWEST}}} |
}}}
!Test Execution Parameters
The test execution parameters allow specification of the transport used by the subscription and of the data collection performed during the test.
;{{{TransportIndex}}}
:this is specified as an index into the transport definitions.  The definitions are in the OpenDDS service configuration file read at startup.
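A minimal sketch of what such definitions might look like, assuming the {{{[transport_impl_<n>]}}} section syntax used by OpenDDS service configuration files of this era (consult the Developers Guide for the authoritative format):
{{{
[transport_impl_1]
transport_type=SimpleTcp

[transport_impl_2]
transport_type=SimpleTcp
}}}
With definitions like these, a {{{TransportIndex}}} value of 2 would select the second definition.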
Data collection is specified as a combination of where to store the results, how much data to store, and, if not all of it, which data to store.  The data collected during test operation consists of per hop latency data along with path latency data.  Summary statistics for all observed data are reported at the end of the test.  The internal collection buffers are flushed to the filesystem only after the test completes, which avoids disk writes affecting the performance being measured.  Data collection specification consists of:
;{{{DataCollectionFile}}}
:if specified, this is the output file where data will be stored at the end of the test.  It will contain summary data at the top and (per sample) detailed data after that.  If not specified, no detailed data collection will occur.
;{{{DataCollectionBound}}}
:this specifies the number of samples to retain data for if the {{{DataCollectionRetention}}} parameter has a value other than {{{ALL}}}.  This parameter is not needed if the {{{DataCollectionFile}}} parameter is not specified or if the {{{DataCollectionRetention}}} parameter has the value {{{ALL}}}.
;{{{DataCollectionRetention}}}
:this specifies what data is to be retained during the test.  Data is collected for each sample.  To reduce the impact of storing this data on the performance being measured, the data is stored in RAM until the end of the test, when it is written to the output file.  If this parameter has a value of {{{ALL}}} then all of the data will be stored in RAM.  Otherwise a fixed size buffer of size {{{DataCollectionBound}}} is created before the test starts and data is stored there.  If this parameter has a value of {{{OLDEST}}} the buffer is not written to once it fills up.  If the value is {{{NEWEST}}} then the buffer is treated as a ring buffer and the last {{{DataCollectionBound}}} samples will always be present, with samples overwritten oldest first.
!Example
An example subscription section like the following would result in a subscription named //result// that receives //status// topic data on partition //link2//, stores the most recent 5,000 samples, and writes them to the file //latency.data// at the end of the test.
{{{
[subscription/result]
Topic = status
TransportIndex = 2
Partition = link2
DataCollectionFile = latency.data
DataCollectionBound = 5000
DataCollectionRetention = NEWEST
}}}
Test execution using the framework involves a test script to manage the processing on each host involved in the testing, the OpenDDS [[repository|DCPSInfoRepo]] process, and a test process that creates the OpenDDS service Entities and executes the actual test operations.

{{itemBlock{
!run_test
Test execution using the OpenDDS-Bench framework is managed through the use of a single Perl script.  This script is located in //$~BENCH_ROOT/bin/run_test//.  It has many [[options|RunTestManPage]], which are enumerated by executing the script with either the '-?' or the '"""--"""man' command line arguments.

The {{{run_test}}} script will start either the {{{DCPSInfoRepo}}} [[repository|DCPSInfoRepo]] process or any number of {{{testprocess}}} test executables.  Each test executable is configured via files to create and enable a number of publications and subscriptions for the test.  The details of [[test specification|Test Specification]] include the publication and subscription ~QoS policy values and test management parameters, as well as those for the associated Topic and Participant Entities and for transport test support.  Each {{{testprocess}}} will be started with a separate test configuration file, but each process started by a single script will use the same transport configuration file.
}}}

{{itemBlock{
!~DCPSInfoRepo
Typically one or more test processes are started on each host involved with a test.  The {{{DCPSInfoRepo}}} [[repository|DCPSInfoRepo]] process can be established on one of the test hosts or a different host not involved with the test but reachable by all test hosts.  Since the repository is involved only during test startup and shutdown it is usually not necessary to dedicate a host for its execution.
}}}

{{itemBlock{
!testprocess
The {{{testprocess}}} test executable is what reads the configuration files, creates and manages the OpenDDS Entities, and executes the test.  Each process in the test is configured separately and managed by the {{{run_test}}} script.  Normal usage will have a single {{{run_test}}} script on each host start as many {{{testprocess}}} processes as the test requires on that host.  The {{{testprocess}}} executable is passed options from the {{{run_test}}} script, so users are not required to directly specify the command line arguments to this executable.  The executable accepts the following options:
{{indent{
; ''-v''
: be verbose when executing.  This is required to produce data for throughput testing.
; ''-d <duration>''
: execute for at least {{{duration}}} seconds
; ''-f <file>''
: use ''<file>'' as the test specification file for this execution.
; ''-t <type>''  [DEPRECATED]
: save the specified data when collecting timing statistics.  ''<type>'' is one of: ''unbounded'', ''newest'', or ''oldest''.
//this is deprecated in favor of the configuration file specification for the same parameter//
; ''-s <size>''  [DEPRECATED]
: gather the specified amount of timing data.
//this is deprecated in favor of the configuration file specification for the same parameter//
; ''-r <file>''  [DEPRECATED]
: place the gathered timing data into the specified file.
//this is deprecated in favor of the configuration file specification for the same parameter//
}}}
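For illustration only (the filename is hypothetical), an invocation assembled by the script might resemble:
{{{
testprocess -v -d 120 -f test1.ini
}}}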
This process creates the DDS Participants, Topics, Publications, and Subscriptions that are specified in the test configuration file.  It then waits until each publication has associated with the number of subscriptions specified in the configuration and starts publishing samples.  The samples are sized as specified and published at a rate specified in the configuration file.  If an interval, or test duration, was specified the publications are stopped at the end of this interval.  If no interval was specified then the test will continue until interrupted via an INT signal (Ctrl-C).  Once the publications have been stopped, the test will block until all subscriptions have released all active associations.  At this time a summary of the test results is produced and, if specified, detailed data will be stored.  If there are no publications, the test will start immediately, including the test duration interval.  If there are no subscriptions the process will terminate as soon as the publications are stopped and results stored.
}}}
Distributed tests executed by the framework will consist of many processes executing on many hosts.  There will be one process running the [[repository|DCPSInfoRepo]] for the OpenDDS service.  The transports used by the publications and subscriptions comprising the tests will be entirely specified by OpenDDS configuration files.  The DDS Participants, Topics, Publications, and Subscriptions are specified in a series of test specification files.  Each process will have a specification file defining its structure and behavior.  A single specification file can be used for more than one test process, as long as each process is identical during the testing.

The specification of transports and other service configuration aspects is described in the OpenDDS [[Developers Guide|http://downloads.objectcomputing.com/OpenDDS/OpenDDS-latest.pdf]].  The test structures and behavior are described below.

In general, each DDS Entity type specified, Participant, Topic, Publication, and Subscription will have Quality of Service policy values as well as test management properties specified.  The test specification files are in the format of //ini// files, where each section is delimited using a section header within square brackets.  Within each section are a series of ''Key'' / ''Value'' pair assignments that are used to specify parameter values for the Entity.

Each DDS Entity that is part of the test is specified by a section in a test configuration file.  It will be represented as a subsection of the Entity type (participant, topic, publication, or subscription) where the subsection name is the unique identifier for the Entity.  Subsection syntax is determined by the use of the [[ACE|http://www.cs.wustl.edu/~schmidt/ACE.html]] portability layer configuration file parser.  It uses a path syntax with slash '/' characters separating the subsection levels.

For example a test specification containing the following sections:
{{{
[participant/player]

[topic/joystick]

[topic/button]

[topic/status]

[publication/fire]

[publication/move]

[subscription/result]
}}}
specifies the creation of a DDS Participant identified as //player//, three DDS Topics identified as //joystick//, //button//, and //status//, two publications identified as //fire// and //move//, and a subscription identified as //result//.  These Entities will be created at the start of processing using the contents of the sections and destroyed at the end of processing.
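To give these sections content, each carries ''Key'' / ''Value'' assignments.  A small sketch, using only keys shown elsewhere in this guide (the complete key sets are given in the specification pages listed below):
{{{
[topic/status]
Participant = player
ReliabilityKind = RELIABLE

[subscription/result]
Topic = status
TransportIndex = 1
}}}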

The specific Entity specifications that are supported include:
#[[Participant|Participant Specifications]]
#[[Topic|Topic Specifications]]
#[[Publication|Publication Specifications]]
#[[Subscription|Subscription Specifications]]
The OpenDDS-Bench framework includes configuration files for throughput testing.  These files are located in the //$~BENCH_ROOT/tests/thru// sub-directory.  The throughput testing covers several different conditions.  These include a bidirectional test, topologically equivalent to the [[latency tests|Latency Tests]], publication bound tests, and subscription bound tests.  Publication bound testing publishes test data from one publication to two distinct subscriptions.  When these processes are on three separate hosts, the publication processing will dominate and likely be the limiting factor in throughput.  Subscription bound testing publishes test data from two separate publications to a single subscription.  When these processes are on three separate hosts, the subscription processing will dominate and likely be the limiting factor in throughput.  If the test environment does not have adequate network bandwidth, then this will dominate either or both of these tests and the CPU usage observed during the tests will give an indication of what margins are available in the test setup.  The configurations for these tests are illustrated in the following diagrams.
[img[Bidirectional Test|images/bidirtest.jpg]]
[img[Publication Bound Test|images/pubboundtest.jpg]]
[img[Subscription Bound Test|images/subboundtest.jpg]]
The message size and sending rate are varied for each test execution run and throughput data gathered and analyzed.  Throughput data consists of counting the number of samples and bytes successfully transferred from the publication(s) to the subscription(s).  Typically the processes of this test will be executed on different hosts to include network effects in the measurements.  While executing them on the same host will exclude network effects from the results, it is possible to encounter processor effects depending on the capacity of the host on which the test is executing.  These tests have two sets of configuration specifications available: one for reliable transport testing and one for best effort transport testing.  The transport configuration for a test must match the configuration specification.
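For example, 1,000 byte samples published at 10,000 samples per second correspond to a nominal rate of 1,000 × 10,000 × 8 = 80 Mbps in each direction of a bidirectional test, or 160 Mbps total as listed for test case 1 below.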

There is a separate configuration file supplied for each test condition to be executed.  Users are encouraged to extend the testing to include cases specific to their environment as well, using the supplied configuration files as a guide.  The supplied test files, and the message sizes and rates they specify, include the following:
{{centeredTable{
| ''Throughput Test Conditions'' |c
| //Test Case// | //Message Size//<br>(bytes) | // Message Rate//<br>(samples/second) | //Nominal Throughput//<br>(Mbps) | //Configuration File//<br>(in //$~BENCH_ROOT/tests/thru//) |
|>|>|>|>| ''"""---""" Bidirectional Throughput Tests """---"""'' |
|>|>|>|>| //"""---""" Maximum Throughput Rate """---"""// |
| 1| 1,000| 10,000|  160|bidir-1sub-be-80.ini<br>bidir-1sub-rel-80.ini |
| 2| 2,000| 20,000|  640|bidir-1sub-be-320.ini<br>bidir-1sub-rel-320.ini |
| 3| 3,000| 30,000|  1440|bidir-1sub-be-720.ini<br>bidir-1sub-rel-720.ini |
| 4| 4,000| 40,000|  2560|bidir-1sub-be-1280.ini<br>bidir-1sub-rel-1280.ini |
| 5| 5,000| 50,000|  4000|bidir-1sub-be-2000.ini<br>bidir-1sub-rel-2000.ini |
|>|>|>|>| //"""---""" Fixed Rate / Variable Size """---"""// |
| //1//| //1,000//| //10,000//|  //160//|//bidir-1sub-be-80.ini<br>bidir-1sub-rel-80.ini// |
| 6| 1,000| 20,000|  160|bidir-1sub-be-160r.ini<br>bidir-1sub-rel-160r.ini |
| 7| 1,000| 30,000|  240|bidir-1sub-be-240r.ini<br>bidir-1sub-rel-240r.ini |
| 8| 1,000| 40,000|  320|bidir-1sub-be-320r.ini<br>bidir-1sub-rel-320r.ini |
| 9| 1,000| 50,000|  400|bidir-1sub-be-400r.ini<br>bidir-1sub-rel-400r.ini |
|>|>|>|>| //"""---""" Variable Rate / Fixed Size """---"""// |
| //1//| //1,000//| //10,000//|  //160//|//bidir-1sub-be-80.ini<br>bidir-1sub-rel-80.ini// |
| 10| 2,000| 10,000|  160|bidir-1sub-be-160s.ini<br>bidir-1sub-rel-160s.ini |
| 11| 3,000| 10,000|  240|bidir-1sub-be-240s.ini<br>bidir-1sub-rel-240s.ini |
| 12| 4,000| 10,000|  320|bidir-1sub-be-320s.ini<br>bidir-1sub-rel-320s.ini |
| 13| 5,000| 10,000|  400|bidir-1sub-be-400s.ini<br>bidir-1sub-rel-400s.ini |
|>|>|>|>| ''"""---""" Publication Bound Tests """---"""'' |
|>|>|>|>| //"""---""" Maximum Throughput Rate """---"""// |
| 1| 1,000| 10,000|  160|pub-2sub-be-80.ini<br>pub-2sub-rel-80.ini |
| 2| 2,000| 20,000|  640|pub-2sub-be-320.ini<br>pub-2sub-rel-320.ini |
| 3| 3,000| 30,000|  1440|pub-2sub-be-720.ini<br>pub-2sub-rel-720.ini |
| 4| 4,000| 40,000|  2560|pub-2sub-be-1280.ini<br>pub-2sub-rel-1280.ini |
| 5| 5,000| 50,000|  4000|pub-2sub-be-2000.ini<br>pub-2sub-rel-2000.ini |
|>|>|>|>| //"""---""" Fixed Rate / Variable Size """---"""// |
| //1//| //1,000//| //10,000//|  //160//|//pub-2sub-be-80.ini<br>pub-2sub-rel-80.ini// |
| 6| 1,000| 20,000|  160|pub-2sub-be-160r.ini<br>pub-2sub-rel-80r.ini |
| 7| 1,000| 30,000|  240|pub-2sub-be-240r.ini<br>pub-2sub-rel-240r.ini |
| 8| 1,000| 40,000|  320|pub-2sub-be-320r.ini<br>pub-2sub-rel-320r.ini |
| 9| 1,000| 50,000|  400|pub-2sub-be-400r.ini<br>pub-2sub-rel-400r.ini |
|>|>|>|>| //"""---""" Variable Rate / Fixed Size """---"""// |
| //1//| //1,000//| //10,000//|  //160//|//pub-2sub-be-80.ini<br>pub-2sub-rel-80.ini// |
| 10| 2,000| 10,000|  160|pub-2sub-be-160.ini<br>pub-2sub-rel-160.ini |
| 11| 3,000| 10,000|  240|pub-2sub-be-240s.ini<br>pub-2sub-rel-240s.ini |
| 12| 4,000| 10,000|  320|pub-2sub-be-320s.ini<br>pub-2sub-rel-320s.ini |
| 13| 5,000| 10,000|  400|pub-2sub-be-400s.ini<br>pub-2sub-rel-400s.ini |
|>|>|>|>| ''"""---""" Subscription Bound Tests """---"""'' |
|>|>|>|>| //"""---""" Maximum Throughput Rate """---"""// |
| 14| 500| 10,000|  800|pub-1sub-be-80.ini<br>pub-1sub-rel-80.ini |
| 15| 1,000| 20,000|  160|pub-1sub-be-320.ini<br>pub-1sub-rel-320.ini |
| 16| 1,500| 30,000|  240|pub-1sub-be-720.ini<br>pub-1sub-rel-720.ini |
| 17| 2,000| 40,000|  320|pub-1sub-be-1280.ini<br>pub-1sub-rel-1280.ini |
| 18| 2,500| 50,000|  400|pub-1sub-be-2000.ini<br>pub-1sub-rel-2000.ini |
|>|>|>|>| //"""---""" Fixed Rate / Variable Size """---"""// |
| 19| 1,000| 5,000|  80|pub-1sub-be-80r.ini<br>pub-1sub-rel-80r.ini |
| 20| 1,000| 10,000|  160|pub-1sub-be-240r.ini<br>pub-1sub-rel-240r.ini |
| 21| 1,000| 15,000|  240|pub-1sub-be-320r.ini<br>pub-1sub-rel-320r.ini |
| //15//| //1,000//| //20,000//|  //160//|//pub-1sub-be-320.ini<br>pub-1sub-rel-320.ini// |
| 22| 1,000| 25,000|  400|pub-1sub-be-400r.ini<br>pub-1sub-rel-400r.ini |
|>|>|>|>| //"""---""" Variable Rate / Fixed Size """---"""// |
| //14//| //500//| //10,000//|  //800//|//pub-1sub-be-80.ini<br>pub-1sub-rel-80.ini// |
| //20//| //1,000//| //10,000//|  //160//|//pub-1sub-be-240r.ini<br>pub-1sub-rel-240r.ini// |
| 23| 1,500| 10,000|  240|pub-1sub-be-160.ini<br>pub-1sub-rel-160.ini |
| 24| 2,000| 10,000|  320|pub-1sub-be-240s.ini<br>pub-1sub-rel-240s.ini |
| 25| 2,500| 10,000|  400|pub-1sub-be-320s.ini<br>pub-1sub-rel-320s.ini |
}}}
{{itemBlock{
Commands that can be used to structure the test execution for these tests are included in the file //$~BENCH_ROOT/tests/thru/test-commands.txt//.  This file contains a set of commands for each test configuration described above.  They can be used to structure tests for your specific environment.
!Executing bidirectional tests in a Linux environment can be done as follows:
*Start the repository
<<<
repohost> $~BENCH_ROOT/bin/run_test -S -h iiop:"""//"""<repohost>:2809
<<<
*Start the forwarding (loopback) process
<<<
host2> cd <testdir>
host2> $~BENCH_ROOT/bin/run_test -P -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/bidir-remote-<rel>.ini
<<<
*Start the originating and receiving process
<<<
host1> cd <testdir>
host1> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/bidir-1sub-<rel>-<rate>.ini
<<<
!Executing publication bound tests in a Linux environment can be done as follows:
*Start the repository
<<<
repohost> $~BENCH_ROOT/bin/run_test -S -h iiop:"""//"""<repohost>:2809
<<<
*Start the receiving processes
<<<
host2> cd <testdir>
host2> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/sub-<rel>.ini
<<<
<<<
host3> cd <testdir>
host3> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/sub-<rel>.ini
<<<
*Start the publishing process
<<<
host1> cd <testdir>
host1> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/pub-2sub-<rel>-<rate>.ini
<<<
!Executing subscription bound tests in a Linux environment can be done as follows:
*Start the repository
<<<
repohost> $~BENCH_ROOT/bin/run_test -S -h iiop:"""//"""<repohost>:2809
<<<
*Start the publishing processes
<<<
host1> cd <testdir>
host1> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/pub-1sub-<rel>-<rate>.ini
<<<
<<<
host2> cd <testdir>
host2> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/pub-1sub-<rel>-<rate>.ini
<<<
*Start the receiving process
<<<
host3> cd <testdir>
host3> $~BENCH_ROOT/bin/run_test -P -v -t 120 -h <repohost>:2809 -i $~BENCH_ROOT/etc/<transportconfigfile> -s $~BENCH_ROOT/tests/thru/sub-<rel>.ini
<<<
}}}
{{itemBlock{
A script that can be used to execute these tests is available as:
{{indent{
;[[$BENCH_ROOT/tests/thru/run_test.pl|ThroughputRunTestManPage]]
:This script will execute one side (or host) of a throughput test for all or a specific test case and transport using the available test configuration files.
}}}
}}}
<html>
<head>
<title>run_test.pl - run one side of a throughput cross host test</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<link rev="made" href="mailto:root@localhost" />
</head>

<body style="background-color: white">


<!-- INDEX BEGIN -->
<div name="index">
<p><a name="__index__"></a></p>

<ul>

	<li><a href="#name">NAME</a></li>
	<li><a href="#synopsis">SYNOPSIS</a></li>
	<li><a href="#description">DESCRIPTION</a></li>
	<li><a href="#example">EXAMPLE</a></li>
</ul>

<hr name="index" />
</div>
<!-- INDEX END -->

<p>
</p>
<h1><a name="name">NAME</a></h1>
<p>run_test.pl - run one side of a throughput cross host test</p>
<p>
</p>
<hr />
<h1><a name="synopsis">SYNOPSIS</a></h1>
<pre>
  run_test.pl &lt;transport&gt;</pre>
<p>
</p>
<hr />
<h1><a name="description">DESCRIPTION</a></h1>
<p>This script runs one side of the throughput test for cross host testing.
The script needs to be run on each of the two hosts involved in the test
using the same parameters on each host.</p>
<p>The test consists of two halves, an originating (server) side and a reflecting
(client) side. The hosts involved in the test are stored in the
test_list.txt file in test-host groupings.  Each grouping consists of an
ID, a client host, and a server host.  The script determines the host's
behavior from the test group ID and the local host's name.  The test group
ID is supplied via the environment variable CROSS_GRP.</p>
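<p>For example, given a hypothetical <code>test_list.txt</code> grouping of the
form <code>xtest1 clienthost serverhost</code>, setting CROSS_GRP=xtest1 on each
host lets the script select that host's side of the test from its local
host name.</p>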
<p>The server (originating) side starts the DCPSInfoRepo for the test.</p>
<p>The transport has to be one of the following values:</p>
<dl>
<dt><strong><a name="tcp" class="item">tcp</a></strong></dt>

<dd>
<p>uses the SimpleTCP transport implementation</p>
</dd>
<dt><strong><a name="udp" class="item">udp</a></strong></dt>

<dd>
<p>uses the udp transport implementation</p>
</dd>
<dt><strong><a name="multi_be" class="item">multi-be</a></strong></dt>

<dd>
<p>uses the multicast implementation with reliability disabled (Best Effort)</p>
</dd>
<dt><strong><a name="multi_rel" class="item">multi-rel</a></strong></dt>

<dd>
<p>uses the multicast implementation with reliability enabled</p>
</dd>
</dl>
<p>
</p>
<hr />
<h1><a name="example">EXAMPLE</a></h1>
<pre>
  run the same command on both hosts:</pre>
<pre>
  run_test.pl tcp</pre>
<pre>
  run_test.pl multi-be</pre>

</body>
</html>
~TiddlyWiki is a complete [[wiki|http://en.wikipedia.org/wiki/Wiki]] in a single HTML file. It contains the entire text of the wiki, and all the ~JavaScript, CSS and HTML goodness to be able to display it, and let you edit it or search it. Without needing a server.

You can find out more at the [[TiddlyWiki home|http://www.tiddlywiki.com/]].
!Definable Parameters
{{centeredTable{
| ''Topic Section Keys'' |c
| //Key// | //Value// | //Notes// |
|>|>| """---""" Topic Qos Policy values """---""" |
|~TopicData |<string> |&nbsp; |
|Durability |<string> |Enumeration value of {{{VOLATILE}}}, {{{LOCAL}}}, {{{TRANSIENT}}}, or {{{PERSISTENT}}} |
|~DurabilityServiceDuration |<number> |&nbsp; |
|~DurabilityServiceHistoryKind |<string> |Enumeration value of {{{ALL}}} or {{{LAST}}} |
|~DurabilityServiceHistoryDepth |<number> |&nbsp; |
|~DurabilityServiceSamples |<number> |&nbsp; |
|~DurabilityServiceInstances |<number> |&nbsp; |
|~DurabilityServiceSamplesPerInstance |<number> |&nbsp; |
|Deadline |<number> |&nbsp; |
|~LatencyBudget |<number> |&nbsp; |
|~LivelinessKind |<string> |Enumeration value of {{{AUTOMATIC}}}, {{{PARTICIPANT}}}, or {{{TOPIC}}} |
|~LivelinessDuration |<number> |&nbsp; |
|~ReliabilityKind |<string> |Enumeration value of {{{BEST_EFFORT}}} or {{{RELIABLE}}} |
|~ReliabilityMaxBlocking |<number> |&nbsp; |
|~DestinationOrder |<string> |Enumeration value of {{{SOURCE}}} or {{{RECEPTION}}} |
|~HistoryKind |<string> |Enumeration value of {{{ALL}}} or {{{LAST}}} |
|~HistoryDepth |<number> |&nbsp; |
|~ResourceMaxSamples |<number> |&nbsp; |
|~ResourceMaxInstances |<number> |&nbsp; |
|~ResourceMaxSamplesPerInstance |<number> |&nbsp; |
|~TransportPriority |<number> |&nbsp; |
|~LifespanDuration |<number> |&nbsp; |
|~OwnershipKind |<string> |Enumeration value of {{{SHARED}}} or {{{EXCLUSIVE}}} |
|>|>| """---""" Test Execution Parameters """---""" |
|Participant |<string> |Must reference by name the identifier of a [participant] subsection within the file. |
}}}
!Test Execution Parameters
[topic] sections are only linked to the containing DDS Participant.  There are no other test execution effects for a topic, other than the Quality of Service policy settings.
!Example
An example topic section like the following would result in a DDS Topic named //status// being created in the process with reliability enabled and contained within the //player// participant.
{{{
[topic/status]
Participant = player
ReliabilityKind = RELIABLE
}}}