diff --git a/doc/How to tune.txt b/doc/How to tune.txt
index 34d0d0b6..ff247917 100644
--- a/doc/How to tune.txt
+++ b/doc/How to tune.txt
@@ -2,7 +2,7 @@ Go to lib/opentuner/ folder and run venv-bootstrap.py. This will setup a virtual
Run the edu.mit.streamjit.tuner.ConfigGenerator.java to generate configuration information for a particular app. Tuner.java will update the apps table in the streamjit.db with name, configuration, location and class name of the streamjit app.
-Build the edu.mit.streamjit.tuner.RunApp.java and export it as runnable jar in to stramjit folder. If you need to change the input size, you can change it inside the function runApp() in the RunApp class.
+Build the edu.mit.streamjit.tuner.RunApp.java and export it as a runnable jar into the streamjit folder.
Run lib/opentuner/streamjit/tuner2.py.
diff --git a/jarapp.properties b/jarapp.properties
new file mode 100644
index 00000000..59f0ecf8
--- /dev/null
+++ b/jarapp.properties
@@ -0,0 +1,8 @@
+app=FMRadio
+streamGraphName=FMRadioCore
+#app=NestedSplitJoin
+#streamGraphName=NestedSplitJoinCore
+username=sumanan
+##password=abc123
+mainclasspath=edu.mit.streamjit.test.DistAppRunner
+numOfSNs=2
diff --git a/jarapp.xml b/jarapp.xml
new file mode 100644
index 00000000..c87cff99
--- /dev/null
+++ b/jarapp.xml
@@ -0,0 +1,153 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ #!/bin/bash
+#Author - Sumanan
+app=${app}
+mainClass=${streamGraphName}
+if [ -d $app ]; then
+ echo "$app exists. No downloads..."
+ exit
+fi
+mkdir -p $app
+cd $app
+mkdir -p $mainClass
+scp -r ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app/$mainClass/\{summary,*.txt,*.orig,streamgraph.dot\} $mainClass/
+scp -r ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app/\{*.sh,slurm-*,options.properties\} .
+#to download everything.
+#rsync -avh --progress ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app .
+
+
+
diff --git a/lib/opentuner/streamjit/onlinetuner.py b/lib/opentuner/streamjit/onlinetuner.py
index d68c14f0..115c971d 100644
--- a/lib/opentuner/streamjit/onlinetuner.py
+++ b/lib/opentuner/streamjit/onlinetuner.py
@@ -17,21 +17,26 @@
class StreamJitMI(MeasurementInterface):
''' Measurement Interface for tunning a StreamJit application'''
- def __init__(self, args, ss, manipulator, inputmanager, objective):
+ def __init__(self, args, configuration, connection, manipulator, inputmanager, objective):
super(StreamJitMI, self).__init__(args = args, program_name = args.program, manipulator = manipulator, input_manager = inputmanager, objective = objective)
- self.sdk = ss
+ self.connection = connection
self.trycount = 0
+ self.config = configuration
def run(self, desired_result, input, limit):
self.trycount = self.trycount + 1
print self.trycount
- cfg = desired_result.configuration.data
- #self.niceprint(cfg)
- self.sdk.sendmsg("%s\n"%cfg)
- msg = self.sdk.recvmsg()
+
+ cfg_data = desired_result.configuration.data
+ #self.niceprint(cfg_data)
+ for k in self.config.params:
+ self.config.getParameter(k).update_value_for_json(cfg_data)
+ self.connection.sendmsg(self.config.toJSON())
+
+ msg = self.connection.recvmsg()
if (msg == "exit\n"):
#data = raw_input ( "exit cmd received. Press Keyboard to exit..." )
- self.sdk.close()
+ self.connection.close()
sys.exit(1)
exetime = float(msg)
if exetime < 0:
@@ -55,31 +60,32 @@ def program_version(self):
def save_final_config(self, configuration):
'''called at the end of autotuning with the best resultsdb.models.Configuration'''
- cfg = configuration.data
- print "Final configuration", cfg
- self.sdk.sendmsg("Completed")
- self.sdk.sendmsg("%s\n"%cfg)
- self.sdk.close()
+ cfg_data = configuration.data
+ print "Final configuration", cfg_data
+ for k in self.config.params:
+ self.config.getParameter(k).update_value_for_json(cfg_data)
+
+ self.connection.sendmsg("Completed")
+ self.connection.sendmsg(self.config.toJSON())
+ self.connection.close()
sys.exit(0)
-def main(args, cfg, ss):
+def main(args, cfg, connection):
logging.basicConfig(level=logging.INFO)
manipulator = ConfigurationManipulator()
- params = cfg.getAllParameters()
#print "\nFeature variables...."
- for key in params.keys():
- #print "\t", key
- manipulator.add_parameter(cfg.getParameter(key))
+ for p in cfg.getAllParameters().values():
+ manipulator.add_parameter(p)
- mi = StreamJitMI(args, ss, manipulator, FixedInputManager(),
+ mi = StreamJitMI(args, cfg, connection, manipulator, FixedInputManager(),
MinimizeTime())
m = TuningRunMain(mi, args)
m.main()
-def start(argv, cfg, ss):
+def start(argv, cfg, connection):
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
@@ -90,4 +96,4 @@ def start(argv, cfg, ss):
if not args.database:
args.database = 'sqlite:///' + args.program + '.db'
- main(args, cfg, ss)
+ main(args, cfg, connection)
diff --git a/lib/opentuner/streamjit/sjparameters.py b/lib/opentuner/streamjit/sjparameters.py
index 5d7471f2..3f2862cc 100644
--- a/lib/opentuner/streamjit/sjparameters.py
+++ b/lib/opentuner/streamjit/sjparameters.py
@@ -1,7 +1,7 @@
import deps #fix sys.path
import random
import opentuner
-from opentuner.search.manipulator import IntegerParameter, FloatParameter, SwitchParameter, PermutationParameter, ArrayParameter
+from opentuner.search.manipulator import IntegerParameter, FloatParameter, SwitchParameter, PermutationParameter, ParameterArray
class sjIntegerParameter(IntegerParameter):
def __init__(self, name, min, max, value, javaClass = None, **kwargs):
@@ -86,7 +86,7 @@ def json_replacement(self):
"universe": self.universe,
"class": self.javaClass}
-class sjCompositionParameter(ArrayParameter):
+class sjCompositionParameter(ParameterArray):
def __init__(self, name, values, javaClass):
super(sjCompositionParameter, self).__init__(name, len(values), FloatParameter, 0.0, 1.0)
self.values = values
diff --git a/lib/opentuner/streamjit/tuner2.py b/lib/opentuner/streamjit/tuner2.py
index a7149ca7..c90460b7 100644
--- a/lib/opentuner/streamjit/tuner2.py
+++ b/lib/opentuner/streamjit/tuner2.py
@@ -23,13 +23,14 @@
class StreamJitMI(MeasurementInterface):
''' Measurement Interface for tunning a StreamJit application'''
- def __init__(self, args, jvmOptions, manipulator, inputmanager, objective):
+ def __init__(self, args, configuration, jvmOptions, manipulator, inputmanager, objective):
args.technique = ['StreamJITBandit']
super(StreamJitMI, self).__init__(args = args, program_name = args.program, manipulator = manipulator, input_manager = inputmanager, objective = objective)
self.trycount = 0
self.jvmOptions = jvmOptions
self.program = args.program
self.StreamNodes = []
+ self.config = configuration
try:
self.tunedataDB = sqlite3.connect('sj' + args.program + '.db')
c = self.tunedataDB.cursor()
@@ -43,42 +44,41 @@ def __init__(self, args, jvmOptions, manipulator, inputmanager, objective):
data = raw_input ( "Press Keyboard to exit..." )
def run(self, desired_result, input, limit):
- cfg = dict.copy(desired_result.configuration.data)
- (st, t) = self.runApp(cfg)
+ cfg_data = dict.copy(desired_result.configuration.data)
+ (st, t) = self.runApp(cfg_data)
return opentuner.resultsdb.models.Result(state=st, time=t)
- def runApp(self, cfg):
+ def runApp(self, cfg_data):
self.trycount = self.trycount + 1
print '\n**********New Run - %d **********'%self.trycount
- #self.niceprint(cfg)
+ #self.niceprint(cfg_data)
+
+ for k in self.config.params:
+ self.config.getParameter(k).update_value_for_json(cfg_data)
#TODO: find a better place for these system-specific constants
#the path to the Java executable, or "java" to use system's default
javaPath = "java"
- #the classpath, suitable as the value of the '-cp' java argument
- javaClassPath = "build/jar/streamjit.jar:lib/asm.jar:lib/bridj.jar:lib/bytecodelib.jar:lib/guava.jar:lib/javax.json.jar:lib/joptsimple.jar:lib/sqlitejdbc.jar"
- args = [javaPath, "-cp", javaClassPath]
jvmArgs = []
for key in self.jvmOptions.keys():
- self.jvmOptions.get(key).setValue(cfg[key])
+ self.jvmOptions.get(key).setValue(cfg_data[key])
cmd = self.jvmOptions.get(key).getCommand()
if len(cmd) > 0:
jvmArgs.append(cmd)
- args.extend(jvmArgs)
- args.append("edu.mit.streamjit.tuner.RunApp")
+
+ args = self.getArgs1(javaPath, jvmArgs)
args.append(str(self.program))
args.append(str(self.trycount))
cur = self.tunedataDB.cursor()
- query = 'INSERT INTO results VALUES (%d,"%s","%s", "%f")'%(self.trycount, " ".join(jvmArgs), cfg, -1)
+ query = "INSERT INTO results VALUES (%d,'%s','%s', '%f')"%(self.trycount, " ".join(jvmArgs), self.config.toJSON(), -1)
cur.execute(query)
self.tunedataDB.commit()
-
p = subprocess.Popen(args, stderr=subprocess.PIPE)
- if cfg.get('noOfMachines'):
- self.startStreamNodes(cfg.get('noOfMachines') - 1, args)
+ if cfg_data.get('noOfMachines'):
+ self.startStreamNodes(cfg_data.get('noOfMachines') - 1, args)
timeout = 100
@@ -102,8 +102,8 @@ def runApp(self, cfg):
print "\033[31;1mException Found\033[0m"
self.waitForStreamNodes(True)
cur = self.tunedataDB.cursor()
- str1 = str(commandStr)
- str2 = str(cfg)
+ str1 = str(jvmArgs)
+ str2 = self.config.toJSON()
cur.execute('INSERT INTO exceptions VALUES (?,?,?)', (err, str1, str2))
self.tunedataDB.commit()
return ('ERROR', float('inf'))
@@ -122,11 +122,27 @@ def runApp(self, cfg):
self.waitForStreamNodes(False)
return ('OK',exetime)
- def niceprint(self, cfg):
+	# Return args used to run the runnable jar file.
+ def getArgs1(self, javaPath, jvmArgs):
+ args = [javaPath]
+ args.extend(jvmArgs)
+ args.append("-jar")
+ args.append("RunApp.jar")
+ return args
+
+	# Return args used to run from the class file.
+ def getArgs2(self, javaPath, jvmArgs):
+ #the classpath, suitable as the value of the '-cp' java argument
+ javaClassPath = "build/jar/streamjit.jar:lib/asm.jar:lib/bridj.jar:lib/bytecodelib.jar:lib/guava.jar:lib/javax.json.jar:lib/joptsimple.jar:lib/sqlitejdbc.jar"
+ args = [javaPath, "-cp", javaClassPath]
+ args.append("edu.mit.streamjit.tuner.RunApp")
+ return args
+
+ def niceprint(self, cfg_data):
print "\n--------------------------------------------------"
print self.trycount
- for key in cfg.keys():
- print "%s - %s"%(key, cfg[key])
+ for key in cfg_data.keys():
+ print "%s - %s"%(key, cfg_data[key])
def program_name(self):
return self.args.program
@@ -136,12 +152,13 @@ def program_version(self):
def save_final_config(self, configuration):
'''called at the end of autotuning with the best resultsdb.models.Configuration'''
- cfg = dict.copy(configuration.data)
+ cfg_data = dict.copy(configuration.data)
+
print "\033[32;1mFinal Config...\033[0m"
- (state, time) = self.runApp(cfg)
+ (state, time) = self.runApp(cfg_data)
conn = sqlite3.connect('streamjit.db', 100)
cur = conn.cursor()
- query = 'INSERT INTO FinalResult VALUES ("%s","%s", %d, "%s", "%f")'%(self.program, cfg, self.trycount, state, float(time))
+ query = "INSERT INTO FinalResult VALUES ('%s','%s', %d, '%s', '%f')"%(self.program, self.config.toJSON(), self.trycount, state, float(time))
cur.execute(query)
conn.commit()
@@ -166,13 +183,15 @@ def main(args, cfg, jvmOptions):
logging.basicConfig(level=logging.INFO)
manipulator = ConfigurationManipulator()
- params = dict(cfg.items() + jvmOptions.items())
+ cfgparams = cfg.getAllParameters()
+
+ params = dict(cfgparams.items() + jvmOptions.items())
#print "\nFeature variables...."
for key in params.keys():
#print "\t", key
manipulator.add_parameter(params.get(key))
- mi = StreamJitMI(args,jvmOptions, manipulator, FixedInputManager(),
+ mi = StreamJitMI(args, cfg, jvmOptions, manipulator, FixedInputManager(),
MinimizeTime())
m = TuningRunMain(mi, args)
@@ -200,7 +219,6 @@ def start(program):
sys.exit(1)
cfgString = row[0]
cfg = configuration.getConfiguration(cfgString)
- cfgparams = cfg.getAllParameters()
except Exception, e:
print 'Exception occured'
traceback.print_exc()
@@ -244,7 +262,7 @@ def start(program):
enabledJvmOptions = [aggressiveOpts, compileThreshold, freqInlineSize, maxInlineSize, maxInlineLevel]
jvmOptions = {x.name:x for x in enabledJvmOptions}
- main(args, cfgparams, jvmOptions)
+ main(args, cfg, jvmOptions)
if __name__ == '__main__':
prgrms = []
diff --git a/lib/opentuner/streamjit/tuner3.py b/lib/opentuner/streamjit/tuner3.py
index aa19f643..f23ee08a 100755
--- a/lib/opentuner/streamjit/tuner3.py
+++ b/lib/opentuner/streamjit/tuner3.py
@@ -183,7 +183,7 @@ def make_jvm_options():
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(parents=opentuner.argparsers())
- parser.add_argument('--program', help='StreamJIT benchmark to tune (with first input)')
+ parser.add_argument('program', help='StreamJIT benchmark to tune (with first input)')
parser.add_argument('--timestamp', help='timestamp to use for final config/errors',
default=time.strftime('%Y%m%d-%H%M%S'))
args = parser.parse_args()
diff --git a/scripts/FMRadio.sh b/scripts/FMRadio.sh
new file mode 100644
index 00000000..988d2c77
--- /dev/null
+++ b/scripts/FMRadio.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+#Author - Sumanan
+# A sample file to download tuning files from Lanka cluster.
+app=FMRadio15
+mainClass=FMRadioCore
+if [ -d $app ]; then
+ echo "$app exists. No downloads..."
+ exit
+fi
+mkdir -p $app
+cd $app
+mkdir -p $mainClass
+scp -r sumanan@lanka.csail.mit.edu:/data/scratch/sumanan/$app/$mainClass/\{summary,*.txt,streamgraph.dot\} $mainClass/
+scp -r sumanan@lanka.csail.mit.edu:/data/scratch/sumanan/$app/\{*.sh,slurm-*,options.properties,*.jar\} .
+
diff --git a/scripts/backup.sh b/scripts/backup.sh
new file mode 100644
index 00000000..98ad53d8
--- /dev/null
+++ b/scripts/backup.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#Author - Sumanan
+#Feb 25, 2015
+#Backups tuning output files and directories.
+args=("$@")
+suffix=${args[0]}
+if [ -z $suffix ]
+then
+suffix="Orig"
+fi
+
+if [ -d summary$suffix ]; then
+ echo "summary$suffix exists. No backups. Exiting..."
+ exit
+fi
+
+mv summary summary$suffix
+mv compileTime.txt compileTime$suffix.txt
+mv runTime.txt runTime$suffix.txt
+mv drainTime.txt drainTime$suffix.txt
+mv GraphProperty.txt GraphProperty$suffix.txt
+mv README.txt README$suffix.txt
+
diff --git a/scripts/setup.sh b/scripts/setup.sh
new file mode 100644
index 00000000..c25b97f6
--- /dev/null
+++ b/scripts/setup.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+#Author - Sumanan
+#Feb 9, 2015
+#Setup directories and scripts to run a distributed StreamJit app.
+function writeRun(){
+ runfile="run.sh"
+ res=$(get_prop "./options.properties" "tune")
+ echo "#!/bin/bash" > $runfile
+ echo "#SBATCH --tasks-per-node=1" >> $runfile
+ echo "#SBATCH -N 1" >> $runfile
+ echo "#SBATCH --cpu_bind=verbose,cores" >> $runfile
+ echo "#SBATCH --exclusive" >> $runfile
+ echo "cd /data/scratch/sumanan/"$1 >> $runfile
+ if [ "$res" -eq "1" ];then
+ echo "mkdir -p $2" >> $runfile
+ echo "cd $2" >> $runfile
+ echo "srun python ../lib/opentuner/streamjit/streamjit2.py 12563 &" >> $runfile
+ echo "cd .." >> $runfile
+ fi
+ echo "srun -l ../bin/java/jdk1.8.0_31/bin/java -Xmx2048m -jar $1.jar $3" >> $runfile
+}
+
+function writeSN(){
+ runfile="streamnode.sh"
+ echo "#!/bin/bash" > $runfile
+ echo "#SBATCH --tasks-per-node=1" >> $runfile
+ echo "#SBATCH -N $2" >> $runfile
+ echo "#SBATCH --cpu_bind=verbose,cores" >> $runfile
+ echo "#SBATCH --exclusive" >> $runfile
+ echo "cd /data/scratch/sumanan/"$1 >> $runfile
+ echo "srun --exclusive --nodes=$2 ../bin/java/jdk1.8.0_31/bin/java -Xmx2048m -jar StreamNode.jar 128.30.116." >> $runfile
+}
+
+function creatdirs(){
+ mkdir -p $1
+ ln -s /data/scratch/sumanan/data $1/data
+ ln -s /data/scratch/sumanan/lib $1/lib
+ cd $1
+}
+
+get_prop(){
+ grep "^${2}=" ${1}| sed "s%${2}=\(.*\)%\1%"
+}
+
+if [ "$#" -ne 3 ]; then
+ echo "Illegal number of parameters"
+ echo "3 arguments must be passed"
+	echo "Usage: setup.sh <app> <mainClass> <nodes>"
+ exit
+fi
+
+args=("$@")
+app=${args[0]}
+mainClass=${args[1]}
+nodes=${args[2]}
+totalNodes=$((nodes + 1))
+cd /data/scratch/sumanan
+creatdirs $app #Changes the current working directory(CWD).
+mv "optionsLanka.properties" "options.properties"
+writeRun $app $mainClass $totalNodes
+writeSN $app $nodes
diff --git a/src/edu/mit/streamjit/impl/blob/DrainData.java b/src/edu/mit/streamjit/impl/blob/DrainData.java
index 01b0a1f7..728a9402 100644
--- a/src/edu/mit/streamjit/impl/blob/DrainData.java
+++ b/src/edu/mit/streamjit/impl/blob/DrainData.java
@@ -86,6 +86,8 @@ public Object getWorkerState(int workerId, String fieldName) {
* @return a merged DrainData
*/
public DrainData merge(DrainData other) {
+ if (other == null)
+ return this;
+		ImmutableMap.Builder<Token, ImmutableList<Object>> dataBuilder = ImmutableMap.builder();
for (Token t : Sets.union(data.keySet(), other.data.keySet())) {
ImmutableList
All BlobNodes in the graph
- * can be retrieved and used in coupled with {@link AbstractDrainer} to
- * successfully perform draining process.
- *
- * @author Sumanan sumanan@mit.edu
- * @since Jul 30, 2013
- */
- public static class BlobGraph {
-
- /**
- * All nodes in the graph.
- */
- private final ImmutableMap blobNodes;
-
- /**
- * The blob which has the overall stream input.
- */
- private final BlobNode sourceBlobNode;
-
- public BlobGraph(List>> partitionWorkers) {
- checkNotNull(partitionWorkers);
- Set blobSet = new HashSet<>();
- for (Set> workers : partitionWorkers) {
- blobSet.add(new DummyBlob(workers));
- }
-
- ImmutableMap.Builder builder = new ImmutableMap.Builder<>();
- for (DummyBlob b : blobSet) {
- builder.put(b.id, new BlobNode(b.id));
- }
-
- this.blobNodes = builder.build();
-
- for (DummyBlob cur : blobSet) {
- for (DummyBlob other : blobSet) {
- if (cur == other)
- continue;
- if (Sets.intersection(cur.outputs, other.inputs).size() != 0) {
- BlobNode curNode = blobNodes.get(cur.id);
- BlobNode otherNode = blobNodes.get(other.id);
-
- curNode.addSuccessor(otherNode);
- otherNode.addPredecessor(curNode);
- }
- }
- }
-
- checkCycles(blobNodes.values());
-
- BlobNode sourceBlob = null;
- for (BlobNode bn : blobNodes.values()) {
- if (bn.getDependencyCount() == 0) {
- assert sourceBlob == null : "Multiple independent blobs found.";
- sourceBlob = bn;
- }
- }
-
- checkNotNull(sourceBlob);
- this.sourceBlobNode = sourceBlob;
- }
-
- /**
- * @return BlobIds of all blobnodes in the blobgraph.
- */
- public ImmutableSet getBlobIds() {
- return blobNodes.keySet();
- }
-
- public BlobNode getBlobNode(Token blobID) {
- return blobNodes.get(blobID);
- }
-
- /**
- * A Drainer can be set to the {@link BlobGraph} to perform draining.
- *
- * @param drainer
- */
- public void setDrainer(AbstractDrainer drainer) {
- for (BlobNode bn : blobNodes.values()) {
- bn.setDrainer(drainer);
- }
- }
-
- public void clearDrainData() {
- for (BlobNode node : blobNodes.values()) {
- node.drainData = null;
- }
- }
-
- /**
- * @return the sourceBlobNode
- */
- private BlobNode getSourceBlobNode() {
- return sourceBlobNode;
- }
-
- /**
- * Does a depth first traversal to detect cycles in the graph.
- *
- * @param blobNodes
- */
- private void checkCycles(Collection blobNodes) {
- Map colorMap = new HashMap<>();
- for (BlobNode b : blobNodes) {
- colorMap.put(b, Color.WHITE);
- }
- for (BlobNode b : blobNodes) {
- if (colorMap.get(b) == Color.WHITE)
- if (DFS(b, colorMap))
- throw new StreamCompilationFailedException(
- "Cycles found among blobs");
- }
- }
-
- /**
- * A cycle exits in a directed graph if a back edge is detected during a
- * DFS traversal. A back edge exists in a directed graph if the
- * currently explored vertex has an adjacent vertex that was already
- * colored gray
- *
- * @param vertex
- * @param colorMap
- * @return true if cycle found, false
- * otherwise.
- */
- private boolean DFS(BlobNode vertex, Map colorMap) {
- colorMap.put(vertex, Color.GRAY);
- for (BlobNode adj : vertex.getSuccessors()) {
- if (colorMap.get(adj) == Color.GRAY)
- return true;
- if (colorMap.get(adj) == Color.WHITE)
- if (DFS(adj, colorMap))
- return true;
- }
- colorMap.put(vertex, Color.BLACK);
- return false;
- }
-
- /**
- * Just used to build the input and output tokens of a partitioned blob
- * workers. imitate a {@link Blob}.
- */
- private final class DummyBlob {
- private final ImmutableSet inputs;
- private final ImmutableSet outputs;
- private final Token id;
-
- private DummyBlob(Set> workers) {
- ImmutableSet.Builder inputBuilder = new ImmutableSet.Builder<>();
- ImmutableSet.Builder outputBuilder = new ImmutableSet.Builder<>();
- for (IOInfo info : IOInfo.externalEdges(workers)) {
- (info.isInput() ? inputBuilder : outputBuilder).add(info
- .token());
- }
-
- inputs = inputBuilder.build();
- outputs = outputBuilder.build();
- id = Collections.min(inputs);
- }
- }
- }
-
- /**
- * BlobNode represents the vertex in the blob graph ({@link BlobGraph}). It
- * represents a {@link Blob} and carry the draining process of that blob.
- *
- * @author Sumanan
- */
- private static final class BlobNode {
-
- /**
- * Intermediate drain data.
- */
- private DrainedData drainData;
-
- private AbstractDrainer drainer;
- /**
- * The blob that wrapped by this blob node.
- */
- private final Token blobID;
- /**
- * Predecessor blob nodes of this blob node.
- */
- private List predecessors;
- /**
- * Successor blob nodes of this blob node.
- */
- private List successors;
- /**
- * The number of undrained predecessors of this blobs. Everytime, when a
- * predecessor finished draining, dependencyCount will be decremented
- * and once it reached to 0 this blob will be called for draining.
- */
- private AtomicInteger dependencyCount;
-
- // TODO: add comments
- private AtomicInteger drainState;
-
- private BlobNode(Token blob) {
- this.blobID = blob;
- predecessors = new ArrayList<>();
- successors = new ArrayList<>();
- dependencyCount = new AtomicInteger(0);
- drainState = new AtomicInteger(0);
- }
-
- /**
- * Should be called when the draining of the current blob has been
- * finished. This function stops all threads belong to the blob and
- * inform its successors as well.
- */
- private void drained() {
- if (drainState.compareAndSet(1, 3)) {
- for (BlobNode suc : this.successors) {
- suc.predecessorDrained(this);
- }
- drainer.drainingDone(this);
- } else if (drainState.compareAndSet(2, 3)) {
- drainer.drainingDone(this);
- }
- }
-
- /**
- * Drain the blob mapped by this blob node.
- */
- private void drain() {
- checkNotNull(drainer);
- if (!drainState.compareAndSet(0, 1)) {
- throw new IllegalStateException(
- "Drain of this blobNode has already been called");
- }
- drainer.drain(blobID, drainer.state == DrainerState.FINAL);
-
- // TODO: Verify the waiting time is reasonable.
- if (GlobalConstants.needDrainDeadlockHandler)
- drainer.schExecutorService.schedule(deadLockHandler(), 6000,
- TimeUnit.MILLISECONDS);
- }
-
- private void setDrainData(DrainedData drainedData) {
- if (this.drainData == null) {
- this.drainData = drainedData;
- drainState.set(4);
- } else
- throw new AssertionError(
- "Multiple drain data has been received.");
- }
-
- private ImmutableList getSuccessors() {
- return ImmutableList.copyOf(successors);
- }
-
- private void addPredecessor(BlobNode pred) {
- assert !predecessors.contains(pred) : String.format(
- "The BlobNode %s has already been set as a predecessors",
- pred);
- predecessors.add(pred);
- dependencyCount.set(dependencyCount.get() + 1);
- }
-
- private void addSuccessor(BlobNode succ) {
- assert !successors.contains(succ) : String
- .format("The BlobNode %s has already been set as a successor",
- succ);
- successors.add(succ);
- }
-
- private void predecessorDrained(BlobNode pred) {
- if (!predecessors.contains(pred))
- throw new IllegalArgumentException("Illegal Predecessor");
-
- assert dependencyCount.get() > 0 : String
- .format("Graph mismatch : My predecessors count is %d. But more than %d of BlobNodes claim me as their successor",
- predecessors.size(), predecessors.size());
-
- if (dependencyCount.decrementAndGet() == 0) {
- drain();
- }
- }
-
- /**
- * @return The number of undrained predecessors.
- */
- private int getDependencyCount() {
- return dependencyCount.get();
- }
-
- private void setDrainer(AbstractDrainer drainer) {
- checkNotNull(drainer);
- this.drainer = drainer;
- }
-
- private Runnable deadLockHandler() {
- Runnable r = new Runnable() {
-
- @Override
- public void run() {
- if (drainState.compareAndSet(1, 2)) {
- for (BlobNode suc : successors) {
- suc.predecessorDrained(BlobNode.this);
- }
- System.out
- .println("deadLockHandler: "
- + blobID
- + " - Deadlock during draining has been handled");
- }
- }
- };
- return r;
- }
- }
-
- /**
- * Color enumerator used by DFS algorithm to find cycles in the blob graph.
- */
- private enum Color {
- WHITE, GRAY, BLACK
- }
-
/**
* Reflects {@link AbstractDrainer}'s state.
*/
- private enum DrainerState {
+ enum DrainerState {
NODRAINING, /**
* Draining in middle of the stream graph's execution. This
* type of draining will be triggered by the open tuner for
diff --git a/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java b/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java
new file mode 100644
index 00000000..7bbb38b7
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java
@@ -0,0 +1,419 @@
+package edu.mit.streamjit.impl.common.drainer;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Sets;
+
+import edu.mit.streamjit.api.StreamCompilationFailedException;
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.common.IOInfo;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer.DrainerState;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
+import edu.mit.streamjit.impl.distributed.common.Options;
+import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData;
+
+/**
+ * [14 Feb, 2015] This class was an inner class of {@link AbstractDrainer}. I
+ * have refactored {@link AbstractDrainer} and moved this class to a new file.
+ *
+ * BlobGraph builds the predecessor-successor relationship for a set of partitioned
+ * workers, and checks for cyclic dependencies among the partitions. Blob
+ * graph doesn't keep blobs. Instead it keeps {@link BlobNode} that represents
+ * blobs. All BlobNodes in the graph can be retrieved and used in
+ * conjunction with {@link AbstractDrainer} to successfully perform the draining process.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Jul 30, 2013
+ */
+public class BlobGraph {
+
+ /**
+ * All nodes in the graph.
+ */
+	final ImmutableMap<Token, BlobNode> blobNodes;
+
+ /**
+ * The blob which has the overall stream input.
+ */
+ private final BlobNode sourceBlobNode;
+
+	public BlobGraph(List<Set<Worker<?, ?>>> partitionWorkers) {
+ checkNotNull(partitionWorkers);
+		Set<DummyBlob> blobSet = new HashSet<>();
+		for (Set<Worker<?, ?>> workers : partitionWorkers) {
+ blobSet.add(new DummyBlob(workers));
+ }
+
+		ImmutableMap.Builder<Token, BlobNode> builder = new ImmutableMap.Builder<>();
+ for (DummyBlob b : blobSet) {
+ builder.put(b.id, new BlobNode(b.id, b.inputs, b.outputs));
+ }
+
+ this.blobNodes = builder.build();
+
+ for (DummyBlob cur : blobSet) {
+ for (DummyBlob other : blobSet) {
+ if (cur == other)
+ continue;
+ if (Sets.intersection(cur.outputs, other.inputs).size() != 0) {
+ BlobNode curNode = blobNodes.get(cur.id);
+ BlobNode otherNode = blobNodes.get(other.id);
+
+ curNode.addSuccessor(otherNode);
+ otherNode.addPredecessor(curNode);
+ }
+ }
+ }
+
+ checkCycles(blobNodes.values());
+
+ BlobNode sourceBlob = null;
+ for (BlobNode bn : blobNodes.values()) {
+ if (bn.getDependencyCount() == 0) {
+ assert sourceBlob == null : "Multiple independent blobs found.";
+ sourceBlob = bn;
+ }
+ }
+
+ checkNotNull(sourceBlob);
+ this.sourceBlobNode = sourceBlob;
+ }
+
+ /**
+ * @return BlobIds of all blobnodes in the blobgraph.
+ */
+	public ImmutableSet<Token> getBlobIds() {
+ return blobNodes.keySet();
+ }
+
+ public BlobNode getBlobNode(Token blobID) {
+ return blobNodes.get(blobID);
+ }
+
+ /**
+ * TODO: We may need to make the class {@link BlobNode} public and move
+ * these functions to {@link BlobNode}.
+ *
+ * Returns output edges of a blob. This method is added on [2014-03-01].
+ *
+ * @param blobID
+ * @return
+ */
+	public ImmutableSet<Token> getOutputs(Token blobID) {
+ return blobNodes.get(blobID).outputs;
+ }
+
+ /**
+ * TODO: We may need to make the class {@link BlobNode} public and move
+ * these functions to {@link BlobNode}.
+ *
+ * Returns input edges of a blob. This method is added on [2014-03-01].
+ *
+ * @param blobID
+ * @return
+ */
+	public ImmutableSet<Token> getInputs(Token blobID) {
+ return blobNodes.get(blobID).inputs;
+ }
+
+ /**
+ * A Drainer can be set to the {@link BlobGraph} to perform draining.
+ *
+ * @param drainer
+ */
+ public void setDrainer(AbstractDrainer drainer) {
+ for (BlobNode bn : blobNodes.values()) {
+ bn.setDrainer(drainer);
+ }
+ }
+
+ public void clearDrainData() {
+ for (BlobNode node : blobNodes.values()) {
+ node.snDrainData = null;
+ }
+ }
+
+ /**
+ * @return the sourceBlobNode
+ */
+ BlobNode getSourceBlobNode() {
+ return sourceBlobNode;
+ }
+
+ /**
+ * Does a depth first traversal to detect cycles in the graph.
+ *
+ * @param blobNodes
+ */
+	private void checkCycles(Collection<BlobNode> blobNodes) {
+		Map<BlobNode, Color> colorMap = new HashMap<>();
+ for (BlobNode b : blobNodes) {
+ colorMap.put(b, Color.WHITE);
+ }
+ for (BlobNode b : blobNodes) {
+ if (colorMap.get(b) == Color.WHITE)
+ if (DFS(b, colorMap))
+ throw new StreamCompilationFailedException(
+ "Cycles found among blobs");
+ }
+ }
+
+ /**
+	 * A cycle exists in a directed graph if a back edge is detected during a DFS
+ * traversal. A back edge exists in a directed graph if the currently
+ * explored vertex has an adjacent vertex that was already colored gray
+ *
+ * @param vertex
+ * @param colorMap
+ * @return true if cycle found, false otherwise.
+ */
+	private boolean DFS(BlobNode vertex, Map<BlobNode, Color> colorMap) {
+ colorMap.put(vertex, Color.GRAY);
+ for (BlobNode adj : vertex.getSuccessors()) {
+ if (colorMap.get(adj) == Color.GRAY)
+ return true;
+ if (colorMap.get(adj) == Color.WHITE)
+ if (DFS(adj, colorMap))
+ return true;
+ }
+ colorMap.put(vertex, Color.BLACK);
+ return false;
+ }
+
+ /**
+	 * Just used to build the input and output tokens of a partitioned blob's
+	 * workers. Imitates a {@link Blob}.
+ */
+ private final class DummyBlob {
+		private final ImmutableSet<Token> inputs;
+		private final ImmutableSet<Token> outputs;
+ private final Token id;
+
+		private DummyBlob(Set<Worker<?, ?>> workers) {
+			ImmutableSet.Builder<Token> inputBuilder = new ImmutableSet.Builder<>();
+			ImmutableSet.Builder<Token> outputBuilder = new ImmutableSet.Builder<>();
+ for (IOInfo info : IOInfo.externalEdges(workers)) {
+ (info.isInput() ? inputBuilder : outputBuilder).add(info
+ .token());
+ }
+
+ inputs = inputBuilder.build();
+ outputs = outputBuilder.build();
+ id = Collections.min(inputs);
+ }
+ }
+
+ /**
+ * [14 Feb, 2015] This class was an inner class of {@link AbstractDrainer}.
+ * I have refactored {@link AbstractDrainer} and moved this class here.
+ * {@link AbstractDrainer} directly accessed lots of fields and methods of
+ * this class when this was an inner class of it. So those fields and
+ * methods of this class have been made package-private during the
+ * refactoring.
+ *
+ * [14 Feb, 2015] TODO: {@link AbstractDrainer#schExecutorService} and
+ * {@link AbstractDrainer#state} have been made package private during the
+ * refactoring. We can make those fields private by moving
+ * {@link BlobNode#drain()} and {@link BlobNode#drained()} to
+ * {@link AbstractDrainer}.
+ *
+ *
+ * BlobNode represents the vertex in the blob graph ({@link BlobGraph} ). It
+ * represents a {@link Blob} and carry the draining process of that blob.
+ *
+ * @author Sumanan
+ */
+ static final class BlobNode {
+
+ /**
+ * Intermediate drain data.
+ */
+ SNDrainedData snDrainData;
+
+ private AbstractDrainer drainer;
+ /**
+ * The blob that wrapped by this blob node.
+ */
+ final Token blobID;
+ /**
+ * Predecessor blob nodes of this blob node.
+ */
+ private List predecessors;
+ /**
+ * Successor blob nodes of this blob node.
+ */
+ private List successors;
+ /**
+ * The number of undrained predecessors of this blob. Every time a
+ * predecessor finishes draining, dependencyCount will be decremented,
+ * and once it reaches 0 this blob will be called for draining.
+ */
+ private AtomicInteger dependencyCount;
+
+ // TODO: add comments
+ AtomicInteger drainState;
+
+ /**
+ * All input channels of this blob. We need this information to globally
+ * determine buffer sizes to avoid deadlocks. This is added on
+ * [2014-03-01], when implementing global buffer size adjustment.
+ */
+ private final ImmutableSet inputs;
+
+ /**
+ * All output channels of this blob. We need this information to
+ * globally determine buffer sizes to avoid deadlocks. This is added on
+ * [2014-03-01], when implementing global buffer size adjustment.
+ */
+ private final ImmutableSet outputs;
+
+ private BlobNode(Token blob, ImmutableSet inputs,
+ ImmutableSet outputs) {
+ this.blobID = blob;
+ predecessors = new ArrayList<>();
+ successors = new ArrayList<>();
+ dependencyCount = new AtomicInteger(0);
+ drainState = new AtomicInteger(0);
+ this.inputs = inputs;
+ this.outputs = outputs;
+ }
+
+ /**
+ * Should be called when the draining of the current blob has been
+ * finished. This function stops all threads belonging to the blob and
+ * informs its successors as well.
+ */
+ void drained() {
+ if (drainState.compareAndSet(1, 3)) {
+ for (BlobNode suc : this.successors) {
+ suc.predecessorDrained(this);
+ }
+ drainer.drainingDone(this);
+ } else if (drainState.compareAndSet(2, 3)) {
+ drainer.drainingDone(this);
+ }
+ }
+
+ /**
+ * Drain the blob mapped by this blob node.
+ */
+ void drain() {
+ checkNotNull(drainer);
+ if (!drainState.compareAndSet(0, 1)) {
+ throw new IllegalStateException(
+ "Drain of this blobNode has already been called");
+ }
+
+ DrainType drainType;
+ if (Options.useDrainData)
+ if (drainer.state == DrainerState.FINAL)
+ drainType = DrainType.FINAL;
+ else
+ drainType = DrainType.INTERMEDIATE;
+ else
+ drainType = DrainType.DISCARD;
+
+ drainer.drain(blobID, drainType);
+
+ // TODO: Verify the waiting time is reasonable.
+ if (Options.needDrainDeadlockHandler)
+ drainer.schExecutorService.schedule(deadLockHandler(), 6000,
+ TimeUnit.MILLISECONDS);
+ }
+
+ void setDrainData(SNDrainedData drainedData) {
+ if (this.snDrainData == null) {
+ this.snDrainData = drainedData;
+ drainState.set(4);
+ } else
+ throw new AssertionError(
+ "Multiple drain data has been received.");
+ }
+
+ private ImmutableList getSuccessors() {
+ return ImmutableList.copyOf(successors);
+ }
+
+ private void addPredecessor(BlobNode pred) {
+ assert !predecessors.contains(pred) : String.format(
+ "The BlobNode %s has already been set as a predecessors",
+ pred);
+ predecessors.add(pred);
+ dependencyCount.set(dependencyCount.get() + 1);
+ }
+
+ private void addSuccessor(BlobNode succ) {
+ assert !successors.contains(succ) : String
+ .format("The BlobNode %s has already been set as a successor",
+ succ);
+ successors.add(succ);
+ }
+
+ private void predecessorDrained(BlobNode pred) {
+ if (!predecessors.contains(pred))
+ throw new IllegalArgumentException("Illegal Predecessor");
+
+ assert dependencyCount.get() > 0 : String
+ .format("Graph mismatch : My predecessors count is %d. But more than %d of BlobNodes claim me as their successor",
+ predecessors.size(), predecessors.size());
+
+ if (dependencyCount.decrementAndGet() == 0) {
+ drain();
+ }
+ }
+
+ /**
+ * @return The number of undrained predecessors.
+ */
+ private int getDependencyCount() {
+ return dependencyCount.get();
+ }
+
+ private void setDrainer(AbstractDrainer drainer) {
+ checkNotNull(drainer);
+ this.drainer = drainer;
+ }
+
+ private Runnable deadLockHandler() {
+ Runnable r = new Runnable() {
+
+ @Override
+ public void run() {
+ if (drainState.compareAndSet(1, 2)) {
+ for (BlobNode suc : successors) {
+ suc.predecessorDrained(BlobNode.this);
+ }
+ System.out
+ .println("deadLockHandler: "
+ + blobID
+ + " - Deadlock during draining has been handled");
+ }
+ }
+ };
+ return r;
+ }
+ }
+
+ /**
+ * Color enumerator used by DFS algorithm to find cycles in the blob graph.
+ */
+ private enum Color {
+ WHITE, GRAY, BLACK
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java
new file mode 100644
index 00000000..b9f1fc50
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java
@@ -0,0 +1,95 @@
+package edu.mit.streamjit.impl.concurrent;
+
+import java.util.Set;
+
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob;
+import edu.mit.streamjit.impl.blob.BlobFactory;
+import edu.mit.streamjit.impl.blob.DrainData;
+import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.Configuration.Parameter;
+import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory;
+import edu.mit.streamjit.impl.distributed.ConfigurationManager;
+import edu.mit.streamjit.impl.distributed.PartitionManager;
+import edu.mit.streamjit.impl.distributed.WorkerMachine;
+import edu.mit.streamjit.impl.distributed.common.Options;
+
+public class ConcurrentBlobFactory implements BlobFactory {
+
+ private int noOfBlobs;
+
+ private final PartitionManager partitionManager;
+
+ public ConcurrentBlobFactory(PartitionManager partitionManager,
+ int noOfBlobs) {
+ this.partitionManager = partitionManager;
+ this.noOfBlobs = noOfBlobs;
+ }
+
+ /**
+ * If {@link ConfigurationManager} is not passed as a constructor argument
+ * then {@link WorkerMachine} will be used as default one.
+ *
+ * @param noOfMachines
+ */
+ public ConcurrentBlobFactory(int noOfBlobs) {
+ this(new WorkerMachine(null), noOfBlobs);
+ }
+
+ @Override
+ public Blob makeBlob(Set> workers, Configuration config,
+ int maxNumCores, DrainData initialState) {
+ return new Compiler2BlobFactory().makeBlob(workers, config,
+ maxNumCores, initialState);
+ }
+
+ @Override
+ public Configuration getDefaultConfiguration(Set> workers) {
+ Configuration concurrentCfg;
+ if (this.noOfBlobs > 1)
+ concurrentCfg = partitionManager.getDefaultConfiguration(workers,
+ noOfBlobs);
+ else
+ concurrentCfg = Configuration.builder().build();
+
+ if (!Options.useCompilerBlob)
+ return concurrentCfg;
+
+ Configuration.Builder builder = Configuration.builder(concurrentCfg);
+ BlobFactory compilerBf = new Compiler2BlobFactory();
+ Configuration compilercfg = compilerBf.getDefaultConfiguration(workers);
+ for (Parameter p : compilercfg.getParametersMap().values())
+ builder.addParameter(p);
+ return builder.build();
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime
+ * result
+ + ((partitionManager == null) ? 0 : partitionManager.hashCode());
+ result = prime * result + noOfBlobs;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ ConcurrentBlobFactory other = (ConcurrentBlobFactory) obj;
+ if (partitionManager == null) {
+ if (other.partitionManager != null)
+ return false;
+ } else if (!partitionManager.equals(other.partitionManager))
+ return false;
+ if (noOfBlobs != other.noOfBlobs)
+ return false;
+ return true;
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java
index ebe5357d..165e06f6 100644
--- a/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java
+++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java
@@ -21,16 +21,21 @@
*/
package edu.mit.streamjit.impl.concurrent;
+import static com.google.common.base.Preconditions.checkNotNull;
+
import java.util.Map;
import java.util.Set;
-import static com.google.common.base.Preconditions.*;
import com.google.common.collect.ImmutableMap;
import edu.mit.streamjit.impl.blob.Blob;
import edu.mit.streamjit.impl.blob.Blob.Token;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
import edu.mit.streamjit.impl.common.BlobThread;
+import edu.mit.streamjit.impl.common.TimeLogger;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
+import edu.mit.streamjit.impl.common.drainer.BlobGraph;
+import edu.mit.streamjit.impl.distributed.StreamJitApp;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
import edu.mit.streamjit.impl.distributed.common.Utils;
/**
@@ -52,8 +57,9 @@ public final class ConcurrentDrainer extends AbstractDrainer {
*/
ImmutableMap> threadMap;
- public ConcurrentDrainer(BlobGraph blobGraph,
- Map> threadMap) {
+ public ConcurrentDrainer(StreamJitApp, ?> app, TimeLogger logger,
+ BlobGraph blobGraph, Map> threadMap) {
+ super(app, logger);
setBlobGraph(blobGraph);
blobMap = buildBlobMap(threadMap.keySet());
this.threadMap = ImmutableMap.copyOf(threadMap);
@@ -65,7 +71,7 @@ protected void drainingDone(boolean isFinal) {
}
@Override
- protected void drain(Token blobID, boolean isFinal) {
+ protected void drain(Token blobID, DrainType drainType) {
Blob blob = blobMap.get(blobID);
checkNotNull(blob);
@@ -116,6 +122,5 @@ public void run() {
@Override
protected void prepareDraining(boolean isFinal) {
- // TODO Auto-generated method stub
}
}
diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java
index 4f7b0799..299b6f53 100644
--- a/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java
+++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java
@@ -44,8 +44,6 @@
import edu.mit.streamjit.impl.blob.Blob.Token;
import edu.mit.streamjit.impl.blob.Buffer;
import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
-import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph;
import edu.mit.streamjit.impl.common.BlobThread;
import edu.mit.streamjit.impl.common.Configuration;
import edu.mit.streamjit.impl.common.Configuration.IntParameter;
@@ -56,6 +54,9 @@
import edu.mit.streamjit.impl.common.OutputBufferFactory;
import edu.mit.streamjit.impl.common.Portals;
import edu.mit.streamjit.impl.common.VerifyStreamGraph;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
+import edu.mit.streamjit.impl.common.drainer.BlobGraph;
+import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler;
import edu.mit.streamjit.impl.interp.ChannelFactory;
import edu.mit.streamjit.impl.interp.Interpreter;
import edu.mit.streamjit.partitioner.HorizontalPartitioner;
@@ -63,7 +64,14 @@
/**
* A stream compiler that partitions a streamgraph into multiple blobs and
- * execute it on multiple threads.
+ * executes them on a single node. This {@link StreamCompiler} can be used for
+ * the following purposes
+ *
+ *
Single blob online tuning.
+ *
Multiple blobs on a single node. This will simulate
+ * {@link DistributedStreamCompiler} on a single node to find out deadlocks and
+ * other issues.
+ *
*
* @author Sumanan sumanan@mit.edu
* @since Apr 8, 2013
@@ -72,9 +80,8 @@ public class ConcurrentStreamCompiler implements StreamCompiler {
int noOfBlobs;
/**
- * @param Patrions
- * a stream graph up to noOfBlobs many blobs and executes each
- * blob on each thread.
+ * @param noOfBlobs
+ * Maximum number of blobs that can be created.
*/
public ConcurrentStreamCompiler(int noOfBlobs) {
if (noOfBlobs < 1)
@@ -83,15 +90,20 @@ public ConcurrentStreamCompiler(int noOfBlobs) {
this.noOfBlobs = noOfBlobs;
}
- public ConcurrentStreamCompiler(Configuration cfg) {
+ public ConcurrentStreamCompiler() {
+ this(1);
+ }
- IntParameter threadCount = cfg.getParameter("threadCount",
- IntParameter.class);
- this.noOfBlobs = threadCount.getValue();
- if (noOfBlobs < 1)
- throw new IllegalArgumentException(
- "noOfBlobs should be 1 or greater");
- this.noOfBlobs = noOfBlobs;
+ public ConcurrentStreamCompiler(Configuration cfg) {
+ IntParameter nBlobs = cfg.getParameter("noOfBlobs", IntParameter.class);
+ if (nBlobs == null)
+ this.noOfBlobs = 1;
+ else {
+ this.noOfBlobs = nBlobs.getValue();
+ if (noOfBlobs < 1)
+ throw new IllegalArgumentException(
+ "noOfBlobs should be 1 or greater");
+ }
}
@Override
diff --git a/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java b/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java
index 022631b2..4074cdd6 100644
--- a/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java
+++ b/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java
@@ -21,372 +21,65 @@
*/
package edu.mit.streamjit.impl.distributed;
-import java.util.ArrayDeque;
-import java.util.ArrayList;
-import java.util.Deque;
-import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.Map.Entry;
-import edu.mit.streamjit.api.Filter;
-import edu.mit.streamjit.api.Joiner;
-import edu.mit.streamjit.api.Splitter;
+import edu.mit.streamjit.api.StreamCompilationFailedException;
import edu.mit.streamjit.api.Worker;
-import edu.mit.streamjit.impl.blob.BlobFactory;
-import edu.mit.streamjit.impl.blob.Blob.Token;
import edu.mit.streamjit.impl.common.Configuration;
-import edu.mit.streamjit.impl.common.Workers;
-import edu.mit.streamjit.impl.common.Configuration.PartitionParameter;
-import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
-import edu.mit.streamjit.impl.distributed.common.Utils;
-import edu.mit.streamjit.impl.distributed.node.StreamNode;
-import edu.mit.streamjit.impl.interp.Interpreter;
-import edu.mit.streamjit.partitioner.AbstractPartitioner;
+import edu.mit.streamjit.impl.common.drainer.BlobGraph;
+import edu.mit.streamjit.tuner.OnlineTuner;
-/**
- * ConfigurationManager deals with {@link Configuration}. Mainly, It does
- * following two tasks.
- *
- *
Generates configuration for with appropriate tuning parameters for
- * tuning.
- *
Dispatch the configuration given by the open tuner and make blobs
- * accordingly.
- *
- *
- * One can implement this interface to try different search space designs as
- * they want.
- *
- * @author Sumanan sumanan@mit.edu
- * @since Jan 16, 2014
- *
- */
-public interface ConfigurationManager {
+public class ConfigurationManager {
- /**
- * Generates default configuration with all tuning parameters for tuning.
- *
- * @param streamGraph
- * @param source
- * @param sink
- * @param noOfMachines
- * @return
- */
- public Configuration getDefaultConfiguration(Set> workers,
- int noOfMachines);
+ private final StreamJitApp, ?> app;
+
+ private final PartitionManager partitionManager;
+
+ public ConfigurationManager(StreamJitApp, ?> app,
+ PartitionManager partitionManager) {
+ this.app = app;
+ this.partitionManager = partitionManager;
+ }
/**
- * When opentuner gives a new configuration, this method may be called to
- * interpret the configuration and execute the steramjit app with the new
+ * This method may be called by the {@link OnlineTuner} to interpret a
+ * new configuration and execute the streamjit app with the new
* configuration.
+ *
+ * Builds partitionsMachineMap and {@link BlobGraph} from the new
+ * Configuration, and verifies for any cycles among blobs. If it is a valid
+ * configuration, (i.e., no cycles among the blobs), then {@link #app}
+ * object's member variables {@link StreamJitApp#blobConfiguration},
+ * {@link StreamJitApp#blobGraph} and
+ * {@link StreamJitApp#partitionsMachineMap} will be assigned to
+ * reflect the new configuration; no changes otherwise.
*
* @param config
- * configuration from opentuner.
+ * configuration from {@link OnlineTuner}.
* @return true iff valid configuration is passed.
*/
- public boolean newConfiguration(Configuration config);
-
- /**
- * Generates static information of the app that is needed by steramnodes.
- * This configuration will be sent to streamnodes when setting up a new app
- * for execution (Only once).
- *
- * @return static information of the app that is needed by steramnodes.
- */
- public Configuration getStaticConfiguration();
-
- /**
- * For every reconfiguration, this method may be called by the appropriate
- * class to get new configuration information that can be sent to all
- * participating {@link StreamNode}s.
- *
- * @return new partition information
- */
- public Configuration getDynamicConfiguration();
-
- /**
- * Implements the functions those can be called by runtimer to send
- * configuration information to streamnodes.
- *
- * @author Sumanan sumanan@mit.edu
- * @since Jan 17, 2014
- */
- public static abstract class AbstractConfigurationManager
- implements
- ConfigurationManager {
-
- protected final StreamJitApp app;
-
- AbstractConfigurationManager(StreamJitApp app) {
- this.app = app;
+ public boolean newConfiguration(Configuration config) {
+ // for (Parameter p : config.getParametersMap().values()) {
+ // if (p instanceof IntParameter) {
+ // IntParameter ip = (IntParameter) p;
+ // System.out.println(ip.getName() + " - " + ip.getValue());
+ // } else if (p instanceof SwitchParameter>) {
+ // SwitchParameter> sp = (SwitchParameter>) p;
+ // System.out.println(sp.getName() + " - " + sp.getValue());
+ // } else
+ // System.out.println(p.getName() + " - Unknown type");
+ // }
+
+ Map>>> partitionsMachineMap = partitionManager
+ .partitionMap(config);
+ try {
+ app.verifyConfiguration(partitionsMachineMap);
+ } catch (StreamCompilationFailedException ex) {
+ return false;
}
-
- @Override
- public Configuration getStaticConfiguration() {
- Configuration.Builder builder = Configuration.builder();
- builder.putExtraData(GlobalConstants.JARFILE_PATH, app.jarFilePath)
- .putExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME,
- app.topLevelClass);
- return builder.build();
- }
-
- @Override
- public Configuration getDynamicConfiguration() {
- Configuration.Builder builder = Configuration.builder();
-
- Map coresPerMachine = new HashMap<>();
- for (Entry>>> machine : app.partitionsMachineMap
- .entrySet()) {
- coresPerMachine
- .put(machine.getKey(), machine.getValue().size());
- }
-
- PartitionParameter.Builder partParam = PartitionParameter.builder(
- GlobalConstants.PARTITION, coresPerMachine);
-
- BlobFactory factory = new Interpreter.InterpreterBlobFactory();
- partParam.addBlobFactory(factory);
-
- app.blobtoMachineMap = new HashMap<>();
-
- for (Integer machineID : app.partitionsMachineMap.keySet()) {
- List>> blobList = app.partitionsMachineMap
- .get(machineID);
- for (Set> blobWorkers : blobList) {
- // TODO: One core per blob. Need to change this.
- partParam.addBlob(machineID, 1, factory, blobWorkers);
-
- // TODO: Temp fix to build.
- Token t = Utils.getblobID(blobWorkers);
- app.blobtoMachineMap.put(t, machineID);
- }
- }
-
- builder.addParameter(partParam.build());
- if (app.blobConfiguration != null)
- builder.addSubconfiguration("blobConfigs",
- app.blobConfiguration);
- return builder.build();
- }
-
- /**
- * Copied form {@link AbstractPartitioner} class. But modified to
- * support nested splitjoiners.
Returns all {@link Worker}s in a
- * splitjoin.
- *
- * @param splitter
- * @return Returns all {@link Filter}s in a splitjoin.
- */
- protected void getAllChildWorkers(Splitter, ?> splitter,
- Set> childWorkers) {
- childWorkers.add(splitter);
- Joiner, ?> joiner = getJoiner(splitter);
- Worker, ?> cur;
- for (Worker, ?> childWorker : Workers.getSuccessors(splitter)) {
- cur = childWorker;
- while (cur != joiner) {
- if (cur instanceof Filter, ?>)
- childWorkers.add(cur);
- else if (cur instanceof Splitter, ?>) {
- getAllChildWorkers((Splitter, ?>) cur, childWorkers);
- cur = getJoiner((Splitter, ?>) cur);
- } else
- throw new IllegalStateException(
- "Some thing wrong in the algorithm.");
-
- assert Workers.getSuccessors(cur).size() == 1 : "Illegal State encounted : cur can only be either a filter or a joner";
- cur = Workers.getSuccessors(cur).get(0);
- }
- }
- childWorkers.add(joiner);
- }
-
- /**
- * Find and returns the corresponding {@link Joiner} for the passed
- * {@link Splitter}.
- *
- * @param splitter
- * : {@link Splitter} that needs it's {@link Joiner}.
- * @return Corresponding {@link Joiner} of the passed {@link Splitter}.
- */
- protected Joiner, ?> getJoiner(Splitter, ?> splitter) {
- Worker, ?> cur = Workers.getSuccessors(splitter).get(0);
- int innerSplitjoinCount = 0;
- while (!(cur instanceof Joiner, ?>) || innerSplitjoinCount != 0) {
- if (cur instanceof Splitter, ?>)
- innerSplitjoinCount++;
- if (cur instanceof Joiner, ?>)
- innerSplitjoinCount--;
- assert innerSplitjoinCount >= 0 : "Joiner Count is more than splitter count. Check the algorithm";
- cur = Workers.getSuccessors(cur).get(0);
- }
- assert cur instanceof Joiner, ?> : "Error in algorithm. Not returning a Joiner";
- return (Joiner, ?>) cur;
- }
-
- protected String getParamName(Integer id) {
- assert id > -1 : "Worker id cannot be negative";
- return String.format("worker%dtomachine", id);
- }
-
- /**
- * Goes through all workers in workerset which is passed as argument,
- * find the workers which are interconnected and group them as a blob
- * workers. i.e., Group the workers which are connected.
- *
- * TODO: If any dynamic edges exists then should create interpreter
- * blob.
- *
- * @param workerset
- * @return list of workers set which contains interconnected workers.
- * Each worker set in the list is supposed to run in an
- * individual blob.
- */
- protected List>> getConnectedComponents(
- Set> workerset) {
- List>> ret = new ArrayList>>();
- while (!workerset.isEmpty()) {
- Deque> queue = new ArrayDeque<>();
- Set> blobworkers = new HashSet<>();
- Worker, ?> w = workerset.iterator().next();
- blobworkers.add(w);
- workerset.remove(w);
- queue.offer(w);
- while (!queue.isEmpty()) {
- Worker, ?> wrkr = queue.poll();
- for (Worker, ?> succ : Workers.getSuccessors(wrkr)) {
- if (workerset.contains(succ)) {
- blobworkers.add(succ);
- workerset.remove(succ);
- queue.offer(succ);
- }
- }
-
- for (Worker, ?> pred : Workers.getPredecessors(wrkr)) {
- if (workerset.contains(pred)) {
- blobworkers.add(pred);
- workerset.remove(pred);
- queue.offer(pred);
- }
- }
- }
- ret.add(blobworkers);
- }
- return ret;
- }
-
- /**
- * Cycles can occur iff splitter and joiner happened to fall into a blob
- * while some workers of that splitjoin falls into other blob. Here, we
- * check for the above mention condition. If cycles exists, split then
- * in to several blobs.
- *
- * @param blobworkers
- * @return
- */
- protected List>> breakCycles(
- Set> blobworkers) {
- Map, Joiner, ?>> rfctrSplitJoin = new HashMap<>();
- Set> splitterSet = getSplitters(blobworkers);
- for (Splitter, ?> s : splitterSet) {
- Joiner, ?> j = getJoiner(s);
- if (blobworkers.contains(j)) {
- Set> childWorkers = new HashSet<>();
- getAllChildWorkers(s, childWorkers);
- if (!blobworkers.containsAll(childWorkers)) {
- rfctrSplitJoin.put(s, j);
- }
- }
- }
-
- List>> ret = new ArrayList<>();
-
- for (Splitter, ?> s : rfctrSplitJoin.keySet()) {
- if (blobworkers.contains(s)) {
- ret.add(getSplitterReachables(s, blobworkers,
- rfctrSplitJoin));
- }
- }
- ret.addAll(getConnectedComponents(blobworkers));
- return ret;
- }
-
- /**
- * Goes through the passed set of workers, add workers those are
- * reachable from the splitter s, but not any conflicting splitter or
- * joiner.
- *
- * This function has side effect. Modifies the argument.
- *
- * @param s
- * @param blobworkers
- * @return
- */
- protected Set> getSplitterReachables(Splitter, ?> s,
- Set> blobworkers,
- Map, Joiner, ?>> rfctrSplitJoin) {
- assert blobworkers.contains(s) : "Splitter s in not in blobworkers";
- Set> ret = new HashSet<>();
- Set> exclude = new HashSet<>();
- Deque> queue = new ArrayDeque<>();
- ret.add(s);
- exclude.add(rfctrSplitJoin.get(s));
- blobworkers.remove(s);
- queue.offer(s);
- while (!queue.isEmpty()) {
- Worker, ?> wrkr = queue.poll();
- for (Worker, ?> succ : Workers.getSuccessors(wrkr)) {
- process(succ, blobworkers, rfctrSplitJoin, exclude, queue,
- ret);
- }
-
- for (Worker, ?> pred : Workers.getPredecessors(wrkr)) {
- process(pred, blobworkers, rfctrSplitJoin, exclude, queue,
- ret);
- }
- }
- return ret;
- }
-
- /**
- * Since the code in this method repeated in two places in
- * getSplitterReachables() method, It is re-factored into a private
- * method to avoid code duplication.
- */
- protected void process(Worker, ?> wrkr,
- Set> blobworkers,
- Map, Joiner, ?>> rfctrSplitJoin,
- Set> exclude, Deque> queue,
- Set> ret) {
- if (blobworkers.contains(wrkr) && !exclude.contains(wrkr)) {
- ret.add(wrkr);
- blobworkers.remove(wrkr);
- queue.offer(wrkr);
-
- for (Entry, Joiner, ?>> e : rfctrSplitJoin
- .entrySet()) {
- if (e.getValue().equals(wrkr)) {
- exclude.add(e.getKey());
- break;
- } else if (e.getKey().equals(wrkr)) {
- exclude.add(e.getValue());
- break;
- }
- }
- }
- }
-
- protected Set> getSplitters(Set> blobworkers) {
- Set> splitterSet = new HashSet<>();
- for (Worker, ?> w : blobworkers) {
- if (w instanceof Splitter, ?>) {
- splitterSet.add((Splitter, ?>) w);
- }
- }
- return splitterSet;
- }
-
+ app.setConfiguration(config);
+ return true;
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java b/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java
new file mode 100644
index 00000000..be47742d
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java
@@ -0,0 +1,371 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.Configuration.Builder;
+import edu.mit.streamjit.impl.common.Configuration.Parameter;
+import edu.mit.streamjit.impl.common.Configuration.SwitchParameter;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel;
+import edu.mit.streamjit.impl.distributed.common.Connection;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionType;
+import edu.mit.streamjit.impl.distributed.common.Connection.GenericConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.node.StreamNode;
+
+/**
+ * Generates configuration parameters to tune the {@link Connection}'s
+ * communication type such as blocking TCP connection, asynchronous TCP
+ * connection, Infiniband, etc.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Jun 23, 2014
+ *
+ */
+public interface ConnectionManager {
+
+ /**
+ * Generates parameters to tune {@link BoundaryChannel} and add those into
+ * the {@link Configuration.Builder}.
+ *
+ * @param cfgBuilder
+ * @param workers
+ */
+ public void addConnectionParameters(Configuration.Builder cfgBuilder,
+ Set> workers);
+
+ /**
+ * Generates parameters to tune {@link BoundaryChannel} and return those as
+ * a new {@link Configuration}.
+ *
+ * @param workers
+ * @return
+ */
+ public Configuration getDefaultConfiguration(Set> workers);
+
+ /**
+ * Decides {@link Connection} type for each inter-blob connection based on
+ * the {@link cfg}.
+ *
+ * @param cfg
+ * @param partitionsMachineMap
+ * @param source
+ * @param sink
+ * @return
+ */
+ public Map conInfoMap(Configuration cfg,
+ Map>>> partitionsMachineMap,
+ Worker, ?> source, Worker, ?> sink);
+
+ /**
+ * Sometimes an assigned TCP port may not be available to make a new
+ * connection at the {@link StreamNode}'s side. In this case a new
+ * {@link ConnectionInfo} must be created to replace the already created one.
+ *
+ * @param conInfo
+ * : Problematic {@link ConnectionInfo}.
+ * @return : New {@link ConnectionInfo} to replace problematic
+ * {@link ConnectionInfo}.
+ */
+ public ConnectionInfo replaceConInfo(ConnectionInfo conInfo);
+
+ public abstract static class AbstractConnectionManager implements
+ ConnectionManager {
+
+ private final int controllerNodeID;
+
+ protected Set currentConInfos;
+
+ protected int startPortNo = 24896; // Just a random magic number.
+
+ public AbstractConnectionManager(int controllerNodeID) {
+ this.controllerNodeID = controllerNodeID;
+ this.currentConInfos = new HashSet<>();
+ }
+
+ public Map conInfoMap(Configuration cfg,
+ Map>>> partitionsMachineMap,
+ Worker, ?> source, Worker, ?> sink) {
+
+ assert partitionsMachineMap != null : "partitionsMachineMap is null";
+
+ Set usedConInfos = new HashSet<>();
+ Map conInfoMap = new HashMap<>();
+
+ for (Integer machineID : partitionsMachineMap.keySet()) {
+ List>> blobList = partitionsMachineMap
+ .get(machineID);
+ Set> allWorkers = new HashSet<>(); // Contains all
+ // workers those
+ // are
+ // assigned to
+ // the
+ // current
+ // machineID
+ // machine.
+ for (Set> blobWorkers : blobList) {
+ allWorkers.addAll(blobWorkers);
+ }
+
+ for (Worker, ?> w : allWorkers) {
+ for (Worker, ?> succ : Workers.getSuccessors(w)) {
+ if (allWorkers.contains(succ))
+ continue;
+ int dstMachineID = getAssignedMachine(succ,
+ partitionsMachineMap);
+ Token t = new Token(w, succ);
+ addtoconInfoMap(machineID, dstMachineID, t,
+ usedConInfos, conInfoMap, cfg);
+ }
+ }
+ }
+
+ Token headToken = Token.createOverallInputToken(source);
+ int dstMachineID = getAssignedMachine(source, partitionsMachineMap);
+ addtoconInfoMap(controllerNodeID, dstMachineID, headToken,
+ usedConInfos, conInfoMap, cfg);
+
+ Token tailToken = Token.createOverallOutputToken(sink);
+ int srcMahineID = getAssignedMachine(sink, partitionsMachineMap);
+ addtoconInfoMap(srcMahineID, controllerNodeID, tailToken,
+ usedConInfos, conInfoMap, cfg);
+
+ return conInfoMap;
+ }
+
+ /**
+ * @param worker
+ * @return the machineID on which the passed worker is assigned.
+ */
+ private int getAssignedMachine(Worker, ?> worker,
+ Map>>> partitionsMachineMap) {
+ for (Integer machineID : partitionsMachineMap.keySet()) {
+ for (Set> workers : partitionsMachineMap
+ .get(machineID)) {
+ if (workers.contains(worker))
+ return machineID;
+ }
+ }
+
+ throw new IllegalArgumentException(String.format(
+ "%s is not assigned to anyof the machines", worker));
+ }
+
+ protected abstract void addtoconInfoMap(int srcID, int dstID, Token t,
+ Set usedConInfos,
+ Map conInfoMap, Configuration cfg);
+
+ protected List getTcpConInfo(ConnectionInfo conInfo) {
+ List conList = new ArrayList<>();
+ for (ConnectionInfo tcpconInfo : currentConInfos) {
+ if (conInfo.equals(tcpconInfo))
+ conList.add(tcpconInfo);
+ }
+ return conList;
+ }
+
+ protected String getParamName(Token t) {
+ return String.format("ConnectionType-%d:%d",
+ t.getUpstreamIdentifier(), t.getDownstreamIdentifier());
+ }
+
+ public ConnectionInfo replaceConInfo(ConnectionInfo conInfo) {
+ if (currentConInfos.contains(conInfo))
+ currentConInfos.remove(conInfo);
+ ConnectionInfo newConinfo;
+ if (conInfo.getSrcID() == 0)
+ newConinfo = new TCPConnectionInfo(conInfo.getSrcID(),
+ conInfo.getDstID(), startPortNo++);
+ else
+ newConinfo = new AsyncTCPConnectionInfo(conInfo.getSrcID(),
+ conInfo.getDstID(), startPortNo++);
+ currentConInfos.add(newConinfo);
+
+ return newConinfo;
+ }
+ }
+
+ public static abstract class NoParams extends AbstractConnectionManager {
+
+ public NoParams(int controllerNodeID) {
+ super(controllerNodeID);
+ }
+
+ @Override
+ public void addConnectionParameters(Builder cfgBuilder,
+ Set> workers) {
+ return;
+ }
+
+ @Override
+ public Configuration getDefaultConfiguration(Set> workers) {
+ return Configuration.builder().build();
+ }
+
+ protected void addtoconInfoMap(int srcID, int dstID, Token t,
+ Set usedConInfos,
+ Map conInfoMap, Configuration cfg) {
+
+ ConnectionInfo conInfo = new GenericConnectionInfo(srcID, dstID);
+
+ List conSet = getTcpConInfo(conInfo);
+ ConnectionInfo tcpConInfo = null;
+
+ for (ConnectionInfo con : conSet) {
+ if (!usedConInfos.contains(con)) {
+ tcpConInfo = con;
+ break;
+ }
+ }
+
+ if (tcpConInfo == null) {
+ tcpConInfo = makeConnectionInfo(srcID, dstID);
+ this.currentConInfos.add(tcpConInfo);
+ }
+
+ conInfoMap.put(t, tcpConInfo);
+ usedConInfos.add(tcpConInfo);
+ }
+
+ protected abstract ConnectionInfo makeConnectionInfo(int srcID,
+ int dstID);
+ }
+
+ public static class BlockingTCPNoParams extends NoParams {
+
+ public BlockingTCPNoParams(int controllerNodeID) {
+ super(controllerNodeID);
+ }
+
+ @Override
+ protected ConnectionInfo makeConnectionInfo(int srcID, int dstID) {
+ return new TCPConnectionInfo(srcID, dstID, startPortNo++);
+ }
+ }
+
+ public static class AsyncTCPNoParams extends NoParams {
+
+ public AsyncTCPNoParams(int controllerNodeID) {
+ super(controllerNodeID);
+ }
+
+ @Override
+ protected ConnectionInfo makeConnectionInfo(int srcID, int dstID) {
+ return new AsyncTCPConnectionInfo(srcID, dstID, startPortNo++);
+ }
+ }
+
+ public static class AllConnectionParams extends AbstractConnectionManager {
+ public AllConnectionParams(int controllerNodeID) {
+ super(controllerNodeID);
+ }
+
+ @Override
+ public void addConnectionParameters(Builder cfgBuilder,
+ Set> workers) {
+ for (Worker, ?> w : workers) {
+ for (Worker, ?> succ : Workers.getSuccessors(w)) {
+ Token t = new Token(w, succ);
+ Parameter p = new Configuration.SwitchParameter(
+ getParamName(t), ConnectionType.class,
+ ConnectionType.BTCP, Arrays.asList(ConnectionType
+ .values()));
+ cfgBuilder.addParameter(p);
+ }
+ }
+
+ // Add Parameter for global input channel.
+ Set> heads = Workers.getTopmostWorkers(workers);
+ assert heads.size() == 1 : "Multiple first workers";
+ for (Worker, ?> firstWorker : heads) {
+ Token t = Token.createOverallInputToken(firstWorker);
+ Parameter p = new Configuration.SwitchParameter(
+ getParamName(t), ConnectionType.class,
+ ConnectionType.BTCP, Arrays.asList(ConnectionType
+ .values()));
+ cfgBuilder.addParameter(p);
+ }
+
+ // Add Parameter for global output channel.
+ Set> tail = Workers.getBottommostWorkers(workers);
+ assert tail.size() == 1 : "Multiple first workers";
+ for (Worker, ?> lastWorker : tail) {
+ Token t = Token.createOverallOutputToken(lastWorker);
+ Parameter p = new Configuration.SwitchParameter(
+ getParamName(t), ConnectionType.class,
+ ConnectionType.BTCP, Arrays.asList(ConnectionType
+ .values()));
+ cfgBuilder.addParameter(p);
+ }
+ }
+
+ @Override
+ public Configuration getDefaultConfiguration(Set> workers) {
+ Configuration.Builder cfgBuilder = Configuration.builder();
+ addConnectionParameters(cfgBuilder, workers);
+ return cfgBuilder.build();
+ }
+
+ protected void addtoconInfoMap(int srcID, int dstID, Token t,
+ Set usedConInfos,
+ Map conInfoMap, Configuration cfg) {
+
+ ConnectionInfo conInfo = new GenericConnectionInfo(srcID, dstID);
+
+ List conSet = getTcpConInfo(conInfo);
+ ConnectionInfo tcpConInfo = null;
+
+ for (ConnectionInfo con : conSet) {
+ if (!usedConInfos.contains(con)) {
+ tcpConInfo = con;
+ break;
+ }
+ }
+
+ if (tcpConInfo == null) {
+ tcpConInfo = makeConnectionInfo(srcID, dstID, t, cfg);
+ this.currentConInfos.add(tcpConInfo);
+ }
+
+ conInfoMap.put(t, tcpConInfo);
+ usedConInfos.add(tcpConInfo);
+ }
+
+ private ConnectionInfo makeConnectionInfo(int srcID, int dstID,
+ Token t, Configuration cfg) {
+ SwitchParameter p = cfg.getParameter(
+ getParamName(t), SwitchParameter.class,
+ ConnectionType.class);
+
+ if (p == null)
+ throw new IllegalStateException(String.format(
+ "No tuning parameter for connection %s", t));
+
+ ConnectionInfo conInfo;
+ switch (p.getValue()) {
+ case BTCP :
+ conInfo = new TCPConnectionInfo(srcID, dstID, startPortNo++);
+ break;
+ case ATCP :
+ conInfo = new AsyncTCPConnectionInfo(srcID, dstID,
+ startPortNo++);
+ break;
+ default :
+ throw new IllegalStateException(String.format(
+ "Unsupported connection type - %s", p.getValue()));
+ }
+ return conInfo;
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java b/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java
index 030691d0..441e981a 100644
--- a/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java
+++ b/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java
@@ -24,13 +24,10 @@
import edu.mit.streamjit.api.CompiledStream;
import edu.mit.streamjit.api.Output;
import edu.mit.streamjit.api.StreamCompiler;
-import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
-import edu.mit.streamjit.impl.interp.DebugStreamCompiler;
import edu.mit.streamjit.test.Benchmark;
-import edu.mit.streamjit.test.BenchmarkProvider;
import edu.mit.streamjit.test.Benchmark.Dataset;
+import edu.mit.streamjit.test.BenchmarkProvider;
import edu.mit.streamjit.test.apps.channelvocoder7.ChannelVocoder7;
-import edu.mit.streamjit.test.apps.fmradio.FMRadio.FMRadioBenchmarkProvider;
public class DistributedAppRunner {
@@ -85,8 +82,6 @@ public static void main(String[] args) throws InterruptedException {
}
}
- GlobalConstants.tunerMode = 1;
-
Benchmark benchmark = bp.iterator().next();
// StreamCompiler compiler = new Compiler2StreamCompiler();
StreamCompiler compiler = new DistributedStreamCompiler(noOfNodes);
diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java b/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java
index f11ebd92..d3c8d397 100644
--- a/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java
+++ b/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java
@@ -29,7 +29,10 @@
import edu.mit.streamjit.impl.blob.DrainData;
import edu.mit.streamjit.impl.common.Configuration;
import edu.mit.streamjit.impl.common.Configuration.Parameter;
+import edu.mit.streamjit.impl.compiler.CompilerBlobFactory;
import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory;
+import edu.mit.streamjit.impl.distributed.ConnectionManager.BlockingTCPNoParams;
+import edu.mit.streamjit.impl.distributed.common.Options;
import edu.mit.streamjit.impl.interp.Interpreter.InterpreterBlobFactory;
/**
@@ -45,53 +48,69 @@
*
* TODO: For the moment this factory just deal with compiler blob. Need to make
* interpreter blob as well based on the dynamic edges.
- *
+ *
* @author Sumanan sumanan@mit.edu
* @since Sep 24, 2013
*/
public class DistributedBlobFactory implements BlobFactory {
- private int noOfMachines;
+ private final int noOfMachines;
+
+ private final PartitionManager partitionManager;
- private final ConfigurationManager cfgManager;
+ private final ConnectionManager connectionManager;
- public DistributedBlobFactory(ConfigurationManager cfgManager,
- int noOfMachines) {
- this.cfgManager = cfgManager;
+ public DistributedBlobFactory(PartitionManager partitionManager,
+ ConnectionManager connectionManager, int noOfMachines) {
+ this.partitionManager = partitionManager;
this.noOfMachines = noOfMachines;
+ this.connectionManager = connectionManager;
}
/**
- * If {@link ConfigurationManager} is not passed as a constructor argument
- * then {@link WorkerMachine} will be used as default one.
- *
+ * If {@link PartitionManager} is not passed as a constructor argument then
+ * {@link WorkerMachine} will be used as default one.
+ *
* @param noOfMachines
*/
public DistributedBlobFactory(int noOfMachines) {
- this(new WorkerMachine(null), noOfMachines);
+ this(new WorkerMachine(null), new BlockingTCPNoParams(0), noOfMachines);
}
@Override
public Blob makeBlob(Set> workers, Configuration config,
int maxNumCores, DrainData initialState) {
- return new Compiler2BlobFactory().makeBlob(workers, config, maxNumCores, initialState);
+ return new Compiler2BlobFactory().makeBlob(workers, config,
+ maxNumCores, initialState);
}
@Override
public Configuration getDefaultConfiguration(Set> workers) {
+ Configuration distCfg = partitionManager.getDefaultConfiguration(
+ workers, noOfMachines);
+ if (!Options.useCompilerBlob)
+ return distCfg;
- Configuration distCfg = cfgManager.getDefaultConfiguration(workers,
- noOfMachines);
Configuration.Builder builder = Configuration.builder(distCfg);
-
BlobFactory compilerBf = new Compiler2BlobFactory();
Configuration compilercfg = compilerBf.getDefaultConfiguration(workers);
for (Parameter p : compilercfg.getParametersMap().values())
builder.addParameter(p);
+ connectionManager.addConnectionParameters(builder, workers);
+ // addMaxCoreParam(builder);
return builder.build();
}
+ private void addMaxCoreParam(Configuration.Builder builder) {
+ int min = 1;
+ int val = Options.maxNumCores / 2;
+ val = min > val ? min : val;
+ Parameter p = new Configuration.IntParameter("maxNumCores", min,
+ Options.maxNumCores, val);
+ builder.addParameter(p);
+ }
+
@Override
public boolean equals(Object o) {
return getClass() == o.getClass()
diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java b/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java
index e51c451b..3262ce6f 100644
--- a/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java
+++ b/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java
@@ -21,11 +21,8 @@
*/
package edu.mit.streamjit.impl.distributed;
-import java.io.BufferedReader;
-import java.io.FileReader;
import java.util.ArrayList;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -35,39 +32,36 @@
import com.google.common.collect.ImmutableMap;
import edu.mit.streamjit.api.CompiledStream;
-import edu.mit.streamjit.api.Filter;
import edu.mit.streamjit.api.Input;
import edu.mit.streamjit.api.Input.ManualInput;
import edu.mit.streamjit.api.OneToOneElement;
import edu.mit.streamjit.api.Output;
import edu.mit.streamjit.api.Pipeline;
-import edu.mit.streamjit.api.Portal;
import edu.mit.streamjit.api.Splitjoin;
-import edu.mit.streamjit.api.StreamCompilationFailedException;
import edu.mit.streamjit.api.StreamCompiler;
import edu.mit.streamjit.api.Worker;
import edu.mit.streamjit.impl.blob.Blob.Token;
import edu.mit.streamjit.impl.blob.BlobFactory;
import edu.mit.streamjit.impl.blob.Buffer;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
import edu.mit.streamjit.impl.common.Configuration;
-import edu.mit.streamjit.impl.common.ConnectWorkersVisitor;
import edu.mit.streamjit.impl.common.InputBufferFactory;
-import edu.mit.streamjit.impl.common.MessageConstraint;
import edu.mit.streamjit.impl.common.OutputBufferFactory;
-import edu.mit.streamjit.impl.common.Portals;
-import edu.mit.streamjit.impl.common.VerifyStreamGraph;
+import edu.mit.streamjit.impl.common.TimeLogger;
import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
import edu.mit.streamjit.impl.concurrent.ConcurrentStreamCompiler;
-import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
+import edu.mit.streamjit.impl.distributed.HeadChannel.HeadBuffer;
+import edu.mit.streamjit.impl.distributed.common.Options;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
import edu.mit.streamjit.impl.distributed.runtimer.CommunicationManager.CommunicationType;
import edu.mit.streamjit.impl.distributed.runtimer.Controller;
import edu.mit.streamjit.impl.distributed.runtimer.DistributedDrainer;
-import edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner;
-import edu.mit.streamjit.impl.distributed.HeadChannel.HeadBuffer;
import edu.mit.streamjit.partitioner.HorizontalPartitioner;
import edu.mit.streamjit.partitioner.Partitioner;
+import edu.mit.streamjit.tuner.OnlineTuner;
+import edu.mit.streamjit.tuner.Reconfigurer;
+import edu.mit.streamjit.tuner.Verifier;
+import edu.mit.streamjit.util.ConfigurationUtils;
/**
*
@@ -89,106 +83,181 @@ public class DistributedStreamCompiler implements StreamCompiler {
/**
* Configuration from Opentuner.
*/
- Configuration cfg;
+ private Configuration cfg;
/**
* Total number of nodes including controller node.
*/
- int noOfnodes;
+ private int noOfnodes;
/**
- * @param noOfnodes
- * : Total number of nodes the stream application intended to run
- * - including controller node. If it is 1 then it means the
- * whole stream application is supposed to run on controller.
+ * Run the whole application on the controller node. No distributions. See
+ * {@link #DistributedStreamCompiler(int, Configuration)}
*/
- public DistributedStreamCompiler(int noOfnodes) {
- if (noOfnodes < 1)
- throw new IllegalArgumentException("noOfnodes must be 1 or greater");
- this.noOfnodes = noOfnodes;
+ public DistributedStreamCompiler() {
+ this(1, null);
}
/**
- * Run the whole application on the controller node.
+ * See {@link #DistributedStreamCompiler(int, Configuration)}. As no
+	 * configuration is passed, the tuner will be activated to tune for a better
+ * configuration.
*/
- public DistributedStreamCompiler() {
- this(1);
+ public DistributedStreamCompiler(int noOfnodes) {
+ this(noOfnodes, null);
}
/**
- * Run the application with the passed configureation.
+ * Run the application with the passed configuration. Pass null if the
+ * intention is to tune the application.
+ *
+ * @param noOfnodes
+ * : Total number of nodes the stream application intended to run
+ * including the controller node. If it is 1 then it means the
+ * whole stream application is supposed to run on controller.
+ * @param cfg
+ * Run the application with the passed {@link Configuration}. If
+ * it is null, tuner will be activated to tune for better
+ * configuration.
*/
public DistributedStreamCompiler(int noOfnodes, Configuration cfg) {
if (noOfnodes < 1)
throw new IllegalArgumentException("noOfnodes must be 1 or greater");
- this.noOfnodes = noOfnodes;
+ if (Options.singleNodeOnline) {
+ System.out
+ .println("Flag GlobalConstants.singleNodeOnline is enabled."
+ + " noOfNodes passed as compiler argument has no effect");
+ this.noOfnodes = 1;
+ } else
+ this.noOfnodes = noOfnodes;
+
this.cfg = cfg;
}
public CompiledStream compile(OneToOneElement stream,
Input input, Output output) {
+ StreamJitApp app = new StreamJitApp<>(stream);
+ Controller controller = establishController();
+
+ PartitionManager partitionManager = new HotSpotTuning(app);
+ ConfigurationManager cfgManager = new ConfigurationManager(app,
+ partitionManager);
+ ConnectionManager conManager = connectionManager(controller.controllerNodeID);
+
+ setConfiguration(controller, app, partitionManager, conManager,
+ cfgManager);
- checkforDefaultOneToOneElement(stream);
+ TimeLogger logger = new TimeLoggers.FileTimeLogger(app.name);
+ StreamJitAppManager manager = new StreamJitAppManager(controller, app,
+ conManager, logger);
+ final AbstractDrainer drainer = new DistributedDrainer(app, logger,
+ manager);
+ drainer.setBlobGraph(app.blobGraph);
- ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor();
- stream.visit(primitiveConnector);
- Worker source = (Worker) primitiveConnector.getSource();
- Worker, O> sink = (Worker, O>) primitiveConnector.getSink();
+ boolean needTermination = setBufferMap(input, output, drainer, app);
+
+ manager.reconfigure(1);
+ CompiledStream cs = new DistributedCompiledStream(drainer);
+
+ if (Options.tune > 0 && this.cfg != null) {
+ Reconfigurer configurer = new Reconfigurer(drainer, manager, app,
+ cfgManager, logger);
+ tuneOrVerify(configurer, needTermination);
+ }
+ return cs;
+ }
+
+ private ConnectionManager connectionManager(int controllerNodeID) {
+ switch (Options.connectionManager) {
+ case 0 :
+ return new ConnectionManager.AllConnectionParams(
+ controllerNodeID);
+ case 1 :
+ return new ConnectionManager.BlockingTCPNoParams(
+ controllerNodeID);
+ default :
+ return new ConnectionManager.AsyncTCPNoParams(controllerNodeID);
+ }
+ }
- VerifyStreamGraph verifier = new VerifyStreamGraph();
- stream.visit(verifier);
+ private Configuration cfgFromFile(StreamJitApp app,
+ Controller controller, Configuration defaultCfg) {
+ Configuration cfg1 = ConfigurationUtils.readConfiguration(app.name,
+ null);
+ if (cfg1 == null) {
+ controller.closeAll();
+ throw new IllegalConfigurationException();
+ } else if (!verifyCfg(defaultCfg, cfg1)) {
+ System.err
+ .println("Reading the configuration from configuration file");
+ System.err
+ .println("No matching between parameters in the read "
+ + "configuration and parameters in the default configuration");
+ controller.closeAll();
+ throw new IllegalConfigurationException();
+ }
+ return cfg1;
+ }
+ private Controller establishController() {
Map conTypeCount = new HashMap<>();
- // conTypeCount.put(CommunicationType.LOCAL, 1);
- conTypeCount.put(CommunicationType.TCP, this.noOfnodes);
+
+ if (this.noOfnodes == 1)
+ conTypeCount.put(CommunicationType.LOCAL, 1);
+ else
+ conTypeCount.put(CommunicationType.TCP, this.noOfnodes - 1);
Controller controller = new Controller();
controller.connect(conTypeCount);
+ return controller;
+ }
- StreamJitApp app = new StreamJitApp(stream, source, sink);
- ConfigurationManager cfgManager = new HotSpotTuning(app);
- BlobFactory bf = new DistributedBlobFactory(cfgManager, noOfnodes);
- this.cfg = bf.getDefaultConfiguration(Workers
- .getAllWorkersInGraph(source));
+ private Map>>> getMachineWorkerMap(
+ Integer[] machineIds, OneToOneElement stream,
+ Worker source, Worker, O> sink) {
+ int totalCores = machineIds.length;
- if (GlobalConstants.tune) {
+ Partitioner horzPartitioner = new HorizontalPartitioner<>();
+ List>> partitionList = horzPartitioner
+ .partitionEqually(stream, source, sink, totalCores);
- } else {
- this.cfg = readConfiguration(stream.getClass().getSimpleName());
+ Map>>> partitionsMachineMap = new HashMap>>>();
+ for (Integer machineID : machineIds) {
+ partitionsMachineMap.put(machineID,
+ new ArrayList>>());
}
- if (cfg == null) {
- System.err
- .println("Configuration is null. Runs the app with horizontal partitioning.");
- Integer[] machineIds = new Integer[this.noOfnodes];
- for (int i = 0; i < machineIds.length; i++) {
- machineIds[i] = i + 1;
+ int index = 0;
+ while (index < partitionList.size()) {
+ for (Integer machineID : partitionsMachineMap.keySet()) {
+ if (!(index < partitionList.size()))
+ break;
+ partitionsMachineMap.get(machineID).add(
+ partitionList.get(index++));
}
- Map>>> partitionsMachineMap = getMachineWorkerMap(
- machineIds, stream, source, sink);
- app.newPartitionMap(partitionsMachineMap);
- } else
- cfgManager.newConfiguration(cfg);
-
- // TODO: Copied form DebugStreamCompiler. Need to be verified for this
- // context.
- List constraints = MessageConstraint
- .findConstraints(source);
- Set> portals = new HashSet<>();
- for (MessageConstraint mc : constraints)
- portals.add(mc.getPortal());
- for (Portal> portal : portals)
- Portals.setConstraints(portal, constraints);
+ }
+ return partitionsMachineMap;
+ }
- StreamJitAppManager manager = new StreamJitAppManager(controller, app,
- cfgManager);
- final AbstractDrainer drainer = new DistributedDrainer(manager);
- drainer.setBlobGraph(app.blobGraph);
+ private void manualPartition(StreamJitApp app) {
+ Integer[] machineIds = new Integer[this.noOfnodes - 1];
+ for (int i = 0; i < machineIds.length; i++) {
+ machineIds[i] = i + 1;
+ }
+ Map>>> partitionsMachineMap = getMachineWorkerMap(
+ machineIds, app.streamGraph, app.source, app.sink);
+ app.newPartitionMap(partitionsMachineMap);
+ }
+ /**
+ * Sets head and tail buffers.
+ */
+ private boolean setBufferMap(Input input, Output output,
+ final AbstractDrainer drainer, StreamJitApp app) {
// TODO: derive a algorithm to find good buffer size and use here.
Buffer head = InputBufferFactory.unwrap(input).createReadableBuffer(
- 1000);
+ 10000);
Buffer tail = OutputBufferFactory.unwrap(output).createWritableBuffer(
- 1000);
+ 10000);
boolean needTermination;
@@ -201,7 +270,8 @@ public CompiledStream compile(OneToOneElement stream,
head) {
@Override
public void drain() {
- drainer.startDraining(2);
+ // drainer.startDraining(2);
+ drainer.drainFinal(false);
}
});
} else {
@@ -212,80 +282,55 @@ public void drain() {
ImmutableMap.Builder bufferMapBuilder = ImmutableMap
. builder();
- bufferMapBuilder.put(Token.createOverallInputToken(source), head);
- bufferMapBuilder.put(Token.createOverallOutputToken(sink), tail);
+ bufferMapBuilder.put(Token.createOverallInputToken(app.source), head);
+ bufferMapBuilder.put(Token.createOverallOutputToken(app.sink), tail);
app.bufferMap = bufferMapBuilder.build();
- app.constraints = constraints;
-
- manager.reconfigure();
- CompiledStream cs = new DistributedCompiledStream(drainer);
-
- if (GlobalConstants.tune && this.cfg != null) {
- OnlineTuner tuner = new OnlineTuner(drainer, manager, app,
- cfgManager, needTermination);
- new Thread(tuner, "OnlineTuner").start();
- }
- return cs;
- }
-
- private Configuration readConfiguration(String simpeName) {
- String name = String.format("%s.cfg", simpeName);
- try {
- BufferedReader reader = new BufferedReader(new FileReader(name));
- String json = reader.readLine();
- reader.close();
- return Configuration.fromJson(json);
- } catch (Exception ex) {
- System.err.println(String.format(
- "File reader error. No %s configuration file.", name));
- }
- return null;
+ return needTermination;
}
- private Map>>> getMachineWorkerMap(
- Integer[] machineIds, OneToOneElement stream,
- Worker source, Worker, O> sink) {
- int totalCores = machineIds.length;
-
- Partitioner horzPartitioner = new HorizontalPartitioner<>();
- List>> partitionList = horzPartitioner
- .partitionEqually(stream, source, sink, totalCores);
+ private void setConfiguration(Controller controller,
+ StreamJitApp app, PartitionManager partitionManager,
+ ConnectionManager conManager, ConfigurationManager cfgManager) {
+ BlobFactory bf = new DistributedBlobFactory(partitionManager,
+ conManager, Math.max(noOfnodes - 1, 1));
+ Configuration defaultCfg = bf.getDefaultConfiguration(Workers
+ .getAllWorkersInGraph(app.source));
+
+ if (this.cfg != null) {
+ if (!verifyCfg(defaultCfg, this.cfg)) {
+ System.err
+ .println("No matching between parameters in the passed "
+ + "configuration and parameters in the default configuration");
+ controller.closeAll();
+ throw new IllegalConfigurationException();
+ }
+ } else if (Options.tune == 0) {
+ this.cfg = cfgFromFile(app, controller, defaultCfg);
+ } else
+ this.cfg = defaultCfg;
- Map>>> partitionsMachineMap = new HashMap>>>();
- for (Integer machineID : machineIds) {
- partitionsMachineMap.put(machineID,
- new ArrayList>>());
- }
+ cfgManager.newConfiguration(this.cfg);
+ }
- int index = 0;
- while (index < partitionList.size()) {
- for (Integer machineID : partitionsMachineMap.keySet()) {
- if (!(index < partitionList.size()))
- break;
- partitionsMachineMap.get(machineID).add(
- partitionList.get(index++));
- }
- }
- return partitionsMachineMap;
+ private boolean verifyCfg(Configuration defaultCfg, Configuration cfg) {
+ if (defaultCfg.getParametersMap().keySet()
+ .equals(cfg.getParametersMap().keySet()))
+ return true;
+ return false;
}
- /**
- * TODO: Need to check for other default subtypes of {@link OneToOneElement}
- * s. Now only checks for first generation children.
- *
- * @param stream
- * @throws StreamCompilationFailedException
- * if stream is default subtype of OneToOneElement
- */
- private void checkforDefaultOneToOneElement(
- OneToOneElement stream) {
-
- if (stream.getClass() == Pipeline.class
- || stream.getClass() == Splitjoin.class
- || stream.getClass() == Filter.class) {
- throw new StreamCompilationFailedException(
- "Default subtypes of OneToOneElement are not accepted for compilation by this compiler. OneToOneElement that passed should be unique");
+ private void tuneOrVerify(Reconfigurer configurer, boolean needTermination) {
+ Runnable r;
+ if (Options.tune == 1) {
+ r = new OnlineTuner(configurer, needTermination);
+ new Thread(r, "OnlineTuner").start();
+ } else if (Options.tune == 2) {
+ r = new Verifier(configurer);
+ new Thread(r, "Verifier").start();
+ } else {
+ throw new IllegalStateException(
+ "Neither OnlineTuner nor Verifer has been started.");
}
}
@@ -297,11 +342,6 @@ public DistributedCompiledStream(AbstractDrainer drainer) {
this.drainer = drainer;
}
- @Override
- public boolean isDrained() {
- return drainer.isDrained();
- }
-
@Override
public void awaitDrained() throws InterruptedException {
drainer.awaitDrained();
@@ -313,5 +353,25 @@ public void awaitDrained(long timeout, TimeUnit unit)
throws InterruptedException, TimeoutException {
drainer.awaitDrained(timeout, unit);
}
+
+ @Override
+ public boolean isDrained() {
+ return drainer.isDrained();
+ }
+ }
+
+ private class IllegalConfigurationException extends RuntimeException {
+
+ private static final long serialVersionUID = 1L;
+
+ private static final String tag = "IllegalConfigurationException";
+
+ private IllegalConfigurationException() {
+ super(tag);
+ }
+
+ private IllegalConfigurationException(String msg) {
+ super(String.format("%s : %s", tag, msg));
+ }
}
}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/HeadChannel.java b/src/edu/mit/streamjit/impl/distributed/HeadChannel.java
index b21665dc..4dd5a1b1 100644
--- a/src/edu/mit/streamjit/impl/distributed/HeadChannel.java
+++ b/src/edu/mit/streamjit/impl/distributed/HeadChannel.java
@@ -25,10 +25,11 @@
import edu.mit.streamjit.impl.blob.AbstractReadOnlyBuffer;
import edu.mit.streamjit.impl.blob.Buffer;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider;
-import edu.mit.streamjit.impl.distributed.node.TCPOutputChannel;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider;
+import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel;
/**
* Head Channel is just a wrapper to TCPOutputChannel that skips
@@ -37,15 +38,79 @@
* @author Sumanan sumanan@mit.edu
* @since Oct 21, 2013
*/
-public class HeadChannel extends TCPOutputChannel {
+public class HeadChannel {
- public HeadChannel(Buffer buffer, TCPConnectionProvider conProvider,
- TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint) {
- super(buffer, conProvider, conInfo, bufferTokenName, debugPrint);
+ public static class TCPHeadChannel extends BlockingOutputChannel {
+
+ public TCPHeadChannel(Buffer buffer, ConnectionProvider conProvider,
+ ConnectionInfo conInfo, String bufferTokenName, int debugLevel) {
+ super(buffer, conProvider, conInfo, bufferTokenName, debugLevel);
+ }
+
+ protected void fillUnprocessedData() {
+ this.unProcessedData = ImmutableList.of();
+ }
}
- protected void fillUnprocessedData() {
- this.unProcessedData = ImmutableList.of();
+ public static class AsyncHeadChannel extends AsyncOutputChannel {
+
+ final Buffer readBuffer;
+ private volatile boolean stopCalled;
+ private volatile boolean isFinal;
+
+ public AsyncHeadChannel(Buffer buffer, ConnectionProvider conProvider,
+ ConnectionInfo conInfo, String bufferTokenName, int debugLevel) {
+ super(conProvider, conInfo, bufferTokenName, debugLevel);
+ readBuffer = buffer;
+ stopCalled = false;
+ }
+
+ @Override
+ public Runnable getRunnable() {
+ final Runnable supperRunnable = super.getRunnable();
+ return new Runnable() {
+ @Override
+ public void run() {
+ supperRunnable.run();
+ final Buffer writeBuffer = getBuffer();
+ final int dataLength = 10000;
+ final Object[] data = new Object[dataLength];
+ int read = 1;
+ int written = 0;
+ while (!stopCalled) {
+ read = readBuffer.read(data, 0, data.length);
+ written = 0;
+ while (written < read) {
+ written += writeBuffer.write(data, written, read
+ - written);
+ if (written == 0) {
+ try {
+ // TODO: Verify this sleep time.
+ Thread.sleep(500);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+ stopSuper(isFinal);
+ }
+ };
+ }
+
+ protected void fillUnprocessedData() {
+ throw new Error("Method not implemented");
+ }
+
+ @Override
+ public void stop(boolean isFinal) {
+ this.isFinal = isFinal;
+ this.stopCalled = true;
+ }
+
+ private void stopSuper(boolean isFinal) {
+ super.stop(isFinal);
+ }
}
/**
@@ -77,6 +142,25 @@ public Object read() {
return o;
}
+ @Override
+ public int read(Object[] data, int offset, int length) {
+ int read = buffer.read(data, offset, length);
+ if (read == 0) {
+ new DrainerThread().start();
+ }
+ return read;
+ }
+
+ @Override
+ public boolean readAll(Object[] data) {
+ return buffer.readAll(data);
+ }
+
+ @Override
+ public boolean readAll(Object[] data, int offset) {
+ return buffer.readAll(data, offset);
+ }
+
@Override
public int size() {
return buffer.size();
@@ -89,7 +173,8 @@ class DrainerThread extends Thread {
public void run() {
System.out.println("Input data finished");
- drainer.startDraining(2);
+ // drainer.startDraining(2);
+ drainer.drainFinal(false);
}
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java b/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java
index 5e82260d..67c4403e 100644
--- a/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java
+++ b/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java
@@ -21,6 +21,8 @@
*/
package edu.mit.streamjit.impl.distributed;
+import static com.google.common.base.Preconditions.checkArgument;
+
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@@ -34,18 +36,16 @@
import edu.mit.streamjit.api.Pipeline;
import edu.mit.streamjit.api.Splitjoin;
import edu.mit.streamjit.api.Splitter;
-import edu.mit.streamjit.api.StreamCompilationFailedException;
import edu.mit.streamjit.api.StreamVisitor;
import edu.mit.streamjit.api.Worker;
import edu.mit.streamjit.impl.common.Configuration;
import edu.mit.streamjit.impl.common.Configuration.IntParameter;
+import edu.mit.streamjit.impl.common.Configuration.Parameter;
import edu.mit.streamjit.impl.common.Configuration.SwitchParameter;
import edu.mit.streamjit.impl.common.Workers;
-import edu.mit.streamjit.impl.common.Configuration.Parameter;
-import edu.mit.streamjit.impl.distributed.ConfigurationManager.AbstractConfigurationManager;
-import edu.mit.streamjit.tuner.OfflineTuner;
+import edu.mit.streamjit.impl.distributed.PartitionManager.AbstractPartitionManager;
-public final class HotSpotTuning extends AbstractConfigurationManager {
+public final class HotSpotTuning extends AbstractPartitionManager {
Map>> partitionGroup;
Map, Set>> skippedSplitters;
@@ -57,36 +57,14 @@ public HotSpotTuning(StreamJitApp app) {
@Override
public Configuration getDefaultConfiguration(Set> workers,
int noOfMachines) {
+ checkArgument(noOfMachines > 0, String.format(
+ "noOfMachines = %d, It must be > 0", noOfMachines));
PickHotSpots visitor = new PickHotSpots(noOfMachines);
app.streamGraph.visit(visitor);
return visitor.builder.build();
}
- @Override
- public boolean newConfiguration(Configuration config) {
-
- for (Parameter p : config.getParametersMap().values()) {
- if (p instanceof IntParameter) {
- IntParameter ip = (IntParameter) p;
- System.out.println(ip.getName() + " - " + ip.getValue());
- } else if (p instanceof SwitchParameter>) {
- SwitchParameter> sp = (SwitchParameter>) p;
- System.out.println(sp.getName() + " - " + sp.getValue());
- } else
- System.out.println(p.getName() + " - Unknown type");
- }
-
- Map>>> partitionsMachineMap = getMachineWorkerMap(config);
- try {
- app.varifyConfiguration(partitionsMachineMap);
- } catch (StreamCompilationFailedException ex) {
- return false;
- }
- app.blobConfiguration = config;
- return true;
- }
-
- private Map>>> getMachineWorkerMap(
+ public Map>>> partitionMap(
Configuration config) {
Map>> partition = new HashMap<>();
@@ -186,11 +164,10 @@ private class PickHotSpots extends StreamVisitor {
private Joiner, ?> skipJoiner;
- private int minSplitjoinSize = 20;
+ private int minSplitjoinSize = 8;
/**
- * Workers those are going to be part {@link OfflineTuner}
- * {@link #currentHotSpot}.
+ * Workers that are going to be part of {@link #currentHotSpot}.
*/
List> workerGropups;
diff --git a/src/edu/mit/streamjit/impl/distributed/PartitionManager.java b/src/edu/mit/streamjit/impl/distributed/PartitionManager.java
new file mode 100644
index 00000000..235c1b91
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/PartitionManager.java
@@ -0,0 +1,296 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import edu.mit.streamjit.api.Filter;
+import edu.mit.streamjit.api.Joiner;
+import edu.mit.streamjit.api.Splitter;
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.partitioner.AbstractPartitioner;
+
+/**
+ * PartitionManager is responsible for partitioning a stream graph for a cluster.
+ * The partitioning process can be tuned. Implementations of this interface must
+ * perform the following two tasks.
+ *
+ *
Generate configuration with appropriate tuning parameters (Based on the
+ * search space design strategy) for tuning.
+ *
Dispatch a new configuration given by the open tuner and generate
+ * partition machine map.
+ *
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Jan 16, 2014
+ *
+ */
+public interface PartitionManager {
+
+ /**
+ * Generates default configuration with all tuning parameters for tuning.
+ *
+ * @param streamGraph
+ * @param source
+ * @param sink
+ * @param noOfMachines
+ * @return
+ */
+ public Configuration getDefaultConfiguration(Set> workers,
+ int noOfMachines);
+
+ /**
+ * Reads the configuration and returns a map of nodeID to list of set of
+ * workers (list of blob workers). Value of the returned map is list of
+ * worker set where each worker set is an individual blob.
+ *
+ * @param config
+ * @return map of nodeID to list of set of workers which are assigned to the
+ * node.
+ */
+ public Map>>> partitionMap(
+ Configuration config);
+
+ /**
+ * Implements the functions that can be called by the runtimer to send
+ * configuration information to streamnodes.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Jan 17, 2014
+ */
+ public static abstract class AbstractPartitionManager implements
+ PartitionManager {
+
+ protected final StreamJitApp, ?> app;
+
+ AbstractPartitionManager(StreamJitApp, ?> app) {
+ this.app = app;
+ }
+
+ /**
+ * Copied from the {@link AbstractPartitioner} class, but modified to
+ * support nested splitjoiners.
Returns all {@link Worker}s in a
+ * splitjoin.
+ *
+ * @param splitter
+ * @return Returns all {@link Filter}s in a splitjoin.
+ */
+ protected void getAllChildWorkers(Splitter, ?> splitter,
+ Set> childWorkers) {
+ childWorkers.add(splitter);
+ Joiner, ?> joiner = getJoiner(splitter);
+ Worker, ?> cur;
+ for (Worker, ?> childWorker : Workers.getSuccessors(splitter)) {
+ cur = childWorker;
+ while (cur != joiner) {
+ if (cur instanceof Filter, ?>)
+ childWorkers.add(cur);
+ else if (cur instanceof Splitter, ?>) {
+ getAllChildWorkers((Splitter, ?>) cur, childWorkers);
+ cur = getJoiner((Splitter, ?>) cur);
+ } else
+ throw new IllegalStateException(
+ "Some thing wrong in the algorithm.");
+
+ assert Workers.getSuccessors(cur).size() == 1 : "Illegal State encounted : cur can only be either a filter or a joner";
+ cur = Workers.getSuccessors(cur).get(0);
+ }
+ }
+ childWorkers.add(joiner);
+ }
+
+ /**
+ * Find and returns the corresponding {@link Joiner} for the passed
+ * {@link Splitter}.
+ *
+ * @param splitter
+ * : {@link Splitter} that needs it's {@link Joiner}.
+ * @return Corresponding {@link Joiner} of the passed {@link Splitter}.
+ */
+ protected Joiner, ?> getJoiner(Splitter, ?> splitter) {
+ Worker, ?> cur = Workers.getSuccessors(splitter).get(0);
+ int innerSplitjoinCount = 0;
+ while (!(cur instanceof Joiner, ?>) || innerSplitjoinCount != 0) {
+ if (cur instanceof Splitter, ?>)
+ innerSplitjoinCount++;
+ if (cur instanceof Joiner, ?>)
+ innerSplitjoinCount--;
+ assert innerSplitjoinCount >= 0 : "Joiner Count is more than splitter count. Check the algorithm";
+ cur = Workers.getSuccessors(cur).get(0);
+ }
+ assert cur instanceof Joiner, ?> : "Error in algorithm. Not returning a Joiner";
+ return (Joiner, ?>) cur;
+ }
+
+ protected String getParamName(Integer id) {
+ assert id > -1 : "Worker id cannot be negative";
+ return String.format("worker%dtomachine", id);
+ }
+
+ /**
+ * Goes through all workers in the workerset which is passed as argument,
+ * finds the workers which are interconnected and groups them as blob
+ * workers. i.e., groups the workers which are connected.
+ *
+ * TODO: If any dynamic edges exists then should create interpreter
+ * blob.
+ *
+ * @param workerset
+ * @return list of workers set which contains interconnected workers.
+ * Each worker set in the list is supposed to run in an
+ * individual blob.
+ */
+ protected List>> getConnectedComponents(
+ Set> workerset) {
+ List>> ret = new ArrayList>>();
+ while (!workerset.isEmpty()) {
+ Deque> queue = new ArrayDeque<>();
+ Set> blobworkers = new HashSet<>();
+ Worker, ?> w = workerset.iterator().next();
+ blobworkers.add(w);
+ workerset.remove(w);
+ queue.offer(w);
+ while (!queue.isEmpty()) {
+ Worker, ?> wrkr = queue.poll();
+ for (Worker, ?> succ : Workers.getSuccessors(wrkr)) {
+ if (workerset.contains(succ)) {
+ blobworkers.add(succ);
+ workerset.remove(succ);
+ queue.offer(succ);
+ }
+ }
+
+ for (Worker, ?> pred : Workers.getPredecessors(wrkr)) {
+ if (workerset.contains(pred)) {
+ blobworkers.add(pred);
+ workerset.remove(pred);
+ queue.offer(pred);
+ }
+ }
+ }
+ ret.add(blobworkers);
+ }
+ return ret;
+ }
+
+ /**
+ * Cycles can occur iff the splitter and joiner happened to fall into a blob
+ * while some workers of that splitjoin fall into another blob. Here, we
+ * check for the above-mentioned condition. If cycles exist, split them
+ * into several blobs.
+ *
+ * @param blobworkers
+ * @return
+ */
+ protected List>> breakCycles(
+ Set> blobworkers) {
+ Map, Joiner, ?>> rfctrSplitJoin = new HashMap<>();
+ Set> splitterSet = getSplitters(blobworkers);
+ for (Splitter, ?> s : splitterSet) {
+ Joiner, ?> j = getJoiner(s);
+ if (blobworkers.contains(j)) {
+ Set> childWorkers = new HashSet<>();
+ getAllChildWorkers(s, childWorkers);
+ if (!blobworkers.containsAll(childWorkers)) {
+ rfctrSplitJoin.put(s, j);
+ }
+ }
+ }
+
+ List>> ret = new ArrayList<>();
+
+ for (Splitter, ?> s : rfctrSplitJoin.keySet()) {
+ if (blobworkers.contains(s)) {
+ ret.add(getSplitterReachables(s, blobworkers,
+ rfctrSplitJoin));
+ }
+ }
+ ret.addAll(getConnectedComponents(blobworkers));
+ return ret;
+ }
+
+ /**
+ * Goes through the passed set of workers, adds workers that are
+ * reachable from the splitter s, but not any conflicting splitter or
+ * joiner.
+ *
+ * This function has side effect. Modifies the argument.
+ *
+ * @param s
+ * @param blobworkers
+ * @return
+ */
+ protected Set> getSplitterReachables(Splitter, ?> s,
+ Set> blobworkers,
+ Map, Joiner, ?>> rfctrSplitJoin) {
+ assert blobworkers.contains(s) : "Splitter s in not in blobworkers";
+ Set> ret = new HashSet<>();
+ Set> exclude = new HashSet<>();
+ Deque> queue = new ArrayDeque<>();
+ ret.add(s);
+ exclude.add(rfctrSplitJoin.get(s));
+ blobworkers.remove(s);
+ queue.offer(s);
+ while (!queue.isEmpty()) {
+ Worker, ?> wrkr = queue.poll();
+ for (Worker, ?> succ : Workers.getSuccessors(wrkr)) {
+ process(succ, blobworkers, rfctrSplitJoin, exclude, queue,
+ ret);
+ }
+
+ for (Worker, ?> pred : Workers.getPredecessors(wrkr)) {
+ process(pred, blobworkers, rfctrSplitJoin, exclude, queue,
+ ret);
+ }
+ }
+ return ret;
+ }
+
+ /**
+ * Since the code in this method was repeated in two places in the
+ * getSplitterReachables() method, it is re-factored into a separate
+ * method to avoid code duplication.
+ */
+ protected void process(Worker, ?> wrkr,
+ Set> blobworkers,
+ Map, Joiner, ?>> rfctrSplitJoin,
+ Set> exclude, Deque> queue,
+ Set> ret) {
+ if (blobworkers.contains(wrkr) && !exclude.contains(wrkr)) {
+ ret.add(wrkr);
+ blobworkers.remove(wrkr);
+ queue.offer(wrkr);
+
+ for (Entry, Joiner, ?>> e : rfctrSplitJoin
+ .entrySet()) {
+ if (e.getValue().equals(wrkr)) {
+ exclude.add(e.getKey());
+ break;
+ } else if (e.getKey().equals(wrkr)) {
+ exclude.add(e.getValue());
+ break;
+ }
+ }
+ }
+ }
+
+ protected Set> getSplitters(Set> blobworkers) {
+ Set> splitterSet = new HashSet<>();
+ for (Worker, ?> w : blobworkers) {
+ if (w instanceof Splitter, ?>) {
+ splitterSet.add((Splitter, ?>) w);
+ }
+ }
+ return splitterSet;
+ }
+
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java b/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java
index 389c8cd6..6d52abe3 100644
--- a/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java
+++ b/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java
@@ -22,26 +22,51 @@
package edu.mit.streamjit.impl.distributed;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.collect.ImmutableMap;
+import edu.mit.streamjit.api.Filter;
import edu.mit.streamjit.api.OneToOneElement;
+import edu.mit.streamjit.api.Pipeline;
+import edu.mit.streamjit.api.Portal;
+import edu.mit.streamjit.api.Splitjoin;
import edu.mit.streamjit.api.StreamCompilationFailedException;
import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.AbstractReadOnlyBuffer;
+import edu.mit.streamjit.impl.blob.Blob;
+import edu.mit.streamjit.impl.blob.Blob.Token;
import edu.mit.streamjit.impl.blob.BlobFactory;
import edu.mit.streamjit.impl.blob.Buffer;
import edu.mit.streamjit.impl.blob.DrainData;
-import edu.mit.streamjit.impl.blob.Blob.Token;
-import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph;
import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.Configuration.IntParameter;
+import edu.mit.streamjit.impl.common.Configuration.PartitionParameter;
+import edu.mit.streamjit.impl.common.Configuration.SwitchParameter;
+import edu.mit.streamjit.impl.common.ConnectWorkersVisitor;
import edu.mit.streamjit.impl.common.MessageConstraint;
+import edu.mit.streamjit.impl.common.Portals;
+import edu.mit.streamjit.impl.common.VerifyStreamGraph;
import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.common.drainer.BlobGraph;
+import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory;
+import edu.mit.streamjit.impl.concurrent.ConcurrentChannelFactory;
+import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
+import edu.mit.streamjit.impl.distributed.common.Options;
+import edu.mit.streamjit.impl.distributed.common.Utils;
+import edu.mit.streamjit.impl.distributed.node.StreamNode;
import edu.mit.streamjit.impl.distributed.runtimer.Controller;
-import edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner;
+import edu.mit.streamjit.impl.interp.ChannelFactory;
+import edu.mit.streamjit.impl.interp.Interpreter;
+import edu.mit.streamjit.tuner.OnlineTuner;
+import edu.mit.streamjit.util.Pair;
/**
* This class contains all information about the current streamJit application
@@ -57,22 +82,22 @@
* @author Sumanan sumanan@mit.edu
* @since Oct 8, 2013
*/
-public class StreamJitApp {
+public class StreamJitApp {
/**
* Since this is final, lets make public
*/
public final String topLevelClass;
- public final Worker, ?> source;
+ public final Worker source;
- public final Worker, ?> sink;
+ public final Worker, O> sink;
public final String jarFilePath;
public final String name;
- final OneToOneElement, ?> streamGraph;
+ final OneToOneElement streamGraph;
public BlobGraph blobGraph;
@@ -80,10 +105,12 @@ public class StreamJitApp {
public ImmutableMap bufferMap;
- public List constraints;
+ public final List constraints;
public DrainData drainData = null;
+ public final Visualizer visualizer;
+
/**
* Keeps track of assigned machine Ids of each blob. This information is
* need for draining. TODO: If possible use a better solution.
@@ -91,21 +118,24 @@ public class StreamJitApp {
public Map blobtoMachineMap;
/**
- * blobConfiguration contains decision variables that are tuned by
- * opentuner. Specifically, a {@link Configuration} that is generated by a
- * {@link BlobFactory#getDefaultConfiguration(java.util.Set)}.
+ * The latest valid {@link Configuration} that is received from OpenTuner.
+ * {@link BlobFactory#getDefaultConfiguration(java.util.Set) generates the
+ * initial configuration}.
*/
- public Configuration blobConfiguration = null;
+ private Configuration configuration = null;
- public StreamJitApp(OneToOneElement, ?> streamGraph, Worker, ?> source,
- Worker, ?> sink) {
+ public StreamJitApp(OneToOneElement streamGraph) {
this.streamGraph = streamGraph;
+ Pair, Worker, O>> srcSink = visit(streamGraph);
this.name = streamGraph.getClass().getSimpleName();
this.topLevelClass = streamGraph.getClass().getName();
- this.source = source;
- this.sink = sink;
+ this.source = srcSink.first;
+ this.sink = srcSink.second;
this.jarFilePath = this.getClass().getProtectionDomain()
.getCodeSource().getLocation().getPath();
+ this.constraints = getConstrains();
+ Utils.newApp(name);
+ visualizer = new Visualizer.DotVisualizer(streamGraph);
}
/**
@@ -123,7 +153,7 @@ public StreamJitApp(OneToOneElement, ?> streamGraph, Worker, ?> source,
public boolean newPartitionMap(
Map>>> partitionsMachineMap) {
try {
- varifyConfiguration(partitionsMachineMap);
+ verifyConfiguration(partitionsMachineMap);
} catch (StreamCompilationFailedException ex) {
return false;
}
@@ -143,18 +173,13 @@ public boolean newPartitionMap(
* @throws StreamCompilationFailedException
* if any cycles found among blobs.
*/
- public void varifyConfiguration(
+ public void verifyConfiguration(
Map>>> partitionsMachineMap) {
- for (int machine : partitionsMachineMap.keySet()) {
- System.err.print("\nMachine - " + machine);
- for (Set> blobworkers : partitionsMachineMap
- .get(machine)) {
- System.err.print("\n\tBlob worker set : ");
- for (Worker, ?> w : blobworkers) {
- System.err.print(Workers.getIdentifier(w) + " ");
- }
- }
+
+ if (!Options.singleNodeOnline) {
+ // printPartition(partitionsMachineMap);
}
+
List>> partitionList = new ArrayList<>();
for (List>> lst : partitionsMachineMap.values()) {
partitionList.addAll(lst);
@@ -164,24 +189,29 @@ public void varifyConfiguration(
try {
bg = new BlobGraph(partitionList);
} catch (StreamCompilationFailedException ex) {
- System.err.print("Cycles found in the worker->blob assignment");
- // for (int machine : partitionsMachineMap.keySet()) {
- // System.err.print("\nMachine - " + machine);
- // for (Set> blobworkers : partitionsMachineMap
- // .get(machine)) {
- // System.err.print("\n\tBlob worker set : ");
- // for (Worker, ?> w : blobworkers) {
- // System.err.print(Workers.getIdentifier(w) + " ");
- // }
- // }
- // }
- System.err.println();
+ System.err.println("Cycles found in the worker->blob assignment");
+ printPartition(partitionsMachineMap);
throw ex;
}
this.blobGraph = bg;
this.partitionsMachineMap = partitionsMachineMap;
}
+ private void printPartition(
+ Map>>> partitionsMachineMap) {
+ for (int machine : partitionsMachineMap.keySet()) {
+ System.err.print("\nMachine - " + machine);
+ for (Set> blobworkers : partitionsMachineMap
+ .get(machine)) {
+ System.err.print("\n\tBlob worker set : ");
+ for (Worker, ?> w : blobworkers) {
+ System.err.print(Workers.getIdentifier(w) + " ");
+ }
+ }
+ }
+ System.err.println();
+ }
+
/**
* From aggregated drain data, get subset of it which is relevant to a
* particular machine. Builds and returns machineID to DrainData map.
@@ -203,6 +233,64 @@ public ImmutableMap getDrainData() {
return builder.build();
}
+ /**
+ * Uses an {@link Interpreter} blob to clear or minimize a {@link DrainData}
+ * . This method can be called after a final draining to clear the data in
+ * the intermediate buffers.
+ *
+ * @param drainData
+ * : {@link DrainData} that is received after a draining.
+ * @return : A {@link DrainData} that remains after running an
+ * {@link Interpreter} blob.
+ */
+ public DrainData minimizeDrainData(DrainData drainData) {
+ Interpreter.InterpreterBlobFactory interpFactory = new Interpreter.InterpreterBlobFactory();
+ Blob interp = interpFactory.makeBlob(Workers
+ .getAllWorkersInGraph(source), interpFactory
+ .getDefaultConfiguration(Workers.getAllWorkersInGraph(source)),
+ 1, drainData);
+ interp.installBuffers(bufferMapWithEmptyHead());
+ Runnable interpCode = interp.getCoreCode(0);
+ final AtomicBoolean interpFinished = new AtomicBoolean();
+ interp.drain(new Runnable() {
+ @Override
+ public void run() {
+ interpFinished.set(true);
+ }
+ });
+ while (!interpFinished.get())
+ interpCode.run();
+ return interp.getDrainData();
+ }
+
+ /**
+ * Remove the original headbuffer and replace it with a new empty buffer.
+ */
+ private ImmutableMap bufferMapWithEmptyHead() {
+ ImmutableMap.Builder bufMapBuilder = ImmutableMap
+ .builder();
+ Buffer head = new AbstractReadOnlyBuffer() {
+ @Override
+ public int size() {
+ return 0;
+ }
+
+ @Override
+ public Object read() {
+ return null;
+ }
+ };
+
+ Token headToken = Token.createOverallInputToken(source);
+ for (Map.Entry en : bufferMap.entrySet()) {
+ if (en.getKey().equals(headToken))
+ bufMapBuilder.put(headToken, head);
+ else
+ bufMapBuilder.put(en);
+ }
+ return bufMapBuilder.build();
+ }
+
private Set getWorkerIds(List>> blobList) {
Set workerIds = new HashSet<>();
for (Set> blobworkers : blobList) {
@@ -212,4 +300,163 @@ private Set getWorkerIds(List>> blobList) {
}
return workerIds;
}
+
+ /**
+ * @return the configuration
+ */
+ public Configuration getConfiguration() {
+ return configuration;
+ }
+
+ /**
+ * @param configuration
+ * the configuration to set
+ */
+ public void setConfiguration(Configuration configuration) {
+ this.configuration = configuration;
+ visualizer.newConfiguration(configuration);
+ visualizer.newPartitionMachineMap(partitionsMachineMap);
+ }
+
+ private Pair, Worker, O>> visit(OneToOneElement stream) {
+ checkforDefaultOneToOneElement(stream);
+ ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor();
+ stream.visit(primitiveConnector);
+ Worker source = (Worker) primitiveConnector.getSource();
+ Worker, O> sink = (Worker, O>) primitiveConnector.getSink();
+
+ VerifyStreamGraph verifier = new VerifyStreamGraph();
+ stream.visit(verifier);
+ return new Pair, Worker, O>>(source, sink);
+ }
+
+ /**
+ * TODO: Need to check for other default subtypes of {@link OneToOneElement}
+ * s. Now only checks for first generation children.
+ *
+ * @param stream
+ * @throws StreamCompilationFailedException
+ * if stream is default subtype of OneToOneElement
+ */
+ private void checkforDefaultOneToOneElement(OneToOneElement stream) {
+ if (stream.getClass() == Pipeline.class
+ || stream.getClass() == Splitjoin.class
+ || stream.getClass() == Filter.class) {
+ throw new StreamCompilationFailedException(
+ "Default subtypes of OneToOneElement are not accepted for"
+ + " compilation by this compiler. OneToOneElement"
+ + " that passed should be unique");
+ }
+ }
+
+ private List getConstrains() {
+ // TODO: Copied from DebugStreamCompiler. Needs to be verified for this
+ // context.
+ List constraints = MessageConstraint
+ .findConstraints(source);
+ Set> portals = new HashSet<>();
+ for (MessageConstraint mc : constraints)
+ portals.add(mc.getPortal());
+ for (Portal> portal : portals)
+ Portals.setConstraints(portal, constraints);
+ return constraints;
+ }
+
+ /**
+ * Uses {@link StreamPathBuilder} to generate all paths in the streamGraph
+ * of this {@link StreamJitApp}. Check {@link StreamPathBuilder} for more
+ * information.
+ *
+ * @return Set of all paths in the streamGraph of this {@link StreamJitApp}.
+ */
+ public Set> paths() {
+ return StreamPathBuilder.paths(streamGraph);
+ }
+
+ /**
+ * Static information of the {@link StreamJitApp} that is essential for
+ * {@link StreamNode}s to set up. This configuration will be sent to
+ * {@link StreamNode}s when setting up a new app (Only once).
+ *
+ * @return static information of the app that is needed by stream nodes.
+ */
+ public Configuration getStaticConfiguration() {
+ Configuration.Builder builder = Configuration.builder();
+ builder.putExtraData(GlobalConstants.JARFILE_PATH, jarFilePath)
+ .putExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME,
+ topLevelClass);
+ return builder.build();
+ }
+
+ /**
+ * For every reconfiguration, this method may be called by an appropriate
+ * class to get new configuration information that can be sent to all
+ * participating {@link StreamNode}s. Mainly this configuration contains
+ * partition information.
+ *
+ * @return new partition information
+ */
+ public Configuration getDynamicConfiguration() {
+ Configuration.Builder builder = Configuration.builder();
+
+ int maxCores = maxCores();
+
+ Map machineCoreMap = new HashMap<>();
+ for (Entry>>> machine : partitionsMachineMap
+ .entrySet()) {
+ machineCoreMap.put(machine.getKey(), machine.getValue().size()
+ * maxCores);
+ }
+
+ PartitionParameter.Builder partParam = PartitionParameter.builder(
+ GlobalConstants.PARTITION, machineCoreMap);
+
+ BlobFactory intFactory = new Interpreter.InterpreterBlobFactory();
+ BlobFactory comp2Factory = new Compiler2BlobFactory();
+ partParam.addBlobFactory(intFactory);
+ partParam.addBlobFactory(comp2Factory);
+ blobtoMachineMap = new HashMap<>();
+
+ BlobFactory bf = Options.useCompilerBlob ? comp2Factory
+ : intFactory;
+ for (Integer machineID : partitionsMachineMap.keySet()) {
+ List>> blobList = partitionsMachineMap
+ .get(machineID);
+ for (Set> blobWorkers : blobList) {
+ // TODO: One core per blob. Need to change this.
+ partParam.addBlob(machineID, maxCores, bf, blobWorkers);
+
+ // TODO: Temp fix to build.
+ Token t = Utils.getblobID(blobWorkers);
+ blobtoMachineMap.put(t, machineID);
+ }
+ }
+
+ builder.addParameter(partParam.build());
+ if (Options.useCompilerBlob)
+ builder.addSubconfiguration("blobConfigs", getConfiguration());
+ else
+ builder.addSubconfiguration("blobConfigs", getInterpreterConfg());
+ return builder.build();
+ }
+
+ private Configuration getInterpreterConfg() {
+ Configuration.Builder builder = Configuration.builder();
+ List universe = Arrays
+ . asList(new ConcurrentChannelFactory());
+ SwitchParameter cfParameter = new SwitchParameter(
+ "channelFactory", ChannelFactory.class, universe.get(0),
+ universe);
+
+ builder.addParameter(cfParameter);
+ return builder.build();
+ }
+
+ private int maxCores() {
+ IntParameter maxCoreParam = configuration.getParameter("maxNumCores",
+ IntParameter.class);
+ if (maxCoreParam != null)
+ return maxCoreParam.getValue();
+ return Options.maxNumCores;
+ }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java b/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java
index 690b1ef3..d2ee2cbf 100644
--- a/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java
+++ b/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java
@@ -21,59 +21,75 @@
*/
package edu.mit.streamjit.impl.distributed;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
import edu.mit.streamjit.api.CompiledStream;
import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob.Token;
import edu.mit.streamjit.impl.blob.Buffer;
import edu.mit.streamjit.impl.blob.DrainData;
-import edu.mit.streamjit.impl.blob.Blob.Token;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.TimeLogger;
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
import edu.mit.streamjit.impl.distributed.common.AppStatus;
+import edu.mit.streamjit.impl.distributed.common.AppStatus.AppStatusProcessor;
+import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
import edu.mit.streamjit.impl.distributed.common.CTRLRMessageElement;
import edu.mit.streamjit.impl.distributed.common.Command;
import edu.mit.streamjit.impl.distributed.common.ConfigurationString;
-import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
-import edu.mit.streamjit.impl.distributed.common.AppStatus.AppStatusProcessor;
-import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
-import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
-import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor.ConfigType;
+import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor.ConfigType;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider;
import edu.mit.streamjit.impl.distributed.common.Error.ErrorProcessor;
+import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
import edu.mit.streamjit.impl.distributed.common.MiscCtrlElements.NewConInfo;
+import edu.mit.streamjit.impl.distributed.common.Options;
import edu.mit.streamjit.impl.distributed.common.SNDrainElement.Drained;
-import edu.mit.streamjit.impl.distributed.common.SNDrainElement.DrainedData;
import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainProcessor;
+import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData;
import edu.mit.streamjit.impl.distributed.common.SNException;
import edu.mit.streamjit.impl.distributed.common.SNException.AddressBindException;
import edu.mit.streamjit.impl.distributed.common.SNException.SNExceptionProcessor;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.SNTimeInfoProcessor;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfoProcessorImpl;
import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Utils;
+import edu.mit.streamjit.impl.distributed.profiler.MasterProfiler;
+import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand;
import edu.mit.streamjit.impl.distributed.runtimer.Controller;
+import edu.mit.streamjit.util.ConfigurationUtils;
public class StreamJitAppManager {
- private SNDrainProcessorImpl dp = null;
+ private final StreamJitApp, ?> app;
- private SNExceptionProcessorImpl exP = null;
+ private AppStatusProcessorImpl apStsPro = null;
- private ErrorProcessor ep = null;
+ private Map conInfoMap;
- private AppStatusProcessorImpl apStsPro = null;
+ private final ConnectionManager conManager;
private final Controller controller;
- private final StreamJitApp app;
+ private SNDrainProcessorImpl dp = null;
- private final ConfigurationManager cfgManager;
+ private ErrorProcessor ep = null;
- private boolean isRunning;
+ private SNExceptionProcessorImpl exP = null;
+
+ private final MasterProfiler profiler;
/**
* A {@link BoundaryOutputChannel} for the head of the stream graph. If the
@@ -83,6 +99,14 @@ public class StreamJitAppManager {
*/
private BoundaryOutputChannel headChannel;
+ private Thread headThread;
+
+ private final Token headToken;
+
+ private boolean isRunning;
+
+ private volatile AppStatus status;
+
/**
* A {@link BoundaryInputChannel} for the tail of the whole stream graph. If
* the sink {@link Worker} happened to fall outside the {@link Controller},
@@ -91,44 +115,149 @@ public class StreamJitAppManager {
*/
private TailChannel tailChannel;
- private Thread headThread;
-
private Thread tailThread;
- private volatile AppStatus status;
+ private final Token tailToken;
+
+ /**
+ * [2014-03-15] Just to measure the draining time
+ */
+ AtomicReference stopwatchRef = new AtomicReference<>();
- Map conInfoMap;
+ private final TimeLogger logger;
- public StreamJitAppManager(Controller controller, StreamJitApp app,
- ConfigurationManager cfgManager) {
+ private final SNTimeInfoProcessor timeInfoProcessor;
+
+ public StreamJitAppManager(Controller controller, StreamJitApp, ?> app,
+ ConnectionManager conManager, TimeLogger logger) {
this.controller = controller;
this.app = app;
- this.cfgManager = cfgManager;
+ this.conManager = conManager;
+ this.logger = logger;
+ this.timeInfoProcessor = new SNTimeInfoProcessorImpl(logger);
this.status = AppStatus.NOT_STARTED;
this.exP = new SNExceptionProcessorImpl();
this.ep = new ErrorProcessorImpl();
this.apStsPro = new AppStatusProcessorImpl(controller.getAllNodeIDs()
.size());
controller.registerManager(this);
- controller.newApp(cfgManager.getStaticConfiguration()); // TODO: Find a
- // good calling
- // place.
+ controller.newApp(app.getStaticConfiguration()); // TODO: Find a
+ // good calling
+ // place.
isRunning = false;
+
+ headToken = Token.createOverallInputToken(app.source);
+ tailToken = Token.createOverallOutputToken(app.sink);
+ profiler = setupProfiler();
}
- public boolean reconfigure() {
- reset();
- Configuration.Builder builder = Configuration.builder(cfgManager
- .getDynamicConfiguration());
+ public AppStatusProcessor appStatusProcessor() {
+ return apStsPro;
+ }
+
+ public void drain(Token blobID, DrainType drainType) {
+ // System.out.println("Drain requested to blob " + blobID);
+ if (!app.blobtoMachineMap.containsKey(blobID))
+ throw new IllegalArgumentException(blobID
+ + " not found in the blobtoMachineMap");
+ int nodeID = app.blobtoMachineMap.get(blobID);
+ controller.send(nodeID,
+ new CTRLRDrainElement.DoDrain(blobID, drainType));
+ }
+
+ public void drainingFinished(boolean isFinal) {
+ System.out.println("App Manager : Draining Finished...");
+
+ if (headChannel != null) {
+ try {
+ headThread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ if (tailChannel != null) {
+ if (Options.useDrainData)
+ if (isFinal)
+ tailChannel.stop(DrainType.FINAL);
+ else
+ tailChannel.stop(DrainType.INTERMEDIATE);
+ else
+ tailChannel.stop(DrainType.DISCARD);
+
+ try {
+ tailThread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+
+ if (isFinal)
+ stop();
+
+ isRunning = false;
- Map> tokenMachineMap = new HashMap<>();
- Map portIdMap = new HashMap<>();
+ Stopwatch sw = stopwatchRef.get();
+ if (sw != null && sw.isRunning()) {
+ sw.stop();
+ long time = sw.elapsed(TimeUnit.MILLISECONDS);
+ System.out.println("Draining time is " + time + " milli seconds");
+ }
+ }
+
+ public void drainingStarted(boolean isFinal) {
+ stopwatchRef.set(Stopwatch.createStarted());
+ if (headChannel != null) {
+ headChannel.stop(isFinal);
+ // [2014-03-16] Moved to drainingFinished. In any case if headThread
+ // blocked at tcp write, draining will also blocked.
+ // try {
+ // headThread.join();
+ // } catch (InterruptedException e) {
+ // e.printStackTrace();
+ // }
+ }
+ }
- conInfoMap = controller.buildConInfoMap(app.partitionsMachineMap,
- app.source, app.sink);
+ public SNDrainProcessor drainProcessor() {
+ return dp;
+ }
- builder.putExtraData(GlobalConstants.TOKEN_MACHINE_MAP, tokenMachineMap)
- .putExtraData(GlobalConstants.PORTID_MAP, portIdMap);
+ public ErrorProcessor errorProcessor() {
+ return ep;
+ }
+
+ public SNExceptionProcessor exceptionProcessor() {
+ return exP;
+ }
+
+ public SNTimeInfoProcessor timeInfoProcessor() {
+ return timeInfoProcessor;
+ }
+
+ public long getFixedOutputTime(long timeout) throws InterruptedException {
+ long time = tailChannel.getFixedOutputTime(timeout);
+ if (apStsPro.error) {
+ return -1l;
+ }
+ return time;
+ }
+
+ public AppStatus getStatus() {
+ return status;
+ }
+
+ public boolean isRunning() {
+ return isRunning;
+ }
+
+ public boolean reconfigure(int multiplier) {
+ reset();
+ Configuration.Builder builder = Configuration.builder(app
+ .getDynamicConfiguration());
+
+ conInfoMap = conManager.conInfoMap(app.getConfiguration(),
+ app.partitionsMachineMap, app.source, app.sink);
builder.putExtraData(GlobalConstants.CONINFOMAP, conInfoMap);
@@ -137,17 +266,17 @@ public boolean reconfigure() {
ImmutableMap drainDataMap = app.getDrainData();
+ logger.compilationStarted();
for (int nodeID : controller.getAllNodeIDs()) {
ConfigurationString json = new ConfigurationString(jsonStirng,
ConfigType.DYNAMIC, drainDataMap.get(nodeID));
controller.send(nodeID, json);
}
- setupHeadTail(conInfoMap, app.bufferMap,
- Token.createOverallInputToken(app.source),
- Token.createOverallOutputToken(app.sink));
+ setupHeadTail(conInfoMap, app.bufferMap, multiplier);
boolean isCompiled = apStsPro.waitForCompilation();
+ logger.compilationFinished(isCompiled, "");
if (isCompiled) {
start();
@@ -156,22 +285,52 @@ public boolean reconfigure() {
isRunning = false;
}
+ if (profiler != null) {
+ String cfgPrefix = ConfigurationUtils.getConfigPrefix(app
+ .getConfiguration());
+ profiler.logger().newConfiguration(cfgPrefix);
+ }
+
+ System.out.println("StraemJit app is running...");
+ Utils.printMemoryStatus();
return isRunning;
}
+ public void setDrainer(AbstractDrainer drainer) {
+ assert dp == null : "SNDrainProcessor has already been set";
+ this.dp = new SNDrainProcessorImpl(drainer);
+ }
+
+ public void stop() {
+ this.status = AppStatus.STOPPED;
+ tailChannel.reset();
+ controller.closeAll();
+ dp.drainer.stop();
+ }
+
+ private void reset() {
+ exP.exConInfos = new HashSet<>();
+ apStsPro.reset();
+ }
+
+ private MasterProfiler setupProfiler() {
+ MasterProfiler p = null;
+ if (Options.needProfiler) {
+ p = new MasterProfiler(app.name);
+ controller.sendToAll(ProfilerCommand.START);
+ }
+ return p;
+ }
/**
* Setup the headchannel and tailchannel.
*
* @param cfg
* @param bufferMap
- * @param headToken
- * @param tailToken
*/
- private void setupHeadTail(Map conInfoMap,
- ImmutableMap bufferMap, Token headToken,
- Token tailToken) {
+ private void setupHeadTail(Map conInfoMap,
+ ImmutableMap bufferMap, int multiplier) {
- TCPConnectionInfo headconInfo = conInfoMap.get(headToken);
+ ConnectionInfo headconInfo = conInfoMap.get(headToken);
assert headconInfo != null : "No head connection info exists in conInfoMap";
assert headconInfo.getSrcID() == controller.controllerNodeID
|| headconInfo.getDstID() == controller.controllerNodeID : "Head channel should start from the controller. "
@@ -181,11 +340,18 @@ private void setupHeadTail(Map conInfoMap,
throw new IllegalArgumentException(
"No head buffer in the passed bufferMap.");
- headChannel = new HeadChannel(bufferMap.get(headToken),
- controller.getConProvider(), headconInfo, "headChannel - "
- + headToken.toString(), 0);
-
- TCPConnectionInfo tailconInfo = conInfoMap.get(tailToken);
+ if (headconInfo instanceof TCPConnectionInfo)
+ headChannel = new HeadChannel.TCPHeadChannel(
+ bufferMap.get(headToken), controller.getConProvider(),
+ headconInfo, "headChannel - " + headToken.toString(), 0);
+ else if (headconInfo instanceof AsyncTCPConnectionInfo)
+ headChannel = new HeadChannel.AsyncHeadChannel(
+ bufferMap.get(headToken), controller.getConProvider(),
+ headconInfo, "headChannel - " + headToken.toString(), 0);
+ else
+ throw new IllegalStateException("Head ConnectionInfo doesn't match");
+
+ ConnectionInfo tailconInfo = conInfoMap.get(tailToken);
assert tailconInfo != null : "No tail connection info exists in conInfoMap";
assert tailconInfo.getSrcID() == controller.controllerNodeID
|| tailconInfo.getDstID() == controller.controllerNodeID : "Tail channel should ends at the controller. "
@@ -195,9 +361,30 @@ private void setupHeadTail(Map conInfoMap,
throw new IllegalArgumentException(
"No tail buffer in the passed bufferMap.");
- tailChannel = new TailChannel(bufferMap.get(tailToken),
- controller.getConProvider(), tailconInfo, "tailChannel - "
- + tailToken.toString(), 0, 1000);
+ int skipCount = Math.max(Options.outputCount, multiplier * 5);
+ tailChannel = tailChannel(bufferMap.get(tailToken), tailconInfo,
+ skipCount);
+ }
+
+ private TailChannel tailChannel(Buffer buffer, ConnectionInfo conInfo,
+ int skipCount) {
+ String appName = app.name;
+ int steadyCount = Options.outputCount;
+ int debugLevel = 0;
+ String bufferTokenName = "tailChannel - " + tailToken.toString();
+ ConnectionProvider conProvider = controller.getConProvider();
+ String cfgPrefix = ConfigurationUtils.getConfigPrefix(app
+ .getConfiguration());
+ switch (Options.tailChannel) {
+ case 1 :
+ return new TailChannels.BlockingTailChannel1(buffer,
+ conProvider, conInfo, bufferTokenName, debugLevel,
+ skipCount, steadyCount, appName, cfgPrefix);
+ default :
+ return new TailChannels.BlockingTailChannel2(buffer,
+ conProvider, conInfo, bufferTokenName, debugLevel,
+ skipCount, steadyCount, appName, cfgPrefix);
+ }
}
/**
@@ -216,101 +403,109 @@ private void start() {
controller.sendToAll(Command.START);
if (tailChannel != null) {
- tailChannel.reset();
tailThread = new Thread(tailChannel.getRunnable(),
tailChannel.name());
tailThread.start();
}
}
- public boolean isRunning() {
- return isRunning;
+ public MasterProfiler getProfiler() {
+ return profiler;
}
- public void drainingStarted(boolean isFinal) {
- if (headChannel != null) {
- headChannel.stop(isFinal);
- try {
- headThread.join();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ /**
+ * {@link AppStatusProcessor} at {@link Controller} side.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Aug 11, 2013
+ */
+ private class AppStatusProcessorImpl implements AppStatusProcessor {
+
+ private boolean compilationError;
+
+ private CountDownLatch compileLatch;
+
+ private volatile boolean error;
+
+ private final int noOfnodes;
+
+ private AppStatusProcessorImpl(int noOfnodes) {
+ this.noOfnodes = noOfnodes;
}
- }
- public void drain(Token blobID, boolean isFinal) {
- // System.out.println("Drain requested to blob " + blobID);
- if (!app.blobtoMachineMap.containsKey(blobID))
- throw new IllegalArgumentException(blobID
- + " not found in the blobtoMachineMap");
- int nodeID = app.blobtoMachineMap.get(blobID);
- controller
- .send(nodeID, new CTRLRDrainElement.DoDrain(blobID, !isFinal));
- }
+ @Override
+ public void processCOMPILATION_ERROR() {
+ System.err.println("Compilation error");
+ this.compilationError = true;
+ compileLatch.countDown();
+ }
- public void drainingFinished(boolean isFinal) {
- System.out.println("App Manager : Draining Finished...");
- if (tailChannel != null) {
- if (isFinal)
- tailChannel.stop(1);
- else if (GlobalConstants.useDrainData)
- tailChannel.stop(2);
- else
- tailChannel.stop(3);
- try {
- tailThread.join();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ @Override
+ public void processCOMPILED() {
+ compileLatch.countDown();
}
- if (isFinal) {
- this.status = AppStatus.STOPPED;
+ @Override
+ public void processERROR() {
+ this.error = true;
+ // This will release the OpenTuner thread which is waiting for fixed
+ // output.
tailChannel.reset();
- controller.closeAll();
}
- isRunning = false;
- }
- public void awaitForFixInput() throws InterruptedException {
- tailChannel.awaitForFixInput();
- }
+ @Override
+ public void processNO_APP() {
+ }
- public void setDrainer(AbstractDrainer drainer) {
- assert dp == null : "SNDrainProcessor has already been set";
- this.dp = new SNDrainProcessorImpl(drainer);
- }
+ @Override
+ public void processNOT_STARTED() {
+ }
- public SNDrainProcessor drainProcessor() {
- return dp;
- }
+ @Override
+ public void processRUNNING() {
+ }
- public SNExceptionProcessor exceptionProcessor() {
- return exP;
- }
+ @Override
+ public void processSTOPPED() {
+ }
- public ErrorProcessor errorProcessor() {
- return ep;
- }
+ private void reset() {
+ compileLatch = new CountDownLatch(noOfnodes);
+ this.compilationError = false;
+ this.error = false;
+ }
- public AppStatusProcessor appStatusProcessor() {
- return apStsPro;
+ private boolean waitForCompilation() {
+ try {
+ compileLatch.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ return !this.compilationError;
+ }
}
- public AppStatus getStatus() {
- return status;
- }
+ /**
+ * {@link ErrorProcessor} at {@link Controller} side.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Aug 11, 2013
+ */
+ private class ErrorProcessorImpl implements ErrorProcessor {
- private void reset() {
- exP.exConInfos = new HashSet<>();
- apStsPro.reset();
- }
+ @Override
+ public void processFILE_NOT_FOUND() {
+ System.err
+ .println("No application jar file in streamNode. Terminating...");
+ stop();
+ }
- public void stop() {
- this.status = AppStatus.STOPPED;
- tailChannel.reset();
- controller.closeAll();
- dp.drainer.stop();
+ @Override
+ public void processWORKER_NOT_FOUND() {
+ System.err
+ .println("No top level class in the jar file. Terminating...");
+ stop();
+ }
}
/**
@@ -333,9 +528,9 @@ public void process(Drained drained) {
}
@Override
- public void process(DrainedData drainedData) {
- if (GlobalConstants.useDrainData)
- drainer.newDrainData(drainedData);
+ public void process(SNDrainedData snDrainedData) {
+ if (Options.useDrainData)
+ drainer.newSNDrainData(snDrainedData);
}
}
@@ -343,16 +538,12 @@ private class SNExceptionProcessorImpl implements SNExceptionProcessor {
private final Object abExLock = new Object();
- private Set exConInfos;
+ private Set exConInfos;
private SNExceptionProcessorImpl() {
exConInfos = new HashSet<>();
}
- @Override
- public void process(SNException ex) {
- }
-
@Override
public void process(AddressBindException abEx) {
synchronized (abExLock) {
@@ -363,7 +554,7 @@ public void process(AddressBindException abEx) {
}
Token t = null;
- for (Map.Entry entry : conInfoMap
+ for (Map.Entry entry : conInfoMap
.entrySet()) {
if (abEx.conInfo.equals(entry.getValue())) {
t = entry.getKey();
@@ -376,8 +567,8 @@ public void process(AddressBindException abEx) {
"Illegal TCP connection - " + abEx.conInfo);
}
- TCPConnectionInfo coninfo = controller
- .getNewTCPConInfo(abEx.conInfo);
+ ConnectionInfo coninfo = conManager
+ .replaceConInfo(abEx.conInfo);
exConInfos.add(abEx.conInfo);
@@ -386,93 +577,9 @@ public void process(AddressBindException abEx) {
controller.send(coninfo.getDstID(), msg);
}
}
- }
-
- /**
- * {@link ErrorProcessor} at {@link Controller} side.
- *
- * @author Sumanan sumanan@mit.edu
- * @since Aug 11, 2013
- */
- private class ErrorProcessorImpl implements ErrorProcessor {
-
- @Override
- public void processFILE_NOT_FOUND() {
- System.err
- .println("No application jar file in streamNode. Terminating...");
- stop();
- }
-
- @Override
- public void processWORKER_NOT_FOUND() {
- System.err
- .println("No top level class in the jar file. Terminating...");
- stop();
- }
- }
-
- /**
- * {@link AppStatusProcessor} at {@link Controller} side.
- *
- * @author Sumanan sumanan@mit.edu
- * @since Aug 11, 2013
- */
- private class AppStatusProcessorImpl implements AppStatusProcessor {
-
- private CountDownLatch compileLatch;
-
- private boolean compilationError;
-
- private final int noOfnodes;
-
- private AppStatusProcessorImpl(int noOfnodes) {
- this.noOfnodes = noOfnodes;
- }
@Override
- public void processRUNNING() {
- }
-
- @Override
- public void processSTOPPED() {
- }
-
- @Override
- public void processERROR() {
- }
-
- @Override
- public void processNOT_STARTED() {
- }
-
- @Override
- public void processNO_APP() {
- }
-
- @Override
- public void processCOMPILED() {
- compileLatch.countDown();
- }
-
- @Override
- public void processCOMPILATION_ERROR() {
- System.err.println("Compilation error");
- this.compilationError = true;
- compileLatch.countDown();
- }
-
- private void reset() {
- compileLatch = new CountDownLatch(noOfnodes);
- this.compilationError = false;
- }
-
- private boolean waitForCompilation() {
- try {
- compileLatch.await();
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- return !this.compilationError;
+ public void process(SNException ex) {
}
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java b/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java
new file mode 100644
index 00000000..dbef4b31
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java
@@ -0,0 +1,170 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableSet;
+
+import edu.mit.streamjit.api.Filter;
+import edu.mit.streamjit.api.Joiner;
+import edu.mit.streamjit.api.OneToOneElement;
+import edu.mit.streamjit.api.Pipeline;
+import edu.mit.streamjit.api.Splitjoin;
+import edu.mit.streamjit.api.Splitter;
+import edu.mit.streamjit.api.StreamVisitor;
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.common.ConnectWorkersVisitor;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.test.apps.filterbank6.FilterBank6;
+
+/**
+ * Generate all stream paths in a stream graph.
+ *
+ * @author sumanan
+ * @since 13 Jan, 2015
+ */
+public class StreamPathBuilder {
+
+ /**
+ * streamGraph must be connected before requesting for paths. Use
+ * {@link ConnectWorkersVisitor} to connect the streamGraph.
+ *
+ * @param streamGraph
+ * @return Set of all paths in the streamGraph.
+ */
+ public static Set> paths(OneToOneElement, ?> streamGraph) {
+ PathVisitor v = new PathVisitor();
+ streamGraph.visit(v);
+ return v.currentUnfinishedPathSet;
+ }
+
+ private static class PathVisitor extends StreamVisitor {
+
+ /**
+ * Paths those are currently being built. These paths will get extended
+ * as StreamPathBuilder visits through the stream graph.
+ */
+ private Set> currentUnfinishedPathSet;
+
+ /**
+ * Keeps track of paths to all {@link Splitter} encountered in a stack.
+ * Once corresponding {@link Joiner} is visited, path set will be popped
+ * from this stack.
+ */
+ private Deque>> pathToSplitterStack;
+
+ /**
+ * Unfinished path sets which are waiting for a correct joiner. Path set
+ * will be popped from this stack once correct joiner is reached.
+ */
+ private Deque>> waitingForJoinerStack;
+
+ private PathVisitor() {
+ currentUnfinishedPathSet = new HashSet<>();
+ pathToSplitterStack = new ArrayDeque<>();
+ waitingForJoinerStack = new ArrayDeque<>();
+ }
+
+ @Override
+ public void beginVisit() {
+ currentUnfinishedPathSet.clear();
+ pathToSplitterStack.clear();
+ waitingForJoinerStack.clear();
+ currentUnfinishedPathSet.add(new LinkedList());
+ }
+
+ @Override
+ public void visitFilter(Filter, ?> filter) {
+ addToCurrentPath(filter);
+ }
+
+ @Override
+ public boolean enterPipeline(Pipeline, ?> pipeline) {
+ return true;
+ }
+
+ @Override
+ public void exitPipeline(Pipeline, ?> pipeline) {
+ }
+
+ @Override
+ public boolean enterSplitjoin(Splitjoin, ?> splitjoin) {
+ return true;
+ }
+
+ @Override
+ public void visitSplitter(Splitter, ?> splitter) {
+ addToCurrentPath(splitter);
+ pathToSplitterStack.push(currentUnfinishedPathSet);
+ waitingForJoinerStack.push(new HashSet>());
+ }
+
+ @Override
+ public boolean enterSplitjoinBranch(OneToOneElement, ?> element) {
+ currentUnfinishedPathSet = new HashSet>();
+ for (List splitterPath : pathToSplitterStack.peek()) {
+ currentUnfinishedPathSet.add(new LinkedList(
+ splitterPath));
+ }
+ return true;
+ }
+
+ @Override
+ public void exitSplitjoinBranch(OneToOneElement, ?> element) {
+ waitingForJoinerStack.peek().addAll(currentUnfinishedPathSet);
+ }
+
+ @Override
+ public void visitJoiner(Joiner, ?> joiner) {
+ currentUnfinishedPathSet = waitingForJoinerStack.pop();
+ addToCurrentPath(joiner);
+ pathToSplitterStack.pop();
+ }
+
+ @Override
+ public void exitSplitjoin(Splitjoin, ?> splitjoin) {
+ }
+
+ @Override
+ public void endVisit() {
+ if (!waitingForJoinerStack.isEmpty())
+ throw new IllegalStateException("waitingForJoiner not empty");
+ if (!pathToSplitterStack.isEmpty())
+ throw new IllegalStateException("pathToSplitter not empty");
+ // printPaths();
+ }
+
+ /**
+ * Extends all current unfinished path set with the {@link Worker} w.
+ *
+ * @param w
+ */
+ private void addToCurrentPath(Worker, ?> w) {
+ int id = Workers.getIdentifier(w);
+ for (List path : currentUnfinishedPathSet)
+ path.add(id);
+ }
+
+ /**
+ * Prints all paths in the stream graph.
+ */
+ private void printPaths() {
+ for (List path : currentUnfinishedPathSet) {
+ for (Integer i : path) {
+ System.out.print(i.toString() + "->");
+ }
+ System.out.println();
+ }
+ }
+ }
+
+ public static void main(String[] args) {
+ OneToOneElement, ?> stream = new FilterBank6.FilterBankPipeline();
+ new StreamJitApp<>(stream); // Connects the stream into stream graph.
+ StreamPathBuilder.paths(stream);
+ }
+}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/TailChannel.java b/src/edu/mit/streamjit/impl/distributed/TailChannel.java
index 791e6a3e..dd077533 100644
--- a/src/edu/mit/streamjit/impl/distributed/TailChannel.java
+++ b/src/edu/mit/streamjit/impl/distributed/TailChannel.java
@@ -21,93 +21,67 @@
*/
package edu.mit.streamjit.impl.distributed;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import com.google.common.base.Stopwatch;
-
-import edu.mit.streamjit.impl.blob.Buffer;
-import edu.mit.streamjit.impl.distributed.common.GlobalConstants;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider;
-import edu.mit.streamjit.impl.distributed.node.TCPInputChannel;
-
-public class TailChannel extends TCPInputChannel {
-
- int limit;
-
- int count;
-
- private volatile CountDownLatch latch;
-
- public TailChannel(Buffer buffer, TCPConnectionProvider conProvider,
- TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint,
- int limit) {
- super(buffer, conProvider, conInfo, bufferTokenName, debugPrint);
- this.limit = limit;
- count = 0;
- latch = new CountDownLatch(1);
- if (!GlobalConstants.tune)
- new performanceLogger().start();
- }
-
- @Override
- public void receiveData() {
- super.receiveData();
- count++;
- // System.err.println(count);
- if (count > limit)
- latch.countDown();
- }
-
- public void awaitForFixInput() throws InterruptedException {
- latch.await();
- }
-
- public void reset() {
- latch.countDown();
- latch = new CountDownLatch(1);
- count = 0;
- }
-
- private class performanceLogger extends Thread {
-
- public void run() {
- int i = 0;
- FileWriter writer;
- try {
- writer = new FileWriter("FixedOutPut.txt");
- } catch (IOException e1) {
- e1.printStackTrace();
- return;
- }
- while (++i < 30) {
- try {
- Stopwatch stopwatch = Stopwatch.createStarted();
- latch.await();
- stopwatch.stop();
- Long time = stopwatch.elapsed(TimeUnit.MILLISECONDS);
-
- System.out.println("Execution time is " + time
- + " milli seconds");
-
- writer.write(time.toString());
- writer.write('\n');
-
- reset();
-
- } catch (InterruptedException | IOException e) {
- e.printStackTrace();
- }
- }
- try {
- writer.close();
- } catch (IOException e) {
- // TODO Auto-generated catch block
- e.printStackTrace();
- }
- }
- }
-}
\ No newline at end of file
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+
+/**
+ * This is a {@link BoundaryInputChannel} with counting facility.
+ * Implementations need to count the number of elements received and provide
+ * other services based on the count.
+ *
+ * @author sumanan
+ * @since 24 Jan, 2015
+ */
+public interface TailChannel extends BoundaryInputChannel {
+
+ /**
+ * @return Number of elements received after the last reset()
+ */
+ public int count();
+
+ /**
+ * Returns the time to receive fixed number of outputs. The fixed number can
+ * be hard coded in side an implementation or passed as a constructor
+ * argument.
+ *
+ * The caller will be blocked until the fixed number of outputs are
+ * received.
+ *
+ * @return the time(ms) to receive fixed number of outputs.
+ *
+ * @throws InterruptedException
+ */
+ public long getFixedOutputTime() throws InterruptedException;
+
+ /**
+ * Returns the time to receive fixed number of outputs. The fixed number can
+ * be hard coded in side an implementation or passed as a constructor
+ * argument. Waits at most timeout time to receive fixed number
+ * of output. Returns -1 if timeout occurred.
+ *
+ *
+ * If timeout < 1, then the behavior this method is equivalent to calling
+ * {@link #getFixedOutputTime()}.
+ *
+ *
+ *
+ * The caller will be blocked until the fixed number of output is received
+ * or timeout occurred, whatever happens early.
+ *
+ * @param timeout
+ * Wait at most timeout time to receive fixed number of output.
+ *
+ * @return the time(ms) to receive fixed number of outputs or -1 if timeout
+ * occurred.
+ *
+ * @throws InterruptedException
+ */
+ public long getFixedOutputTime(long timeout) throws InterruptedException;
+
+ /**
+ * Resets all counters and other resources. Any thread blocked on either
+ * {@link #getFixedOutputTime()} or {@link #getFixedOutputTime(long)} should
+ * be released after this call.
+ */
+ public void reset();
+
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/TailChannels.java b/src/edu/mit/streamjit/impl/distributed/TailChannels.java
new file mode 100644
index 00000000..b4135d82
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/TailChannels.java
@@ -0,0 +1,521 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.google.common.base.Stopwatch;
+
+import edu.mit.streamjit.impl.blob.Buffer;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider;
+import edu.mit.streamjit.impl.distributed.common.Options;
+import edu.mit.streamjit.impl.distributed.common.Utils;
+import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel;
+
+public class TailChannels {
+
+ private static class PerformanceLogger extends Thread {
+
+ private AtomicBoolean stopFlag;
+
+ private final String appName;
+
+ private final TailChannel tailChannel;
+
+ private PerformanceLogger(TailChannel tailChannel, String appName) {
+ super("PerformanceLogger");
+ stopFlag = new AtomicBoolean(false);
+ this.appName = appName;
+ this.tailChannel = tailChannel;
+ }
+
+ public void run() {
+ int i = 0;
+ FileWriter writer;
+ try {
+ writer = new FileWriter(String.format("%s%sFixedOutPut.txt",
+ appName, File.separator));
+ } catch (IOException e1) {
+ e1.printStackTrace();
+ return;
+ }
+
+ writeInitialInfo(writer);
+
+ Long sum = 0l;
+
+ while (++i < 10 && !stopFlag.get()) {
+ try {
+ Long time = tailChannel.getFixedOutputTime();
+
+ sum += time;
+ System.out.println("Execution time is " + time
+ + " milli seconds");
+
+ writer.write(time.toString());
+ writer.write('\n');
+ writer.flush();
+ } catch (InterruptedException | IOException e) {
+ e.printStackTrace();
+ }
+ }
+ try {
+ writer.write("Average = " + sum / (i - 1));
+ writer.write('\n');
+ writer.flush();
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+
+ System.out.println("PerformanceLogger exits. App will run till "
+ + "inputdata exhausted.");
+ }
+
+ private void writeInitialInfo(FileWriter writer) {
+ System.out.println(String.format(
+ "PerformanceLogger starts to log the time to"
+ + " produce %d number of outputs",
+ Options.outputCount));
+
+ try {
+ writer.write(String.format("GlobalConstants.outputCount = %d",
+ Options.outputCount));
+ writer.write('\n');
+ writer.flush();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public void stopLogging() {
+ stopFlag.set(true);
+ }
+ }
+
+ /**
+ * Periodically prints the number of outputs received by a
+ * {@link TailChannel}.
+ */
+ private static class OutputCountPrinter {
+
+ private final String appName;
+
+ private final TailChannel tailChannel;
+
+ /**
+ * The no of outputs received at the end of last period.
+ */
+ private int lastCount;
+
+ private FileWriter writer;
+
+ private RuntimeMXBean rb = ManagementFactory.getRuntimeMXBean();
+
+ private ScheduledExecutorService scheduledExecutorService;
+
+ OutputCountPrinter(TailChannel tailChannel, String appName) {
+ this.tailChannel = tailChannel;
+ this.appName = appName;
+ printOutputCount();
+ }
+
+ private void printOutputCount() {
+ if (Options.printOutputCountPeriod < 1)
+ return;
+ writer = Utils.fileWriter(appName, "outputCount.txt", true);
+ lastCount = 0;
+ scheduledExecutorService = Executors
+ .newSingleThreadScheduledExecutor();
+ scheduledExecutorService.scheduleAtFixedRate(
+ new Runnable() {
+ @Override
+ public void run() {
+ int currentCount = tailChannel.count();
+ int newOutputs = currentCount - lastCount;
+ lastCount = currentCount;
+ String msg = String.format("%d\t\t%d\t\t%d\n",
+ rb.getUptime(), currentCount, newOutputs);
+ try {
+ writer.write(msg);
+ writer.flush();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }, Options.printOutputCountPeriod,
+ Options.printOutputCountPeriod,
+ TimeUnit.MILLISECONDS);
+ }
+
+ private void stop() {
+ if (scheduledExecutorService != null)
+ scheduledExecutorService.shutdown();
+ if (writer != null)
+ try {
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * This method writes to the file in a non thread safe way. But this is
+ * enough to serve the purpose.
+ *
+ * TODO: This method is just for debugging purpose, Remove this method
+ * and its usage later.
+ */
+ private boolean write(String msg) {
+ if (writer != null)
+ try {
+ writer.write(msg);
+ return true;
+ } catch (Exception e) {
+ }
+ return false;
+ }
+ }
+
+ private static abstract class AbstractBlockingTailChannel
+ extends
+ BlockingInputChannel implements TailChannel {
+
+ protected final int skipCount;
+
+ protected final int totalCount;
+
+ protected int count;
+
+ private PerformanceLogger pLogger = null;
+
+ private OutputCountPrinter outputCountPrinter = null;
+
+ private final String cfgPrefix;
+
+ protected abstract void releaseAndInitilize();
+
+ /**
+ * @param buffer
+ * @param conProvider
+ * @param conInfo
+ * @param bufferTokenName
+ * @param debugLevel
+ * @param skipCount
+ * : Skips this amount of output before evaluating the
+ * running time. This is added to avoid the noise from init
+ * schedule and the drain data. ( i.e., In order to get real
+ * steady state execution time)
+ * @param steadyCount
+ * : {@link #getFixedOutputTime()} calculates the time taken
+ * to get this amount of outputs ( after skipping skipCount
+ * number of outputs at the beginning).
+ */
+ public AbstractBlockingTailChannel(Buffer buffer,
+ ConnectionProvider conProvider, ConnectionInfo conInfo,
+ String bufferTokenName, int debugLevel, int skipCount,
+ int steadyCount, String appName, String cfgPrefix) {
+ super(buffer, conProvider, conInfo, bufferTokenName, debugLevel);
+ this.skipCount = skipCount;
+ this.totalCount = steadyCount + skipCount;
+ count = 0;
+ this.cfgPrefix = cfgPrefix;
+ if (Options.tune == 0) {
+ // TODO: Leaks this object from the constructor. May cause
+ // subtle bugs. Re-factor it.
+ pLogger = new PerformanceLogger(this, appName);
+ pLogger.start();
+ }
+ if (Options.printOutputCountPeriod > 0)
+ outputCountPrinter = new OutputCountPrinter(this, appName);
+ }
+
+ @Override
+ public void stop(DrainType type) {
+ super.stop(type);
+ if (pLogger != null) {
+ releaseAndInitilize();
+ pLogger.stopLogging();
+ }
+ if (outputCountPrinter != null)
+ outputCountPrinter.stop();
+ }
+
+ @Override
+ public int count() {
+ return count;
+ }
+
+ protected long normalizedTime(long time) {
+ return (Options.outputCount * time) / (totalCount - skipCount);
+ }
+
+ /**
+ * Opposite to the {@link #normalizedTime(long)}'s equation.
+ * time=unnormalizedTime(normalizedTime(time))
+ */
+ protected long unnormalizedTime(long time) {
+ return (time * (totalCount - skipCount)) / Options.outputCount;
+ }
+
+ /**
+ * Logs the time reporting event.
+ *
+ * TODO: This method is just for debugging purposes. Remove this method
+ * and its usage later.
+ */
+ protected void reportingTime(long time) {
+ if (outputCountPrinter != null) {
+ String msg = String.format(
+ "Reporting-%s.cfg,time=%d,TotalCount=%d\n", cfgPrefix,
+ time, count);
+ outputCountPrinter.write(msg);
+ }
+ }
+ }
+
+ public static final class BlockingTailChannel1
+ extends
+ AbstractBlockingTailChannel {
+
+ private volatile CountDownLatch steadyLatch;
+
+ private volatile CountDownLatch skipLatch;
+
+ private boolean skipLatchUp;
+
+ private boolean steadyLatchUp;
+
+ /**
+ * @param buffer
+ * @param conProvider
+ * @param conInfo
+ * @param bufferTokenName
+ * @param debugLevel
+ * For all above 5 parameters, see
+ * {@link BlockingInputChannel#BlockingInputChannel(Buffer, ConnectionProvider, ConnectionInfo, String, int)}
+ * @param skipCount
+ * : Skips this amount of output before evaluating the
+ * running time. This is added to avoid the noise from init
+ * schedule and the drain data. ( i.e., In order to get real
+ * steady state execution time)
+ * @param steadyCount
+ * : {@link #getFixedOutputTime()} calculates the time taken
+ * to get this amount of outputs ( after skipping skipCount
+ * number of outputs at the beginning).
+ */
+ public BlockingTailChannel1(Buffer buffer,
+ ConnectionProvider conProvider, ConnectionInfo conInfo,
+ String bufferTokenName, int debugLevel, int skipCount,
+ int steadyCount, String appName, String cfgPrefix) {
+ super(buffer, conProvider, conInfo, bufferTokenName, debugLevel,
+ skipCount, steadyCount, appName, cfgPrefix);
+ steadyLatch = new CountDownLatch(1);
+ skipLatch = new CountDownLatch(1);
+ this.skipLatchUp = true;
+ this.steadyLatchUp = true;
+ }
+
+ @Override
+ public void receiveData() {
+ super.receiveData();
+ count++;
+
+ if (skipLatchUp && count > skipCount) {
+ skipLatch.countDown();
+ skipLatchUp = false;
+ }
+
+ if (steadyLatchUp && count > totalCount) {
+ steadyLatch.countDown();
+ steadyLatchUp = false;
+ }
+ }
+
+ /**
+ * Skips skipCount amount of output at the beginning and then calculates
+ * the time taken to get steadyCount amount of outputs. skipCount is
+ * added to avoid the noise from init schedule and the drain data. (
+ * i.e., In order to get real steady state execution time).
+ *
+ * @return time in MILLISECONDS.
+ * @throws InterruptedException
+ */
+ public long getFixedOutputTime() throws InterruptedException {
+ releaseAndInitilize();
+ skipLatch.await();
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ steadyLatch.await();
+ stopwatch.stop();
+ long time = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ reportingTime(time);
+ return normalizedTime(time);
+ }
+
+ @Override
+ public long getFixedOutputTime(long timeout)
+ throws InterruptedException {
+ if (timeout < 1)
+ return getFixedOutputTime();
+
+ timeout = unnormalizedTime(timeout);
+ releaseAndInitilize();
+ skipLatch.await();
+ Stopwatch stopwatch = Stopwatch.createStarted();
+ while (steadyLatch.getCount() > 0
+ && stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeout) {
+ Thread.sleep(100);
+ }
+
+ stopwatch.stop();
+ long time = stopwatch.elapsed(TimeUnit.MILLISECONDS);
+ reportingTime(time);
+ if (time > timeout)
+ return -1;
+ return normalizedTime(time);
+ }
+
+ /**
+ * Releases all latches, and re-initializes the latches and counters.
+ */
+ protected void releaseAndInitilize() {
+ count = 0;
+ skipLatch.countDown();
+ skipLatch = new CountDownLatch(1);
+ skipLatchUp = true;
+ steadyLatch.countDown();
+ steadyLatch = new CountDownLatch(1);
+ steadyLatchUp = true;
+ }
+
+ public void reset() {
+ steadyLatch.countDown();
+ skipLatch.countDown();
+ count = 0;
+ }
+ }
+
+ public static final class BlockingTailChannel2
+ extends
+ AbstractBlockingTailChannel {
+
+ private volatile CountDownLatch skipLatch;
+
+ private boolean skipLatchUp;
+
+ private final Stopwatch stopWatch;
+
+ /**
+ * @param buffer
+ * @param conProvider
+ * @param conInfo
+ * @param bufferTokenName
+ * @param debugLevel
+ * For all above 5 parameters, see
+ * {@link BlockingInputChannel#BlockingInputChannel(Buffer, ConnectionProvider, ConnectionInfo, String, int)}
+ * @param skipCount
+ * : Skips this amount of output before evaluating the
+ * running time. This is added to avoid the noise from init
+ * schedule and the drain data. ( i.e., In order to get real
+ * steady state execution time)
+ * @param steadyCount
+ * : {@link #getFixedOutputTime()} calculates the time taken
+ * to get this amount of outputs ( after skipping skipCount
+ * number of outputs at the beginning).
+ */
+ public BlockingTailChannel2(Buffer buffer,
+ ConnectionProvider conProvider, ConnectionInfo conInfo,
+ String bufferTokenName, int debugLevel, int skipCount,
+ int steadyCount, String appName, String cfgPrefix) {
+ super(buffer, conProvider, conInfo, bufferTokenName, debugLevel,
+ skipCount, steadyCount, appName, cfgPrefix);
+ stopWatch = Stopwatch.createUnstarted();
+ skipLatch = new CountDownLatch(1);
+ this.skipLatchUp = true;
+ }
+
+ @Override
+ public void receiveData() {
+ super.receiveData();
+ count++;
+
+ if (skipLatchUp && count > skipCount) {
+ skipLatch.countDown();
+ skipLatchUp = false;
+ }
+
+ if (stopWatch.isRunning() && count > totalCount) {
+ stopWatch.stop();
+ }
+ }
+
+ /**
+ * Skips skipCount amount of output at the beginning and then calculates
+ * the time taken to get steadyCount amount of outputs. skipCount is
+ * added to avoid the noise from init schedule and the drain data. (
+ * i.e., In order to get real steady state execution time).
+ *
+ * @return time in MILLISECONDS.
+ * @throws InterruptedException
+ */
+ public long getFixedOutputTime() throws InterruptedException {
+ releaseAndInitilize();
+ skipLatch.await();
+ stopWatch.start();
+ while (stopWatch.isRunning())
+ Thread.sleep(250);
+ long time = stopWatch.elapsed(TimeUnit.MILLISECONDS);
+ reportingTime(time);
+ return normalizedTime(time);
+ }
+
+ @Override
+ public long getFixedOutputTime(long timeout)
+ throws InterruptedException {
+ if (timeout < 1)
+ return getFixedOutputTime();
+
+ timeout = unnormalizedTime(timeout);
+ releaseAndInitilize();
+ skipLatch.await();
+ stopWatch.start();
+ while (stopWatch.isRunning()
+ && stopWatch.elapsed(TimeUnit.MILLISECONDS) < timeout) {
+ Thread.sleep(250);
+ }
+
+ long time = stopWatch.elapsed(TimeUnit.MILLISECONDS);
+ reportingTime(time);
+ if (time > timeout)
+ return -1;
+ else
+ return normalizedTime(time);
+ }
+
+ /**
+ * Releases all latches, and re-initializes the latches and counters.
+ */
+ protected void releaseAndInitilize() {
+ count = 0;
+ skipLatch.countDown();
+ skipLatch = new CountDownLatch(1);
+ skipLatchUp = true;
+ stopWatch.reset();
+ }
+
+ public void reset() {
+ stopWatch.reset();
+ skipLatch.countDown();
+ count = 0;
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java b/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java
new file mode 100644
index 00000000..593a91e4
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java
@@ -0,0 +1,297 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.base.Stopwatch;
+
+import edu.mit.streamjit.impl.common.TimeLogger;
+import edu.mit.streamjit.impl.distributed.common.Utils;
+
+/**
+ * Collection of various {@link TimeLogger} implementations.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Nov 24, 2014
+ *
+ */
+public class TimeLoggers {
+
+ /**
+ * Creates three files named compileTime.txt, runTime.txt and drainTime.txt
+ * inside app.name directory, and logs the time information.
+ *
+ * @author sumanan
+ * @since Nov 25, 2014
+ */
+ public static class FileTimeLogger extends TimeLoggerImpl {
+
+ public FileTimeLogger(String appName) {
+ super(Utils.fileWriter(appName, "compileTime.txt"), Utils
+ .fileWriter(appName, "runTime.txt"), Utils.fileWriter(
+ appName, "drainTime.txt"), Utils.fileWriter(appName,
+ "searchTime.txt"));
+ }
+ }
+
+ /**
+ * Logs nothing.
+ */
+ public static class NoTimeLogger implements TimeLogger {
+
+ @Override
+ public void compilationFinished(boolean isCompiled, String msg) {
+ }
+
+ @Override
+ public void compilationStarted() {
+
+ }
+
+ @Override
+ public void drainingFinished(String msg) {
+ }
+
+ @Override
+ public void drainingStarted() {
+ }
+
+ @Override
+ public void logCompileTime(long time) {
+ }
+
+ @Override
+ public void logCompileTime(String msg) {
+ }
+
+ @Override
+ public void logDrainDataCollectionTime(long time) {
+ }
+
+ @Override
+ public void logDrainTime(long time) {
+ }
+
+ @Override
+ public void logDrainTime(String msg) {
+ }
+
+ @Override
+ public void logRunTime(long time) {
+ }
+
+ @Override
+ public void logRunTime(String msg) {
+ }
+
+ @Override
+ public void newConfiguration(String cfgPrefix) {
+ }
+
+ @Override
+ public void drainDataCollectionStarted() {
+ }
+
+ @Override
+ public void drainDataCollectionFinished(String msg) {
+ }
+
+ @Override
+ public void logSearchTime(long time) {
+ }
+ }
+
+ /**
+ * Prints the values to the StdOut.
+ *
+ */
+ public static class PrintTimeLogger extends TimeLoggerImpl {
+
+ public PrintTimeLogger() {
+ super(System.out, System.out, System.out, System.out);
+ }
+ }
+
+ private static class TimeLoggerImpl implements TimeLogger {
+
+ private final OutputStreamWriter compileTimeWriter;
+
+ private final OutputStreamWriter drainTimeWriter;
+
+ private final OutputStreamWriter runTimeWriter;
+
+ private final OutputStreamWriter searchTimeWriter;
+
+ private int reconfigNo = 0;
+
+ private Stopwatch compileTimeSW = null;
+
+ private Stopwatch drainTimeSW = null;
+
+ private Stopwatch drainDataCollectionTimeSW = null;
+
+ private Stopwatch tuningRoundSW = null;
+
+ TimeLoggerImpl(OutputStream compileOS, OutputStream runOs,
+ OutputStream drainOs, OutputStream searchOs) {
+ this(getOSWriter(compileOS), getOSWriter(runOs),
+ getOSWriter(drainOs), getOSWriter(searchOs));
+ }
+
+ TimeLoggerImpl(OutputStreamWriter compileW, OutputStreamWriter runW,
+ OutputStreamWriter drainW, OutputStreamWriter searchW) {
+ compileTimeWriter = compileW;
+ runTimeWriter = runW;
+ drainTimeWriter = drainW;
+ searchTimeWriter = searchW;
+ }
+
+ @Override
+ public void compilationFinished(boolean isCompiled, String msg) {
+ if (compileTimeSW != null) {
+ compileTimeSW.stop();
+ long time = compileTimeSW.elapsed(TimeUnit.MILLISECONDS);
+ logCompileTime(time);
+ }
+ }
+
+ @Override
+ public void compilationStarted() {
+ compileTimeSW = Stopwatch.createStarted();
+ }
+
+ @Override
+ public void drainingFinished(String msg) {
+ if (drainTimeSW != null && drainTimeSW.isRunning()) {
+ drainTimeSW.stop();
+ long time = drainTimeSW.elapsed(TimeUnit.MILLISECONDS);
+ logDrainTime(time);
+ }
+ }
+
+ @Override
+ public void drainingStarted() {
+ drainTimeSW = Stopwatch.createStarted();
+ }
+
+ @Override
+ public void drainDataCollectionStarted() {
+ drainDataCollectionTimeSW = Stopwatch.createStarted();
+ }
+
+ @Override
+ public void drainDataCollectionFinished(String msg) {
+ if (drainDataCollectionTimeSW != null) {
+ drainDataCollectionTimeSW.stop();
+ long time = drainDataCollectionTimeSW
+ .elapsed(TimeUnit.MILLISECONDS);
+ logDrainDataCollectionTime(time);
+ }
+ }
+
+ @Override
+ public void logCompileTime(long time) {
+ write(compileTimeWriter,
+ String.format("Total compile time %dms\n", time));
+ }
+
+ @Override
+ public void logCompileTime(String msg) {
+ write(compileTimeWriter, msg);
+ }
+
+ @Override
+ public void logDrainDataCollectionTime(long time) {
+ write(drainTimeWriter,
+ String.format("Drain data collection time is %dms\n", time));
+ }
+
+ @Override
+ public void logDrainTime(long time) {
+ write(drainTimeWriter, String.format("Drain time is %dms\n", time));
+ }
+
+ @Override
+ public void logDrainTime(String msg) {
+ write(drainTimeWriter, msg);
+ }
+
+ @Override
+ public void logRunTime(long time) {
+ write(runTimeWriter,
+ String.format("Execution time is %dms\n", time));
+ }
+
+ @Override
+ public void logRunTime(String msg) {
+ write(runTimeWriter, msg);
+ }
+
+ /**
+ * [24-02-2015] When a new configuration comes from the OpenTuner, we
+ * drain previous configuration. So the drainTime file should be updated
+ * with previous configuration prefix.
+ */
+ String prevcfgPrefix = "";
+
+ @Override
+ public void newConfiguration(String cfgPrefix) {
+ reconfigNo++;
+ if (cfgPrefix == null || cfgPrefix.isEmpty())
+ cfgPrefix = new Integer(reconfigNo).toString();
+
+ updateTuningRoundTime();
+
+ String msg = String
+ .format("----------------------------%s----------------------------\n",
+ cfgPrefix);
+ String msg1 = String
+ .format("----------------------------%s----------------------------\n",
+ prevcfgPrefix);
+ write(compileTimeWriter, msg);
+ write(runTimeWriter, msg);
+ write(searchTimeWriter, msg);
+ write(drainTimeWriter, msg1);
+ prevcfgPrefix = cfgPrefix;
+ }
+
+ private void updateTuningRoundTime() {
+ long time = 0;
+ if (tuningRoundSW == null)
+ tuningRoundSW = Stopwatch.createStarted();
+ else {
+ tuningRoundSW.stop();
+ time = tuningRoundSW.elapsed(TimeUnit.SECONDS);
+ tuningRoundSW.reset();
+ tuningRoundSW.start();
+ write(runTimeWriter,
+ String.format("Tuning round time %dS\n", time));
+ }
+ }
+
+ private static OutputStreamWriter getOSWriter(OutputStream os) {
+ if (os == null)
+ return null;
+ return new OutputStreamWriter(os);
+ }
+
+ private void write(OutputStreamWriter osWriter, String msg) {
+ if (osWriter != null) {
+ try {
+ osWriter.write(msg);
+ osWriter.flush();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void logSearchTime(long time) {
+ write(searchTimeWriter,
+ String.format("Search time is %dms\n", time));
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/Visualizer.java b/src/edu/mit/streamjit/impl/distributed/Visualizer.java
new file mode 100644
index 00000000..a52e5096
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/Visualizer.java
@@ -0,0 +1,303 @@
+package edu.mit.streamjit.impl.distributed;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import edu.mit.streamjit.api.Filter;
+import edu.mit.streamjit.api.Joiner;
+import edu.mit.streamjit.api.OneToOneElement;
+import edu.mit.streamjit.api.Pipeline;
+import edu.mit.streamjit.api.Splitjoin;
+import edu.mit.streamjit.api.Splitter;
+import edu.mit.streamjit.api.StreamVisitor;
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.common.Configuration;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.distributed.common.Utils;
+import edu.mit.streamjit.util.ConfigurationUtils;
+
+/**
+ * Interface to visualize a stream graph and its configurations. Use the
+ * constructor to get the stream graph.
+ *
+ * @author Sumanan
+ * @since 29 Dec, 2014
+ */
+public interface Visualizer {
+
+ /**
+ * Call this method with new configuration, whenever the configuration
+ * changes.
+ *
+ * @param cfg
+ */
+ public void newConfiguration(Configuration cfg);
+
+ /**
+ * Partitions Machine Map of the current configuration. Only the
+ * {@link PartitionManager} has the information to generate this map.
+ * Visualizer has no clue how to generate this partitionsMachineMap.
+ *
+ * @param partitionsMachineMap
+ */
+ public void newPartitionMachineMap(
+ Map>>> partitionsMachineMap);
+
+ /**
+ * Use this class to have no visualization.
+ *
+ */
+ public static class NoVisualizer implements Visualizer {
+
+ @Override
+ public void newConfiguration(Configuration cfg) {
+ return;
+ }
+
+ @Override
+ public void newPartitionMachineMap(
+ Map>>> partitionsMachineMap) {
+ return;
+ }
+ }
+
+ /**
+ * Generates dot file and then from the dot file generates graph. Before
+ * using this class, ensure that Graphviz is properly installed in the
+ * system.
+ */
+ public static class DotVisualizer implements Visualizer {
+
+ protected final OneToOneElement, ?> streamGraph;
+
+ private final String appName;
+
+ /**
+ * namePrefix of the current configuration.
+ */
+ private String namePrefix = "";
+
+ /**
+ * Tells whether the dot tool is installed in the system or not.
+ */
+ private boolean hasDot;
+
+ public DotVisualizer(OneToOneElement, ?> streamGraph) {
+ this.streamGraph = streamGraph;
+ this.appName = streamGraph.getClass().getSimpleName();
+ hasDot = true;
+ DOTstreamVisitor dotSV = new DOTstreamVisitor();
+ streamGraph.visit(dotSV);
+ }
+
+ /**
+ * Visits through the Stream graph and generates dot file.
+ *
+ * @author sumanan
+ * @since 29 Dec, 2014
+ */
+ private class DOTstreamVisitor extends StreamVisitor {
+
+ private final FileWriter writer;
+
+ DOTstreamVisitor() {
+ writer = Utils.fileWriter(appName, "streamgraph.dot");
+ }
+
+ private void initilizeDot() {
+ try {
+ writer.write(String.format("digraph %s {\n", appName));
+ writer.write("\trankdir=TD;\n");
+ writer.write("\tnodesep=0.5;\n");
+ writer.write("\tranksep=equally;\n");
+ // writer.write("\tnode [shape = circle];\n");
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void closeDot() {
+ try {
+ writer.write("}");
+ writer.flush();
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void beginVisit() {
+ initilizeDot();
+ }
+
+ @Override
+ public void visitFilter(Filter, ?> filter) {
+ updateDot(filter);
+ }
+
+ @Override
+ public boolean enterPipeline(Pipeline, ?> pipeline) {
+ return true;
+ }
+
+ @Override
+ public void exitPipeline(Pipeline, ?> pipeline) {
+
+ }
+
+ @Override
+ public boolean enterSplitjoin(Splitjoin, ?> splitjoin) {
+ return true;
+ }
+
+ @Override
+ public void visitSplitter(Splitter, ?> splitter) {
+ updateDot(splitter);
+ }
+
+ @Override
+ public boolean enterSplitjoinBranch(OneToOneElement, ?> element) {
+ return true;
+ }
+
+ @Override
+ public void exitSplitjoinBranch(OneToOneElement, ?> element) {
+ }
+
+ @Override
+ public void visitJoiner(Joiner, ?> joiner) {
+ updateDot(joiner);
+ }
+
+ @Override
+ public void exitSplitjoin(Splitjoin, ?> splitjoin) {
+ }
+
+ @Override
+ public void endVisit() {
+ closeDot();
+ runDot("streamgraph");
+ }
+
+ private void updateDot(Worker, ?> w) {
+ for (Worker, ?> suc : Workers.getSuccessors(w)) {
+ String first = w.getClass().getSimpleName();
+ String second = suc.getClass().getSimpleName();
+ int id = Workers.getIdentifier(w);
+ int sucID = Workers.getIdentifier(suc);
+ try {
+ writer.write(String.format("\t%d -> %d;\n", id, sucID));
+ // writer.write(String.format("\t%s -> %s;\n", first,
+ // second));
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void newConfiguration(Configuration cfg) {
+ namePrefix = ConfigurationUtils.getConfigPrefix(cfg);
+ }
+
+ private void runDot(String file) {
+ String outFileFormat = "svg";
+ String fileName = String.format("./%s%s%s.dot", appName,
+ File.separator, file);
+ String outFileName = String.format("./%s%s%s%s%s_%s.%s", appName,
+ File.separator, ConfigurationUtils.configDir,
+ File.separator, namePrefix, file, outFileFormat);
+ ProcessBuilder pb = new ProcessBuilder("dot", "-T" + outFileFormat,
+ fileName, "-o", outFileName);
+ try {
+ Process p = pb.start();
+ // TODO: [20-2-2015]. I am commenting the following line for
+ // some performance improvement. Look for bugs.
+ // p.waitFor();
+ } catch (IOException e) {
+ System.err
+ .println("DotVisualizer: dot(Graphviz) tool is not properly installed in the system");
+ hasDot = false;
+ // e.printStackTrace();
+ }
+ }
+
+ @Override
+ public void newPartitionMachineMap(
+ Map>>> partitionsMachineMap) {
+ if (!hasDot)
+ return;
+ FileWriter writer;
+ try {
+ writer = blobGraphWriter();
+ for (int machine : partitionsMachineMap.keySet()) {
+ for (Set> blobworkers : partitionsMachineMap
+ .get(machine)) {
+ Token blobID = Utils.getblobID(blobworkers);
+ writer.write(String
+ .format("\tsubgraph \"cluster_%s\" { color="
+ + "royalblue1; label = \"Blob-%s:Machine-%d\";",
+ blobID, blobID, machine));
+ Set workerIDs = getWorkerIds(blobworkers);
+ for (Integer id : workerIDs)
+ writer.write(String.format(" %d;", id));
+ writer.write("}\n");
+ }
+ }
+ writer.write("}\n");
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ runDot("blobgraph");
+ }
+ private Set getWorkerIds(Set> blobworkers) {
+ Set workerIds = new HashSet<>();
+ for (Worker, ?> w : blobworkers) {
+ workerIds.add(Workers.getIdentifier(w));
+ }
+ return workerIds;
+ }
+
+ /**
+ * Copies all lines except the final closing bracket from
+ * streamgraph.dot to blobgraph.dot.
+ *
+ * @return
+ * @throws IOException
+ */
+ private FileWriter blobGraphWriter() throws IOException {
+ File streamGraph = new File(String.format("./%s%sstreamgraph.dot",
+ appName, File.separator));
+ File blobGraph = new File(String.format("./%s%sblobgraph.dot",
+ appName, File.separator));
+ BufferedReader reader = new BufferedReader(new FileReader(
+ streamGraph));
+ FileWriter writer = new FileWriter(blobGraph, false);
+ String line;
+ int unclosedParenthesis = 0;
+ while ((line = reader.readLine()) != null) {
+ if (line.contains("{"))
+ unclosedParenthesis++;
+ if (line.contains("}"))
+ unclosedParenthesis--;
+ if (unclosedParenthesis > 0) {
+ writer.write(line);
+ writer.write("\n");
+ }
+ }
+ reader.close();
+ return writer;
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java b/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java
index 3d97b6cb..bcdc3d1d 100644
--- a/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java
+++ b/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java
@@ -28,17 +28,15 @@
import java.util.Map;
import java.util.Set;
-import com.google.common.collect.ImmutableSet;
+import static com.google.common.base.Preconditions.*;
-import edu.mit.streamjit.api.StreamCompilationFailedException;
import edu.mit.streamjit.api.Worker;
import edu.mit.streamjit.impl.common.Configuration;
-import edu.mit.streamjit.impl.common.Workers;
-import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph;
import edu.mit.streamjit.impl.common.Configuration.IntParameter;
import edu.mit.streamjit.impl.common.Configuration.Parameter;
import edu.mit.streamjit.impl.common.Configuration.SwitchParameter;
-import edu.mit.streamjit.impl.distributed.ConfigurationManager.AbstractConfigurationManager;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.distributed.PartitionManager.AbstractPartitionManager;
/**
* This class implements one type of search space. Adds "worker to machine"
@@ -58,15 +56,20 @@
* @since Jan 16, 2014
*
*/
-public final class WorkerMachine extends AbstractConfigurationManager {
+public final class WorkerMachine extends AbstractPartitionManager {
+
+ private final Set> workerset;
- WorkerMachine(StreamJitApp app) {
+ public WorkerMachine(StreamJitApp app) {
super(app);
+ this.workerset = Workers.getAllWorkersInGraph(app.source);
}
@Override
public Configuration getDefaultConfiguration(Set> workers,
int noOfMachines) {
+ checkArgument(noOfMachines > 0, String.format(
+ "noOfMachines = %d, It must be > 0", noOfMachines));
Configuration.Builder builder = Configuration.builder();
List machinelist = new ArrayList<>(noOfMachines);
for (int i = 1; i <= noOfMachines; i++)
@@ -90,49 +93,8 @@ public Configuration getDefaultConfiguration(Set> workers,
return builder.build();
}
- /**
- * Builds partitionsMachineMap and {@link BlobGraph} from the new
- * Configuration, and verifies for any cycles among blobs. If it is a valid
- * configuration, (i.e., no cycles among the blobs), then {@link #app}
- * object's member variables {@link StreamJitApp#blobConfiguration},
- * {@link StreamJitApp#blobGraph} and
- * {@link StreamJitApp#partitionsMachineMap} will be assigned according to
- * reflect the new configuration, no changes otherwise.
- *
- * @param config
- * New configuration form Opentuer.
- * @return true iff no cycles among blobs
- */
- @Override
- public boolean newConfiguration(Configuration config) {
-
- Map>>> partitionsMachineMap = getMachineWorkerMap(
- config, app.source);
- try {
- app.varifyConfiguration(partitionsMachineMap);
- } catch (StreamCompilationFailedException ex) {
- return false;
- }
- app.blobConfiguration = config;
- return true;
- }
-
- /**
- * Reads the configuration and returns a map of nodeID to list of set of
- * workers (list of blob workers) which are assigned to the node. Value of
- * the returned map is list of worker set where each worker set is an
- * individual blob.
- *
- * @param config
- * @param workerset
- * @return map of nodeID to list of set of workers which are assigned to the
- * node.
- */
- private Map>>> getMachineWorkerMap(
- Configuration config, Worker, ?> source) {
-
- ImmutableSet> workerset = Workers
- .getAllWorkersInGraph(source);
+ public Map>>> partitionMap(
+ Configuration config) {
Map>> partition = new HashMap<>();
for (Worker, ?> w : workerset) {
diff --git a/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java
new file mode 100644
index 00000000..bf3c569c
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java
@@ -0,0 +1,821 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.InetAddress;
+import java.nio.ByteBuffer;
+import java.nio.channels.AsynchronousSocketChannel;
+import java.nio.channels.CompletionHandler;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+import edu.mit.streamjit.impl.blob.AbstractWriteOnlyBuffer;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel;
+import edu.mit.streamjit.impl.distributed.node.StreamNode;
+
+/**
+ * Uses {@link AsynchronousSocketChannel} from Java's NIO.2 to send data. This
+ * class only supports bulk asynchronous write. Reads ({@link #readObject()}) or
+ * single object writes ({@link #writeObject(Object)}) are not supported.
+ * Serialises object array into {@link ByteBuffer} and sends it over a
+ * {@link AsynchronousSocketChannel}. Further, for performance purposes, in
+ * order to parallelise the serialisation task and the sending task, multiple
+ * {@link ByteBuffer}s are used. So that while user thread is serialising the
+ * data into a {@link ByteBuffer}, Java threads can send the already written
+ * bytebuffers.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since May 05, 2014
+ *
+ */
+public class AsyncTCPConnection implements Connection {
+ /**
+ * Backed by {@link ByteBufferArrayOutputStream}.
+ */
+ private ObjectOutputStream ooStream = null;
+
+ private AsynchronousSocketChannel asyncSktChannel;
+
+ private ByteBufferArrayOutputStream bBAos;
+
+ private boolean isconnected = false;
+
+ public AsyncTCPConnection(AsynchronousSocketChannel asyncSktChannel) {
+ this(asyncSktChannel, 5000);
+ }
+
+ /**
+ * @param socket
+ * @param resetCount
+ * reset the {@link ObjectOutputStream} after this number of sends,
+ * to avoid out-of-memory errors. NOTE(review): currently unused here.
+ */
+ public AsyncTCPConnection(AsynchronousSocketChannel asyncSktChannel,
+ int resetCount) {
+ try {
+ this.asyncSktChannel = asyncSktChannel;
+
+ bBAos = new ByteBufferArrayOutputStream(2);
+ ooStream = new ObjectOutputStream(bBAos);
+ isconnected = true;
+ } catch (IOException iex) {
+ isconnected = false;
+ iex.printStackTrace();
+ }
+ }
+
+ @Override
+ public void writeObject(Object obj) throws IOException {
+ throw new java.lang.Error("Method not Implemented");
+ /*
+ * if (isStillConnected()) {
+ *
+ * while (!canWrite.get()) ;
+ *
+ * try { ooStream.writeObject(obj); send(); } catch (IOException ix) {
+ * isconnected = false; throw ix; } } else { throw new
+ * IOException("TCPConnection: Socket is not connected"); }
+ */
+ }
+
+ public int write(Object[] data, int offset, int length) throws IOException {
+
+ final ObjectOutputStream objOS = this.ooStream;
+ final ByteBufferArrayOutputStream bBAos = this.bBAos;
+
+ int written = 0;
+ if (bBAos.newWrite()) {
+ while (written < length) {
+ objOS.writeObject(data[offset++]);
+ ++written;
+ }
+ objOS.reset();
+ bBAos.writeCompleted();
+ }
+
+ send();
+ return written;
+ }
+
+ private void send() {
+ final ByteBufferOutputStream bBos;
+ final ByteBufferArrayOutputStream bBAos;
+
+ bBAos = this.bBAos;
+
+ bBos = bBAos.newRead();
+ if (bBos == null)
+ return;
+
+ ByteBuffer bb = bBos.getByteBuffer();
+ bb.flip();
+ asyncSktChannel.write(bb, bb,
+ new CompletionHandler() {
+ @Override
+ public void completed(Integer result, ByteBuffer attachment) {
+
+ if (attachment.hasRemaining()) {
+ asyncSktChannel.write(attachment, attachment, this);
+ } else {
+ bBAos.readCompleted();
+ send();
+ }
+ }
+
+ @Override
+ public void failed(Throwable exc, ByteBuffer attachment) {
+ isconnected = false;
+ exc.printStackTrace();
+ }
+ });
+ }
+
+ public final void closeConnection() {
+ isconnected = false;
+ try {
+ if (ooStream != null)
+ this.ooStream.close();
+ if (asyncSktChannel != null)
+ this.asyncSktChannel.close();
+ } catch (IOException ex) {
+ ex.printStackTrace();
+ }
+ }
+
+ @Override
+ public final boolean isStillConnected() {
+ // return (this.socket.isConnected() && !this.socket.isClosed());
+ return isconnected;
+ }
+
+ @Override
+ public T readObject() throws IOException, ClassNotFoundException {
+ throw new java.lang.Error(
+ "Reading object is not supported in asynchronous tcp mode");
+ }
+
+ @Override
+ public void softClose() throws IOException {
+ while (!bBAos.newWrite()) {
+ try {
+ // TODO : Find correct time for sleep.
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ this.ooStream.write('\u001a');
+ this.ooStream.flush();
+ bBAos.writeCompleted();
+ send();
+ // System.err.println("Softclose is called");
+ }
+
+ /**
+ * This class implements an output stream in which the data is written into
+ * a byte array. The buffer automatically grows as data is written to it.
+ * The data can be retrieved using toByteArray() and
+ * toString().
+ *
+ * Closing a ByteArrayOutputStream has no effect. The methods in
+ * this class can be called after the stream has been closed without
+ * generating an IOException.
+ *
+ * @author Arthur van Hoff
+ * @since JDK1.0
+ *
+ * This is a copy of {@link ByteArrayOutputStream} and byte array in
+ * ByteArrayOutputStream is replaced by {@link ByteBuffer} for
+ * performance.
+ * @author sumanan
+ * @since May 10, 2014
+ */
+ public class ByteBufferOutputStream extends OutputStream {
+
+ /**
+ * The buffer where data is stored.
+ */
+ protected ByteBuffer bb;
+
+ /**
+ * The number of valid bytes in the buffer.
+ */
+ protected int count;
+
+ /**
+ * Creates a new byte array output stream. The buffer capacity is
+ * initially 10 MB (see the no-arg delegation), growing if necessary.
+ */
+ public ByteBufferOutputStream() {
+ this(10 * 1024 * 1024);
+ }
+
+ public int getCount() {
+ return count;
+ }
+
+ /**
+ * Creates a new byte array output stream, with a buffer capacity of the
+ * specified size, in bytes.
+ *
+ * @param size
+ * the initial size.
+ * @exception IllegalArgumentException
+ * if size is negative.
+ */
+ public ByteBufferOutputStream(int size) {
+ if (size < 0) {
+ throw new IllegalArgumentException("Negative initial size: "
+ + size);
+ }
+ bb = ByteBuffer.allocateDirect(size);
+ }
+
+ /**
+ * Increases the capacity if necessary to ensure that it can hold at
+ * least the number of elements specified by the minimum capacity
+ * argument.
+ *
+ * @param minCapacity
+ * the desired minimum capacity
+ * @throws OutOfMemoryError
+ * if {@code minCapacity < 0}. This is interpreted as a
+ * request for the unsatisfiably large capacity
+ * {@code (long) Integer.MAX_VALUE + (minCapacity - Integer.MAX_VALUE)}
+ * .
+ */
+ private void ensureCapacity(int minCapacity) {
+ // overflow-conscious code
+ if (minCapacity - bb.capacity() > 0)
+ grow(minCapacity);
+ }
+
+ /**
+ * Increases the capacity to ensure that it can hold at least the number
+ * of elements specified by the minimum capacity argument.
+ *
+ * @param minCapacity
+ * the desired minimum capacity
+ */
+ private void grow(int minCapacity) {
+ // overflow-conscious code
+ int oldCapacity = bb.capacity();
+ int newCapacity = oldCapacity << 1;
+ if (newCapacity - minCapacity < 0)
+ newCapacity = minCapacity;
+ if (newCapacity < 0) {
+ if (minCapacity < 0) // overflow
+ throw new OutOfMemoryError();
+ newCapacity = Integer.MAX_VALUE;
+ }
+ ByteBuffer newBb = ByteBuffer.allocateDirect(newCapacity);
+ newBb.clear();
+ bb.flip();
+ newBb.put(bb);
+ bb = newBb;
+ System.out.println("Growing bytebuffer. newCapacity = "
+ + newCapacity);
+ }
+
+ /**
+ * Writes len bytes from the specified byte array starting
+ * at offset off to this output stream. The general
+ * contract for write(b, off, len) is that some of the
+ * bytes in the array b are written to the output stream in
+ * order; element b[off] is the first byte written and
+ * b[off+len-1] is the last byte written by this operation.
+ *
+ * The write method of OutputStream calls the
+ * write method of one argument on each of the bytes to be written out.
+ * Subclasses are encouraged to override this method and provide a more
+ * efficient implementation.
+ *
+ * If b is null, a
+ * NullPointerException is thrown.
+ *
+ * If off is negative, or len is negative, or
+ * off+len is greater than the length of the array
+ * b, then an IndexOutOfBoundsException is thrown.
+ *
+ * @param b
+ * the data.
+ * @param off
+ * the start offset in the data.
+ * @param len
+ * the number of bytes to write.
+ * @exception IOException
+ * if an I/O error occurs. In particular, an
+ * IOException is thrown if the output
+ * stream is closed.
+ */
+ public void write(byte b[], int off, int len) throws IOException {
+ if (b == null) {
+ throw new NullPointerException();
+ } else if ((off < 0) || (off > b.length) || (len < 0)
+ || ((off + len) > b.length) || ((off + len) < 0)) {
+ throw new IndexOutOfBoundsException();
+ } else if (len == 0) {
+ return;
+ }
+ ensureCapacity(count + len);
+ bb.put(b, off, len);
+ count += len;
+ assert count == bb.position() : "count != bb.position()";
+ }
+
+ /**
+ * Writes the specified byte to this byte array output stream.
+ *
+ * @param b
+ * the byte to be written.
+ */
+ public synchronized void write(int b) {
+ ensureCapacity(count + 1);
+ bb.put((byte) b);
+ count += 1;
+ assert count == bb.position() : "count != bb.position()";
+ }
+
+ /**
+ * Writes len bytes from the specified byte array starting
+ * at offset off to this byte array output stream.
+ *
+ * @param b
+ * the data.
+ * @param off
+ * the start offset in the data.
+ * @param len
+ * the number of bytes to write.
+ *
+ * public synchronized void write(byte b[], int off, int len)
+ * { if ((off < 0) || (off > b.length) || (len < 0) || ((off
+ * + len) - b.length > 0)) { throw new
+ * IndexOutOfBoundsException(); } ensureCapacity(count +
+ * len); System.arraycopy(b, off, buf, count, len); count +=
+ * len; }
+ */
+
+ /**
+ * Writes the complete contents of this byte array output stream to the
+ * specified output stream argument, as if by calling the output
+ * stream's write method using out.write(buf, 0, count).
+ *
+ * @param out
+ * the output stream to which to write the data.
+ * @exception IOException
+ * if an I/O error occurs.
+ */
+ public synchronized void writeTo(OutputStream out) throws IOException {
+ out.write(getByteArray(), 0, count);
+ }
+
+ /**
+ * Resets the count field of this byte array output stream
+ * to zero, so that all currently accumulated output in the output
+ * stream is discarded. The output stream can be used again, reusing the
+ * already allocated buffer space.
+ *
+ * @see java.io.ByteArrayInputStream#count
+ */
+ public synchronized void reset() {
+ bb.position(0);
+ bb.limit(bb.capacity());
+ count = 0;
+ }
+
+ /**
+ * Creates a newly allocated byte array. Its size is the current size of
+ * this output stream and the valid contents of the buffer have been
+ * copied into it.
+ *
+ * @return the current contents of this output stream, as a byte array.
+ * @see java.io.ByteArrayOutputStream#size()
+ */
+ public synchronized byte toByteArray()[] {
+ return getByteArray();
+ }
+
+ /**
+ * Returns the current size of the buffer.
+ *
+ * @return the value of the count field, which is the
+ * number of valid bytes in this output stream.
+ * @see java.io.ByteArrayOutputStream#count
+ */
+ public synchronized int size() {
+ assert count == bb.position() : "count != bb.position()";
+ return count;
+ }
+
+ /**
+ * Converts the buffer's contents into a string decoding bytes using the
+ * platform's default character set. The length of the new
+ * String is a function of the character set, and hence may not
+ * be equal to the size of the buffer.
+ *
+ *
+ * This method always replaces malformed-input and unmappable-character
+ * sequences with the default replacement string for the platform's
+ * default character set. The
+ * {@linkplain java.nio.charset.CharsetDecoder} class should be used
+ * when more control over the decoding process is required.
+ *
+ * @return String decoded from the buffer's contents.
+ * @since JDK1.1
+ */
+ public synchronized String toString() {
+ return new String(getByteArray(), 0, count);
+ }
+
+ /**
+ * Converts the buffer's contents into a string by decoding the bytes
+ * using the specified {@link java.nio.charset.Charset charsetName}. The
+ * length of the new String is a function of the charset, and
+ * hence may not be equal to the length of the byte array.
+ *
+ *
+ * This method always replaces malformed-input and unmappable-character
+ * sequences with this charset's default replacement string. The
+ * {@link java.nio.charset.CharsetDecoder} class should be used when
+ * more control over the decoding process is required.
+ *
+ * @param charsetName
+ * the name of a supported
+ * {@linkplain java.nio.charset.Charset charset}
+ * @return String decoded from the buffer's contents.
+ * @exception UnsupportedEncodingException
+ * If the named charset is not supported
+ * @since JDK1.1
+ */
+ public synchronized String toString(String charsetName)
+ throws UnsupportedEncodingException {
+ return new String(getByteArray(), 0, count, charsetName);
+ }
+
+ /**
+ * Creates a newly allocated string. Its size is the current size of the
+ * output stream and the valid contents of the buffer have been copied
+ * into it. Each character c in the resulting string is
+ * constructed from the corresponding element b in the byte array
+ * such that:
+ *
+ * @deprecated This method does not properly convert bytes into
+ * characters. As of JDK 1.1, the preferred way to do
+ * this is via the toString(String enc) method,
+ * which takes an encoding-name argument, or the
+ * toString() method, which uses the platform's
+ * default character encoding.
+ *
+ * @param hibyte
+ * the high byte of each resulting Unicode character.
+ * @return the current contents of the output stream, as a string.
+ * @see java.io.ByteArrayOutputStream#size()
+ * @see java.io.ByteArrayOutputStream#toString(String)
+ * @see java.io.ByteArrayOutputStream#toString()
+ */
+ @Deprecated
+ public synchronized String toString(int hibyte) {
+ return new String(getByteArray(), hibyte, 0, count);
+ }
+
+ /**
+ * Closing a ByteArrayOutputStream has no effect. The methods
+ * in this class can be called after the stream has been closed without
+ * generating an IOException.
+ *
+ *
+ */
+ public void close() throws IOException {
+ }
+
+ private byte[] getByteArray() {
+ bb.flip();
+ final int size = bb.remaining();
+ byte[] buf = new byte[size];
+ bb.get(buf, 0, size);
+ assert count == bb.position() : "count != bb.position()";
+ return buf;
+ }
+
+ public ByteBuffer getByteBuffer() {
+ return bb;
+ }
+ }
+
+ /**
+ * A {@link ByteBufferOutputStream} ( implicitly {@link ByteBuffer} ) can be
+ * in one of following 4 state. State of a {@link ByteBufferOutputStream}
+ * expected to change in a cyclic manner, from canWrite -> beingWritten ->
+ * canRead -> beingRead -> canWrite.
+ *
+ * @author sumanan
+ */
+ private enum Status {
+ canWrite, beingWritten, canRead, beingRead
+ }
+
+ /**
+ * Writers must call {@link #newWrite()} before beginning the write process and
+ * call {@link #writeCompleted()} after the write process ends. Whatever is
+ * written in between these two calls will be captured into a single
+ * {@link ByteBufferOutputStream}.
+ *
+ * Like writers, readers also call {@link #newRead()} to get the current
+ * {@link ByteBufferOutputStream} to read and must call
+ * {@link #readCompleted()} after the end of read process.
+ *
+ * @author sumanan
+ *
+ */
+ public class ByteBufferArrayOutputStream extends OutputStream {
+
+ private final int debugLevel;
+
+ /**
+ * Read index of {@link #bytebufferArray}.
+ */
+ private int readIndex;
+
+ /**
+ * Write index of {@link #bytebufferArray}.
+ */
+ private int writeIndex;
+
+ private final ByteBufferOutputStream[] bytebufferArray;
+
+ /**
+ * Keeps the {@link Status} of each element in the
+ * {@link #bytebufferArray}
+ */
+ private Map> bufferStatus;
+
+ public ByteBufferArrayOutputStream(int listSize) {
+ debugLevel = 0;
+ writeIndex = 0;
+ readIndex = 0;
+ bytebufferArray = new ByteBufferOutputStream[listSize];
+ bufferStatus = new HashMap<>(listSize);
+ for (int i = 0; i < bytebufferArray.length; i++) {
+ bytebufferArray[i] = new ByteBufferOutputStream();
+ bufferStatus.put(i,
+ new AtomicReference(Status.canWrite));
+ }
+ }
+
+ @Override
+ public void write(int b) throws IOException {
+ bytebufferArray[writeIndex].write(b);
+ }
+
+ public void write(byte b[], int off, int len) throws IOException {
+ bytebufferArray[writeIndex].write(b, off, len);
+ }
+
+ /**
+ * Do not forget to call {@link #writeCompleted()} after every
+ * successful bulk write. Whatever is written in between these two calls
+ * will be captured into a single {@link ByteBufferOutputStream}.
+ *
+ * @return true iff the next buffer is free to write.
+ */
+ public boolean newWrite() {
+ if (bufferStatus.get(writeIndex).compareAndSet(Status.canWrite,
+ Status.beingWritten)) {
+
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : newWrite-canWrite : " + "writeIndex - "
+ + writeIndex + ", readIndex - " + readIndex);
+ return true;
+ } else {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : newWrite-failed : " + "writeIndex - "
+ + writeIndex + ", readIndex - " + readIndex);
+ return false;
+ }
+ }
+
+ /**
+ * Writer must call this method right after the writing of a collection
+ * of objects is completed.
+ */
+ public void writeCompleted() {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : writeCompleted : " + "writeIndex - " + writeIndex
+ + ", readIndex - " + readIndex);
+ int w = writeIndex;
+ writeIndex = (writeIndex + 1) % bytebufferArray.length;
+ boolean ret = bufferStatus.get(w).compareAndSet(
+ Status.beingWritten, Status.canRead);
+ if (!ret) {
+ String msg = String.format("BufferState conflict : "
+ + "writeIndex - " + writeIndex + ", readIndex - "
+ + readIndex + " - Status of the writeBuffer is "
+ + bufferStatus.get(w).get());
+ throw new IllegalStateException(msg);
+ }
+ }
+
+ /**
+ * Do not forget to call {@link #readCompleted()} after every successful
+ * read of a {@link ByteBufferOutputStream}.
+ *
+ * @return Next available {@link ByteBufferOutputStream} if available or
+ * null if no {@link ByteBufferOutputStream} is
+ * available to read.
+ */
+ public synchronized ByteBufferOutputStream newRead() {
+ if (bufferStatus.get(readIndex).get() == Status.beingRead) {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : newRead-beingRead : " + "writeIndex - "
+ + writeIndex + ", readIndex - " + readIndex);
+ return null;
+ }
+
+ if (bufferStatus.get(readIndex).compareAndSet(Status.canRead,
+ Status.beingRead)) {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : newRead-canRead : " + "writeIndex - "
+ + writeIndex + ", readIndex - " + readIndex);
+ if (bytebufferArray[readIndex].getCount() == 0) {
+ throw new IllegalStateException(
+ "bytebufferArray[a].getCount() != 0 is expected.");
+ }
+ return bytebufferArray[readIndex];
+ } else {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : newRead - not can read " + readIndex);
+ return null;
+ }
+ }
+
+ /**
+ * Reader must call this method right after the reading process is
+ * completed.
+ */
+ public void readCompleted() {
+ if (debugLevel > 0)
+ System.out.println(Thread.currentThread().getName()
+ + " : readCompleted : " + "writeIndex - " + writeIndex
+ + ", readIndex - " + readIndex);
+ bytebufferArray[readIndex].reset();
+ int r = readIndex;
+ readIndex = (readIndex + 1) % bytebufferArray.length;
+ boolean ret = bufferStatus.get(r).compareAndSet(Status.beingRead,
+ Status.canWrite);
+ if (!ret)
+ throw new IllegalStateException("bufferStatus conflict");
+ }
+ }
+
+ /**
+ * Uniquely identifies an asynchronous TCP connection among all connected
+ * machines.
+ *
+ *
+ * NOTE: IPAddress is not included for the moment to avoid re-sending same
+ * information again and again for every reconfiguration. machineId to
+ * {@link NodeInfo} map will be sent initially. So {@link StreamNode}s can
+ * get ipAddress of a machine from that map.
+ */
+ public static class AsyncTCPConnectionInfo extends ConnectionInfo {
+
+ private static final long serialVersionUID = 1L;
+
+ private final int portNo;
+
+ public AsyncTCPConnectionInfo(int srcID, int dstID, int portNo) {
+ super(srcID, dstID, false);
+ Ipv4Validator validator = Ipv4Validator.getInstance();
+ if (!validator.isValid(portNo))
+ throw new IllegalArgumentException("Invalid port No");
+ this.portNo = portNo;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = super.hashCode();
+ result = prime * result + portNo;
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (!super.equals(obj))
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ AsyncTCPConnectionInfo other = (AsyncTCPConnectionInfo) obj;
+ if (portNo != other.portNo)
+ return false;
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "AsyncTCPConnectionInfo [srcID=" + getSrcID() + ", dstID="
+ + getDstID() + ", portID=" + portNo + "]";
+ }
+
+ @Override
+ public Connection makeConnection(int nodeID, NetworkInfo networkInfo,
+ int timeOut) {
+ Connection con = null;
+ if (srcID == nodeID) {
+ try {
+ con = ConnectionFactory.getAsyncConnection(portNo);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ else if (dstID == nodeID) {
+ InetAddress ipAddress = networkInfo.getInetAddress(srcID);
+ try {
+ con = ConnectionFactory.getConnection(
+ ipAddress.getHostAddress(), portNo, false);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ } else {
+ throw new IllegalArgumentException(
+ "Neither srcID nor dstID matches with nodeID");
+ }
+ return con;
+ }
+
+ @Override
+ public BoundaryInputChannel inputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ return new BlockingInputChannel(bufSize, conProvider, this,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryOutputChannel outputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ return new AsyncOutputChannel(conProvider, this, t.toString(), 0);
+ }
+ }
+
+ public static class AsyncTCPBuffer extends AbstractWriteOnlyBuffer {
+
+ private final AsyncTCPConnection con;
+
+ public AsyncTCPBuffer(AsyncTCPConnection con) {
+ this.con = con;
+ }
+
+ @Override
+ public boolean write(Object t) {
+ try {
+ con.writeObject(t);
+ return true;
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return false;
+ }
+
+ public int write(Object[] data, int offset, int length) {
+ try {
+ return con.write(data, offset, length);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return 0;
+ }
+
+ @Override
+ public int size() {
+ return 0;
+ }
+
+ @Override
+ public int capacity() {
+ return Integer.MAX_VALUE;
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java
index a2f51f56..68af5fd3 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java
@@ -21,11 +21,11 @@
*/
package edu.mit.streamjit.impl.distributed.common;
-import java.io.IOException;
-
import com.google.common.collect.ImmutableList;
import edu.mit.streamjit.impl.blob.Buffer;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
/**
* {@link BoundaryChannel} wraps a {@link Buffer} that crosses over the
@@ -41,30 +41,19 @@ public interface BoundaryChannel {
String name();
- /**
- * Close the connection.
- *
- * @throws IOException
- */
- void closeConnection() throws IOException;
-
- /**
- * @return true iff the connection with the other node is still valid.
- */
- boolean isStillConnected();
-
/**
* @return {@link Runnable} that does all IO communication and send
* data(stream tuples) to other node (or receive from other node).
*/
Runnable getRunnable();
- /**
- * @return Other end of the node's ID.
- */
- int getOtherNodeID();
+ ImmutableList getUnprocessedData();
+
+ Connection getConnection();
+
+ ConnectionInfo getConnectionInfo();
- public ImmutableList getUnprocessedData();
+ Buffer getBuffer();
/**
* Interface that represents input channels.
@@ -88,20 +77,10 @@ public interface BoundaryInputChannel extends BoundaryChannel {
*
* Based on the type argument, implementation may treat uncounsumed data
* differently
- *
- *
1 - No extraBuffer. Wait and push all received data in to the
- * actual buffer. May be used at final draining.
- *
2 - Create extra buffer and put all unconsumed data. This can be
- * send to the controller as draindata. May be used at intermediate
- * draining.
- *
3 - Discard all unconsumed data. This is useful, if we don't care
- * about the data while tuning for performance.
*
*
- * @param type
- * : Can be 1, 2 or 3. rest are illegal.
*/
- void stop(int type);
+ void stop(DrainType type);
/**
* Receive data from other node.
diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java
new file mode 100644
index 00000000..28a282d5
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java
@@ -0,0 +1,101 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.blob.Buffer;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider;
+import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel;
+
+/**
+ * {@link BoundaryChannel} maker.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since May 28, 2014
+ */
+public interface BoundaryChannelFactory {
+
+ BoundaryInputChannel makeInputChannel(Token t, Buffer buffer,
+ ConnectionInfo conInfo);
+
+ BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer,
+ ConnectionInfo conInfo);
+
+ BoundaryInputChannel makeInputChannel(Token t, int bufSize,
+ ConnectionInfo conInfo);
+
+ BoundaryOutputChannel makeOutputChannel(Token t, int bufSize,
+ ConnectionInfo conInfo);
+
+ /**
+ * Makes blocking {@link BlockingInputChannel} and {@link BlockingOutputChannel}.
+ *
+ */
+ public static class TCPBoundaryChannelFactory
+ implements
+ BoundaryChannelFactory {
+
+ protected final ConnectionProvider conProvider;
+
+ public TCPBoundaryChannelFactory(ConnectionProvider conProvider) {
+ this.conProvider = conProvider;
+ }
+
+ @Override
+ public BoundaryInputChannel makeInputChannel(Token t, Buffer buffer,
+ ConnectionInfo conInfo) {
+ return new BlockingInputChannel(buffer, conProvider, conInfo,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer,
+ ConnectionInfo conInfo) {
+ return new BlockingOutputChannel(buffer, conProvider, conInfo,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryInputChannel makeInputChannel(Token t, int bufSize,
+ ConnectionInfo conInfo) {
+ return new BlockingInputChannel(bufSize, conProvider, conInfo,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryOutputChannel makeOutputChannel(Token t, int bufSize,
+ ConnectionInfo conInfo) {
+ return new BlockingOutputChannel(bufSize, conProvider, conInfo,
+ t.toString(), 0);
+ }
+ }
+
+ /**
+ * Makes blocking {@link BlockingInputChannel} and asynchronous
+ * {@link AsyncOutputChannel}.
+ *
+ */
+ public class AsyncBoundaryChannelFactory extends TCPBoundaryChannelFactory {
+
+ public AsyncBoundaryChannelFactory(ConnectionProvider conProvider) {
+ super(conProvider);
+ }
+
+ @Override
+ public BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer,
+ ConnectionInfo conInfo) {
+ return new AsyncOutputChannel(conProvider, conInfo,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryOutputChannel makeOutputChannel(Token t, int bufSize,
+ ConnectionInfo conInfo) {
+ return new AsyncOutputChannel(conProvider, conInfo,
+ t.toString(), 0);
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java
new file mode 100644
index 00000000..951b77cb
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java
@@ -0,0 +1,176 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.ImmutableMap;
+
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
+import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel;
+
+/**
+ * Manages set of {@link BoundaryChannel}s.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since May 28, 2014
+ */
+public interface BoundaryChannelManager {
+
+ void start();
+
+ void waitToStart();
+
+ void waitToStop();
+
+ public interface BoundaryInputChannelManager extends BoundaryChannelManager {
+
+ /**
+ * In streamJit, a channel can be identified by a {@link Token}.
+ *
+ * @return map of channel {@link Token}, {@link BoundaryInputChannel}
+ * handled by this manager.
+ */
+ ImmutableMap inputChannelsMap();
+
+ /**
+ * @param stopType
+ * See {@link BoundaryInputChannel#stop(int)}
+ */
+ void stop(DrainType stopType);
+ }
+
+ public interface BoundaryOutputChannelManager
+ extends
+ BoundaryChannelManager {
+
+ /**
+ * In streamJit, a channel can be identified by a {@link Token}.
+ *
+ * @return map of channel {@link Token}, {@link BoundaryOutputChannel}
+ * handled by this manager.
+ */
+ ImmutableMap outputChannelsMap();
+
+ /**
+ * @param stopType
+ * See {@link BoundaryOutputChannel#stop(boolean)}
+ */
+ void stop(boolean stopType);
+ }
+
+ public static class InputChannelManager
+ implements
+ BoundaryInputChannelManager {
+
+ private final ImmutableMap inputChannels;
+
+ private final Set inputChannelThreads;
+
+ public InputChannelManager(
+ final ImmutableMap inputChannels) {
+ this.inputChannels = inputChannels;
+ inputChannelThreads = new HashSet<>(inputChannels.values().size());
+ }
+
+ @Override
+ public void start() {
+ for (BoundaryInputChannel bc : inputChannels.values()) {
+ Thread t = new Thread(bc.getRunnable(), bc.name());
+ t.start();
+ inputChannelThreads.add(t);
+ }
+ }
+
+ @Override
+ public void waitToStart() {
+ }
+
+ @Override
+ public void waitToStop() {
+ for (Thread t : inputChannelThreads) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void stop(DrainType stopType) {
+ for (BoundaryInputChannel bc : inputChannels.values()) {
+ bc.stop(stopType);
+ }
+ }
+
+ @Override
+ public ImmutableMap inputChannelsMap() {
+ return inputChannels;
+ }
+ }
+
+ public static class OutputChannelManager
+ implements
+ BoundaryOutputChannelManager {
+
+ protected final ImmutableMap outputChannels;
+ protected final Map outputChannelThreads;
+
+ public OutputChannelManager(
+ ImmutableMap outputChannels) {
+ this.outputChannels = outputChannels;
+ outputChannelThreads = new HashMap<>(outputChannels.values().size());
+ }
+
+ @Override
+ public void start() {
+ for (BoundaryOutputChannel bc : outputChannels.values()) {
+ Thread t = new Thread(bc.getRunnable(), bc.name());
+ t.start();
+ outputChannelThreads.put(bc, t);
+ }
+ }
+
+ @Override
+ public void waitToStart() {
+ for (Map.Entry en : outputChannelThreads
+ .entrySet()) {
+ if (en.getKey() instanceof AsyncOutputChannel) {
+ try {
+ en.getValue().join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void stop(boolean stopType) {
+ for (BoundaryOutputChannel bc : outputChannels.values()) {
+ bc.stop(stopType);
+ }
+ }
+
+ @Override
+ public void waitToStop() {
+ for (Thread t : outputChannelThreads.values()) {
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public ImmutableMap outputChannelsMap() {
+ return outputChannels;
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java b/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java
index 478cee6c..9b024e9b 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java
@@ -25,7 +25,7 @@
import edu.mit.streamjit.impl.blob.Blob;
import edu.mit.streamjit.impl.blob.Blob.Token;
-import edu.mit.streamjit.impl.blob.DrainData;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
import edu.mit.streamjit.impl.distributed.runtimer.Controller;
@@ -81,12 +81,7 @@ public void process(CTRLRDrainProcessor dp) {
public static final class DoDrain extends CTRLRDrainElement {
private static final long serialVersionUID = 1L;
- /**
- * Instead of sending another object to get the {@link DrainData},
- * {@link Controller} can set this flag to get the drain data once
- * draining is done.
- */
- public final boolean reqDrainData;
+ public final DrainType drainType;
/**
* Identifies the blob. Since {@link Blob}s do not have an unique
@@ -95,9 +90,9 @@ public static final class DoDrain extends CTRLRDrainElement {
*/
public final Token blobID;
- public DoDrain(Token blobID, boolean reqDrainData) {
+ public DoDrain(Token blobID, DrainType drainType) {
this.blobID = blobID;
- this.reqDrainData = reqDrainData;
+ this.drainType = drainType;
}
@Override
@@ -119,4 +114,35 @@ public interface CTRLRDrainProcessor {
public void process(DoDrain drain);
}
+
+ /**
+ * Three types of draining are possible.
+ */
+ public enum DrainType {
+ /**
+ * Final draining. No drain data. All {@link Blob}s are expected to run
+ * and finish data in input buffers buffers.
+ */
+ FINAL(1), /**
+ * Intermediate draining. Drain data is required in this mode.
+ * {@link BoundaryInputChannel}s may create extra buffer and put all
+ * unconsumed data, and finally send this drain data to the
+ * {@link Controller} for reconfiguration.
+ */
+ INTERMEDIATE(2), /**
+	 * Discard all unconsumed data. This is useful if we
+ * don't care about the data while tuning for performance.
+ *
+ */
+ DISCARD(3);
+ private final int code;
+
+ DrainType(int code) {
+ this.code = code;
+ }
+
+ public int toint() {
+ return code;
+ }
+ }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java b/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java
index 3deb0a17..4cdf8e81 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java
@@ -21,6 +21,8 @@
*/
package edu.mit.streamjit.impl.distributed.common;
+import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand;
+
/**
* Visitor pattern. We have to have overloaded visit method to all sub type of
* {@link MessageElement}s. See the {@link MessageElement}.
@@ -39,4 +41,6 @@ public interface CTRLRMessageVisitor {
public void visit(CTRLRDrainElement ctrlrDrainElement);
public void visit(MiscCtrlElements miscCtrlElements);
+
+ public void visit(ProfilerCommand command);
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java b/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java
index 5395559c..8880af68 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java
@@ -23,7 +23,7 @@
import edu.mit.streamjit.impl.blob.DrainData;
import edu.mit.streamjit.impl.common.Configuration;
-import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor.ConfigType;
+import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor.ConfigType;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
import edu.mit.streamjit.impl.distributed.runtimer.Controller;
@@ -55,7 +55,7 @@ public void accept(CTRLRMessageVisitor visitor) {
visitor.visit(this);
}
- public void process(ConfigurationStringProcessor jp) {
+ public void process(ConfigurationProcessor jp) {
jp.process(jsonString, type, drainData);
}
@@ -66,7 +66,7 @@ public void process(ConfigurationStringProcessor jp) {
* @author Sumanan sumanan@mit.edu
* @since May 27, 2013
*/
- public interface ConfigurationStringProcessor {
+ public interface ConfigurationProcessor {
public void process(String cfg, ConfigType type, DrainData drainData);
diff --git a/src/edu/mit/streamjit/impl/distributed/common/Connection.java b/src/edu/mit/streamjit/impl/distributed/common/Connection.java
index 22b2fa91..f2091c82 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/Connection.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/Connection.java
@@ -21,16 +21,23 @@
*/
package edu.mit.streamjit.impl.distributed.common;
+import static com.google.common.base.Preconditions.checkNotNull;
+
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.Serializable;
+import java.net.SocketTimeoutException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
-import edu.mit.streamjit.impl.distributed.runtimer.Controller;
/**
- * Communication interface for both {@link StreamNode} and {@link Controller}
- * side. This interface is for an IO connection that is already created, i.e.,
+ * Communication interface for an IO connection that is already created, i.e.,
* creating a connections is not handled at here. Consider
* {@link ConnectionFactory} to create a connection.
For the moment,
* communicates at object granularity level. We may need to add primitive
@@ -77,7 +84,7 @@ public interface Connection {
* when the thread is blocked at {@link ObjectInputStream#readObject()}
* method call.
*
- *
+ *
* @throws IOException
*/
public void softClose() throws IOException;
@@ -90,10 +97,16 @@ public interface Connection {
public boolean isStillConnected();
/**
- * Describes a connection between two machines. ConnectionInfo is considered
+ * Describes a connection between two machines.
+ *
+ *
if isSymmetric is true, ConnectionInfo is considered
* symmetric for equal() and hashCode() calculation. As long as same
* machineIDs are involved, irrespect of srcID and dstID positions, these
* methods return same result.
+ *
+ * if isSymmetric is false srcID and dstID will be treated as
+ * not interchangeable entities.
+ *
*
*
* Note : All instances of ConnectionInfo, including subclass
@@ -101,17 +114,27 @@ public interface Connection {
* hashCode() and equals() methods. The whole point of this class is to
* identify a connection between two machines.
*/
- public class ConnectionInfo implements Serializable {
+ public abstract class ConnectionInfo implements Serializable {
private static final long serialVersionUID = 1L;
- private int srcID;
+ protected final int srcID;
- private int dstID;
+ protected final int dstID;
+
+ /**
+ * Tells whether this connection is symmetric or not.
+ */
+ protected final boolean isSymmetric;
public ConnectionInfo(int srcID, int dstID) {
+ this(srcID, dstID, true);
+ }
+
+ protected ConnectionInfo(int srcID, int dstID, boolean isSymmetric) {
this.srcID = srcID;
this.dstID = dstID;
+ this.isSymmetric = isSymmetric;
}
public int getSrcID() {
@@ -122,17 +145,36 @@ public int getDstID() {
return dstID;
}
+ public boolean isSymmetric() {
+ return isSymmetric;
+ }
+
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
- int min = Math.min(srcID, dstID);
- int max = Math.max(srcID, dstID);
- result = prime * result + min;
- result = prime * result + max;
+ if (isSymmetric) {
+ int min = Math.min(srcID, dstID);
+ int max = Math.max(srcID, dstID);
+ result = prime * result + min;
+ result = prime * result + max;
+ } else {
+ result = prime * result + srcID;
+ result = prime * result + dstID;
+ }
+ result = prime * result + (isSymmetric ? 1231 : 1237);
return result;
}
+ /*
+ * (non-Javadoc)
+ *
+	 * @see java.lang.Object#equals(java.lang.Object) equals() overridden
+	 * here breaks the reflexive(), symmetric() and transitive() properties,
+	 * especially when subclasses are involved. The purpose of this override
+	 * is to check whether an already established connection could be
+ * reused.
+ */
@Override
public boolean equals(Object obj) {
if (this == obj)
@@ -142,20 +184,199 @@ public boolean equals(Object obj) {
if (!(obj instanceof ConnectionInfo))
return false;
ConnectionInfo other = (ConnectionInfo) obj;
- int myMin = Math.min(srcID, dstID);
- int myMax = Math.max(srcID, dstID);
- int otherMin = Math.min(other.srcID, other.dstID);
- int otherMax = Math.max(other.srcID, other.dstID);
- if (myMin != otherMin)
- return false;
- if (myMax != otherMax)
- return false;
+ if (other.isSymmetric) {
+ int myMin = Math.min(srcID, dstID);
+ int myMax = Math.max(srcID, dstID);
+ int otherMin = Math.min(other.srcID, other.dstID);
+ int otherMax = Math.max(other.srcID, other.dstID);
+ if (myMin != otherMin)
+ return false;
+ if (myMax != otherMax)
+ return false;
+ } else {
+ if (srcID != other.srcID)
+ return false;
+ if (dstID != other.dstID)
+ return false;
+ }
return true;
}
@Override
public String toString() {
- return "ConnectionInfo [srcID=" + srcID + ", dstID=" + dstID + "]";
+ return String.format(
+ "ConnectionInfo [srcID=%d, dstID=%d, isSymmetric=%s]",
+ srcID, dstID, isSymmetric);
+ }
+
+ /**
+ * This function will establish a new connection according to the
+ * connection info.
+ *
+ * @param nodeID
+ * : nodeID of the {@link StreamNode} that invokes this
+ * method.
+ * @param networkInfo
+ * : network info of the system.
+ * @return {@link Connection} that is described by this
+ * {@link ConnectionInfo}.
+ */
+ public abstract Connection makeConnection(int nodeID,
+ NetworkInfo networkInfo, int timeOut);
+
+ public abstract BoundaryInputChannel inputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider);
+
+ public abstract BoundaryOutputChannel outputChannel(Token t,
+ int bufSize, ConnectionProvider conProvider);
+ }
+
+ /**
+ * We need an instance of {@link ConnectionInfo} to compare and get a
+ * concrete {@link ConnectionInfo} from the list of already created
+ * {@link ConnectionInfo}s. This class is added for that purpose.
+ */
+ public static class GenericConnectionInfo extends ConnectionInfo {
+
+ private static final long serialVersionUID = 1L;
+
+ public GenericConnectionInfo(int srcID, int dstID) {
+ super(srcID, dstID);
+ }
+
+ public GenericConnectionInfo(int srcID, int dstID, boolean isSymmetric) {
+ super(srcID, dstID, isSymmetric);
+ }
+
+ @Override
+ public Connection makeConnection(int nodeID, NetworkInfo networkInfo,
+ int timeOut) {
+ throw new java.lang.Error("This method is not supposed to call");
+ }
+
+ @Override
+ public BoundaryInputChannel inputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ throw new java.lang.Error("This method is not supposed to call");
+ }
+
+ @Override
+ public BoundaryOutputChannel outputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ throw new java.lang.Error("This method is not supposed to call");
+ }
+ }
+
+ /**
+ * ConnectionType serves two purposes
+ *
+ *
Tune the connections. This will passed to opentuner.
+ *
Indicate the {@link StreamNode} to create appropriate
+ * {@link BoundaryChannel}. This will be bound with {@link ConnectionInfo}.
+ *
+ */
+ public enum ConnectionType {
+ /**
+ * Blocking TCP socket connection
+ */
+ BTCP, /**
+ * Non-Blocking TCP socket connection
+ *
+ * NBTCP,
+ */
+ /**
+ * Asynchronous TCP socket connection
+ */
+ ATCP,
+ /**
+ * Blocking InfiniBand
+ *
+ * BIB,
+ */
+ /**
+ * Non-Blocking InfiniBand
+ *
+ * NBIB
+ */
+ }
+
+ /**
+ * Keeps all opened {@link TCPConnection}s for a machine. Each machine
+ * should have a single instance of this class and use this class to make
+ * new connections.
+ *
+ *
+ * TODO: Need to make this class singleton. I didn't do it now because in
+ * current way, controller and a local {@link StreamNode} are running in a
+ * same JVM. So first, local {@link StreamNode} should be made to run on a
+ * different JVM and then make this class singleton.
+ */
+ public static class ConnectionProvider {
+
+ private ConcurrentMap allConnections;
+
+ private final int myNodeID;
+
+ private final NetworkInfo networkInfo;
+
+ public ConnectionProvider(int myNodeID, NetworkInfo networkInfo) {
+ checkNotNull(networkInfo, "networkInfo is null");
+ this.myNodeID = myNodeID;
+ this.networkInfo = networkInfo;
+ this.allConnections = new ConcurrentHashMap<>();
+ }
+
+ /**
+ * See {@link #getConnection(TCPConnectionInfo, int)}.
+ *
+ * @param conInfo
+ * @return
+ * @throws IOException
+ */
+ public Connection getConnection(ConnectionInfo conInfo)
+ throws IOException {
+ return getConnection(conInfo, 0);
+ }
+
+/**
+ * If the connection corresponds to conInfo is already established
+ * returns the connection. Try to make a new connection otherwise.
+ *
+ * @param conInfo - Information that uniquely identifies a {@link TCPConnection}
+ * @param timeOut - Time out only valid if making connection needs to be
+ * done through a listener socket. i.e, conInfo.getSrcID() == myNodeID.
+ * @return
+ * @throws SocketTimeoutException
+ * @throws IOException
+ */
+ public Connection getConnection(ConnectionInfo conInfo, int timeOut)
+ throws SocketTimeoutException, IOException {
+ Connection con = allConnections.get(conInfo);
+ if (con != null) {
+ if (con.isStillConnected()) {
+ return con;
+ } else {
+ throw new AssertionError("con.closeConnection()");
+ // con.closeConnection();
+ }
+ }
+
+ con = conInfo.makeConnection(myNodeID, networkInfo, timeOut);
+ if (con == null)
+ throw new IOException("Connection making process failed.");
+
+ allConnections.put(conInfo, con);
+ return con;
+ }
+
+ public void closeAllConnections() {
+ for (Connection con : allConnections.values()) {
+ try {
+ con.closeConnection();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
}
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java b/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java
index 5aaee42b..b3382cc7 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java
@@ -22,8 +22,13 @@
package edu.mit.streamjit.impl.distributed.common;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
+import java.nio.channels.AsynchronousServerSocketChannel;
+import java.nio.channels.AsynchronousSocketChannel;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
import edu.mit.streamjit.impl.distributed.runtimer.ListenerSocket;
@@ -100,4 +105,25 @@ public static Connection getConnection(Socket socket, boolean needSync)
else
return new TCPConnection(socket);
}
+
+ public static AsyncTCPConnection getAsyncConnection(int portNo)
+ throws IOException {
+ AsynchronousServerSocketChannel ssc;
+ AsynchronousSocketChannel sc2;
+ InetSocketAddress isa = new InetSocketAddress("0.0.0.0", portNo);
+
+ ssc = AsynchronousServerSocketChannel.open().bind(isa);
+ Future accepted = ssc.accept();
+ System.out.println("Waiting for asynchronous socket connection @ port "
+ + portNo);
+ try {
+ sc2 = accepted.get();
+ } catch (InterruptedException | ExecutionException ex) {
+ ex.printStackTrace();
+ return null;
+ }
+
+ ssc.close();
+ return new AsyncTCPConnection(sc2);
+ }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java b/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java
index b31a1ec9..2eb039fa 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java
@@ -21,11 +21,8 @@
*/
package edu.mit.streamjit.impl.distributed.common;
-import edu.mit.streamjit.impl.common.AbstractDrainer;
-import edu.mit.streamjit.impl.distributed.TailChannel;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
import edu.mit.streamjit.impl.distributed.runtimer.StreamNodeAgent;
-import edu.mit.streamjit.tuner.TCPTuner;
/**
* This class is to keep track of all application level constants. So we can
@@ -79,48 +76,4 @@ private GlobalConstants() {
public static final String PORTID_MAP = "portIdMap";
public static final String PARTITION = "partition";
public static final String CONINFOMAP = "ConInfoMap";
-
- /**
- * Whether to start the tuner automatically or not.
- *
- *
0 - Controller will start the tuner automatically.
- *
1 - User has to manually start the tuner with correct portNo as
- * argument. Port no 12563 is used in this case. But it can be changed at
- * {@link TCPTuner#startTuner(String)}. We need this option to run the
- * tuning on remote machines.
- *
- */
- public static int tunerMode = 0;
-
- /**
- * To turn on or turn off the drain data. If this is false, drain data will
- * be ignored and every new reconfiguration will run with fresh inputs.
- */
- public static final boolean useDrainData = false;
-
- /**
- * To turn on or off the dead lock handler. see {@link AbstractDrainer} for
- * it's usage.
- */
- public static final boolean needDrainDeadlockHandler = true;
-
- /**
- * Enables tuning. Tuner will be started iff this flag is set true.
- * Otherwise, just use the fixed configuration file to run the program. No
- * tuning, no intermediate draining. In this mode (tune = false), time taken
- * to pass fixed number of input will be measured for 30 rounds and logged
- * into FixedOutPut.txt. See {@link TailChannel} for the file logging
- * details.
- */
- public static final boolean tune = false;
-
- /**
- * Save all configurations tired by open tuner in to
- * "configurations//app.name" directory.
- */
- public static final boolean saveAllConfigurations = true;
-
- static {
-
- }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java b/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java
index d9a7fc35..316be158 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java
@@ -22,7 +22,7 @@
package edu.mit.streamjit.impl.distributed.common;
import edu.mit.streamjit.impl.blob.Blob.Token;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
public abstract class MiscCtrlElements implements CTRLRMessageElement {
@@ -38,10 +38,10 @@ public void accept(CTRLRMessageVisitor visitor) {
public static final class NewConInfo extends MiscCtrlElements {
private static final long serialVersionUID = 1L;
- public final TCPConnectionInfo conInfo;
+ public final ConnectionInfo conInfo;
public final Token token;
- public NewConInfo(TCPConnectionInfo conInfo, Token token) {
+ public NewConInfo(ConnectionInfo conInfo, Token token) {
this.conInfo = conInfo;
this.token = token;
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java b/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java
new file mode 100644
index 00000000..a7456228
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java
@@ -0,0 +1,28 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import java.net.InetAddress;
+import java.util.Map;
+
+/**
+ * Keeps network information of all nodes in the system.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since May 23, 2014
+ */
+public class NetworkInfo {
+
+ private final Map iNetAddressMap;
+
+ public NetworkInfo(Map iNetAddressMap) {
+ this.iNetAddressMap = iNetAddressMap;
+ }
+
+ public InetAddress getInetAddress(int nodeID) {
+ if (this.iNetAddressMap == null)
+ return null;
+ InetAddress ipAddress = iNetAddressMap.get(nodeID);
+ if (ipAddress.isLoopbackAddress())
+ ipAddress = iNetAddressMap.get(0);
+ return ipAddress;
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/Options.java b/src/edu/mit/streamjit/impl/distributed/common/Options.java
new file mode 100644
index 00000000..ed028e2e
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/Options.java
@@ -0,0 +1,278 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Properties;
+
+import edu.mit.streamjit.impl.common.drainer.AbstractDrainer;
+import edu.mit.streamjit.impl.distributed.ConnectionManager.AllConnectionParams;
+import edu.mit.streamjit.impl.distributed.ConnectionManager.AsyncTCPNoParams;
+import edu.mit.streamjit.impl.distributed.ConnectionManager.BlockingTCPNoParams;
+import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler;
+import edu.mit.streamjit.impl.distributed.TailChannels;
+import edu.mit.streamjit.impl.distributed.TailChannels.BlockingTailChannel1;
+import edu.mit.streamjit.impl.distributed.TailChannels.BlockingTailChannel2;
+import edu.mit.streamjit.tuner.OnlineTuner;
+import edu.mit.streamjit.tuner.TCPTuner;
+
+/**
+ * Program options. Loads the values from "options.properties".
+ *
+ * @author sumanan
+ * @since 1 Mar, 2015
+ */
+public final class Options {
+
+ /**
+ * We can set this value at class loading time also as follows.
+ *
+	 * maxThreadCount = Math.max(Runtime.getRuntime().availableProcessors() / 2,
+ * 1);
+ *
+ * Lets hard code this for the moment.
+ */
+ public static final int maxNumCores;
+
+ /**
+ * To turn on or off the dead lock handler. see {@link AbstractDrainer} for
+ * it's usage.
+ */
+ public static final boolean needDrainDeadlockHandler;
+
+ /**
+ * Turn On/Off the profiling.
+ */
+ public static final boolean needProfiler;
+
+ /**
+ * Output count for tuning. Tuner measures the running time for this number
+ * of outputs.
+ */
+ public static final int outputCount;
+
+ /**
+	 * Period to print output count periodically. This printing feature gets
+ * turned off if this value is less than 1. Time unit is ms. See
+ * {@link TailChannels}.
+ */
+ public static final int printOutputCountPeriod;
+
+ /**
+	 * Save all configurations tried by opentuner into
+ * "configurations//app.name" directory.
+ */
+ public static final boolean saveAllConfigurations;
+
+ /**
+ * Enables {@link DistributedStreamCompiler} to run on a single node. When
+ * this is enabled, noOfNodes passed as compiler argument has no effect.
+ */
+ public static final boolean singleNodeOnline;
+
+ /**
+ * Enables tuning. Tuner will be started iff this flag is set true.
+ * Otherwise, just use the fixed configuration file to run the program. No
+ * tuning, no intermediate draining. In this mode (tune = false), time taken
+ * to pass fixed number of input will be measured for 30 rounds and logged
+ * into FixedOutPut.txt. See {@link TailChannels} for the file logging
+ * details.
+ *
+ * 0 - No tuning, uses configuration file to run.
+ *
+ * 1 - Tuning.
+ *
+ * 2 - Evaluate configuration files. ( compares final cfg with hand tuned
+ * cfg. Both file should be presented in the running directory.
+ */
+ public static final int tune;
+
+ /**
+ * Decides how to start the opentuner. In first 2 cases, controller starts
+ * opentuner and establishes connection with it on a random port no range
+ * from 5000-65536. User can provide port no in 3 case.
+ *
+ *
+ *
0 - Controller starts the tuner automatically on a terminal. User can
+ * see Opentuner related outputs in the new terminal.
+ *
1 - Controller starts the tuner automatically as a Python process. No
+ * explicit window will be opened. Suitable for remote running through SSH
+ * terminal.
+ *
2 - User has to manually start the tuner with correct portNo as
+ * argument. Port no 12563 is used in this case. But it can be changed at
+ * {@link TCPTuner#startTuner(String)}. We need this option to run the
+ * tuning on remote machines.
+ *
+ */
+ public static final int tunerStartMode;
+
+ /**
+ * if true uses Compiler2, interpreter otherwise.
+ */
+ public static final boolean useCompilerBlob;
+
+ /**
+ * To turn on or turn off the drain data. If this is false, drain data will
+ * be ignored and every new reconfiguration will run with fresh inputs.
+ */
+ public static final boolean useDrainData;
+
+ // Following are miscellaneous options to avoid rebuilding jar files every
+ // time to change some class selections. You may decide to remove these
+ // variables in a stable release.
+ // TODO: Fix all design pattern related issues.
+
+ /**
+ *
+ *
0 - {@link AllConnectionParams}
+ *
1 - {@link BlockingTCPNoParams}
+ *
2 - {@link AsyncTCPNoParams}
+ *
default: {@link AsyncTCPNoParams}
+ *
+ */
+ public static final int connectionManager;
+
+ /**
+ *
+ *
1 - {@link BlockingTailChannel1}
+ *
2 - {@link BlockingTailChannel2}
+ *
default: {@link BlockingTailChannel2}
+ *
+ */
+ public static final int tailChannel;
+
+ /**
+ * {@link OnlineTuner}'s verifier verifies the configurations if
+	 * {@link #tune}==2. evaluationCount determines the number of re-runs for a
+ * configuration. Default value is 2.
+ */
+ public static final int evaluationCount;
+
+ /**
+ * {@link OnlineTuner}'s verifier verifies the configurations if
+	 * {@link #tune}==2. verificationCount determines the number of re-runs for
+ * a set of configurations in the verify.txt. Default value is 1.
+ */
+ public static final int verificationCount;
+
+ /**
+ * Large multiplier -> Large compilation time and Large waiting time.
+ */
+ public static final int multiplierMaxValue;
+
+ public static final boolean prognosticate;
+
+ public static final int bigToSmallBlobRatio;
+
+ public static final int loadRatio;
+
+ public static final int blobToNodeRatio;
+
+ public static final int boundaryChannelRatio;
+
+ public static final boolean timeOut;
+
+ static {
+ Properties prop = loadProperties();
+ printOutputCountPeriod = Integer.parseInt(prop
+ .getProperty("printOutputCountPeriod"));;
+ maxNumCores = Integer.parseInt(prop.getProperty("maxNumCores"));
+ useCompilerBlob = Boolean.parseBoolean(prop
+ .getProperty("useCompilerBlob"));
+ needDrainDeadlockHandler = Boolean.parseBoolean(prop
+ .getProperty("needDrainDeadlockHandler"));
+ needProfiler = Boolean.parseBoolean(prop.getProperty("needProfiler"));
+ outputCount = Integer.parseInt(prop.getProperty("outputCount"));
+ tune = Integer.parseInt(prop.getProperty("tune"));
+ tunerStartMode = Integer.parseInt(prop.getProperty("tunerStartMode"));
+ saveAllConfigurations = Boolean.parseBoolean(prop
+ .getProperty("saveAllConfigurations"));
+ singleNodeOnline = Boolean.parseBoolean(prop
+ .getProperty("singleNodeOnline"));
+ useDrainData = Boolean.parseBoolean(prop.getProperty("useDrainData"));
+ connectionManager = Integer.parseInt(prop
+ .getProperty("connectionManager"));
+ tailChannel = Integer.parseInt(prop.getProperty("tailChannel"));
+ evaluationCount = Integer.parseInt(prop.getProperty("evaluationCount"));
+ verificationCount = Integer.parseInt(prop
+ .getProperty("verificationCount"));
+ multiplierMaxValue = Integer.parseInt(prop
+ .getProperty("multiplierMaxValue"));
+ prognosticate = Boolean.parseBoolean(prop.getProperty("prognosticate"));
+ bigToSmallBlobRatio = Integer.parseInt(prop
+ .getProperty("bigToSmallBlobRatio"));
+ loadRatio = Integer.parseInt(prop.getProperty("loadRatio"));
+ blobToNodeRatio = Integer.parseInt(prop.getProperty("blobToNodeRatio"));
+ boundaryChannelRatio = Integer.parseInt(prop
+ .getProperty("boundaryChannelRatio"));
+ timeOut = Boolean.parseBoolean(prop.getProperty("timeOut"));
+ }
+
+ public static Properties getProperties() {
+ Properties prop = new Properties();
+ setProperty(prop, "tunerStartMode", tunerStartMode);
+ setProperty(prop, "useDrainData", useDrainData);
+ setProperty(prop, "needDrainDeadlockHandler", needDrainDeadlockHandler);
+ setProperty(prop, "tune", tune);
+ setProperty(prop, "saveAllConfigurations", saveAllConfigurations);
+ setProperty(prop, "outputCount", outputCount);
+ setProperty(prop, "useCompilerBlob", useCompilerBlob);
+ setProperty(prop, "printOutputCountPeriod", printOutputCountPeriod);
+ setProperty(prop, "singleNodeOnline", singleNodeOnline);
+ setProperty(prop, "maxNumCores", maxNumCores);
+ setProperty(prop, "needProfiler", needProfiler);
+ setProperty(prop, "connectionManager", connectionManager);
+ setProperty(prop, "tailChannel", tailChannel);
+ setProperty(prop, "evaluationCount", evaluationCount);
+ setProperty(prop, "verificationCount", verificationCount);
+ setProperty(prop, "multiplierMaxValue", multiplierMaxValue);
+ setProperty(prop, "prognosticate", prognosticate);
+ setProperty(prop, "bigToSmallBlobRatio", bigToSmallBlobRatio);
+ setProperty(prop, "loadRatio", loadRatio);
+ setProperty(prop, "blobToNodeRatio", blobToNodeRatio);
+ setProperty(prop, "boundaryChannelRatio", boundaryChannelRatio);
+ setProperty(prop, "timeOut", timeOut);
+ return prop;
+ }
+
+ private static Properties loadProperties() {
+ Properties prop = new Properties();
+ InputStream input = null;
+ try {
+ input = new FileInputStream("options.properties");
+ prop.load(input);
+ } catch (IOException ex) {
+ System.err.println("Failed to load options.properties");
+ }
+ return prop;
+ }
+
+ private static void setProperty(Properties prop, String name, Boolean val) {
+ prop.setProperty(name, val.toString());
+ }
+
+ private static void setProperty(Properties prop, String name, Integer val) {
+ prop.setProperty(name, val.toString());
+ }
+
+ public static void storeProperties() {
+ OutputStream output = null;
+ try {
+ output = new FileOutputStream("options.properties");
+ Properties prop = getProperties();
+ prop.store(output, null);
+ } catch (IOException io) {
+ io.printStackTrace();
+ } finally {
+ if (output != null) {
+ try {
+ output.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java b/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java
index 6baa7944..84729821 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java
@@ -79,7 +79,7 @@ public void process(SNDrainProcessor dp) {
* the drain data of the blobs after the draining. See {@link DrainData} for
* more information.
*/
- public static final class DrainedData extends SNDrainElement {
+ public static final class SNDrainedData extends SNDrainElement {
private static final long serialVersionUID = 1L;
public final Token blobID;
@@ -87,7 +87,7 @@ public static final class DrainedData extends SNDrainElement {
public final ImmutableMap> inputData;
public final ImmutableMap> outputData;
- public DrainedData(Token blobID, DrainData drainData,
+ public SNDrainedData(Token blobID, DrainData drainData,
ImmutableMap> inputData,
ImmutableMap> outputData) {
this.blobID = blobID;
@@ -113,6 +113,6 @@ public interface SNDrainProcessor {
public void process(Drained drained);
- public void process(DrainedData drainedData);
+ public void process(SNDrainedData snDrainedData);
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNException.java b/src/edu/mit/streamjit/impl/distributed/common/SNException.java
index 32da1a2e..d42e3ef3 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/SNException.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/SNException.java
@@ -21,7 +21,7 @@
*/
package edu.mit.streamjit.impl.distributed.common;
-import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
public class SNException implements SNMessageElement {
@@ -39,9 +39,9 @@ public void accept(SNMessageVisitor visitor) {
public static final class AddressBindException extends SNException {
private static final long serialVersionUID = 1L;
- public final TCPConnectionInfo conInfo;
+ public final ConnectionInfo conInfo;
- public AddressBindException(TCPConnectionInfo conInfo) {
+ public AddressBindException(ConnectionInfo conInfo) {
this.conInfo = conInfo;
}
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java b/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java
index ecdef2e3..5db2abc5 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java
@@ -21,6 +21,8 @@
*/
package edu.mit.streamjit.impl.distributed.common;
+import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement;
+
public interface SNMessageVisitor {
void visit(Error error);
@@ -34,4 +36,8 @@ public interface SNMessageVisitor {
void visit(SNDrainElement snDrainElement);
void visit(SNException snException);
+
+ void visit(SNTimeInfo timeInfo);
+
+ void visit(SNProfileElement snProfileElement);
}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java
new file mode 100644
index 00000000..505c0b3c
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java
@@ -0,0 +1,71 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.distributed.node.StreamNode;
+
+/**
+ * {@link StreamNode}s shall send the timing information such as compilation
+ * time of each blob, draining time, drain-data collection time, init schedule
+ * time, etc., by sending {@link SNTimeInfo}.
+ *
+ * @author Sumanan sumanan@mit.edu
+ * @since Nov 20, 2014
+ *
+ */
+public abstract class SNTimeInfo implements SNMessageElement {
+
+ private static final long serialVersionUID = 1L;
+
+ public abstract void process(SNTimeInfoProcessor snTimeInfoProcessor);
+
+ @Override
+ public void accept(SNMessageVisitor visitor) {
+ visitor.visit(this);
+ }
+
+ public static final class CompilationTime extends SNTimeInfo {
+
+ private static final long serialVersionUID = 1L;
+
+ public final Token blobID;
+
+ public final double milliSec;
+
+ public CompilationTime(Token blobID, double milliSec) {
+ this.blobID = blobID;
+ this.milliSec = milliSec;
+ }
+
+ @Override
+ public void process(SNTimeInfoProcessor snTimeInfoProcessor) {
+ snTimeInfoProcessor.process(this);
+ }
+
+ }
+
+ public static final class DrainingTime extends SNTimeInfo {
+
+ private static final long serialVersionUID = 1L;
+
+ public final Token blobID;
+
+ public final double milliSec;
+
+ public DrainingTime(Token blobID, double milliSec) {
+ this.blobID = blobID;
+ this.milliSec = milliSec;
+ }
+
+ @Override
+ public void process(SNTimeInfoProcessor snTimeInfoProcessor) {
+ snTimeInfoProcessor.process(this);
+ }
+
+ }
+
+ public interface SNTimeInfoProcessor {
+ public void process(CompilationTime compilationTime);
+
+ public void process(DrainingTime drainingTime);
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java
new file mode 100644
index 00000000..94ff2a4a
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java
@@ -0,0 +1,35 @@
+package edu.mit.streamjit.impl.distributed.common;
+
+import edu.mit.streamjit.impl.common.TimeLogger;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.CompilationTime;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.DrainingTime;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.SNTimeInfoProcessor;
+
+/**
+ * Uses {@link TimeLogger} to log timing information.
+ *
+ * @author sumanan
+ * @since Nov 24, 2014
+ */
+public class SNTimeInfoProcessorImpl implements SNTimeInfoProcessor {
+
+ private final TimeLogger logger;
+
+ public SNTimeInfoProcessorImpl(TimeLogger logger) {
+ this.logger = logger;
+ }
+
+ @Override
+ public void process(CompilationTime compilationTime) {
+ String msg = String.format("Blob-%s-%.0fms\n", compilationTime.blobID,
+ compilationTime.milliSec);
+ logger.logCompileTime(msg);
+ }
+
+ @Override
+ public void process(DrainingTime drainingTime) {
+ String msg = String.format("Blob-%s-%.0fms\n", drainingTime.blobID,
+ drainingTime.milliSec);
+ logger.logDrainTime(msg);
+ }
+}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java
index 024ccbfc..6d9431ff 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java
@@ -42,7 +42,7 @@ public class SynchronizedTCPConnection extends TCPConnection {
* @param socket
*/
public SynchronizedTCPConnection(Socket socket) {
- super(socket, 50);
+ super(socket, 5);
}
@Override
diff --git a/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java
index 9d1d0dc1..53ea1f23 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java
@@ -21,14 +21,19 @@
*/
package edu.mit.streamjit.impl.distributed.common;
-import java.io.*;
-import java.net.*;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-import static com.google.common.base.Preconditions.*;
-
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.OptionalDataException;
+import java.net.InetAddress;
+import java.net.Socket;
+
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel;
+import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel;
import edu.mit.streamjit.impl.distributed.node.StreamNode;
/**
@@ -85,8 +90,9 @@ public void writeObject(Object obj) throws IOException {
try {
ooStream.writeObject(obj);
+ n++;
// TODO: Any way to improve the performance?
- if (n++ > resetCount) {
+ if (n > resetCount) {
n = 0;
ooStream.reset();
}
@@ -113,6 +119,7 @@ public void writeObject(Object obj) throws IOException {
}
public final void closeConnection() {
+ isconnected = false;
try {
if (ooStream != null)
this.ooStream.close();
@@ -121,7 +128,6 @@ public final void closeConnection() {
if (socket != null)
this.socket.close();
} catch (IOException ex) {
- isconnected = false;
ex.printStackTrace();
}
}
@@ -195,10 +201,10 @@ public static class TCPConnectionInfo extends ConnectionInfo {
private static final long serialVersionUID = 1L;
- int portNo;
+ private final int portNo;
public TCPConnectionInfo(int srcID, int dstID, int portNo) {
- super(srcID, dstID);
+ super(srcID, dstID, true);
Ipv4Validator validator = Ipv4Validator.getInstance();
if (!validator.isValid(portNo))
throw new IllegalArgumentException("Invalid port No");
@@ -236,90 +242,47 @@ public String toString() {
return "TCPConnectionInfo [srcID=" + getSrcID() + ", dstID="
+ getDstID() + ", portID=" + portNo + "]";
}
- }
- /**
- * Keeps all opened {@link TCPConnection}s for a machine. Each machine
- * should have a single instance of this class and use this class to make
- * new connections.
- *
- *
- * TODO: Need to make this class singleton. I didn't do it now because in
- * current way, controller and a local {@link StreamNode} are running in a
- * same JVM. So first, local {@link StreamNode} should be made to run on a
- * different JVM and then make this class singleton.
- */
- public static class TCPConnectionProvider {
-
- private ConcurrentMap allConnections;
-
- private final int myNodeID;
-
- private final Map iNetAddressMap;
-
- public TCPConnectionProvider(int myNodeID,
- Map iNetAddressMap) {
- checkNotNull(iNetAddressMap, "nodeInfoMap is null");
- this.myNodeID = myNodeID;
- this.iNetAddressMap = iNetAddressMap;
- this.allConnections = new ConcurrentHashMap<>();
- }
-
- /**
- * See {@link #getConnection(TCPConnectionInfo, int)}.
- *
- * @param conInfo
- * @return
- * @throws IOException
- */
- public Connection getConnection(TCPConnectionInfo conInfo)
- throws IOException {
- return getConnection(conInfo, 0);
- }
-
-/**
- * If the connection corresponds to conInfo is already established
- * returns the connection. Try to make a new connection otherwise.
- *
- * @param conInfo - Information that uniquely identifies a {@link TCPConnection
- * @param timeOut - Time out only valid if making connection needs to be
- * done through a listener socket. i.e, conInfo.getSrcID() == myNodeID.
- * @return
- * @throws SocketTimeoutException
- * @throws IOException
- */
- public Connection getConnection(TCPConnectionInfo conInfo, int timeOut)
- throws SocketTimeoutException, IOException {
- TCPConnection con = allConnections.get(conInfo);
- if (con != null) {
- if (con.isStillConnected()) {
- return con;
- } else {
- throw new AssertionError("con.closeConnection()");
- // con.closeConnection();
+ @Override
+ public Connection makeConnection(int nodeID, NetworkInfo networkInfo,
+ int timeOut) {
+ Connection con = null;
+ if (srcID == nodeID) {
+ try {
+ con = ConnectionFactory.getConnection(portNo, timeOut,
+ false);
+ } catch (IOException e) {
+ e.printStackTrace();
}
}
- if (conInfo.getSrcID() == myNodeID) {
- con = ConnectionFactory.getConnection(conInfo.getPortNo(),
- timeOut, false);
- } else if (conInfo.getDstID() == myNodeID) {
- InetAddress ipAddress = iNetAddressMap.get(conInfo.getSrcID());
- if (ipAddress.isLoopbackAddress())
- ipAddress = iNetAddressMap.get(0);
-
- int portNo = conInfo.getPortNo();
- con = ConnectionFactory.getConnection(
- ipAddress.getHostAddress(), portNo, false);
+ else if (dstID == nodeID) {
+ InetAddress ipAddress = networkInfo.getInetAddress(srcID);
+ try {
+ con = ConnectionFactory.getConnection(
+ ipAddress.getHostAddress(), portNo, false);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ } else {
+ throw new IllegalArgumentException(
+ "Neither srcID nor dstID matches with nodeID");
}
- allConnections.put(conInfo, con);
return con;
}
- public void closeAllConnections() {
- for (TCPConnection con : allConnections.values()) {
- con.closeConnection();
- }
+ @Override
+ public BoundaryInputChannel inputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ return new BlockingInputChannel(bufSize, conProvider, this,
+ t.toString(), 0);
+ }
+
+ @Override
+ public BoundaryOutputChannel outputChannel(Token t, int bufSize,
+ ConnectionProvider conProvider) {
+ return new BlockingOutputChannel(bufSize, conProvider, this,
+ t.toString(), 0);
}
}
}
\ No newline at end of file
diff --git a/src/edu/mit/streamjit/impl/distributed/common/Tester.java b/src/edu/mit/streamjit/impl/distributed/common/Tester.java
index 781692b5..1f2171a7 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/Tester.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/Tester.java
@@ -28,6 +28,18 @@
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionType;
+import edu.mit.streamjit.impl.distributed.common.Connection.GenericConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo;
+
+;
public class Tester {
@@ -35,7 +47,15 @@ public class Tester {
* @param args
*/
public static void main(String[] args) {
+ // test1();
+ // test2();
+ test3();
+ }
+ /**
+ * Testing one - tests the size of an object.
+ */
+ private static void test1() {
Error er = Error.FILE_NOT_FOUND;
AppStatus apSts = AppStatus.STOPPED;
ByteArrayOutputStream byteAos = new ByteArrayOutputStream();
@@ -54,10 +74,108 @@ public static void main(String[] args) {
e.printStackTrace();
}
- /*
- * try { os.writeInt(34345); } catch (IOException e) { e.printStackTrace(); }
- */
+ try {
+ os.writeInt(34345);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
System.out.println(byteAos.toByteArray().length);
}
+
+ /**
+ * Tests the equals and hashCode implementations.
+ */
+ private static void test2() {
+
+ ConnectionInfo asyConInfo1 = new AsyncTCPConnectionInfo(1, 4, 8989);
+ ConnectionInfo asyConInfo2 = new AsyncTCPConnectionInfo(1, 4, 8980);
+ ConnectionInfo asyConInfo3 = new AsyncTCPConnectionInfo(4, 1, 8989);
+ ConnectionInfo asyConInfo4 = new AsyncTCPConnectionInfo(4, 1, 8980);
+ ConnectionInfo asyConInfo5 = new AsyncTCPConnectionInfo(1, 4, 8989);
+
+ ConnectionInfo tcpConInfo1 = new TCPConnectionInfo(1, 4, 8989);
+ ConnectionInfo tcpConInfo2 = new TCPConnectionInfo(1, 4, 8980);
+ ConnectionInfo tcpConInfo3 = new TCPConnectionInfo(4, 1, 8989);
+ ConnectionInfo tcpConInfo4 = new TCPConnectionInfo(4, 1, 8980);
+
+ ConnectionInfo conInfo1 = new GenericConnectionInfo(1, 4, true);
+ ConnectionInfo conInfo2 = new GenericConnectionInfo(1, 4, false);
+ ConnectionInfo conInfo3 = new GenericConnectionInfo(4, 1, true);
+ ConnectionInfo conInfo4 = new GenericConnectionInfo(4, 1, false);
+
+ System.out.println("AsyncTCPConnectionInfo - AsyncTCPConnectionInfo");
+ System.out.println(asyConInfo1.equals(asyConInfo2));
+ System.out.println(asyConInfo1.equals(asyConInfo3));
+ System.out.println(asyConInfo1.equals(asyConInfo4));
+ System.out.println(asyConInfo2.equals(asyConInfo3));
+ System.out.println(asyConInfo2.equals(asyConInfo4));
+ System.out.println(asyConInfo3.equals(asyConInfo4));
+ System.out.println();
+
+ System.out.println("ConnectionInfo - AsyncTCPConnectionInfo");
+ System.out.println(conInfo1.equals(asyConInfo1));
+ System.out.println(conInfo1.equals(asyConInfo2));
+ System.out.println(conInfo1.equals(asyConInfo3));
+ System.out.println(conInfo1.equals(asyConInfo4));
+ System.out.println(conInfo2.equals(asyConInfo1));
+ System.out.println(conInfo2.equals(asyConInfo2));
+ System.out.println(conInfo2.equals(asyConInfo3));
+ System.out.println(conInfo2.equals(asyConInfo4));
+ System.out.println(conInfo3.equals(asyConInfo1));
+ System.out.println(conInfo3.equals(asyConInfo2));
+ System.out.println(conInfo3.equals(asyConInfo3));
+ System.out.println(conInfo3.equals(asyConInfo4));
+ System.out.println(conInfo4.equals(asyConInfo1));
+ System.out.println(conInfo4.equals(asyConInfo2));
+ System.out.println(conInfo4.equals(asyConInfo3));
+ System.out.println(conInfo4.equals(asyConInfo4));
+ System.out.println();
+
+ System.out.println("ConnectionInfo - TCPConnectionInfo");
+ System.out.println(conInfo1.equals(tcpConInfo1));
+ System.out.println(conInfo1.equals(tcpConInfo2));
+ System.out.println(conInfo1.equals(tcpConInfo3));
+ System.out.println(conInfo1.equals(tcpConInfo4));
+ System.out.println(conInfo2.equals(tcpConInfo1));
+ System.out.println(conInfo2.equals(tcpConInfo2));
+ System.out.println(conInfo2.equals(tcpConInfo3));
+ System.out.println(conInfo2.equals(tcpConInfo4));
+ System.out.println(conInfo3.equals(tcpConInfo1));
+ System.out.println(conInfo3.equals(tcpConInfo2));
+ System.out.println(conInfo3.equals(tcpConInfo3));
+ System.out.println(conInfo3.equals(tcpConInfo4));
+ System.out.println(conInfo4.equals(tcpConInfo1));
+ System.out.println(conInfo4.equals(tcpConInfo2));
+ System.out.println(conInfo4.equals(tcpConInfo3));
+ System.out.println(conInfo4.equals(tcpConInfo4));
+ System.out.println();
+
+ Map tesMap = new HashMap<>();
+ tesMap.put(tcpConInfo1, 1);
+ tesMap.put(asyConInfo1, 2);
+
+ System.out.println(tesMap.containsKey(tcpConInfo1));
+ System.out.println(tesMap.containsKey(tcpConInfo2));
+ System.out.println(tesMap.containsKey(tcpConInfo3));
+ System.out.println(tesMap.containsKey(tcpConInfo4));
+
+ System.out.println(tesMap.containsKey(asyConInfo1));
+ System.out.println(tesMap.containsKey(asyConInfo2));
+ System.out.println(tesMap.containsKey(asyConInfo3));
+ System.out.println(tesMap.containsKey(asyConInfo4));
+ System.out.println(tesMap.containsKey(asyConInfo5));
+
+ System.out.println(tesMap.containsKey(conInfo1));
+ System.out.println(tesMap.containsKey(conInfo2));
+ System.out.println(tesMap.containsKey(conInfo3));
+ System.out.println(tesMap.containsKey(conInfo4));
+ }
+
+ private static void test3() {
+ List conlist = Arrays.asList(ConnectionType.values());
+ for (ConnectionType connectionType : conlist) {
+ System.out.println(connectionType);
+ }
+ }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/common/Utils.java b/src/edu/mit/streamjit/impl/distributed/common/Utils.java
index 292e3f9c..8ee4c41c 100644
--- a/src/edu/mit/streamjit/impl/distributed/common/Utils.java
+++ b/src/edu/mit/streamjit/impl/distributed/common/Utils.java
@@ -21,15 +21,41 @@
*/
package edu.mit.streamjit.impl.distributed.common;
+import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.lang.management.MemoryUsage;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Calendar;
import java.util.Collections;
+import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableSet;
+import edu.mit.streamjit.api.OneToOneElement;
import edu.mit.streamjit.api.Worker;
import edu.mit.streamjit.impl.blob.Blob;
import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.common.Configuration;
import edu.mit.streamjit.impl.common.IOInfo;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.distributed.ConfigurationManager;
+import edu.mit.streamjit.impl.distributed.HotSpotTuning;
+import edu.mit.streamjit.impl.distributed.PartitionManager;
+import edu.mit.streamjit.impl.distributed.StreamJitApp;
+import edu.mit.streamjit.test.apps.fmradio.FMRadio;
+import edu.mit.streamjit.util.ConfigurationUtils;
/**
* @author Sumanan sumanan@mit.edu
@@ -50,4 +76,269 @@ public static Token getblobID(Set> workers) {
return Collections.min(inputBuilder.build());
}
+
+ /**
+ * Prints heapMaxSize, current heapSize and heapFreeSize.
+ */
+ public static void printMemoryStatus() {
+ long heapMaxSize = Runtime.getRuntime().maxMemory();
+ long heapSize = Runtime.getRuntime().totalMemory();
+ long heapFreeSize = Runtime.getRuntime().freeMemory();
+ int MEGABYTE = 1024 * 1024;
+ System.out.println("#########################");
+ printCurrentDateTime();
+ System.out.println(String.format("heapMaxSize = %dMB", heapMaxSize
+ / MEGABYTE));
+ System.out.println(String
+ .format("heapSize = %dMB", heapSize / MEGABYTE));
+ System.out.println(String.format("heapFreeSize = %dMB", heapFreeSize
+ / MEGABYTE));
+ System.out.println("#########################");
+ }
+
+ /**
+ * Prints current date and time in "yyyy/MM/dd HH:mm:ss" format.
+ */
+ public static void printCurrentDateTime() {
+ DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
+ Calendar cal = Calendar.getInstance();
+ System.out.println(dateFormat.format(cal.getTime()));
+ }
+
+ public static void printOutOfMemory() {
+ MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
+ System.out.println("******OutOfMemoryError******");
+ MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage();
+ int MEGABYTE = 1024 * 1024;
+ long maxMemory = heapUsage.getMax() / MEGABYTE;
+ long usedMemory = heapUsage.getUsed() / MEGABYTE;
+ System.out
+ .println("Memory Use :" + usedMemory + "M/" + maxMemory + "M");
+ }
+
+ /**
+ * @param name
+ * name of the directory.
+ * @return true if and only if the directory was created; false
+ * otherwise.
+ */
+ public static boolean createDir(String name) {
+ File dir = new File(name);
+ if (dir.exists()) {
+ if (dir.isDirectory())
+ return true;
+ else {
+ System.err.println("A file exists in the name of dir-" + name);
+ return false;
+ }
+ } else
+ return dir.mkdirs();
+ }
+
+ /**
+ * Creates app directory with the name of appName, and creates a sub
+ * directory "configurations".
+ *
+ * @param appName
+ * name of the application; used as the directory name.
+ * @return true if and only if the directories were created;
+ * false otherwise.
+ */
+ public static boolean createAppDir(String appName) {
+ if (createDir(appName))
+ return createDir(String.format("%s%s%s", appName, File.separator,
+ ConfigurationUtils.configDir));
+ else
+ return false;
+ }
+
+ /**
+ * Writes README.txt. Mainly saves GlobalConstant values.
+ *
+ * @param appName
+ */
+ public static void writeReadMeTxt(String appName) {
+ try {
+ // rename(appName, "README.txt");
+ FileWriter writer = new FileWriter(String.format("%s%sREADME.txt",
+ appName, File.separator));
+ DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
+ Calendar cal = Calendar.getInstance();
+ writer.write(dateFormat.format(cal.getTime()) + "\n");
+ writer.write(appName + "\n");
+ Properties prop = Options.getProperties();
+ prop.store(writer, "GlobalConstants.Properties");
+ writer.close();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ /**
+ * @return true iff renaming succeeded.
+ */
+ public static boolean rename(String appName, String fileName) {
+ File file = new File(String.format("%s%s%s", appName, File.separator,
+ fileName));
+ File fileOrig = new File(String.format("%s%s%s.orig", appName,
+ File.separator, fileName));
+ if (fileOrig.exists())
+ return false;
+ if (file.exists())
+ file.renameTo(fileOrig);
+ return true;
+ }
+
+ /**
+ * Returns a {@link FileWriter} of the file "dirName/fileName" with append =
+ * false. Creates the file if it not exists. Suppresses {@link IOException}
+ * and returns null if exception occurred. This method is added to keep
+ * other classes clean.
+ *
+ * @return {@link FileWriter} or null.
+ */
+ public static FileWriter fileWriter(String dirName, String fileName) {
+ return fileWriter(dirName, fileName, false);
+ }
+
+ /**
+ * Returns a {@link FileWriter} of the file "dirName/fileName". Creates the
+ * file if it does not exist. Suppresses {@link IOException} and returns null if
+ * exception occurred. This method is added to keep other classes clean.
+ *
+ * @return {@link FileWriter} or null.
+ */
+ public static FileWriter fileWriter(String dirName, String fileName,
+ boolean append) {
+ String fullFileName = String.format("%s%s%s", dirName, File.separator,
+ fileName);
+ return fileWriter(fullFileName, append);
+ }
+ /**
+ * Creates and returns a {@link FileWriter} with append = false. Suppresses
+ * {@link IOException} and returns null if exception occurred. This method
+ * is added to keep other classes clean.
+ *
+ * @return {@link FileWriter} or null.
+ */
+ public static FileWriter fileWriter(String name) {
+ return fileWriter(name, false);
+ }
+
+ /**
+ * Creates and returns a {@link FileWriter}. Suppresses {@link IOException}
+ * and returns null if exception occurred. This method is added to keep
+ * other classes clean.
+ *
+ * @return {@link FileWriter} or null.
+ */
+ public static FileWriter fileWriter(String name, boolean append) {
+ FileWriter fw = null;
+ try {
+ fw = new FileWriter(name, append);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return fw;
+ }
+
+ /**
+ * [16-02-2015] - I couldn't run dot tools in Lanka cluster. So as a hack, I
+ * implemented this method to generate blob graph for each configuration.
+ * TODO: This generation process is damn slow. Takes 40 mins to process 5000
+ * cfgs.
+ *
+ * @param stream
+ * @throws IOException
+ */
+ public static void generateBlobGraphs(OneToOneElement, ?> stream)
+ throws IOException {
+ StreamJitApp, ?> app = new StreamJitApp<>(stream);
+ PartitionManager partitionManager = new HotSpotTuning(app);
+ partitionManager.getDefaultConfiguration(
+ Workers.getAllWorkersInGraph(app.source), 2);
+ ConfigurationManager cfgManager = new ConfigurationManager(app,
+ partitionManager);
+ Stopwatch sw = Stopwatch.createStarted();
+ for (Integer i = 1; i < 5010; i++) {
+ String prefix = i.toString();
+ Configuration cfg = ConfigurationUtils.readConfiguration(app.name,
+ prefix);
+ if (cfg != null) {
+ cfg = ConfigurationUtils.addConfigPrefix(cfg, prefix);
+ cfgManager.newConfiguration(cfg);
+ }
+ }
+
+ Configuration cfg = ConfigurationUtils.readConfiguration(app.name,
+ "final");
+ if (cfg != null) {
+ cfg = ConfigurationUtils.addConfigPrefix(cfg, "final");
+ cfgManager.newConfiguration(cfg);
+ }
+ sw.stop();
+ System.out.println(sw.elapsed(TimeUnit.SECONDS));
+ }
+
+ public static void main(String[] args) throws IOException {
+ generateBlobGraphs(new FMRadio.FMRadioCore());
+ }
+
+ /**
+ * Backups the files generated during tuning.
+ */
+ public static void backup(String appName) {
+ rename(appName, "summary");
+ rename(appName, "compileTime.txt");
+ rename(appName, "runTime.txt");
+ rename(appName, "drainTime.txt");
+ rename(appName, "GraphProperty.txt");
+ rename(appName, "profile.txt");
+ }
+
+ /**
+ * Move all files and directories, except the configuration directory, from
+ * appDir to appDir/tune directory. Does nothing if tune directory exists.
+ *
+ * @param appName
+ */
+ public static void backup1(String appName) {
+ File[] listOfFilesMove = listOfFilesMove(appName);
+ if (listOfFilesMove.length == 0)
+ return;
+
+ File tuneDir = new File(String.format("%s%stune", appName,
+ File.separator));
+ if (tuneDir.exists())
+ return;
+
+ if (!createDir(tuneDir.getPath()))
+ System.err.println(String.format("Creating %s dir failed.",
+ tuneDir.getPath()));
+ for (File f : listOfFilesMove) {
+ try {
+ Files.move(f.toPath(),
+ Paths.get(tuneDir.getPath(), f.getName()),
+ REPLACE_EXISTING);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public static File[] listOfFilesMove(final String appName) {
+ File dir = new File(appName);
+ File[] files = dir.listFiles(new FilenameFilter() {
+ public boolean accept(File dir, String name) {
+ return !name.equals(ConfigurationUtils.configDir);
+ }
+ });
+ return files;
+ }
+
+ public static void newApp(String appName) {
+ createAppDir(appName);
+ backup1(appName);
+ Utils.writeReadMeTxt(appName);
+ }
}
diff --git a/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java b/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java
new file mode 100644
index 00000000..4a6ca579
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java
@@ -0,0 +1,24 @@
+package edu.mit.streamjit.impl.distributed.node;
+
+import com.google.common.collect.ImmutableSet;
+
+import edu.mit.streamjit.impl.blob.Blob;
+import edu.mit.streamjit.impl.distributed.node.BlobExecuter.BlobThread2;
+
+/**
+ * Assigns CPU cores to {@link BlobThread2}s. {@link BlobThread2}s are expected
+ * to set their processor affinity which is given by
+ * {@link AffinityManager#getAffinity(Blob, int)} before start running.
+ *
+ * @author sumanan
+ * @since 4 Feb, 2015
+ */
+public interface AffinityManager {
+
+ /**
+ * @param blob
+ * @param coreCode
+ * @return Set of CPU cores that is assigned the blob's coreCode.
+ */
+ ImmutableSet getAffinity(Blob blob, int coreCode);
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java b/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java
new file mode 100644
index 00000000..7d19f705
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java
@@ -0,0 +1,29 @@
+package edu.mit.streamjit.impl.distributed.node;
+
+import com.google.common.collect.ImmutableSet;
+
+import edu.mit.streamjit.impl.blob.Blob;
+
+/**
+ * Various implementations of the interface {@link AffinityManager}.
+ *
+ * @author sumanan
+ * @since 4 Feb, 2015
+ */
+public class AffinityManagers {
+
+ /**
+ * This is an empty {@link AffinityManager}. {@link #getAffinity(Blob, int)}
+ * always returns null.
+ *
+ * @author sumanan
+ * @since 4 Feb, 2015
+ */
+ public static class EmptyAffinityManager implements AffinityManager {
+
+ @Override
+ public ImmutableSet getAffinity(Blob blob, int coreCode) {
+ return null;
+ }
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java b/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java
new file mode 100644
index 00000000..c36f7369
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java
@@ -0,0 +1,100 @@
+package edu.mit.streamjit.impl.distributed.node;
+
+import java.io.IOException;
+
+import com.google.common.collect.ImmutableList;
+
+import edu.mit.streamjit.impl.blob.Buffer;
+import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection;
+import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPBuffer;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.common.Connection;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo;
+import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider;
+
+public class AsyncOutputChannel implements BoundaryOutputChannel {
+
+ private volatile Connection con;
+
+ private final String name;
+
+ private final ConnectionProvider conProvider;
+
+ private final ConnectionInfo conInfo;
+
+ private AsyncTCPBuffer buffer = null;
+
+ private volatile boolean isFinal;
+
+ private volatile boolean stopCalled;
+
+ public AsyncOutputChannel(ConnectionProvider conProvider,
+ ConnectionInfo conInfo, String bufferTokenName, int debugLevel) {
+ name = "AsyncTCPOutputChannel " + bufferTokenName;
+ this.conProvider = conProvider;
+ this.conInfo = conInfo;
+ isFinal = false;
+ stopCalled = false;
+ }
+
+ @Override
+ public String name() {
+ return name;
+ }
+
+ @Override
+ public Runnable getRunnable() {
+ return new Runnable() {
+ @Override
+ public void run() {
+ if (con == null || !con.isStillConnected()) {
+ try {
+ con = conProvider.getConnection(conInfo);
+ buffer = new AsyncTCPBuffer((AsyncTCPConnection) con);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ };
+ }
+
+ @Override
+ public ImmutableList getUnprocessedData() {
+ return ImmutableList.of();
+ }
+
+ @Override
+ public void stop(boolean isFinal) {
+ while (con == null);
+ this.isFinal = isFinal;
+ if (!stopCalled) {
+ try {
+ con.softClose();
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+ stopCalled = true;
+ }
+
+ @Override
+ public void sendData() {
+
+ }
+
+ @Override
+ public Connection getConnection() {
+ return con;
+ }
+
+ @Override
+ public ConnectionInfo getConnectionInfo() {
+ return conInfo;
+ }
+
+ @Override
+ public Buffer getBuffer() {
+ return buffer;
+ }
+}
diff --git a/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java b/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java
new file mode 100644
index 00000000..95f27428
--- /dev/null
+++ b/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java
@@ -0,0 +1,508 @@
+package edu.mit.streamjit.impl.distributed.node;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableTable;
+
+import edu.mit.streamjit.api.Worker;
+import edu.mit.streamjit.impl.blob.Blob;
+import edu.mit.streamjit.impl.blob.Blob.Token;
+import edu.mit.streamjit.impl.blob.Buffer;
+import edu.mit.streamjit.impl.blob.Buffers;
+import edu.mit.streamjit.impl.blob.DrainData;
+import edu.mit.streamjit.impl.common.Workers;
+import edu.mit.streamjit.impl.distributed.common.AppStatus;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.BoundaryInputChannelManager;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.BoundaryOutputChannelManager;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.InputChannelManager;
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.OutputChannelManager;
+import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType;
+import edu.mit.streamjit.impl.distributed.common.Connection;
+import edu.mit.streamjit.impl.distributed.common.SNDrainElement;
+import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData;
+import edu.mit.streamjit.impl.distributed.common.SNMessageElement;
+import edu.mit.streamjit.impl.distributed.common.SNTimeInfo;
+import edu.mit.streamjit.impl.distributed.runtimer.Controller;
+import edu.mit.streamjit.util.affinity.Affinity;
+
+/**
+ * This class was an inner class of {@link BlobsManagerImpl}. I have refactored
+ * {@link BlobsManagerImpl} and moved this class to a new file.
+ *
+ * @author sumanan
+ * @since 4 Feb, 2015
+ */
+class BlobExecuter {
+
+	/**
+	 * The {@link BlobsManagerImpl} that owns this executer; supplies shared
+	 * services such as the affinity manager and buffer manager.
+	 */
+	private final BlobsManagerImpl blobsManagerImpl;
+
+	// The blob whose core code this executer runs.
+	Blob blob;
+
+	// Token identifying this blob within the stream graph.
+	final Token blobID;
+
+	// One thread per core assigned to the blob; populated in the constructor.
+	// NOTE(review): generic type parameters appear stripped in this diff
+	// rendering — presumably Set<BlobThread2>; confirm against the repository.
+	final private Set blobThreads;
+
+	/**
+	 * Buffers for all input and output edges of the {@link #blob}.
+	 */
+	ImmutableMap bufferMap;
+
+	// Subset of buffers that are local (same-node) output edges.
+	private ImmutableMap outputLocalBuffers;
+
+	/**
+	 * This flag will be set to true if an exception is thrown by the core code
+	 * of the {@link Blob}. Any exception occurred in a blob's core code will
+	 * be informed to {@link Controller} to halt the application. See the
+	 * {@link BlobThread2}.
+	 */
+	AtomicBoolean crashed;
+
+	// Draining progress indicator; starts at 0 in the constructor. The
+	// meaning of the individual values is defined by code outside this view.
+	volatile int drainState;
+
+	// Manage this blob's boundary input/output channels (IO connections).
+	final BoundaryInputChannelManager inChnlManager;
+
+	final BoundaryOutputChannelManager outChnlManager;
+
+	// Kind of drain requested for this blob; set during draining.
+	private DrainType drainType;
+
+ BlobExecuter(BlobsManagerImpl blobsManagerImpl, Token t, Blob blob,
+ ImmutableMap inputChannels,
+ ImmutableMap outputChannels) {
+ this.blobsManagerImpl = blobsManagerImpl;
+ this.crashed = new AtomicBoolean(false);
+ this.blob = blob;
+ this.blobThreads = new HashSet<>();
+ assert blob.getInputs().containsAll(inputChannels.keySet());
+ assert blob.getOutputs().containsAll(outputChannels.keySet());
+ this.inChnlManager = new InputChannelManager(inputChannels);
+ this.outChnlManager = new OutputChannelManager(outputChannels);
+
+ String baseName = getName(blob);
+ for (int i = 0; i < blob.getCoreCount(); i++) {
+ String name = String.format("%s - %d", baseName, i);
+ blobThreads.add(new BlobThread2(blob.getCoreCode(i), this, name,
+ blobsManagerImpl.affinityManager.getAffinity(blob, i)));
+ }
+
+ if (blobThreads.size() < 1)
+ throw new IllegalStateException("No blobs to execute");
+
+ drainState = 0;
+ this.blobID = t;
+ }
+
+	/**
+	 * @return the {@link Token} that identifies this blob.
+	 */
+	public Token getBlobID() {
+		return this.blobID;
+	}
+
+ /**
+ * Gets buffer from {@link BoundaryChannel}s and builds bufferMap. The
+ * bufferMap will contain all input and output edges of the {@link #blob}.
+ *
+ * Note that, Some {@link BoundaryChannel}s (e.g.,
+ * {@link AsyncOutputChannel}) create {@link Buffer}s after establishing
+ * {@link Connection} with other end. So this method must be called after
+ * establishing all IO connections.
+ * {@link InputChannelManager#waitToStart()} and
+ * {@link OutputChannelManager#waitToStart()} ensure that the IO connections
+ * are successfully established.
+ *
+ * @return Buffer map which contains {@link Buffers} for all input and
+ * output edges of the {@link #blob}.
+ */
+ private ImmutableMap buildBufferMap() {
+ ImmutableMap.Builder bufferMapBuilder = ImmutableMap
+ .builder();
+ ImmutableMap.Builder outputLocalBufferBuilder = ImmutableMap
+ .builder();
+ ImmutableMap localBufferMap = this.blobsManagerImpl.bufferManager
+ .localBufferMap();
+ ImmutableMap