diff --git a/doc/How to tune.txt b/doc/How to tune.txt index 34d0d0b6..ff247917 100644 --- a/doc/How to tune.txt +++ b/doc/How to tune.txt @@ -2,7 +2,7 @@ Go to lib/opentuner/ folder and run venv-bootstrap.py. This will setup a virtual Run the edu.mit.streamjit.tuner.ConfigGenerator.java to generate configuration information for a particular app. Tuner.java will update the apps table in the streamjit.db with name, configuration, location and class name of the streamjit app. -Build the edu.mit.streamjit.tuner.RunApp.java and export it as runnable jar in to stramjit folder. If you need to change the input size, you can change it inside the function runApp() in the RunApp class. +Build the edu.mit.streamjit.tuner.RunApp.java and export it as runnable jar in to stramjit folder. Run lib/opentuner/streamjit/tuner2.py. diff --git a/jarapp.properties b/jarapp.properties new file mode 100644 index 00000000..59f0ecf8 --- /dev/null +++ b/jarapp.properties @@ -0,0 +1,8 @@ +app=FMRadio +streamGraphName=FMRadioCore +#app=NestedSplitJoin +#streamGraphName=NestedSplitJoinCore +username=sumanan +##password=abc123 +mainclasspath=edu.mit.streamjit.test.DistAppRunner +numOfSNs=2 diff --git a/jarapp.xml b/jarapp.xml new file mode 100644 index 00000000..c87cff99 --- /dev/null +++ b/jarapp.xml @@ -0,0 +1,153 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + #!/bin/bash +#Author - Sumanan +app=${app} +mainClass=${streamGraphName} +if [ -d $app ]; then + echo "$app exists. No downloads..." 
+ exit +fi +mkdir -p $app +cd $app +mkdir -p $mainClass +scp -r ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app/$mainClass/\{summary,*.txt,*.orig,streamgraph.dot\} $mainClass/ +scp -r ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app/\{*.sh,slurm-*,options.properties\} . +#to download everything. +#rsync -avh --progress ${username}@lanka.csail.mit.edu:/data/scratch/${username}/$app . + + + diff --git a/lib/opentuner/streamjit/onlinetuner.py b/lib/opentuner/streamjit/onlinetuner.py index d68c14f0..115c971d 100644 --- a/lib/opentuner/streamjit/onlinetuner.py +++ b/lib/opentuner/streamjit/onlinetuner.py @@ -17,21 +17,26 @@ class StreamJitMI(MeasurementInterface): ''' Measurement Interface for tunning a StreamJit application''' - def __init__(self, args, ss, manipulator, inputmanager, objective): + def __init__(self, args, configuration, connection, manipulator, inputmanager, objective): super(StreamJitMI, self).__init__(args = args, program_name = args.program, manipulator = manipulator, input_manager = inputmanager, objective = objective) - self.sdk = ss + self.connection = connection self.trycount = 0 + self.config = configuration def run(self, desired_result, input, limit): self.trycount = self.trycount + 1 print self.trycount - cfg = desired_result.configuration.data - #self.niceprint(cfg) - self.sdk.sendmsg("%s\n"%cfg) - msg = self.sdk.recvmsg() + + cfg_data = desired_result.configuration.data + #self.niceprint(cfg_data) + for k in self.config.params: + self.config.getParameter(k).update_value_for_json(cfg_data) + self.connection.sendmsg(self.config.toJSON()) + + msg = self.connection.recvmsg() if (msg == "exit\n"): #data = raw_input ( "exit cmd received. Press Keyboard to exit..." 
) - self.sdk.close() + self.connection.close() sys.exit(1) exetime = float(msg) if exetime < 0: @@ -55,31 +60,32 @@ def program_version(self): def save_final_config(self, configuration): '''called at the end of autotuning with the best resultsdb.models.Configuration''' - cfg = configuration.data - print "Final configuration", cfg - self.sdk.sendmsg("Completed") - self.sdk.sendmsg("%s\n"%cfg) - self.sdk.close() + cfg_data = configuration.data + print "Final configuration", cfg_data + for k in self.config.params: + self.config.getParameter(k).update_value_for_json(cfg_data) + + self.connection.sendmsg("Completed") + self.connection.sendmsg(self.config.toJSON()) + self.connection.close() sys.exit(0) -def main(args, cfg, ss): +def main(args, cfg, connection): logging.basicConfig(level=logging.INFO) manipulator = ConfigurationManipulator() - params = cfg.getAllParameters() #print "\nFeature variables...." - for key in params.keys(): - #print "\t", key - manipulator.add_parameter(cfg.getParameter(key)) + for p in cfg.getAllParameters().values(): + manipulator.add_parameter(p) - mi = StreamJitMI(args, ss, manipulator, FixedInputManager(), + mi = StreamJitMI(args, cfg, connection, manipulator, FixedInputManager(), MinimizeTime()) m = TuningRunMain(mi, args) m.main() -def start(argv, cfg, ss): +def start(argv, cfg, connection): log = logging.getLogger(__name__) parser = argparse.ArgumentParser(parents=opentuner.argparsers()) @@ -90,4 +96,4 @@ def start(argv, cfg, ss): if not args.database: args.database = 'sqlite:///' + args.program + '.db' - main(args, cfg, ss) + main(args, cfg, connection) diff --git a/lib/opentuner/streamjit/sjparameters.py b/lib/opentuner/streamjit/sjparameters.py index 5d7471f2..3f2862cc 100644 --- a/lib/opentuner/streamjit/sjparameters.py +++ b/lib/opentuner/streamjit/sjparameters.py @@ -1,7 +1,7 @@ import deps #fix sys.path import random import opentuner -from opentuner.search.manipulator import IntegerParameter, FloatParameter, SwitchParameter, 
PermutationParameter, ArrayParameter +from opentuner.search.manipulator import IntegerParameter, FloatParameter, SwitchParameter, PermutationParameter, ParameterArray class sjIntegerParameter(IntegerParameter): def __init__(self, name, min, max, value, javaClass = None, **kwargs): @@ -86,7 +86,7 @@ def json_replacement(self): "universe": self.universe, "class": self.javaClass} -class sjCompositionParameter(ArrayParameter): +class sjCompositionParameter(ParameterArray): def __init__(self, name, values, javaClass): super(sjCompositionParameter, self).__init__(name, len(values), FloatParameter, 0.0, 1.0) self.values = values diff --git a/lib/opentuner/streamjit/tuner2.py b/lib/opentuner/streamjit/tuner2.py index a7149ca7..c90460b7 100644 --- a/lib/opentuner/streamjit/tuner2.py +++ b/lib/opentuner/streamjit/tuner2.py @@ -23,13 +23,14 @@ class StreamJitMI(MeasurementInterface): ''' Measurement Interface for tunning a StreamJit application''' - def __init__(self, args, jvmOptions, manipulator, inputmanager, objective): + def __init__(self, args, configuration, jvmOptions, manipulator, inputmanager, objective): args.technique = ['StreamJITBandit'] super(StreamJitMI, self).__init__(args = args, program_name = args.program, manipulator = manipulator, input_manager = inputmanager, objective = objective) self.trycount = 0 self.jvmOptions = jvmOptions self.program = args.program self.StreamNodes = [] + self.config = configuration try: self.tunedataDB = sqlite3.connect('sj' + args.program + '.db') c = self.tunedataDB.cursor() @@ -43,42 +44,41 @@ def __init__(self, args, jvmOptions, manipulator, inputmanager, objective): data = raw_input ( "Press Keyboard to exit..." 
) def run(self, desired_result, input, limit): - cfg = dict.copy(desired_result.configuration.data) - (st, t) = self.runApp(cfg) + cfg_data = dict.copy(desired_result.configuration.data) + (st, t) = self.runApp(cfg_data) return opentuner.resultsdb.models.Result(state=st, time=t) - def runApp(self, cfg): + def runApp(self, cfg_data): self.trycount = self.trycount + 1 print '\n**********New Run - %d **********'%self.trycount - #self.niceprint(cfg) + #self.niceprint(cfg_data) + + for k in self.config.params: + self.config.getParameter(k).update_value_for_json(cfg_data) #TODO: find a better place for these system-specific constants #the path to the Java executable, or "java" to use system's default javaPath = "java" - #the classpath, suitable as the value of the '-cp' java argument - javaClassPath = "build/jar/streamjit.jar:lib/asm.jar:lib/bridj.jar:lib/bytecodelib.jar:lib/guava.jar:lib/javax.json.jar:lib/joptsimple.jar:lib/sqlitejdbc.jar" - args = [javaPath, "-cp", javaClassPath] jvmArgs = [] for key in self.jvmOptions.keys(): - self.jvmOptions.get(key).setValue(cfg[key]) + self.jvmOptions.get(key).setValue(cfg_data[key]) cmd = self.jvmOptions.get(key).getCommand() if len(cmd) > 0: jvmArgs.append(cmd) - args.extend(jvmArgs) - args.append("edu.mit.streamjit.tuner.RunApp") + + args = self.getArgs1(javaPath, jvmArgs) args.append(str(self.program)) args.append(str(self.trycount)) cur = self.tunedataDB.cursor() - query = 'INSERT INTO results VALUES (%d,"%s","%s", "%f")'%(self.trycount, " ".join(jvmArgs), cfg, -1) + query = "INSERT INTO results VALUES (%d,'%s','%s', '%f')"%(self.trycount, " ".join(jvmArgs), self.config.toJSON(), -1) cur.execute(query) self.tunedataDB.commit() - p = subprocess.Popen(args, stderr=subprocess.PIPE) - if cfg.get('noOfMachines'): - self.startStreamNodes(cfg.get('noOfMachines') - 1, args) + if cfg_data.get('noOfMachines'): + self.startStreamNodes(cfg_data.get('noOfMachines') - 1, args) timeout = 100 @@ -102,8 +102,8 @@ def runApp(self, cfg): print 
"\033[31;1mException Found\033[0m" self.waitForStreamNodes(True) cur = self.tunedataDB.cursor() - str1 = str(commandStr) - str2 = str(cfg) + str1 = str(jvmArgs) + str2 = self.config.toJSON() cur.execute('INSERT INTO exceptions VALUES (?,?,?)', (err, str1, str2)) self.tunedataDB.commit() return ('ERROR', float('inf')) @@ -122,11 +122,27 @@ def runApp(self, cfg): self.waitForStreamNodes(False) return ('OK',exetime) - def niceprint(self, cfg): + # Return args that is to run a runnable jar file. + def getArgs1(self, javaPath, jvmArgs): + args = [javaPath] + args.extend(jvmArgs) + args.append("-jar") + args.append("RunApp.jar") + return args + + # Return args that is to run from class file. + def getArgs2(self, javaPath, jvmArgs): + #the classpath, suitable as the value of the '-cp' java argument + javaClassPath = "build/jar/streamjit.jar:lib/asm.jar:lib/bridj.jar:lib/bytecodelib.jar:lib/guava.jar:lib/javax.json.jar:lib/joptsimple.jar:lib/sqlitejdbc.jar" + args = [javaPath, "-cp", javaClassPath] + args.append("edu.mit.streamjit.tuner.RunApp") + return args + + def niceprint(self, cfg_data): print "\n--------------------------------------------------" print self.trycount - for key in cfg.keys(): - print "%s - %s"%(key, cfg[key]) + for key in cfg_data.keys(): + print "%s - %s"%(key, cfg_data[key]) def program_name(self): return self.args.program @@ -136,12 +152,13 @@ def program_version(self): def save_final_config(self, configuration): '''called at the end of autotuning with the best resultsdb.models.Configuration''' - cfg = dict.copy(configuration.data) + cfg_data = dict.copy(configuration.data) + print "\033[32;1mFinal Config...\033[0m" - (state, time) = self.runApp(cfg) + (state, time) = self.runApp(cfg_data) conn = sqlite3.connect('streamjit.db', 100) cur = conn.cursor() - query = 'INSERT INTO FinalResult VALUES ("%s","%s", %d, "%s", "%f")'%(self.program, cfg, self.trycount, state, float(time)) + query = "INSERT INTO FinalResult VALUES ('%s','%s', %d, '%s', 
'%f')"%(self.program, self.config.toJSON(), self.trycount, state, float(time)) cur.execute(query) conn.commit() @@ -166,13 +183,15 @@ def main(args, cfg, jvmOptions): logging.basicConfig(level=logging.INFO) manipulator = ConfigurationManipulator() - params = dict(cfg.items() + jvmOptions.items()) + cfgparams = cfg.getAllParameters() + + params = dict(cfgparams.items() + jvmOptions.items()) #print "\nFeature variables...." for key in params.keys(): #print "\t", key manipulator.add_parameter(params.get(key)) - mi = StreamJitMI(args,jvmOptions, manipulator, FixedInputManager(), + mi = StreamJitMI(args, cfg, jvmOptions, manipulator, FixedInputManager(), MinimizeTime()) m = TuningRunMain(mi, args) @@ -200,7 +219,6 @@ def start(program): sys.exit(1) cfgString = row[0] cfg = configuration.getConfiguration(cfgString) - cfgparams = cfg.getAllParameters() except Exception, e: print 'Exception occured' traceback.print_exc() @@ -244,7 +262,7 @@ def start(program): enabledJvmOptions = [aggressiveOpts, compileThreshold, freqInlineSize, maxInlineSize, maxInlineLevel] jvmOptions = {x.name:x for x in enabledJvmOptions} - main(args, cfgparams, jvmOptions) + main(args, cfg, jvmOptions) if __name__ == '__main__': prgrms = [] diff --git a/lib/opentuner/streamjit/tuner3.py b/lib/opentuner/streamjit/tuner3.py index aa19f643..f23ee08a 100755 --- a/lib/opentuner/streamjit/tuner3.py +++ b/lib/opentuner/streamjit/tuner3.py @@ -183,7 +183,7 @@ def make_jvm_options(): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(parents=opentuner.argparsers()) - parser.add_argument('--program', help='StreamJIT benchmark to tune (with first input)') + parser.add_argument('program', help='StreamJIT benchmark to tune (with first input)') parser.add_argument('--timestamp', help='timestamp to use for final config/errors', default=time.strftime('%Y%m%d-%H%M%S')) args = parser.parse_args() diff --git a/scripts/FMRadio.sh b/scripts/FMRadio.sh new file mode 100644 
index 00000000..988d2c77 --- /dev/null +++ b/scripts/FMRadio.sh @@ -0,0 +1,15 @@ +#!/bin/bash +#Author - Sumanan +# A sample file to download tuning files from Lanka cluster. +app=FMRadio15 +mainClass=FMRadioCore +if [ -d $app ]; then + echo "$app exists. No downloads..." + exit +fi +mkdir -p $app +cd $app +mkdir -p $mainClass +scp -r sumanan@lanka.csail.mit.edu:/data/scratch/sumanan/$app/$mainClass/\{summary,*.txt,streamgraph.dot\} $mainClass/ +scp -r sumanan@lanka.csail.mit.edu:/data/scratch/sumanan/$app/\{*.sh,slurm-*,options.properties,*.jar\} . + diff --git a/scripts/backup.sh b/scripts/backup.sh new file mode 100644 index 00000000..98ad53d8 --- /dev/null +++ b/scripts/backup.sh @@ -0,0 +1,23 @@ +#!/bin/bash +#Author - Sumanan +#Feb 25, 2015 +#Backups tuning output files and directories. +args=("$@") +suffix=${args[0]} +if [ -z $suffix ] +then +suffix="Orig" +fi + +if [ -d summary$suffix ]; then + echo "summary$suffix exists. No backups. Exiting..." + exit +fi + +mv summary summary$suffix +mv compileTime.txt compileTime$suffix.txt +mv runTime.txt runTime$suffix.txt +mv drainTime.txt drainTime$suffix.txt +mv GraphProperty.txt GraphProperty$suffix.txt +mv README.txt README$suffix.txt + diff --git a/scripts/setup.sh b/scripts/setup.sh new file mode 100644 index 00000000..c25b97f6 --- /dev/null +++ b/scripts/setup.sh @@ -0,0 +1,61 @@ +#!/bin/bash +#Author - Sumanan +#Feb 9, 2015 +#Setup directories and scripts to run a distributed StreamJit app. 
+function writeRun(){ + runfile="run.sh" + res=$(get_prop "./options.properties" "tune") + echo "#!/bin/bash" > $runfile + echo "#SBATCH --tasks-per-node=1" >> $runfile + echo "#SBATCH -N 1" >> $runfile + echo "#SBATCH --cpu_bind=verbose,cores" >> $runfile + echo "#SBATCH --exclusive" >> $runfile + echo "cd /data/scratch/sumanan/"$1 >> $runfile + if [ "$res" -eq "1" ];then + echo "mkdir -p $2" >> $runfile + echo "cd $2" >> $runfile + echo "srun python ../lib/opentuner/streamjit/streamjit2.py 12563 &" >> $runfile + echo "cd .." >> $runfile + fi + echo "srun -l ../bin/java/jdk1.8.0_31/bin/java -Xmx2048m -jar $1.jar $3" >> $runfile +} + +function writeSN(){ + runfile="streamnode.sh" + echo "#!/bin/bash" > $runfile + echo "#SBATCH --tasks-per-node=1" >> $runfile + echo "#SBATCH -N $2" >> $runfile + echo "#SBATCH --cpu_bind=verbose,cores" >> $runfile + echo "#SBATCH --exclusive" >> $runfile + echo "cd /data/scratch/sumanan/"$1 >> $runfile + echo "srun --exclusive --nodes=$2 ../bin/java/jdk1.8.0_31/bin/java -Xmx2048m -jar StreamNode.jar 128.30.116." >> $runfile +} + +function creatdirs(){ + mkdir -p $1 + ln -s /data/scratch/sumanan/data $1/data + ln -s /data/scratch/sumanan/lib $1/lib + cd $1 +} + +get_prop(){ + grep "^${2}=" ${1}| sed "s%${2}=\(.*\)%\1%" +} + +if [ "$#" -ne 3 ]; then + echo "Illegal number of parameters" + echo "3 arguments must be passed" + echo "setup.sh " + exit +fi + +args=("$@") +app=${args[0]} +mainClass=${args[1]} +nodes=${args[2]} +totalNodes=$((nodes + 1)) +cd /data/scratch/sumanan +creatdirs $app #Changes the current working directory(CWD). 
+mv "optionsLanka.properties" "options.properties" +writeRun $app $mainClass $totalNodes +writeSN $app $nodes diff --git a/src/edu/mit/streamjit/impl/blob/DrainData.java b/src/edu/mit/streamjit/impl/blob/DrainData.java index 01b0a1f7..728a9402 100644 --- a/src/edu/mit/streamjit/impl/blob/DrainData.java +++ b/src/edu/mit/streamjit/impl/blob/DrainData.java @@ -86,6 +86,8 @@ public Object getWorkerState(int workerId, String fieldName) { * @return a merged DrainData */ public DrainData merge(DrainData other) { + if (other == null) + return this; ImmutableMap.Builder> dataBuilder = ImmutableMap.builder(); for (Token t : Sets.union(data.keySet(), other.data.keySet())) { ImmutableList us = getData(t) != null ? getData(t) : ImmutableList.of(); diff --git a/src/edu/mit/streamjit/impl/common/ConfigurationAnalyzer.java b/src/edu/mit/streamjit/impl/common/ConfigurationAnalyzer.java new file mode 100644 index 00000000..6150a2d3 --- /dev/null +++ b/src/edu/mit/streamjit/impl/common/ConfigurationAnalyzer.java @@ -0,0 +1,177 @@ +package edu.mit.streamjit.impl.common; + +import java.io.File; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map.Entry; + +import edu.mit.streamjit.impl.common.Configuration.FloatParameter; +import edu.mit.streamjit.impl.common.Configuration.IntParameter; +import edu.mit.streamjit.impl.common.Configuration.Parameter; +import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; +import edu.mit.streamjit.tuner.SqliteAdapter; +import edu.mit.streamjit.util.ConfigurationUtils; + +public class ConfigurationAnalyzer { + + public static void main(String[] args) { + ConfigurationAnalyzer ca = new ConfigurationAnalyzer( + "NestedSplitJoinCore"); + // ca.compare(3, 4); + + System.out.println(ca.getRunningTime("NestedSplitJoinCore", 3)); + } + + private final String cfgDir; + + private final String appName; + + List bestConfigurations; + + /** + * Path of the directory which 
contains app's configuration in sub + * directory. + * + *
+	 * confgDirectory
+	 * 			|
+	 * 			------>appName1
+	 * 			|
+	 * 			------>appName2
+	 * 			|
+	 * 			------>
+	 * 
+ */ + + public ConfigurationAnalyzer(String appName) { + verifyPath(ConfigurationUtils.configDir, appName); + bestConfigurations = new LinkedList<>(); + this.appName = appName; + this.cfgDir = String.format("%s%s%s", appName, File.separator, + ConfigurationUtils.configDir); + } + + private void compare(FloatParameter p1, FloatParameter p2) { + float val1 = p1.getValue(); + float val2 = p2.getValue(); + if (val1 == val2) + System.out.println(String.format("%s: p1 = p2. value = %f", + p1.getName(), val1)); + if (val1 > val2) + System.out.println(String.format("%s: p1 > p2. %f > %f", + p1.getName(), val1, val2)); + else + System.out.println(String.format("%s: p1 < p2. %f < %f", + p1.getName(), val1, val2)); + } + + private void compare(Integer first, Integer second) { + Configuration cfg1 = readcoConfiguration(first); + Configuration cfg2 = readcoConfiguration(second); + for (Entry en : cfg1.getParametersMap().entrySet()) { + Parameter p1 = en.getValue(); + Parameter p2 = cfg2.getParameter(en.getKey()); + if (p2 == null) + throw new IllegalStateException(String.format( + "No parameter %s in configuration2", en.getKey())); + if (p1.getClass() == Configuration.IntParameter.class) + compare((IntParameter) p1, (IntParameter) p2); + else if (p1.getClass() == Configuration.FloatParameter.class) + compare((FloatParameter) p1, (FloatParameter) p2); + else if (p1.getClass() == Configuration.SwitchParameter.class) + compare((SwitchParameter) p1, (SwitchParameter) p2); + else + System.out.println(String.format( + "Parameter class %s is not handled.", p1.getClass() + .getName())); + + } + } + + /* + * Any way to avoid code duplication in compare(IntParameter p1, + * IntParameter p2) and compare(FloatParameter p1, FloatParameter p2)? + */ + /** + * + * @param p1 + * @param p2 + */ + private void compare(IntParameter p1, IntParameter p2) { + int val1 = p1.getValue(); + int val2 = p2.getValue(); + if (val1 == val2) + System.out.println(String.format("%s: p1 = p2. 
value = %d", + p1.getName(), val1)); + if (val1 > val2) + System.out.println(String.format("%s: p1 > p2. %d > %d", + p1.getName(), val1, val2)); + else + System.out.println(String.format("%s: p1 < p2. %d < %d", + p1.getName(), val1, val2)); + } + + private void compare(SwitchParameter p1, SwitchParameter p2) { + Class type1 = p1.getGenericParameter(); + Class type2 = p2.getGenericParameter(); + assert type1 == type2; + T1 val1 = p1.getValue(); + T2 val2 = p2.getValue(); + + if (val1.equals(val2)) + System.out.println(String.format( + "%s - same values - p1 = %s, p2 = %s. Universe:%s", + p1.getName(), val1, val2, p1.getUniverse())); + else + System.out.println(String.format( + "%s - different values - p1 = %s, p2 = %s. Universe:%s", + p1.getName(), val1, val2, p1.getUniverse())); + } + + private SqliteAdapter connectDB(String appName) { + SqliteAdapter sqlite = new SqliteAdapter(); + sqlite.connectDB(appName); + return sqlite; + } + + private double getRunningTime(String appName, int round) { + String dbPath = String.format("%s%s%s", appName, File.separator, + appName); + SqliteAdapter sqlite = connectDB(dbPath); + ResultSet result = sqlite.executeQuery(String.format( + "SELECT * FROM result WHERE id=%d", round)); + + String runtime = "1000000000"; + try { + runtime = result.getString("time"); + } catch (SQLException e) { + e.printStackTrace(); + } + return Double.parseDouble(runtime); + } + + private Configuration readcoConfiguration(Integer cfgNo) { + String cfg = String.format("%s%s%d_%s.cfg", cfgDir, File.separator, + cfgNo, appName); + return ConfigurationUtils.readConfiguration(cfg); + } + + private boolean verifyPath(String cfgDir, String appName) { + String dbPath = String.format("%s%s%s", appName, File.separator, + appName); + File db = new File(dbPath); + if (!db.exists()) + throw new IllegalStateException("No database file found in " + + dbPath); + + String dirPath = String.format("%s%s%s", appName, File.separator, + cfgDir); + File dir = new 
File(dirPath); + if (!dir.exists()) + throw new IllegalStateException("No directory found in " + dirPath); + + return true; + } +} diff --git a/src/edu/mit/streamjit/impl/common/ConfigurationEditor.java b/src/edu/mit/streamjit/impl/common/ConfigurationEditor.java index fe4b8138..f43ca5eb 100644 --- a/src/edu/mit/streamjit/impl/common/ConfigurationEditor.java +++ b/src/edu/mit/streamjit/impl/common/ConfigurationEditor.java @@ -22,70 +22,96 @@ package edu.mit.streamjit.impl.common; import java.io.BufferedReader; +import java.io.File; import java.io.FileReader; -import java.io.FileWriter; import java.io.IOException; import java.io.InputStreamReader; import java.util.Map; +import com.google.common.base.Splitter; + import edu.mit.streamjit.api.OneToOneElement; import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.blob.BlobFactory; +import edu.mit.streamjit.impl.common.Configuration.IntParameter; import edu.mit.streamjit.impl.common.Configuration.Parameter; import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; +import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory; +import edu.mit.streamjit.impl.distributed.ConnectionManager; +import edu.mit.streamjit.impl.distributed.ConnectionManager.BlockingTCPNoParams; import edu.mit.streamjit.impl.distributed.DistributedBlobFactory; +import edu.mit.streamjit.impl.distributed.HotSpotTuning; +import edu.mit.streamjit.impl.distributed.PartitionManager; +import edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.impl.distributed.WorkerMachine; +import edu.mit.streamjit.test.apps.channelvocoder7.ChannelVocoder7; +import edu.mit.streamjit.util.ConfigurationUtils; +import edu.mit.streamjit.util.Pair; +import edu.mit.streamjit.util.json.Jsonifiers; public class ConfigurationEditor { - static String name; - static int noofwrks; /** * @param args * @throws IOException */ public static void main(String[] args) throws IOException { - edit(name, noofwrks); - // 
print("4366NestedSplitJoinCore.cfg"); + Pair ret = generate( + new ChannelVocoder7.ChannelVocoder7Kernel(), 16); + // edit1("FilterBankPipeline", "463", 83); + // print("4366_NestedSplitJoinCore.cfg"); + // changeMultiplierVal("NestedSplitJoinCore","final"); } - private static void generate(OneToOneElement stream) { - int noOfnodes = 4; + /** + * Reads a configuration and changes its multiplier value. + */ + private static void changeMultiplierVal(String appName, String namePrefix) { + Configuration config = ConfigurationUtils.readConfiguration(appName, + namePrefix); + if (config == null) + return; + Configuration.Builder builder = Configuration.builder(config); + IntParameter mulParam = config.getParameter("multiplier", + IntParameter.class); + if (mulParam != null) { + System.out.println("Multiplier values is " + mulParam.getValue()); + builder.removeParameter(mulParam.getName()); + } + + IntParameter newMulParam = new IntParameter("multiplier", 1, 100, 100); + builder.addParameter(newMulParam); + ConfigurationUtils.saveConfg(builder.build(), "444", appName); + } + private static Pair generate(OneToOneElement stream, + int noOfnodes) { ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); stream.visit(primitiveConnector); Worker source = (Worker) primitiveConnector.getSource(); Worker sink = (Worker) primitiveConnector.getSink(); - noofwrks = Workers.getIdentifier(sink) + 1; + int noofwrks = Workers.getIdentifier(sink) + 1; BlobFactory bf = new DistributedBlobFactory(noOfnodes); Configuration cfg = bf.getDefaultConfiguration(Workers .getAllWorkersInGraph(source)); - name = String.format("%s.cfg", stream.getClass().getSimpleName()); - - try { - FileWriter writer = new FileWriter(name, false); - writer.write(cfg.toJson()); - writer.flush(); - writer.close(); - } catch (IOException e) { - e.printStackTrace(); - } - + String appName = stream.getClass().getSimpleName(); + String namePrefix = "hand_"; + ConfigurationUtils.saveConfg(cfg, namePrefix, 
appName); + return new Pair(appName, noofwrks); } - private static void edit(String name, int maxWor) + /** + * This edit is for the configurations which are generated using + * {@link WorkerMachine} as {@link PartitionManager}. + */ + private static void edit(String appName, String namePrefix, int maxWor) throws NumberFormatException, IOException { - Configuration cfg; - try { - BufferedReader reader = new BufferedReader(new FileReader(name)); - String json = reader.readLine(); - cfg = Configuration.fromJson(json); - reader.close(); - } catch (Exception ex) { - System.err.println("File reader error"); + Configuration cfg = ConfigurationUtils.readConfiguration(appName, + namePrefix); + if (cfg == null) return; - } Configuration.Builder builder = Configuration.builder(cfg); BufferedReader keyinreader = new BufferedReader(new InputStreamReader( @@ -103,24 +129,117 @@ private static void edit(String name, int maxWor) } cfg = builder.build(); - FileWriter writer = new FileWriter(name); - writer.write(cfg.toJson()); - writer.close(); + ConfigurationUtils.saveConfg(cfg, namePrefix, appName); System.out.println("Successfully updated"); } - private static void print(String name) { - Configuration cfg; - try { - BufferedReader reader = new BufferedReader(new FileReader(name)); - String json = reader.readLine(); - cfg = Configuration.fromJson(json); - reader.close(); - } catch (Exception ex) { - System.err.println("File reader error"); + private static Pair generate1( + OneToOneElement stream, int noOfnodes) { + StreamJitApp app = new StreamJitApp<>(stream); + int noofwrks = Workers.getIdentifier(app.sink) + 1; + PartitionManager partitionManager = new HotSpotTuning(app); + ConnectionManager conManger = new BlockingTCPNoParams(0); + BlobFactory bf = new DistributedBlobFactory(partitionManager, + conManger, noOfnodes); + + Configuration cfg = bf.getDefaultConfiguration(Workers + .getAllWorkersInGraph(app.source)); + + String namePrefix = "hand_"; + 
ConfigurationUtils.saveConfg(cfg, namePrefix, app.name); + return new Pair(app.name, noofwrks); + } + + /** + * Generates default cfg of {@link Compiler2BlobFactory}. No modification + * done. + * + * @param stream + */ + private static void generate2(OneToOneElement stream) { + ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); + stream.visit(primitiveConnector); + Worker source = (Worker) primitiveConnector.getSource(); + BlobFactory bf = new Compiler2BlobFactory(); + // BlobFactory bf = new DistributedBlobFactory(1); + + Configuration cfg = bf.getDefaultConfiguration(Workers + .getAllWorkersInGraph(source)); + + String appName = stream.getClass().getSimpleName(); + String namePrefix = "hand_"; + ConfigurationUtils.saveConfg(cfg, namePrefix, appName); + } + + /** + * This edit is for the configurations which are generated using + * {@link HotSpotTuning} as {@link PartitionManager}. + */ + private static void edit1(String appName, String namePrefix, int maxWor) + throws NumberFormatException, IOException { + Configuration cfg = ConfigurationUtils.readConfiguration(appName, + namePrefix); + if (cfg == null) return; + + Configuration.Builder builder = Configuration.builder(cfg); + BufferedReader keyinreader = new BufferedReader(new InputStreamReader( + System.in)); + + for (int i = 0; i < maxWor; i++) { + String wrkrMachineName = String.format("worker%dtomachine", i); + String wrkrCutname = String.format("worker%dcut", i); + + SwitchParameter wrkrMachine = cfg.getParameter( + wrkrMachineName, SwitchParameter.class); + IntParameter wrkrCut = cfg.getParameter(wrkrCutname, + IntParameter.class); + + if (wrkrMachine != null) { + System.out.println(wrkrMachine.toString()); + boolean isOk1 = false; + while (!isOk1) { + try { + int val = Integer.parseInt(keyinreader.readLine()); + builder.removeParameter(wrkrMachine.getName()); + builder.addParameter(new SwitchParameter( + wrkrMachine.getName(), Integer.class, val, + wrkrMachine.getUniverse())); + 
isOk1 = true; + } catch (Exception ex) { + ex.printStackTrace(); + isOk1 = false; + } + } + } + + if (wrkrCut != null) { + System.out.println(wrkrCut.toString()); + boolean isOk = false; + while (!isOk) { + try { + int val = Integer.parseInt(keyinreader.readLine()); + builder.removeParameter(wrkrCut.getName()); + builder.addParameter(new IntParameter( + wrkrCut.getName(), wrkrCut.getRange(), val)); + isOk = true; + } catch (Exception ex) { + ex.printStackTrace(); + isOk = false; + } + } + } } + cfg = builder.build(); + ConfigurationUtils.saveConfg(cfg, namePrefix, appName); + System.out.println("Successfully updated"); + } + + private static void print(String cfgFilePath) { + Configuration cfg = ConfigurationUtils.readConfiguration(cfgFilePath); + if (cfg == null) + return; for (Map.Entry en : cfg.getParametersMap() .entrySet()) { if (en.getValue() instanceof SwitchParameter) { @@ -130,4 +249,93 @@ private static void print(String name) { } } } + + @Deprecated + private static void convert() { + String appName = "ChannelVocoder7Kernel"; + Configuration cfg = ConfigurationUtils.readConfiguration(String.format( + "%s%s%s%s%d_%s.cfg", appName, File.separator, + ConfigurationUtils.configDir, File.separator, 1, appName)); + try { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%d_%s.cfg", 0, appName))); + String pythonDict = reader.readLine(); + reader.close(); + + Configuration finalCfg = rebuildConfiguration(pythonDict, cfg); + ConfigurationUtils.saveConfg(finalCfg, "0", appName); + } catch (Exception ex) { + ex.printStackTrace(); + } + } + + /** + * Creates a new {@link Configuration} from the received python dictionary + * string. This is not a good way to do. + *

+ * TODO: Need to add a method to {@link Configuration} so that the + * configuration object can be updated from the python dict string. Now we + * are destructing the old confg object and recreating a new one every time. + * Not a appreciatable way. + * + * @param pythonDict + * Python dictionary string. Autotuner gives a dictionary of + * features with trial values. + * @param config + * Old configuration object. + * @return New configuration object with updated values from the pythonDict. + */ + @Deprecated + private static Configuration rebuildConfiguration(String pythonDict, + Configuration config) { + // System.out.println(pythonDict); + pythonDict = pythonDict.replaceAll("u'", ""); + pythonDict = pythonDict.replaceAll("':", ""); + pythonDict = pythonDict.replaceAll("\\{", ""); + pythonDict = pythonDict.replaceAll("\\}", ""); + Splitter dictSplitter = Splitter.on(", ").omitEmptyStrings() + .trimResults(); + Configuration.Builder builder = Configuration.builder(); + System.out.println("New parameter values from Opentuner..."); + for (String s : dictSplitter.split(pythonDict)) { + String[] str = s.split(" "); + if (str.length != 2) + throw new AssertionError("Wrong python dictionary..."); + Parameter p = config.getParameter(str[0]); + if (p == null) + continue; + // System.out.println(String.format("\t%s = %s", str[0], str[1])); + if (p instanceof IntParameter) { + IntParameter ip = (IntParameter) p; + builder.addParameter(new IntParameter(ip.getName(), + ip.getMin(), ip.getMax(), Integer.parseInt(str[1]))); + + } else if (p instanceof SwitchParameter) { + SwitchParameter sp = (SwitchParameter) p; + Class type = sp.getGenericParameter(); + int val = Integer.parseInt(str[1]); + SwitchParameter sp1 = new SwitchParameter(sp.getName(), + type, sp.getUniverse().get(val), sp.getUniverse()); + builder.addParameter(sp1); + } + + } + return builder.build(); + } + + /** + * TODO: This method is totally unnecessary if we remove the usage of the + * name "class" in side 
{@link Configuration}. + * + * @param cfg + * @return + */ + @Deprecated + private static String getConfigurationString(Configuration cfg) { + String s = Jsonifiers.toJson(cfg).toString(); + String s1 = s.replaceAll("__class__", "ttttt"); + String s2 = s1.replaceAll("class", "javaClassPath"); + String s3 = s2.replaceAll("ttttt", "__class__"); + return s3; + } } diff --git a/src/edu/mit/streamjit/impl/common/TimeLogger.java b/src/edu/mit/streamjit/impl/common/TimeLogger.java new file mode 100644 index 00000000..cc9b8b89 --- /dev/null +++ b/src/edu/mit/streamjit/impl/common/TimeLogger.java @@ -0,0 +1,144 @@ +package edu.mit.streamjit.impl.common; + +import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.distributed.node.StreamNode; +import edu.mit.streamjit.tuner.OnlineTuner; + +/** + * Logs various time measurements for off line performance analysis. Controller + * node may + *

    + *
  1. Measure the time durations for different events and use this interface to + * log those values. + *
  2. Call the appropriate event indicating methods and let the TimeLogger + * measure and log the time values. + *
+ * + * @author Sumanan sumanan@mit.edu + * @since Nov 22, 2014 + */ +public interface TimeLogger { + + /** + * Compiler may call this method to indicate the compilation event has + * started. TimeLogger may start a timer to measure the compilation time. + */ + public void compilationStarted(); + + /** + * Compiler can call this method to indicate the compilation event has + * finished. TimeLogger may stop the timer and log the compilation time. + * + * @param isCompiled + * : Additional detail that goes with log. + * @param msg + * : Additional details that go with log. + */ + public void compilationFinished(boolean isCompiled, String msg); + + /** + * Drainer or Tuner may call this method to indicate the draining event has + * started. TimeLogger may start a timer to measure the draining time. + */ + public void drainingStarted(); + + /** + * Drainer or Tuner may call this method to indicate the draining event has + * finished. TimeLogger may stop the timer and log the draining time. + * + * @param msg + * : Additional details that go with log. + */ + public void drainingFinished(String msg); + + /** + * Drainer or Tuner may call this method to indicate the {@link DrainData} + * collection event has started. TimeLogger may start a timer to measure the + * drain data collection time. + */ + public void drainDataCollectionStarted(); + + /** + * Drainer or Tuner may call this method to indicate the drain data + * collection event has finished. TimeLogger may stop the timer and log the + * drain data collection time. + * + * @param msg + * : Additional details that go with log. + */ + public void drainDataCollectionFinished(String msg); + + /** + * Log the total compilation time of a new configuration. (Controller node + * point of view). + * + * @param time + */ + public void logCompileTime(long time); + + /** + * Writes additional messages to compileTime OutputStreamWriter. 
+ * SNTimeInfoProcessor may use this method to log additional compilation + * messages those are collected from {@link StreamNode}s. + * + * @param msg + */ + public void logCompileTime(String msg); + + /** + * Log total {@link DrainData} collection time. + * + * @param time + */ + public void logDrainDataCollectionTime(long time); + + /** + * Log total draining time. + * + * @param time + */ + public void logDrainTime(long time); + + /** + * Writes additional messages to drainTime OutputStreamWriter. + * SNTimeInfoProcessor may use this method to log additional draining + * messages those are collected from {@link StreamNode}s. + * + * @param msg + */ + public void logDrainTime(String msg); + + /** + * Log the time taken to generate fixed amount of steady state outputs. + * + * @param time + */ + public void logRunTime(long time); + + /** + * Writes additional messages to runTime OutputStreamWriter. + * SNTimeInfoProcessor may use this method to log additional runTime + * messages those are collected from {@link StreamNode}s. + * + * @param msg + */ + public void logRunTime(String msg); + + /** + * This method shall be called to indicate the logger that a new + * configuration has been received. Appropriate caller would be + * {@link OnlineTuner}. + * + * @param cfgPrefix + * The prefix name of the new {@link Configuration}. Pass null or + * empty string if the prefix is unknown. + */ + public void newConfiguration(String cfgPrefix); + + /** + * Logs the time taken to get a new configuration from the OpenTuner. 
+ * + * @param time + */ + public void logSearchTime(long time); +} diff --git a/src/edu/mit/streamjit/impl/common/TuningStatistics.java b/src/edu/mit/streamjit/impl/common/TuningStatistics.java new file mode 100644 index 00000000..d3e7d98a --- /dev/null +++ b/src/edu/mit/streamjit/impl/common/TuningStatistics.java @@ -0,0 +1,126 @@ +package edu.mit.streamjit.impl.common; + +import java.io.File; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.IOException; +import java.util.Map; + +import edu.mit.streamjit.impl.common.Configuration.IntParameter; +import edu.mit.streamjit.impl.common.Configuration.Parameter; +import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; +import edu.mit.streamjit.util.ConfigurationUtils; + +public class TuningStatistics { + + private static String[] boolParams = { "remove", "fuse", "unboxStor", + "unboxInput", "unboxOutput" }; + + private static String[] intParams = { "InitBuffer", "multipl", "UnrollCo" }; + + /** + * @param args + * @throws IOException + */ + public static void main(String[] args) throws IOException { + // printCfgValues("1_NestedSplitJoinCore.cfg"); + printAll("../Tuner layer/tuning-oopsla2014"); + + } + + private static Double getBoolParamStat(Map parameters, + String prefix) { + int totalParams = 0; + int noOfTRUE = 0; + + for (Map.Entry en : parameters.entrySet()) { + if (en.getKey().startsWith(prefix)) { + totalParams++; + SwitchParameter p = (SwitchParameter) en + .getValue(); + if (p.getValue()) { + noOfTRUE++; + } + } + } + double per = 100 * (double) noOfTRUE / totalParams; + // System.out.println("totalParams - " + totalParams); + // System.out.println("noOfTRUE - " + noOfTRUE); + // System.out.println("Percentage - " + per); + return per; + } + + private static Integer getIntParamStat(Map parameters, + String prefix) { + int totalParams = 0; + int totalVal = 0; + + for (Map.Entry en : parameters.entrySet()) { + if (en.getKey().startsWith(prefix)) { + totalParams++; + 
IntParameter p = (IntParameter) en.getValue(); + int normalized = (100 * p.getValue()) + / (p.getMax() - p.getMax()); + // totalVal += p.getValue(); + totalVal += normalized; + } + } + return totalVal / totalParams; + } + + private static void printAll(String folderPath) throws IOException { + File folder = new File(folderPath); + File[] listOfFiles = folder.listFiles(new FilenameFilter() { + + @Override + public boolean accept(File dir, String name) { + return name.toLowerCase().endsWith(".cfg"); + } + }); + System.out.println(String.format("Parameters in configuration...")); + + FileWriter writer = new FileWriter("paramStat.dat", false); + writeHeader(writer); + for (int i = 0; i < listOfFiles.length; i++) { + printCfgValues(listOfFiles[i].getAbsolutePath(), writer); + } + } + + private static void printCfgValues(String fileName, FileWriter writer) + throws IOException { + Configuration cfg = ConfigurationUtils.readConfiguration(fileName); + if (cfg != null) { + File f = new File(fileName); + Map parameters = cfg.getParametersMap(); + writer.write("\n"); + // writer.write(String.format("\n%.20s", f.getName())); + System.out.println(String.format("%s - %d", f.getName(), parameters + .entrySet().size())); + + for (int i = 0; i < boolParams.length; i++) { + writer.write(String.format("%.2f\t\t", + getBoolParamStat(parameters, boolParams[i]))); + } + + for (int i = 0; i < intParams.length; i++) { + writer.write(String.format("%d\t\t", + getIntParamStat(parameters, intParams[i]))); + } + + writer.flush(); + } + } + + private static void writeHeader(FileWriter writer) throws IOException { + // writer.write("\t\t"); + for (int i = 0; i < boolParams.length; i++) { + writer.write(String.format("%.7s", boolParams[i])); + writer.write("\t\t"); + } + for (int i = 0; i < intParams.length; i++) { + writer.write(String.format("%.7s", intParams[i])); + writer.write("\t\t"); + } + writer.flush(); + } +} diff --git a/src/edu/mit/streamjit/impl/common/VerifyStreamGraph.java 
b/src/edu/mit/streamjit/impl/common/VerifyStreamGraph.java index 2e7f06e7..ba01bdcc 100644 --- a/src/edu/mit/streamjit/impl/common/VerifyStreamGraph.java +++ b/src/edu/mit/streamjit/impl/common/VerifyStreamGraph.java @@ -42,10 +42,14 @@ /** * {@link VerifyStreamGraph} currently verifies a stream graph for following - * correctness. 1) A filter instance should be added only once in the graph. 2) - * {@link WeightedRoundrobinSplitter} has matching numbers of branches and - * weights. 3) {@link WeightedRoundrobinJoiner} has matching numbers of weights - * array and the input branches. + * correctness. + *
    + *
  1. 1) A filter instance should be added only once in the graph. + *
  2. 2) {@link WeightedRoundrobinSplitter} has matching numbers of branches + * and weights. + *
  3. 3) {@link WeightedRoundrobinJoiner} has matching numbers of weights array + * and the input branches. + *
* * @author Sumanan sumanan@mit.edu * @since May 9, 2013 diff --git a/src/edu/mit/streamjit/impl/common/AbstractDrainer.java b/src/edu/mit/streamjit/impl/common/drainer/AbstractDrainer.java similarity index 53% rename from src/edu/mit/streamjit/impl/common/AbstractDrainer.java rename to src/edu/mit/streamjit/impl/common/drainer/AbstractDrainer.java index a92a54a9..2ba7e7ec 100644 --- a/src/edu/mit/streamjit/impl/common/AbstractDrainer.java +++ b/src/edu/mit/streamjit/impl/common/drainer/AbstractDrainer.java @@ -19,20 +19,17 @@ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ -package edu.mit.streamjit.impl.common; - -import static com.google.common.base.Preconditions.checkNotNull; +package edu.mit.streamjit.impl.common.drainer; +import java.io.File; +import java.io.FileOutputStream; import java.io.FileWriter; import java.io.IOException; +import java.io.ObjectOutputStream; import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -42,23 +39,24 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableTable; import com.google.common.collect.Sets; import edu.mit.streamjit.api.CompiledStream; import edu.mit.streamjit.api.Input; -import edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.StreamCompiler; -import edu.mit.streamjit.api.Worker; -import edu.mit.streamjit.impl.blob.Blob; import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.common.TimeLogger; +import 
edu.mit.streamjit.impl.common.drainer.BlobGraph.BlobNode; import edu.mit.streamjit.impl.concurrent.ConcurrentStreamCompiler; import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.distributed.common.SNDrainElement.DrainedData; -import edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner; +import edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData; +import edu.mit.streamjit.impl.distributed.node.StreamNode; +import edu.mit.streamjit.tuner.OnlineTuner; /** * Abstract drainer is to perform draining on a stream application. Both @@ -124,16 +122,22 @@ public abstract class AbstractDrainer { private AtomicInteger unDrainedNodes; - private ScheduledExecutorService schExecutorService; + ScheduledExecutorService schExecutorService; /** * State of the drainer. */ - private DrainerState state; + DrainerState state; + + private final TimeLogger logger; + + private final StreamJitApp app; - public AbstractDrainer() { + public AbstractDrainer(StreamJitApp app, TimeLogger logger) { state = DrainerState.NODRAINING; finalLatch = new CountDownLatch(1); + this.app = app; + this.logger = logger; } /** @@ -158,15 +162,14 @@ public final void setBlobGraph(BlobGraph blobGraph) { * Initiate the draining of the blobgraph. Three type of draining could be * carried out. *
    - *
  1. type 0 - Intermediate draining: In this case, no data from input - * buffer will be consumed and StreamJit app will not be stopped. Rather, - * StreamJit app will be just paused for reconfiguration purpose. This - * draining may be triggered by {@link OnlineTuner}.
  2. - *
  3. type 1 - Semi final draining: In this case, no data from input buffer - * will be consumed but StreamJit app will be stopped. i.e, StreamJit app - * will be stopped safely without consuming any new input. This draining may - * be triggered by {@link OnlineTuner} after opentuner finish tuning and - * send it's final configuration.
  4. + *
  5. type 0 - Intermediate draining: In this case, no new data from + * {@link Input} will be consumed and StreamJit app will not be stopped. + * Rather, StreamJit app will be just paused for reconfiguration purpose. + * This draining may be triggered by {@link OnlineTuner}.
  6. + *
  7. type 1 - Semi final draining: In this case, StreamJit app will be + * stopped safely without consuming any new data from {@link Input}. This + * draining may be triggered by {@link OnlineTuner} after opentuner finishes + * tuning and sends its final configuration.
  8. *
  9. type 2 - Final draining: At the end of input data. After this * draining StreamJit app will stop. This draining may be triggered by a * {@link Input} when it run out of input data.
  10. @@ -180,28 +183,37 @@ public final void setBlobGraph(BlobGraph blobGraph) { */ public final boolean startDraining(int type) { if (state == DrainerState.NODRAINING) { + boolean isFinal = false; switch (type) { case 0 : - this.blobGraph.clearDrainData(); this.state = DrainerState.INTERMEDIATE; - drainDataLatch = new CountDownLatch(1); - intermediateLatch = new CountDownLatch(1); - prepareDraining(false); break; case 1 : this.state = DrainerState.FINAL; - prepareDraining(false); break; case 2 : this.state = DrainerState.FINAL; - prepareDraining(true); + isFinal = true; break; default : throw new IllegalArgumentException( "Invalid draining type. type can be 0, 1, or 2."); } - if (GlobalConstants.needDrainDeadlockHandler) + this.blobGraph.clearDrainData(); + drainDataLatch = new CountDownLatch(1); + intermediateLatch = new CountDownLatch(1); + + try { + prepareDraining(isFinal); + } catch (Exception e) { + this.state = DrainerState.NODRAINING; + System.err + .println("No Drain called. Exception in prepareDraining()"); + throw e; + } + + if (Options.needDrainDeadlockHandler) this.schExecutorService = Executors .newSingleThreadScheduledExecutor(); @@ -211,8 +223,73 @@ public final boolean startDraining(int type) { } else if (state == DrainerState.FINAL) { return false; } else { - throw new RuntimeException("Drainer is in draing mode."); + throw new RuntimeException("Drainer is in draining mode."); + } + } + + public boolean drainIntermediate() { + logger.drainingStarted(); + boolean state = startDraining(0); + if (!state) { + String msg = "Final drain has already been called. 
No more intermediate draining."; + System.err.println(msg); + logger.drainingFinished(msg); + return false; + } + + System.err.println("awaitDrainedIntrmdiate"); + try { + awaitDrainedIntrmdiate(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + drainingDone(this.state == DrainerState.FINAL); + logger.drainingFinished("Intermediate"); + logger.drainDataCollectionStarted(); + try { + awaitDrainData(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + logger.drainDataCollectionFinished(""); + return true; + } + + public boolean drainFinal(Boolean isSemeFinal) { + int drainType = 2; + if (isSemeFinal) + drainType = 1; + logger.drainingStarted(); + boolean state = startDraining(drainType); + if (!state) { + return false; + } + + System.err.println("awaitDrainedIntrmdiate"); + try { + awaitDrainedIntrmdiate(); + } catch (InterruptedException e) { + e.printStackTrace(); } + drainingDone(false); + logger.drainingFinished("Intermediate"); + logger.drainDataCollectionStarted(); + try { + awaitDrainData(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + logger.drainDataCollectionFinished(""); + // TODO : Even after the final draining, we can clear some more + // intermediate data by running an Interpreter blob. + // StreamJitApp.minimizeDrainData() does this job. Uncomment the + // following lines later. + // app.drainData = app.minimizeDrainData(app.drainData); + // printDrainDataStats(app.drainData); + // dumpDrainData(app.drainData); + drainingDone(true); + stop(); + return true; } /** @@ -223,12 +300,22 @@ public final void drained(Token blobID) { blobGraph.getBlobNode(blobID).drained(); } + /** + * Awaits for {@link DrainData} from all {@link StreamNode}s, combines the + * all received DrainData and set the combined DrainData to + * {@link StreamJitApp#drainData}. 
+ * + * @throws InterruptedException + */ public final void awaitDrainData() throws InterruptedException { - drainDataLatch.await(); + if (Options.useDrainData) { + drainDataLatch.await(); + app.drainData = getDrainData(); + } } - public final void newDrainData(DrainedData drainedData) { - blobGraph.getBlobNode(drainedData.blobID).setDrainData(drainedData); + public final void newSNDrainData(SNDrainedData snDrainedData) { + blobGraph.getBlobNode(snDrainedData.blobID).setDrainData(snDrainedData); if (noOfDrainData.decrementAndGet() == 0) { assert state == DrainerState.NODRAINING; drainDataLatch.countDown(); @@ -241,18 +328,20 @@ public final void newDrainData(DrainedData drainedData) { /** * @return Aggregated DrainData after the draining. */ - public final DrainData getDrainData() { + private final DrainData getDrainData() { + if (!Options.useDrainData) + return null; DrainData drainData = null; Map> boundaryInputData = new HashMap<>(); Map> boundaryOutputData = new HashMap<>(); for (BlobNode node : blobGraph.blobNodes.values()) { - boundaryInputData.putAll(node.drainData.inputData); - boundaryOutputData.putAll(node.drainData.outputData); + boundaryInputData.putAll(node.snDrainData.inputData); + boundaryOutputData.putAll(node.snDrainData.outputData); if (drainData == null) - drainData = node.drainData.drainData; + drainData = node.snDrainData.drainData; else - drainData = drainData.merge(node.drainData.drainData); + drainData = drainData.merge(node.snDrainData.drainData); } ImmutableMap.Builder> dataBuilder = ImmutableMap @@ -260,11 +349,9 @@ public final DrainData getDrainData() { for (Token t : Sets.union(boundaryInputData.keySet(), boundaryOutputData.keySet())) { ImmutableList in = boundaryInputData.get(t) != null - ? boundaryInputData.get(t) - : ImmutableList.of(); + ? boundaryInputData.get(t) : ImmutableList.of(); ImmutableList out = boundaryOutputData.get(t) != null - ? boundaryOutputData.get(t) - : ImmutableList.of(); + ? 
boundaryOutputData.get(t) : ImmutableList.of(); dataBuilder.put(t, ImmutableList.builder().addAll(in).addAll(out) .build()); } @@ -272,26 +359,60 @@ public final DrainData getDrainData() { ImmutableTable state = ImmutableTable.of(); DrainData draindata1 = new DrainData(dataBuilder.build(), state); drainData = drainData.merge(draindata1); + updateDrainDataStatistics(drainData); + // printDrainDataStats(drainData); + // dumpDrainData(drainData); + return drainData; + } + private void updateDrainDataStatistics(DrainData drainData) { if (drainDataStatistics == null) { drainDataStatistics = new HashMap<>(); for (Token t : drainData.getData().keySet()) { drainDataStatistics.put(t, new ArrayList()); } } - for (Token t : drainData.getData().keySet()) { - // System.out.print("Aggregated data: " + t.toString() + " - " - // + drainData.getData().get(t).size() + " - "); - // for (Object o : drainData.getData().get(t)) { - // System.out.print(o.toString() + ", "); - // } - // System.out.print('\n'); - - drainDataStatistics.get(t).add(drainData.getData().get(t).size()); + int size = drainData.getData().get(t).size(); + drainDataStatistics.get(t).add(size); + } + } - return drainData; + private void printDrainDataStats(DrainData drainData) { + try { + String fileName = String.format("%s%sdraindatasize.txt", app.name, + File.separator); + FileWriter writer = new FileWriter(fileName, true); + writer.write("-----------------------------------------------------------\n"); + System.out.println("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"); + + for (Token t : drainData.getData().keySet()) { + int size = drainData.getData().get(t).size(); + if (size != 0) { + String msg = String.format("%s - %d\n", t.toString(), size); + System.out.print(msg); + writer.write(msg); + } + } + writer.flush(); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + private void dumpDrainData(DrainData drainData) { + try { + String fileName = String.format("%s%sDrainData", app.name, 
+ File.separator); + FileOutputStream fout = new FileOutputStream(fileName); + ObjectOutputStream oos = new ObjectOutputStream(fout); + oos.writeObject(drainData); + oos.close(); + } catch (Exception ex) { + ex.printStackTrace(); + } } /** @@ -306,7 +427,9 @@ public void dumpDraindataStatistics() throws IOException { return; } - FileWriter writer = new FileWriter("DrainDataStatistics.txt"); + String fileName = String.format("%s%sDrainDataStatistics.txt", + app.name, File.separator); + FileWriter writer = new FileWriter(fileName); for (Token t : drainDataStatistics.keySet()) { writer.write(t.toString()); writer.write(" - "); @@ -337,9 +460,9 @@ public final void awaitDrained() throws InterruptedException { public final void awaitDrainedIntrmdiate() throws InterruptedException { intermediateLatch.await(); - // Just for debugging purpose. To make effect of this code snippet - // comment the above, intermediateLatch.await(), line. Otherwise no - // effect. + // The following while loop is added just for debugging purpose. To + // activate the following while loop code snippet, comment the above + // [intermediateLatch.await()] line. while (intermediateLatch.getCount() != 0) { Thread.sleep(3000); System.out.println("****************************************"); @@ -394,13 +517,8 @@ public final void awaitDrained(long timeout, TimeUnit unit) /** * Once a {@link BlobNode}'s all preconditions are satisfied for draining, * blob node will call this function drain the blob. - * - * @param blobID - * @param isFinal - * : whether the draining is the final draining or intermediate - * draining. Set to true for semi final case. 
*/ - protected abstract void drain(Token blobID, boolean isFinal); + protected abstract void drain(Token blobID, DrainType drainType); /** * {@link AbstractDrainer} will call this function after the corresponding @@ -443,350 +561,25 @@ public final void awaitDrained(long timeout, TimeUnit unit) * * @param blobNode */ - private void drainingDone(BlobNode blobNode) { + void drainingDone(BlobNode blobNode) { assert state != DrainerState.NODRAINING : "Illegal call. Drainer is not in draining mode."; drainingDone(blobNode.blobID, state == DrainerState.FINAL); if (unDrainedNodes.decrementAndGet() == 0) { - drainingDone(state == DrainerState.FINAL); + intermediateLatch.countDown(); if (state == DrainerState.FINAL) { - finalLatch.countDown(); } else { state = DrainerState.NODRAINING; - intermediateLatch.countDown(); } - if (GlobalConstants.needDrainDeadlockHandler) + if (Options.needDrainDeadlockHandler) schExecutorService.shutdownNow(); } } - /** - * BlobGraph builds predecessor successor relationship for set of - * partitioned workers, and verifies for cyclic dependencies among the - * partitions. Blob graph doesn't keep blobs. Instead it keeps - * {@link BlobNode} that represents blobs.

    All BlobNodes in the graph - * can be retrieved and used in coupled with {@link AbstractDrainer} to - * successfully perform draining process. - * - * @author Sumanan sumanan@mit.edu - * @since Jul 30, 2013 - */ - public static class BlobGraph { - - /** - * All nodes in the graph. - */ - private final ImmutableMap blobNodes; - - /** - * The blob which has the overall stream input. - */ - private final BlobNode sourceBlobNode; - - public BlobGraph(List>> partitionWorkers) { - checkNotNull(partitionWorkers); - Set blobSet = new HashSet<>(); - for (Set> workers : partitionWorkers) { - blobSet.add(new DummyBlob(workers)); - } - - ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); - for (DummyBlob b : blobSet) { - builder.put(b.id, new BlobNode(b.id)); - } - - this.blobNodes = builder.build(); - - for (DummyBlob cur : blobSet) { - for (DummyBlob other : blobSet) { - if (cur == other) - continue; - if (Sets.intersection(cur.outputs, other.inputs).size() != 0) { - BlobNode curNode = blobNodes.get(cur.id); - BlobNode otherNode = blobNodes.get(other.id); - - curNode.addSuccessor(otherNode); - otherNode.addPredecessor(curNode); - } - } - } - - checkCycles(blobNodes.values()); - - BlobNode sourceBlob = null; - for (BlobNode bn : blobNodes.values()) { - if (bn.getDependencyCount() == 0) { - assert sourceBlob == null : "Multiple independent blobs found."; - sourceBlob = bn; - } - } - - checkNotNull(sourceBlob); - this.sourceBlobNode = sourceBlob; - } - - /** - * @return BlobIds of all blobnodes in the blobgraph. - */ - public ImmutableSet getBlobIds() { - return blobNodes.keySet(); - } - - public BlobNode getBlobNode(Token blobID) { - return blobNodes.get(blobID); - } - - /** - * A Drainer can be set to the {@link BlobGraph} to perform draining. 
- * - * @param drainer - */ - public void setDrainer(AbstractDrainer drainer) { - for (BlobNode bn : blobNodes.values()) { - bn.setDrainer(drainer); - } - } - - public void clearDrainData() { - for (BlobNode node : blobNodes.values()) { - node.drainData = null; - } - } - - /** - * @return the sourceBlobNode - */ - private BlobNode getSourceBlobNode() { - return sourceBlobNode; - } - - /** - * Does a depth first traversal to detect cycles in the graph. - * - * @param blobNodes - */ - private void checkCycles(Collection blobNodes) { - Map colorMap = new HashMap<>(); - for (BlobNode b : blobNodes) { - colorMap.put(b, Color.WHITE); - } - for (BlobNode b : blobNodes) { - if (colorMap.get(b) == Color.WHITE) - if (DFS(b, colorMap)) - throw new StreamCompilationFailedException( - "Cycles found among blobs"); - } - } - - /** - * A cycle exits in a directed graph if a back edge is detected during a - * DFS traversal. A back edge exists in a directed graph if the - * currently explored vertex has an adjacent vertex that was already - * colored gray - * - * @param vertex - * @param colorMap - * @return true if cycle found, false - * otherwise. - */ - private boolean DFS(BlobNode vertex, Map colorMap) { - colorMap.put(vertex, Color.GRAY); - for (BlobNode adj : vertex.getSuccessors()) { - if (colorMap.get(adj) == Color.GRAY) - return true; - if (colorMap.get(adj) == Color.WHITE) - if (DFS(adj, colorMap)) - return true; - } - colorMap.put(vertex, Color.BLACK); - return false; - } - - /** - * Just used to build the input and output tokens of a partitioned blob - * workers. imitate a {@link Blob}. - */ - private final class DummyBlob { - private final ImmutableSet inputs; - private final ImmutableSet outputs; - private final Token id; - - private DummyBlob(Set> workers) { - ImmutableSet.Builder inputBuilder = new ImmutableSet.Builder<>(); - ImmutableSet.Builder outputBuilder = new ImmutableSet.Builder<>(); - for (IOInfo info : IOInfo.externalEdges(workers)) { - (info.isInput() ? 
inputBuilder : outputBuilder).add(info - .token()); - } - - inputs = inputBuilder.build(); - outputs = outputBuilder.build(); - id = Collections.min(inputs); - } - } - } - - /** - * BlobNode represents the vertex in the blob graph ({@link BlobGraph}). It - * represents a {@link Blob} and carry the draining process of that blob. - * - * @author Sumanan - */ - private static final class BlobNode { - - /** - * Intermediate drain data. - */ - private DrainedData drainData; - - private AbstractDrainer drainer; - /** - * The blob that wrapped by this blob node. - */ - private final Token blobID; - /** - * Predecessor blob nodes of this blob node. - */ - private List predecessors; - /** - * Successor blob nodes of this blob node. - */ - private List successors; - /** - * The number of undrained predecessors of this blobs. Everytime, when a - * predecessor finished draining, dependencyCount will be decremented - * and once it reached to 0 this blob will be called for draining. - */ - private AtomicInteger dependencyCount; - - // TODO: add comments - private AtomicInteger drainState; - - private BlobNode(Token blob) { - this.blobID = blob; - predecessors = new ArrayList<>(); - successors = new ArrayList<>(); - dependencyCount = new AtomicInteger(0); - drainState = new AtomicInteger(0); - } - - /** - * Should be called when the draining of the current blob has been - * finished. This function stops all threads belong to the blob and - * inform its successors as well. - */ - private void drained() { - if (drainState.compareAndSet(1, 3)) { - for (BlobNode suc : this.successors) { - suc.predecessorDrained(this); - } - drainer.drainingDone(this); - } else if (drainState.compareAndSet(2, 3)) { - drainer.drainingDone(this); - } - } - - /** - * Drain the blob mapped by this blob node. 
- */ - private void drain() { - checkNotNull(drainer); - if (!drainState.compareAndSet(0, 1)) { - throw new IllegalStateException( - "Drain of this blobNode has already been called"); - } - drainer.drain(blobID, drainer.state == DrainerState.FINAL); - - // TODO: Verify the waiting time is reasonable. - if (GlobalConstants.needDrainDeadlockHandler) - drainer.schExecutorService.schedule(deadLockHandler(), 6000, - TimeUnit.MILLISECONDS); - } - - private void setDrainData(DrainedData drainedData) { - if (this.drainData == null) { - this.drainData = drainedData; - drainState.set(4); - } else - throw new AssertionError( - "Multiple drain data has been received."); - } - - private ImmutableList getSuccessors() { - return ImmutableList.copyOf(successors); - } - - private void addPredecessor(BlobNode pred) { - assert !predecessors.contains(pred) : String.format( - "The BlobNode %s has already been set as a predecessors", - pred); - predecessors.add(pred); - dependencyCount.set(dependencyCount.get() + 1); - } - - private void addSuccessor(BlobNode succ) { - assert !successors.contains(succ) : String - .format("The BlobNode %s has already been set as a successor", - succ); - successors.add(succ); - } - - private void predecessorDrained(BlobNode pred) { - if (!predecessors.contains(pred)) - throw new IllegalArgumentException("Illegal Predecessor"); - - assert dependencyCount.get() > 0 : String - .format("Graph mismatch : My predecessors count is %d. But more than %d of BlobNodes claim me as their successor", - predecessors.size(), predecessors.size()); - - if (dependencyCount.decrementAndGet() == 0) { - drain(); - } - } - - /** - * @return The number of undrained predecessors. 
- */ - private int getDependencyCount() { - return dependencyCount.get(); - } - - private void setDrainer(AbstractDrainer drainer) { - checkNotNull(drainer); - this.drainer = drainer; - } - - private Runnable deadLockHandler() { - Runnable r = new Runnable() { - - @Override - public void run() { - if (drainState.compareAndSet(1, 2)) { - for (BlobNode suc : successors) { - suc.predecessorDrained(BlobNode.this); - } - System.out - .println("deadLockHandler: " - + blobID - + " - Deadlock during draining has been handled"); - } - } - }; - return r; - } - } - - /** - * Color enumerator used by DFS algorithm to find cycles in the blob graph. - */ - private enum Color { - WHITE, GRAY, BLACK - } - /** * Reflects {@link AbstractDrainer}'s state. */ - private enum DrainerState { + enum DrainerState { NODRAINING, /** * Draining in middle of the stream graph's execution. This * type of draining will be triggered by the open tuner for diff --git a/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java b/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java new file mode 100644 index 00000000..7bbb38b7 --- /dev/null +++ b/src/edu/mit/streamjit/impl/common/drainer/BlobGraph.java @@ -0,0 +1,419 @@ +package edu.mit.streamjit.impl.common.drainer; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +import edu.mit.streamjit.api.StreamCompilationFailedException; +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob; +import 
edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.common.IOInfo; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer.DrainerState; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData; + +/** + * [14 Feb, 2015] This class was an inner class of {@link AbstractDrainer}. I + * have re factored {@link AbstractDrainer} and moved this class a new file. + * + * BlobGraph builds predecessor successor relationship for set of partitioned + * workers, and verifies for cyclic dependencies among the partitions. Blob + * graph doesn't keep blobs. Instead it keeps {@link BlobNode} that represents + * blobs.

    All BlobNodes in the graph can be retrieved and used in coupled + * with {@link AbstractDrainer} to successfully perform draining process. + * + * @author Sumanan sumanan@mit.edu + * @since Jul 30, 2013 + */ +public class BlobGraph { + + /** + * All nodes in the graph. + */ + final ImmutableMap blobNodes; + + /** + * The blob which has the overall stream input. + */ + private final BlobNode sourceBlobNode; + + public BlobGraph(List>> partitionWorkers) { + checkNotNull(partitionWorkers); + Set blobSet = new HashSet<>(); + for (Set> workers : partitionWorkers) { + blobSet.add(new DummyBlob(workers)); + } + + ImmutableMap.Builder builder = new ImmutableMap.Builder<>(); + for (DummyBlob b : blobSet) { + builder.put(b.id, new BlobNode(b.id, b.inputs, b.outputs)); + } + + this.blobNodes = builder.build(); + + for (DummyBlob cur : blobSet) { + for (DummyBlob other : blobSet) { + if (cur == other) + continue; + if (Sets.intersection(cur.outputs, other.inputs).size() != 0) { + BlobNode curNode = blobNodes.get(cur.id); + BlobNode otherNode = blobNodes.get(other.id); + + curNode.addSuccessor(otherNode); + otherNode.addPredecessor(curNode); + } + } + } + + checkCycles(blobNodes.values()); + + BlobNode sourceBlob = null; + for (BlobNode bn : blobNodes.values()) { + if (bn.getDependencyCount() == 0) { + assert sourceBlob == null : "Multiple independent blobs found."; + sourceBlob = bn; + } + } + + checkNotNull(sourceBlob); + this.sourceBlobNode = sourceBlob; + } + + /** + * @return BlobIds of all blobnodes in the blobgraph. + */ + public ImmutableSet getBlobIds() { + return blobNodes.keySet(); + } + + public BlobNode getBlobNode(Token blobID) { + return blobNodes.get(blobID); + } + + /** + * TODO: We may need to make the class {@link BlobNode} public and move + * these functions to {@link BlobNode}. + *

    + * Returns output edges of a blob. This method is added on [2014-03-01]. + * + * @param blobID + * @return + */ + public ImmutableSet getOutputs(Token blobID) { + return blobNodes.get(blobID).outputs; + } + + /** + * TODO: We may need to make the class {@link BlobNode} public and move + * these functions to {@link BlobNode}. + *

    + * Returns input edges of a blob. This method is added on [2014-03-01]. + * + * @param blobID + * @return + */ + public ImmutableSet getInputs(Token blobID) { + return blobNodes.get(blobID).inputs; + } + + /** + * A Drainer can be set to the {@link BlobGraph} to perform draining. + * + * @param drainer + */ + public void setDrainer(AbstractDrainer drainer) { + for (BlobNode bn : blobNodes.values()) { + bn.setDrainer(drainer); + } + } + + public void clearDrainData() { + for (BlobNode node : blobNodes.values()) { + node.snDrainData = null; + } + } + + /** + * @return the sourceBlobNode + */ + BlobNode getSourceBlobNode() { + return sourceBlobNode; + } + + /** + * Does a depth first traversal to detect cycles in the graph. + * + * @param blobNodes + */ + private void checkCycles(Collection blobNodes) { + Map colorMap = new HashMap<>(); + for (BlobNode b : blobNodes) { + colorMap.put(b, Color.WHITE); + } + for (BlobNode b : blobNodes) { + if (colorMap.get(b) == Color.WHITE) + if (DFS(b, colorMap)) + throw new StreamCompilationFailedException( + "Cycles found among blobs"); + } + } + + /** + * A cycle exits in a directed graph if a back edge is detected during a DFS + * traversal. A back edge exists in a directed graph if the currently + * explored vertex has an adjacent vertex that was already colored gray + * + * @param vertex + * @param colorMap + * @return true if cycle found, false otherwise. + */ + private boolean DFS(BlobNode vertex, Map colorMap) { + colorMap.put(vertex, Color.GRAY); + for (BlobNode adj : vertex.getSuccessors()) { + if (colorMap.get(adj) == Color.GRAY) + return true; + if (colorMap.get(adj) == Color.WHITE) + if (DFS(adj, colorMap)) + return true; + } + colorMap.put(vertex, Color.BLACK); + return false; + } + + /** + * Just used to build the input and output tokens of a partitioned blob + * workers. imitate a {@link Blob}. 
+ */ + private final class DummyBlob { + private final ImmutableSet inputs; + private final ImmutableSet outputs; + private final Token id; + + private DummyBlob(Set> workers) { + ImmutableSet.Builder inputBuilder = new ImmutableSet.Builder<>(); + ImmutableSet.Builder outputBuilder = new ImmutableSet.Builder<>(); + for (IOInfo info : IOInfo.externalEdges(workers)) { + (info.isInput() ? inputBuilder : outputBuilder).add(info + .token()); + } + + inputs = inputBuilder.build(); + outputs = outputBuilder.build(); + id = Collections.min(inputs); + } + } + + /** + * [14 Feb, 2015] This class was an inner class of {@link AbstractDrainer}. + * I have re factored {@link AbstractDrainer} and moved this class to here. + * {@link AbstractDrainer} directly accessed lots of fields and methods of + * this class when this was an inner class of it. So those fields and + * methods of this class have been made as package private when re + * factoring.

    + *

    + * [14 Feb, 2015] TODO: {@link AbstractDrainer#schExecutorService} and + * {@link AbstractDrainer#state} have been made package private during the + * re factoring. We can make those fields private by moving + * {@link BlobNode#drain()} and {@link BlobNode#drained()} to + * {@link AbstractDrainer}. + *

    + * + * BlobNode represents the vertex in the blob graph ({@link BlobGraph} ). It + * represents a {@link Blob} and carry the draining process of that blob. + * + * @author Sumanan + */ + static final class BlobNode { + + /** + * Intermediate drain data. + */ + SNDrainedData snDrainData; + + private AbstractDrainer drainer; + /** + * The blob that wrapped by this blob node. + */ + final Token blobID; + /** + * Predecessor blob nodes of this blob node. + */ + private List predecessors; + /** + * Successor blob nodes of this blob node. + */ + private List successors; + /** + * The number of undrained predecessors of this blobs. Everytime, when a + * predecessor finished draining, dependencyCount will be decremented + * and once it reached to 0 this blob will be called for draining. + */ + private AtomicInteger dependencyCount; + + // TODO: add comments + AtomicInteger drainState; + + /** + * All input channels of this blob. We need this information to globally + * determine buffer sizes to avoid deadlocks. This is added on + * [2014-03-01], when implementing global buffer size adjustment. + */ + private final ImmutableSet inputs; + + /** + * All output channels of this blob. We need this information to + * globally determine buffer sizes to avoid deadlocks. This is added on + * [2014-03-01], when implementing global buffer size adjustment. + */ + private final ImmutableSet outputs; + + private BlobNode(Token blob, ImmutableSet inputs, + ImmutableSet outputs) { + this.blobID = blob; + predecessors = new ArrayList<>(); + successors = new ArrayList<>(); + dependencyCount = new AtomicInteger(0); + drainState = new AtomicInteger(0); + this.inputs = inputs; + this.outputs = outputs; + } + + /** + * Should be called when the draining of the current blob has been + * finished. This function stops all threads belong to the blob and + * inform its successors as well. 
+ */ + void drained() { + if (drainState.compareAndSet(1, 3)) { + for (BlobNode suc : this.successors) { + suc.predecessorDrained(this); + } + drainer.drainingDone(this); + } else if (drainState.compareAndSet(2, 3)) { + drainer.drainingDone(this); + } + } + + /** + * Drain the blob mapped by this blob node. + */ + void drain() { + checkNotNull(drainer); + if (!drainState.compareAndSet(0, 1)) { + throw new IllegalStateException( + "Drain of this blobNode has already been called"); + } + + DrainType drainType; + if (Options.useDrainData) + if (drainer.state == DrainerState.FINAL) + drainType = DrainType.FINAL; + else + drainType = DrainType.INTERMEDIATE; + else + drainType = DrainType.DISCARD; + + drainer.drain(blobID, drainType); + + // TODO: Verify the waiting time is reasonable. + if (Options.needDrainDeadlockHandler) + drainer.schExecutorService.schedule(deadLockHandler(), 6000, + TimeUnit.MILLISECONDS); + } + + void setDrainData(SNDrainedData drainedData) { + if (this.snDrainData == null) { + this.snDrainData = drainedData; + drainState.set(4); + } else + throw new AssertionError( + "Multiple drain data has been received."); + } + + private ImmutableList getSuccessors() { + return ImmutableList.copyOf(successors); + } + + private void addPredecessor(BlobNode pred) { + assert !predecessors.contains(pred) : String.format( + "The BlobNode %s has already been set as a predecessors", + pred); + predecessors.add(pred); + dependencyCount.set(dependencyCount.get() + 1); + } + + private void addSuccessor(BlobNode succ) { + assert !successors.contains(succ) : String + .format("The BlobNode %s has already been set as a successor", + succ); + successors.add(succ); + } + + private void predecessorDrained(BlobNode pred) { + if (!predecessors.contains(pred)) + throw new IllegalArgumentException("Illegal Predecessor"); + + assert dependencyCount.get() > 0 : String + .format("Graph mismatch : My predecessors count is %d. 
But more than %d of BlobNodes claim me as their successor", + predecessors.size(), predecessors.size()); + + if (dependencyCount.decrementAndGet() == 0) { + drain(); + } + } + + /** + * @return The number of undrained predecessors. + */ + private int getDependencyCount() { + return dependencyCount.get(); + } + + private void setDrainer(AbstractDrainer drainer) { + checkNotNull(drainer); + this.drainer = drainer; + } + + private Runnable deadLockHandler() { + Runnable r = new Runnable() { + + @Override + public void run() { + if (drainState.compareAndSet(1, 2)) { + for (BlobNode suc : successors) { + suc.predecessorDrained(BlobNode.this); + } + System.out + .println("deadLockHandler: " + + blobID + + " - Deadlock during draining has been handled"); + } + } + }; + return r; + } + } + + /** + * Color enumerator used by DFS algorithm to find cycles in the blob graph. + */ + private enum Color { + WHITE, GRAY, BLACK + } +} diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java new file mode 100644 index 00000000..b9f1fc50 --- /dev/null +++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentBlobFactory.java @@ -0,0 +1,95 @@ +package edu.mit.streamjit.impl.concurrent; + +import java.util.Set; + +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.BlobFactory; +import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Configuration.Parameter; +import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory; +import edu.mit.streamjit.impl.distributed.ConfigurationManager; +import edu.mit.streamjit.impl.distributed.PartitionManager; +import edu.mit.streamjit.impl.distributed.WorkerMachine; +import edu.mit.streamjit.impl.distributed.common.Options; + +public class ConcurrentBlobFactory implements BlobFactory { + + private int noOfBlobs; + + private 
final PartitionManager partitionManager; + + public ConcurrentBlobFactory(PartitionManager partitionManager, + int noOfBlobs) { + this.partitionManager = partitionManager; + this.noOfBlobs = noOfBlobs; + } + + /** + * If {@link ConfigurationManager} is not passed as a constructor argument + * then {@link WorkerMachine} will be used as default one. + * + * @param noOfMachines + */ + public ConcurrentBlobFactory(int noOfBlobs) { + this(new WorkerMachine(null), noOfBlobs); + } + + @Override + public Blob makeBlob(Set> workers, Configuration config, + int maxNumCores, DrainData initialState) { + return new Compiler2BlobFactory().makeBlob(workers, config, + maxNumCores, initialState); + } + + @Override + public Configuration getDefaultConfiguration(Set> workers) { + Configuration concurrentCfg; + if (this.noOfBlobs > 1) + concurrentCfg = partitionManager.getDefaultConfiguration(workers, + noOfBlobs); + else + concurrentCfg = Configuration.builder().build(); + + if (!Options.useCompilerBlob) + return concurrentCfg; + + Configuration.Builder builder = Configuration.builder(concurrentCfg); + BlobFactory compilerBf = new Compiler2BlobFactory(); + Configuration compilercfg = compilerBf.getDefaultConfiguration(workers); + for (Parameter p : compilercfg.getParametersMap().values()) + builder.addParameter(p); + return builder.build(); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime + * result + + ((partitionManager == null) ? 
0 : partitionManager.hashCode()); + result = prime * result + noOfBlobs; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + ConcurrentBlobFactory other = (ConcurrentBlobFactory) obj; + if (partitionManager == null) { + if (other.partitionManager != null) + return false; + } else if (!partitionManager.equals(other.partitionManager)) + return false; + if (noOfBlobs != other.noOfBlobs) + return false; + return true; + } +} diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java index ebe5357d..165e06f6 100644 --- a/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java +++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentDrainer.java @@ -21,16 +21,21 @@ */ package edu.mit.streamjit.impl.concurrent; +import static com.google.common.base.Preconditions.checkNotNull; + import java.util.Map; import java.util.Set; -import static com.google.common.base.Preconditions.*; import com.google.common.collect.ImmutableMap; import edu.mit.streamjit.impl.blob.Blob; import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.common.AbstractDrainer; import edu.mit.streamjit.impl.common.BlobThread; +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.common.drainer.BlobGraph; +import edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; import edu.mit.streamjit.impl.distributed.common.Utils; /** @@ -52,8 +57,9 @@ public final class ConcurrentDrainer extends AbstractDrainer { */ ImmutableMap> threadMap; - public ConcurrentDrainer(BlobGraph blobGraph, - Map> threadMap) { + public ConcurrentDrainer(StreamJitApp app, TimeLogger logger, + BlobGraph blobGraph, Map> threadMap) { + 
super(app, logger); setBlobGraph(blobGraph); blobMap = buildBlobMap(threadMap.keySet()); this.threadMap = ImmutableMap.copyOf(threadMap); @@ -65,7 +71,7 @@ protected void drainingDone(boolean isFinal) { } @Override - protected void drain(Token blobID, boolean isFinal) { + protected void drain(Token blobID, DrainType drainType) { Blob blob = blobMap.get(blobID); checkNotNull(blob); @@ -116,6 +122,5 @@ public void run() { @Override protected void prepareDraining(boolean isFinal) { - // TODO Auto-generated method stub } } diff --git a/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java b/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java index 4f7b0799..299b6f53 100644 --- a/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java +++ b/src/edu/mit/streamjit/impl/concurrent/ConcurrentStreamCompiler.java @@ -44,8 +44,6 @@ import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.blob.Buffer; import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; -import edu.mit.streamjit.impl.common.AbstractDrainer; -import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph; import edu.mit.streamjit.impl.common.BlobThread; import edu.mit.streamjit.impl.common.Configuration; import edu.mit.streamjit.impl.common.Configuration.IntParameter; @@ -56,6 +54,9 @@ import edu.mit.streamjit.impl.common.OutputBufferFactory; import edu.mit.streamjit.impl.common.Portals; import edu.mit.streamjit.impl.common.VerifyStreamGraph; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.common.drainer.BlobGraph; +import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; import edu.mit.streamjit.impl.interp.ChannelFactory; import edu.mit.streamjit.impl.interp.Interpreter; import edu.mit.streamjit.partitioner.HorizontalPartitioner; @@ -63,7 +64,14 @@ /** * A stream compiler that partitions a streamgraph into multiple blobs and - * execute it on multiple threads. 
+ * execute them on a single node. This {@link StreamCompiler} can be used for + * following purposes + *
      + *
    1. Single blob online tuning. + *
    2. Multiple blobs on a single node. This will simulate + * {@link DistributedStreamCompiler} on a single node to find out deadlocks and + * other issues. + *
    * * @author Sumanan sumanan@mit.edu * @since Apr 8, 2013 @@ -72,9 +80,8 @@ public class ConcurrentStreamCompiler implements StreamCompiler { int noOfBlobs; /** - * @param Patrions - * a stream graph up to noOfBlobs many blobs and executes each - * blob on each thread. + * @param noOfBlobs + * Maximum number of blobs that can be created. */ public ConcurrentStreamCompiler(int noOfBlobs) { if (noOfBlobs < 1) @@ -83,15 +90,20 @@ public ConcurrentStreamCompiler(int noOfBlobs) { this.noOfBlobs = noOfBlobs; } - public ConcurrentStreamCompiler(Configuration cfg) { + public ConcurrentStreamCompiler() { + this(1); + } - IntParameter threadCount = cfg.getParameter("threadCount", - IntParameter.class); - this.noOfBlobs = threadCount.getValue(); - if (noOfBlobs < 1) - throw new IllegalArgumentException( - "noOfBlobs should be 1 or greater"); - this.noOfBlobs = noOfBlobs; + public ConcurrentStreamCompiler(Configuration cfg) { + IntParameter nBlobs = cfg.getParameter("noOfBlobs", IntParameter.class); + if (nBlobs == null) + this.noOfBlobs = 1; + else { + this.noOfBlobs = nBlobs.getValue(); + if (noOfBlobs < 1) + throw new IllegalArgumentException( + "noOfBlobs should be 1 or greater"); + } } @Override diff --git a/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java b/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java index 022631b2..4074cdd6 100644 --- a/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java +++ b/src/edu/mit/streamjit/impl/distributed/ConfigurationManager.java @@ -21,372 +21,65 @@ */ package edu.mit.streamjit.impl.distributed; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Deque; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.Map.Entry; -import edu.mit.streamjit.api.Filter; -import edu.mit.streamjit.api.Joiner; -import edu.mit.streamjit.api.Splitter; +import 
edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.Worker; -import edu.mit.streamjit.impl.blob.BlobFactory; -import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.Workers; -import edu.mit.streamjit.impl.common.Configuration.PartitionParameter; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.distributed.common.Utils; -import edu.mit.streamjit.impl.distributed.node.StreamNode; -import edu.mit.streamjit.impl.interp.Interpreter; -import edu.mit.streamjit.partitioner.AbstractPartitioner; +import edu.mit.streamjit.impl.common.drainer.BlobGraph; +import edu.mit.streamjit.tuner.OnlineTuner; -/** - * ConfigurationManager deals with {@link Configuration}. Mainly, It does - * following two tasks. - *
      - *
    1. Generates configuration for with appropriate tuning parameters for - * tuning. - *
    2. Dispatch the configuration given by the open tuner and make blobs - * accordingly. - *
    - * - * One can implement this interface to try different search space designs as - * they want. - * - * @author Sumanan sumanan@mit.edu - * @since Jan 16, 2014 - * - */ -public interface ConfigurationManager { +public class ConfigurationManager { - /** - * Generates default configuration with all tuning parameters for tuning. - * - * @param streamGraph - * @param source - * @param sink - * @param noOfMachines - * @return - */ - public Configuration getDefaultConfiguration(Set> workers, - int noOfMachines); + private final StreamJitApp app; + + private final PartitionManager partitionManager; + + public ConfigurationManager(StreamJitApp app, + PartitionManager partitionManager) { + this.app = app; + this.partitionManager = partitionManager; + } /** - * When opentuner gives a new configuration, this method may be called to - * interpret the configuration and execute the steramjit app with the new + * This method may be called to by the {@link OnlineTuner} to interpret a + * new configuration and execute the steramjit app with the new * configuration. + *

    + * Builds partitionsMachineMap and {@link BlobGraph} from the new + * Configuration, and verifies for any cycles among blobs. If it is a valid + * configuration, (i.e., no cycles among the blobs), then {@link #app} + * object's member variables {@link StreamJitApp#blobConfiguration}, + * {@link StreamJitApp#blobGraph} and + * {@link StreamJitApp#partitionsMachineMap} will be assigned according to + * reflect the new configuration, no changes otherwise. * * @param config - * configuration from opentuner. + * configuration from {@link OnlineTuner}. * @return true iff valid configuration is passed. */ - public boolean newConfiguration(Configuration config); - - /** - * Generates static information of the app that is needed by steramnodes. - * This configuration will be sent to streamnodes when setting up a new app - * for execution (Only once). - * - * @return static information of the app that is needed by steramnodes. - */ - public Configuration getStaticConfiguration(); - - /** - * For every reconfiguration, this method may be called by the appropriate - * class to get new configuration information that can be sent to all - * participating {@link StreamNode}s. - * - * @return new partition information - */ - public Configuration getDynamicConfiguration(); - - /** - * Implements the functions those can be called by runtimer to send - * configuration information to streamnodes. 
- * - * @author Sumanan sumanan@mit.edu - * @since Jan 17, 2014 - */ - public static abstract class AbstractConfigurationManager - implements - ConfigurationManager { - - protected final StreamJitApp app; - - AbstractConfigurationManager(StreamJitApp app) { - this.app = app; + public boolean newConfiguration(Configuration config) { + // for (Parameter p : config.getParametersMap().values()) { + // if (p instanceof IntParameter) { + // IntParameter ip = (IntParameter) p; + // System.out.println(ip.getName() + " - " + ip.getValue()); + // } else if (p instanceof SwitchParameter) { + // SwitchParameter sp = (SwitchParameter) p; + // System.out.println(sp.getName() + " - " + sp.getValue()); + // } else + // System.out.println(p.getName() + " - Unknown type"); + // } + + Map>>> partitionsMachineMap = partitionManager + .partitionMap(config); + try { + app.verifyConfiguration(partitionsMachineMap); + } catch (StreamCompilationFailedException ex) { + return false; } - - @Override - public Configuration getStaticConfiguration() { - Configuration.Builder builder = Configuration.builder(); - builder.putExtraData(GlobalConstants.JARFILE_PATH, app.jarFilePath) - .putExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME, - app.topLevelClass); - return builder.build(); - } - - @Override - public Configuration getDynamicConfiguration() { - Configuration.Builder builder = Configuration.builder(); - - Map coresPerMachine = new HashMap<>(); - for (Entry>>> machine : app.partitionsMachineMap - .entrySet()) { - coresPerMachine - .put(machine.getKey(), machine.getValue().size()); - } - - PartitionParameter.Builder partParam = PartitionParameter.builder( - GlobalConstants.PARTITION, coresPerMachine); - - BlobFactory factory = new Interpreter.InterpreterBlobFactory(); - partParam.addBlobFactory(factory); - - app.blobtoMachineMap = new HashMap<>(); - - for (Integer machineID : app.partitionsMachineMap.keySet()) { - List>> blobList = app.partitionsMachineMap - .get(machineID); - for (Set> 
blobWorkers : blobList) { - // TODO: One core per blob. Need to change this. - partParam.addBlob(machineID, 1, factory, blobWorkers); - - // TODO: Temp fix to build. - Token t = Utils.getblobID(blobWorkers); - app.blobtoMachineMap.put(t, machineID); - } - } - - builder.addParameter(partParam.build()); - if (app.blobConfiguration != null) - builder.addSubconfiguration("blobConfigs", - app.blobConfiguration); - return builder.build(); - } - - /** - * Copied form {@link AbstractPartitioner} class. But modified to - * support nested splitjoiners.

    Returns all {@link Worker}s in a - * splitjoin. - * - * @param splitter - * @return Returns all {@link Filter}s in a splitjoin. - */ - protected void getAllChildWorkers(Splitter splitter, - Set> childWorkers) { - childWorkers.add(splitter); - Joiner joiner = getJoiner(splitter); - Worker cur; - for (Worker childWorker : Workers.getSuccessors(splitter)) { - cur = childWorker; - while (cur != joiner) { - if (cur instanceof Filter) - childWorkers.add(cur); - else if (cur instanceof Splitter) { - getAllChildWorkers((Splitter) cur, childWorkers); - cur = getJoiner((Splitter) cur); - } else - throw new IllegalStateException( - "Some thing wrong in the algorithm."); - - assert Workers.getSuccessors(cur).size() == 1 : "Illegal State encounted : cur can only be either a filter or a joner"; - cur = Workers.getSuccessors(cur).get(0); - } - } - childWorkers.add(joiner); - } - - /** - * Find and returns the corresponding {@link Joiner} for the passed - * {@link Splitter}. - * - * @param splitter - * : {@link Splitter} that needs it's {@link Joiner}. - * @return Corresponding {@link Joiner} of the passed {@link Splitter}. - */ - protected Joiner getJoiner(Splitter splitter) { - Worker cur = Workers.getSuccessors(splitter).get(0); - int innerSplitjoinCount = 0; - while (!(cur instanceof Joiner) || innerSplitjoinCount != 0) { - if (cur instanceof Splitter) - innerSplitjoinCount++; - if (cur instanceof Joiner) - innerSplitjoinCount--; - assert innerSplitjoinCount >= 0 : "Joiner Count is more than splitter count. Check the algorithm"; - cur = Workers.getSuccessors(cur).get(0); - } - assert cur instanceof Joiner : "Error in algorithm. 
Not returning a Joiner"; - return (Joiner) cur; - } - - protected String getParamName(Integer id) { - assert id > -1 : "Worker id cannot be negative"; - return String.format("worker%dtomachine", id); - } - - /** - * Goes through all workers in workerset which is passed as argument, - * find the workers which are interconnected and group them as a blob - * workers. i.e., Group the workers which are connected. - *

    - * TODO: If any dynamic edges exists then should create interpreter - * blob. - * - * @param workerset - * @return list of workers set which contains interconnected workers. - * Each worker set in the list is supposed to run in an - * individual blob. - */ - protected List>> getConnectedComponents( - Set> workerset) { - List>> ret = new ArrayList>>(); - while (!workerset.isEmpty()) { - Deque> queue = new ArrayDeque<>(); - Set> blobworkers = new HashSet<>(); - Worker w = workerset.iterator().next(); - blobworkers.add(w); - workerset.remove(w); - queue.offer(w); - while (!queue.isEmpty()) { - Worker wrkr = queue.poll(); - for (Worker succ : Workers.getSuccessors(wrkr)) { - if (workerset.contains(succ)) { - blobworkers.add(succ); - workerset.remove(succ); - queue.offer(succ); - } - } - - for (Worker pred : Workers.getPredecessors(wrkr)) { - if (workerset.contains(pred)) { - blobworkers.add(pred); - workerset.remove(pred); - queue.offer(pred); - } - } - } - ret.add(blobworkers); - } - return ret; - } - - /** - * Cycles can occur iff splitter and joiner happened to fall into a blob - * while some workers of that splitjoin falls into other blob. Here, we - * check for the above mention condition. If cycles exists, split then - * in to several blobs. 
- * - * @param blobworkers - * @return - */ - protected List>> breakCycles( - Set> blobworkers) { - Map, Joiner> rfctrSplitJoin = new HashMap<>(); - Set> splitterSet = getSplitters(blobworkers); - for (Splitter s : splitterSet) { - Joiner j = getJoiner(s); - if (blobworkers.contains(j)) { - Set> childWorkers = new HashSet<>(); - getAllChildWorkers(s, childWorkers); - if (!blobworkers.containsAll(childWorkers)) { - rfctrSplitJoin.put(s, j); - } - } - } - - List>> ret = new ArrayList<>(); - - for (Splitter s : rfctrSplitJoin.keySet()) { - if (blobworkers.contains(s)) { - ret.add(getSplitterReachables(s, blobworkers, - rfctrSplitJoin)); - } - } - ret.addAll(getConnectedComponents(blobworkers)); - return ret; - } - - /** - * Goes through the passed set of workers, add workers those are - * reachable from the splitter s, but not any conflicting splitter or - * joiner. - *

    - * This function has side effect. Modifies the argument. - * - * @param s - * @param blobworkers - * @return - */ - protected Set> getSplitterReachables(Splitter s, - Set> blobworkers, - Map, Joiner> rfctrSplitJoin) { - assert blobworkers.contains(s) : "Splitter s in not in blobworkers"; - Set> ret = new HashSet<>(); - Set> exclude = new HashSet<>(); - Deque> queue = new ArrayDeque<>(); - ret.add(s); - exclude.add(rfctrSplitJoin.get(s)); - blobworkers.remove(s); - queue.offer(s); - while (!queue.isEmpty()) { - Worker wrkr = queue.poll(); - for (Worker succ : Workers.getSuccessors(wrkr)) { - process(succ, blobworkers, rfctrSplitJoin, exclude, queue, - ret); - } - - for (Worker pred : Workers.getPredecessors(wrkr)) { - process(pred, blobworkers, rfctrSplitJoin, exclude, queue, - ret); - } - } - return ret; - } - - /** - * Since the code in this method repeated in two places in - * getSplitterReachables() method, It is re-factored into a private - * method to avoid code duplication. - */ - protected void process(Worker wrkr, - Set> blobworkers, - Map, Joiner> rfctrSplitJoin, - Set> exclude, Deque> queue, - Set> ret) { - if (blobworkers.contains(wrkr) && !exclude.contains(wrkr)) { - ret.add(wrkr); - blobworkers.remove(wrkr); - queue.offer(wrkr); - - for (Entry, Joiner> e : rfctrSplitJoin - .entrySet()) { - if (e.getValue().equals(wrkr)) { - exclude.add(e.getKey()); - break; - } else if (e.getKey().equals(wrkr)) { - exclude.add(e.getValue()); - break; - } - } - } - } - - protected Set> getSplitters(Set> blobworkers) { - Set> splitterSet = new HashSet<>(); - for (Worker w : blobworkers) { - if (w instanceof Splitter) { - splitterSet.add((Splitter) w); - } - } - return splitterSet; - } - + app.setConfiguration(config); + return true; } } diff --git a/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java b/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java new file mode 100644 index 00000000..be47742d --- /dev/null +++ 
b/src/edu/mit/streamjit/impl/distributed/ConnectionManager.java @@ -0,0 +1,371 @@ +package edu.mit.streamjit.impl.distributed; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Configuration.Builder; +import edu.mit.streamjit.impl.common.Configuration.Parameter; +import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel; +import edu.mit.streamjit.impl.distributed.common.Connection; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionType; +import edu.mit.streamjit.impl.distributed.common.Connection.GenericConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.node.StreamNode; + +/** + * Generates configuration parameters to tune the {@link Connection}'s + * communication type such as blocking TCP connection, asynchronous TCP + * connection, Infiniband, etc. + * + * @author Sumanan sumanan@mit.edu + * @since Jun 23, 2014 + * + */ +public interface ConnectionManager { + + /** + * Generates parameters to tune {@link BoundaryChannel} and add those into + * the {@link Configuration.Builder}. + * + * @param cfgBuilder + * @param workers + */ + public void addConnectionParameters(Configuration.Builder cfgBuilder, + Set> workers); + + /** + * Generates parameters to tune {@link BoundaryChannel} and return those as + * a new {@link Configuration}. 
+ * + * @param workers + * @return + */ + public Configuration getDefaultConfiguration(Set> workers); + + /** + * Decides {@link Connection} type for each inter-blob connection based on + * the {@link cfg}. + * + * @param cfg + * @param partitionsMachineMap + * @param source + * @param sink + * @return + */ + public Map conInfoMap(Configuration cfg, + Map>>> partitionsMachineMap, + Worker source, Worker sink); + + /** + * Sometimes an assigned TCP ports may not available to make new connection + * at {@link StreamNode}s side. In this case a new {@link ConnectionInfo} + * must be created to replace already created {@link ConnectionInfo}. + * + * @param conInfo + * : Problematic {@link ConnectionInfo}. + * @return : New {@link ConnectionInfo} to replace problematic + * {@link ConnectionInfo}. + */ + public ConnectionInfo replaceConInfo(ConnectionInfo conInfo); + + public abstract static class AbstractConnectionManager implements + ConnectionManager { + + private final int controllerNodeID; + + protected Set currentConInfos; + + protected int startPortNo = 24896; // Just a random magic number. + + public AbstractConnectionManager(int controllerNodeID) { + this.controllerNodeID = controllerNodeID; + this.currentConInfos = new HashSet<>(); + } + + public Map conInfoMap(Configuration cfg, + Map>>> partitionsMachineMap, + Worker source, Worker sink) { + + assert partitionsMachineMap != null : "partitionsMachineMap is null"; + + Set usedConInfos = new HashSet<>(); + Map conInfoMap = new HashMap<>(); + + for (Integer machineID : partitionsMachineMap.keySet()) { + List>> blobList = partitionsMachineMap + .get(machineID); + Set> allWorkers = new HashSet<>(); // Contains all + // workers those + // are + // assigned to + // the + // current + // machineID + // machine. 
+ for (Set> blobWorkers : blobList) { + allWorkers.addAll(blobWorkers); + } + + for (Worker w : allWorkers) { + for (Worker succ : Workers.getSuccessors(w)) { + if (allWorkers.contains(succ)) + continue; + int dstMachineID = getAssignedMachine(succ, + partitionsMachineMap); + Token t = new Token(w, succ); + addtoconInfoMap(machineID, dstMachineID, t, + usedConInfos, conInfoMap, cfg); + } + } + } + + Token headToken = Token.createOverallInputToken(source); + int dstMachineID = getAssignedMachine(source, partitionsMachineMap); + addtoconInfoMap(controllerNodeID, dstMachineID, headToken, + usedConInfos, conInfoMap, cfg); + + Token tailToken = Token.createOverallOutputToken(sink); + int srcMahineID = getAssignedMachine(sink, partitionsMachineMap); + addtoconInfoMap(srcMahineID, controllerNodeID, tailToken, + usedConInfos, conInfoMap, cfg); + + return conInfoMap; + } + + /** + * @param worker + * @return the machineID where on which the passed worker is assigned. + */ + private int getAssignedMachine(Worker worker, + Map>>> partitionsMachineMap) { + for (Integer machineID : partitionsMachineMap.keySet()) { + for (Set> workers : partitionsMachineMap + .get(machineID)) { + if (workers.contains(worker)) + return machineID; + } + } + + throw new IllegalArgumentException(String.format( + "%s is not assigned to anyof the machines", worker)); + } + + protected abstract void addtoconInfoMap(int srcID, int dstID, Token t, + Set usedConInfos, + Map conInfoMap, Configuration cfg); + + protected List getTcpConInfo(ConnectionInfo conInfo) { + List conList = new ArrayList<>(); + for (ConnectionInfo tcpconInfo : currentConInfos) { + if (conInfo.equals(tcpconInfo)) + conList.add(tcpconInfo); + } + return conList; + } + + protected String getParamName(Token t) { + return String.format("ConnectionType-%d:%d", + t.getUpstreamIdentifier(), t.getDownstreamIdentifier()); + } + + public ConnectionInfo replaceConInfo(ConnectionInfo conInfo) { + if (currentConInfos.contains(conInfo)) + 
currentConInfos.remove(conInfo); + ConnectionInfo newConinfo; + if (conInfo.getSrcID() == 0) + newConinfo = new TCPConnectionInfo(conInfo.getSrcID(), + conInfo.getDstID(), startPortNo++); + else + newConinfo = new AsyncTCPConnectionInfo(conInfo.getSrcID(), + conInfo.getDstID(), startPortNo++); + currentConInfos.add(newConinfo); + + return newConinfo; + } + } + + public static abstract class NoParams extends AbstractConnectionManager { + + public NoParams(int controllerNodeID) { + super(controllerNodeID); + } + + @Override + public void addConnectionParameters(Builder cfgBuilder, + Set> workers) { + return; + } + + @Override + public Configuration getDefaultConfiguration(Set> workers) { + return Configuration.builder().build(); + } + + protected void addtoconInfoMap(int srcID, int dstID, Token t, + Set usedConInfos, + Map conInfoMap, Configuration cfg) { + + ConnectionInfo conInfo = new GenericConnectionInfo(srcID, dstID); + + List conSet = getTcpConInfo(conInfo); + ConnectionInfo tcpConInfo = null; + + for (ConnectionInfo con : conSet) { + if (!usedConInfos.contains(con)) { + tcpConInfo = con; + break; + } + } + + if (tcpConInfo == null) { + tcpConInfo = makeConnectionInfo(srcID, dstID); + this.currentConInfos.add(tcpConInfo); + } + + conInfoMap.put(t, tcpConInfo); + usedConInfos.add(tcpConInfo); + } + + protected abstract ConnectionInfo makeConnectionInfo(int srcID, + int dstID); + } + + public static class BlockingTCPNoParams extends NoParams { + + public BlockingTCPNoParams(int controllerNodeID) { + super(controllerNodeID); + } + + @Override + protected ConnectionInfo makeConnectionInfo(int srcID, int dstID) { + return new TCPConnectionInfo(srcID, dstID, startPortNo++); + } + } + + public static class AsyncTCPNoParams extends NoParams { + + public AsyncTCPNoParams(int controllerNodeID) { + super(controllerNodeID); + } + + @Override + protected ConnectionInfo makeConnectionInfo(int srcID, int dstID) { + return new AsyncTCPConnectionInfo(srcID, dstID, 
startPortNo++); + } + } + + public static class AllConnectionParams extends AbstractConnectionManager { + public AllConnectionParams(int controllerNodeID) { + super(controllerNodeID); + } + + @Override + public void addConnectionParameters(Builder cfgBuilder, + Set> workers) { + for (Worker w : workers) { + for (Worker succ : Workers.getSuccessors(w)) { + Token t = new Token(w, succ); + Parameter p = new Configuration.SwitchParameter( + getParamName(t), ConnectionType.class, + ConnectionType.BTCP, Arrays.asList(ConnectionType + .values())); + cfgBuilder.addParameter(p); + } + } + + // Add Parameter for global input channel. + Set> heads = Workers.getTopmostWorkers(workers); + assert heads.size() == 1 : "Multiple first workers"; + for (Worker firstWorker : heads) { + Token t = Token.createOverallInputToken(firstWorker); + Parameter p = new Configuration.SwitchParameter( + getParamName(t), ConnectionType.class, + ConnectionType.BTCP, Arrays.asList(ConnectionType + .values())); + cfgBuilder.addParameter(p); + } + + // Add Parameter for global output channel. 
+ Set> tail = Workers.getBottommostWorkers(workers); + assert tail.size() == 1 : "Multiple first workers"; + for (Worker lastWorker : tail) { + Token t = Token.createOverallOutputToken(lastWorker); + Parameter p = new Configuration.SwitchParameter( + getParamName(t), ConnectionType.class, + ConnectionType.BTCP, Arrays.asList(ConnectionType + .values())); + cfgBuilder.addParameter(p); + } + } + + @Override + public Configuration getDefaultConfiguration(Set> workers) { + Configuration.Builder cfgBuilder = Configuration.builder(); + addConnectionParameters(cfgBuilder, workers); + return cfgBuilder.build(); + } + + protected void addtoconInfoMap(int srcID, int dstID, Token t, + Set usedConInfos, + Map conInfoMap, Configuration cfg) { + + ConnectionInfo conInfo = new GenericConnectionInfo(srcID, dstID); + + List conSet = getTcpConInfo(conInfo); + ConnectionInfo tcpConInfo = null; + + for (ConnectionInfo con : conSet) { + if (!usedConInfos.contains(con)) { + tcpConInfo = con; + break; + } + } + + if (tcpConInfo == null) { + tcpConInfo = makeConnectionInfo(srcID, dstID, t, cfg); + this.currentConInfos.add(tcpConInfo); + } + + conInfoMap.put(t, tcpConInfo); + usedConInfos.add(tcpConInfo); + } + + private ConnectionInfo makeConnectionInfo(int srcID, int dstID, + Token t, Configuration cfg) { + SwitchParameter p = cfg.getParameter( + getParamName(t), SwitchParameter.class, + ConnectionType.class); + + if (p == null) + throw new IllegalStateException(String.format( + "No tuning parameter for connection %s", t)); + + ConnectionInfo conInfo; + switch (p.getValue()) { + case BTCP : + conInfo = new TCPConnectionInfo(srcID, dstID, startPortNo++); + break; + case ATCP : + conInfo = new AsyncTCPConnectionInfo(srcID, dstID, + startPortNo++); + break; + default : + throw new IllegalStateException(String.format( + "Unsupported connection type - %s", p.getValue())); + } + return conInfo; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java 
b/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java index 030691d0..441e981a 100644 --- a/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java +++ b/src/edu/mit/streamjit/impl/distributed/DistributedAppRunner.java @@ -24,13 +24,10 @@ import edu.mit.streamjit.api.CompiledStream; import edu.mit.streamjit.api.Output; import edu.mit.streamjit.api.StreamCompiler; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.interp.DebugStreamCompiler; import edu.mit.streamjit.test.Benchmark; -import edu.mit.streamjit.test.BenchmarkProvider; import edu.mit.streamjit.test.Benchmark.Dataset; +import edu.mit.streamjit.test.BenchmarkProvider; import edu.mit.streamjit.test.apps.channelvocoder7.ChannelVocoder7; -import edu.mit.streamjit.test.apps.fmradio.FMRadio.FMRadioBenchmarkProvider; public class DistributedAppRunner { @@ -85,8 +82,6 @@ public static void main(String[] args) throws InterruptedException { } } - GlobalConstants.tunerMode = 1; - Benchmark benchmark = bp.iterator().next(); // StreamCompiler compiler = new Compiler2StreamCompiler(); StreamCompiler compiler = new DistributedStreamCompiler(noOfNodes); diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java b/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java index f11ebd92..d3c8d397 100644 --- a/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java +++ b/src/edu/mit/streamjit/impl/distributed/DistributedBlobFactory.java @@ -29,7 +29,10 @@ import edu.mit.streamjit.impl.blob.DrainData; import edu.mit.streamjit.impl.common.Configuration; import edu.mit.streamjit.impl.common.Configuration.Parameter; +import edu.mit.streamjit.impl.compiler.CompilerBlobFactory; import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory; +import edu.mit.streamjit.impl.distributed.ConnectionManager.BlockingTCPNoParams; +import edu.mit.streamjit.impl.distributed.common.Options; import 
edu.mit.streamjit.impl.interp.Interpreter.InterpreterBlobFactory; /** @@ -45,53 +48,69 @@ *

    * TODO: For the moment this factory just deal with compiler blob. Need to make * interpreter blob as well based on the dynamic edges. - * + * * @author Sumanan sumanan@mit.edu * @since Sep 24, 2013 */ public class DistributedBlobFactory implements BlobFactory { - private int noOfMachines; + private final int noOfMachines; + + private final PartitionManager partitionManager; - private final ConfigurationManager cfgManager; + private final ConnectionManager connectionManager; - public DistributedBlobFactory(ConfigurationManager cfgManager, - int noOfMachines) { - this.cfgManager = cfgManager; + public DistributedBlobFactory(PartitionManager partitionManager, + ConnectionManager connectionManager, int noOfMachines) { + this.partitionManager = partitionManager; this.noOfMachines = noOfMachines; + this.connectionManager = connectionManager; } /** - * If {@link ConfigurationManager} is not passed as a constructor argument - * then {@link WorkerMachine} will be used as default one. - * + * If {@link PartitionManager} is not passed as a constructor argument then + * {@link WorkerMachine} will be used as default one. 
+ * * @param noOfMachines */ public DistributedBlobFactory(int noOfMachines) { - this(new WorkerMachine(null), noOfMachines); + this(new WorkerMachine(null), new BlockingTCPNoParams(0), noOfMachines); } @Override public Blob makeBlob(Set> workers, Configuration config, int maxNumCores, DrainData initialState) { - return new Compiler2BlobFactory().makeBlob(workers, config, maxNumCores, initialState); + return new Compiler2BlobFactory().makeBlob(workers, config, + maxNumCores, initialState); } @Override public Configuration getDefaultConfiguration(Set> workers) { + Configuration distCfg = partitionManager.getDefaultConfiguration( + workers, noOfMachines); + if (!Options.useCompilerBlob) + return distCfg; - Configuration distCfg = cfgManager.getDefaultConfiguration(workers, - noOfMachines); Configuration.Builder builder = Configuration.builder(distCfg); - BlobFactory compilerBf = new Compiler2BlobFactory(); Configuration compilercfg = compilerBf.getDefaultConfiguration(workers); for (Parameter p : compilercfg.getParametersMap().values()) builder.addParameter(p); + connectionManager.addConnectionParameters(builder, workers); + // addMaxCoreParam(builder); return builder.build(); } + private void addMaxCoreParam(Configuration.Builder builder) { + int min = 1; + int val = Options.maxNumCores / 2; + val = min > val ? 
min : val; + Parameter p = new Configuration.IntParameter("maxNumCores", min, + Options.maxNumCores, val); + builder.addParameter(p); + } + @Override public boolean equals(Object o) { return getClass() == o.getClass() diff --git a/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java b/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java index e51c451b..3262ce6f 100644 --- a/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java +++ b/src/edu/mit/streamjit/impl/distributed/DistributedStreamCompiler.java @@ -21,11 +21,8 @@ */ package edu.mit.streamjit.impl.distributed; -import java.io.BufferedReader; -import java.io.FileReader; import java.util.ArrayList; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -35,39 +32,36 @@ import com.google.common.collect.ImmutableMap; import edu.mit.streamjit.api.CompiledStream; -import edu.mit.streamjit.api.Filter; import edu.mit.streamjit.api.Input; import edu.mit.streamjit.api.Input.ManualInput; import edu.mit.streamjit.api.OneToOneElement; import edu.mit.streamjit.api.Output; import edu.mit.streamjit.api.Pipeline; -import edu.mit.streamjit.api.Portal; import edu.mit.streamjit.api.Splitjoin; -import edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.StreamCompiler; import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.blob.BlobFactory; import edu.mit.streamjit.impl.blob.Buffer; -import edu.mit.streamjit.impl.common.AbstractDrainer; import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.ConnectWorkersVisitor; import edu.mit.streamjit.impl.common.InputBufferFactory; -import edu.mit.streamjit.impl.common.MessageConstraint; import edu.mit.streamjit.impl.common.OutputBufferFactory; -import edu.mit.streamjit.impl.common.Portals; -import edu.mit.streamjit.impl.common.VerifyStreamGraph; 
+import edu.mit.streamjit.impl.common.TimeLogger; import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; import edu.mit.streamjit.impl.concurrent.ConcurrentStreamCompiler; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; +import edu.mit.streamjit.impl.distributed.HeadChannel.HeadBuffer; +import edu.mit.streamjit.impl.distributed.common.Options; import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.CommunicationManager.CommunicationType; import edu.mit.streamjit.impl.distributed.runtimer.Controller; import edu.mit.streamjit.impl.distributed.runtimer.DistributedDrainer; -import edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner; -import edu.mit.streamjit.impl.distributed.HeadChannel.HeadBuffer; import edu.mit.streamjit.partitioner.HorizontalPartitioner; import edu.mit.streamjit.partitioner.Partitioner; +import edu.mit.streamjit.tuner.OnlineTuner; +import edu.mit.streamjit.tuner.Reconfigurer; +import edu.mit.streamjit.tuner.Verifier; +import edu.mit.streamjit.util.ConfigurationUtils; /** * @@ -89,106 +83,181 @@ public class DistributedStreamCompiler implements StreamCompiler { /** * Configuration from Opentuner. */ - Configuration cfg; + private Configuration cfg; /** * Total number of nodes including controller node. */ - int noOfnodes; + private int noOfnodes; /** - * @param noOfnodes - * : Total number of nodes the stream application intended to run - * - including controller node. If it is 1 then it means the - * whole stream application is supposed to run on controller. + * Run the whole application on the controller node. No distributions. 
See + * {@link #DistributedStreamCompiler(int, Configuration)} */ - public DistributedStreamCompiler(int noOfnodes) { - if (noOfnodes < 1) - throw new IllegalArgumentException("noOfnodes must be 1 or greater"); - this.noOfnodes = noOfnodes; + public DistributedStreamCompiler() { + this(1, null); } /** - * Run the whole application on the controller node. + * See {@link #DistributedStreamCompiler(int, Configuration)}. As no + * configuration is passed, tuner will activated to tune for better + * configuration. */ - public DistributedStreamCompiler() { - this(1); + public DistributedStreamCompiler(int noOfnodes) { + this(noOfnodes, null); } /** - * Run the application with the passed configureation. + * Run the application with the passed configuration. Pass null if the + * intention is to tune the application. + * + * @param noOfnodes + * : Total number of nodes the stream application intended to run + * including the controller node. If it is 1 then it means the + * whole stream application is supposed to run on controller. + * @param cfg + * Run the application with the passed {@link Configuration}. If + * it is null, tuner will be activated to tune for better + * configuration. */ public DistributedStreamCompiler(int noOfnodes, Configuration cfg) { if (noOfnodes < 1) throw new IllegalArgumentException("noOfnodes must be 1 or greater"); - this.noOfnodes = noOfnodes; + if (Options.singleNodeOnline) { + System.out + .println("Flag GlobalConstants.singleNodeOnline is enabled." 
+ + " noOfNodes passed as compiler argument has no effect"); + this.noOfnodes = 1; + } else + this.noOfnodes = noOfnodes; + this.cfg = cfg; } public CompiledStream compile(OneToOneElement stream, Input input, Output output) { + StreamJitApp app = new StreamJitApp<>(stream); + Controller controller = establishController(); + + PartitionManager partitionManager = new HotSpotTuning(app); + ConfigurationManager cfgManager = new ConfigurationManager(app, + partitionManager); + ConnectionManager conManager = connectionManager(controller.controllerNodeID); + + setConfiguration(controller, app, partitionManager, conManager, + cfgManager); - checkforDefaultOneToOneElement(stream); + TimeLogger logger = new TimeLoggers.FileTimeLogger(app.name); + StreamJitAppManager manager = new StreamJitAppManager(controller, app, + conManager, logger); + final AbstractDrainer drainer = new DistributedDrainer(app, logger, + manager); + drainer.setBlobGraph(app.blobGraph); - ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); - stream.visit(primitiveConnector); - Worker source = (Worker) primitiveConnector.getSource(); - Worker sink = (Worker) primitiveConnector.getSink(); + boolean needTermination = setBufferMap(input, output, drainer, app); + + manager.reconfigure(1); + CompiledStream cs = new DistributedCompiledStream(drainer); + + if (Options.tune > 0 && this.cfg != null) { + Reconfigurer configurer = new Reconfigurer(drainer, manager, app, + cfgManager, logger); + tuneOrVerify(configurer, needTermination); + } + return cs; + } + + private ConnectionManager connectionManager(int controllerNodeID) { + switch (Options.connectionManager) { + case 0 : + return new ConnectionManager.AllConnectionParams( + controllerNodeID); + case 1 : + return new ConnectionManager.BlockingTCPNoParams( + controllerNodeID); + default : + return new ConnectionManager.AsyncTCPNoParams(controllerNodeID); + } + } - VerifyStreamGraph verifier = new VerifyStreamGraph(); - stream.visit(verifier); 
+ private Configuration cfgFromFile(StreamJitApp app, + Controller controller, Configuration defaultCfg) { + Configuration cfg1 = ConfigurationUtils.readConfiguration(app.name, + null); + if (cfg1 == null) { + controller.closeAll(); + throw new IllegalConfigurationException(); + } else if (!verifyCfg(defaultCfg, cfg1)) { + System.err + .println("Reading the configuration from configuration file"); + System.err + .println("No matching between parameters in the read " + + "configuration and parameters in the default configuration"); + controller.closeAll(); + throw new IllegalConfigurationException(); + } + return cfg1; + } + private Controller establishController() { Map conTypeCount = new HashMap<>(); - // conTypeCount.put(CommunicationType.LOCAL, 1); - conTypeCount.put(CommunicationType.TCP, this.noOfnodes); + + if (this.noOfnodes == 1) + conTypeCount.put(CommunicationType.LOCAL, 1); + else + conTypeCount.put(CommunicationType.TCP, this.noOfnodes - 1); Controller controller = new Controller(); controller.connect(conTypeCount); + return controller; + } - StreamJitApp app = new StreamJitApp(stream, source, sink); - ConfigurationManager cfgManager = new HotSpotTuning(app); - BlobFactory bf = new DistributedBlobFactory(cfgManager, noOfnodes); - this.cfg = bf.getDefaultConfiguration(Workers - .getAllWorkersInGraph(source)); + private Map>>> getMachineWorkerMap( + Integer[] machineIds, OneToOneElement stream, + Worker source, Worker sink) { + int totalCores = machineIds.length; - if (GlobalConstants.tune) { + Partitioner horzPartitioner = new HorizontalPartitioner<>(); + List>> partitionList = horzPartitioner + .partitionEqually(stream, source, sink, totalCores); - } else { - this.cfg = readConfiguration(stream.getClass().getSimpleName()); + Map>>> partitionsMachineMap = new HashMap>>>(); + for (Integer machineID : machineIds) { + partitionsMachineMap.put(machineID, + new ArrayList>>()); } - if (cfg == null) { - System.err - .println("Configuration is null. 
Runs the app with horizontal partitioning."); - Integer[] machineIds = new Integer[this.noOfnodes]; - for (int i = 0; i < machineIds.length; i++) { - machineIds[i] = i + 1; + int index = 0; + while (index < partitionList.size()) { + for (Integer machineID : partitionsMachineMap.keySet()) { + if (!(index < partitionList.size())) + break; + partitionsMachineMap.get(machineID).add( + partitionList.get(index++)); } - Map>>> partitionsMachineMap = getMachineWorkerMap( - machineIds, stream, source, sink); - app.newPartitionMap(partitionsMachineMap); - } else - cfgManager.newConfiguration(cfg); - - // TODO: Copied form DebugStreamCompiler. Need to be verified for this - // context. - List constraints = MessageConstraint - .findConstraints(source); - Set> portals = new HashSet<>(); - for (MessageConstraint mc : constraints) - portals.add(mc.getPortal()); - for (Portal portal : portals) - Portals.setConstraints(portal, constraints); + } + return partitionsMachineMap; + } - StreamJitAppManager manager = new StreamJitAppManager(controller, app, - cfgManager); - final AbstractDrainer drainer = new DistributedDrainer(manager); - drainer.setBlobGraph(app.blobGraph); + private void manualPartition(StreamJitApp app) { + Integer[] machineIds = new Integer[this.noOfnodes - 1]; + for (int i = 0; i < machineIds.length; i++) { + machineIds[i] = i + 1; + } + Map>>> partitionsMachineMap = getMachineWorkerMap( + machineIds, app.streamGraph, app.source, app.sink); + app.newPartitionMap(partitionsMachineMap); + } + /** + * Sets head and tail buffers. + */ + private boolean setBufferMap(Input input, Output output, + final AbstractDrainer drainer, StreamJitApp app) { // TODO: derive a algorithm to find good buffer size and use here. 
Buffer head = InputBufferFactory.unwrap(input).createReadableBuffer( - 1000); + 10000); Buffer tail = OutputBufferFactory.unwrap(output).createWritableBuffer( - 1000); + 10000); boolean needTermination; @@ -201,7 +270,8 @@ public CompiledStream compile(OneToOneElement stream, head) { @Override public void drain() { - drainer.startDraining(2); + // drainer.startDraining(2); + drainer.drainFinal(false); } }); } else { @@ -212,80 +282,55 @@ public void drain() { ImmutableMap.Builder bufferMapBuilder = ImmutableMap . builder(); - bufferMapBuilder.put(Token.createOverallInputToken(source), head); - bufferMapBuilder.put(Token.createOverallOutputToken(sink), tail); + bufferMapBuilder.put(Token.createOverallInputToken(app.source), head); + bufferMapBuilder.put(Token.createOverallOutputToken(app.sink), tail); app.bufferMap = bufferMapBuilder.build(); - app.constraints = constraints; - - manager.reconfigure(); - CompiledStream cs = new DistributedCompiledStream(drainer); - - if (GlobalConstants.tune && this.cfg != null) { - OnlineTuner tuner = new OnlineTuner(drainer, manager, app, - cfgManager, needTermination); - new Thread(tuner, "OnlineTuner").start(); - } - return cs; - } - - private Configuration readConfiguration(String simpeName) { - String name = String.format("%s.cfg", simpeName); - try { - BufferedReader reader = new BufferedReader(new FileReader(name)); - String json = reader.readLine(); - reader.close(); - return Configuration.fromJson(json); - } catch (Exception ex) { - System.err.println(String.format( - "File reader error. 
No %s configuration file.", name)); - } - return null; + return needTermination; } - private Map>>> getMachineWorkerMap( - Integer[] machineIds, OneToOneElement stream, - Worker source, Worker sink) { - int totalCores = machineIds.length; - - Partitioner horzPartitioner = new HorizontalPartitioner<>(); - List>> partitionList = horzPartitioner - .partitionEqually(stream, source, sink, totalCores); + private void setConfiguration(Controller controller, + StreamJitApp app, PartitionManager partitionManager, + ConnectionManager conManager, ConfigurationManager cfgManager) { + BlobFactory bf = new DistributedBlobFactory(partitionManager, + conManager, Math.max(noOfnodes - 1, 1)); + Configuration defaultCfg = bf.getDefaultConfiguration(Workers + .getAllWorkersInGraph(app.source)); + + if (this.cfg != null) { + if (!verifyCfg(defaultCfg, this.cfg)) { + System.err + .println("No matching between parameters in the passed " + + "configuration and parameters in the default configuration"); + controller.closeAll(); + throw new IllegalConfigurationException(); + } + } else if (Options.tune == 0) { + this.cfg = cfgFromFile(app, controller, defaultCfg); + } else + this.cfg = defaultCfg; - Map>>> partitionsMachineMap = new HashMap>>>(); - for (Integer machineID : machineIds) { - partitionsMachineMap.put(machineID, - new ArrayList>>()); - } + cfgManager.newConfiguration(this.cfg); + } - int index = 0; - while (index < partitionList.size()) { - for (Integer machineID : partitionsMachineMap.keySet()) { - if (!(index < partitionList.size())) - break; - partitionsMachineMap.get(machineID).add( - partitionList.get(index++)); - } - } - return partitionsMachineMap; + private boolean verifyCfg(Configuration defaultCfg, Configuration cfg) { + if (defaultCfg.getParametersMap().keySet() + .equals(cfg.getParametersMap().keySet())) + return true; + return false; } - /** - * TODO: Need to check for other default subtypes of {@link OneToOneElement} - * s. 
Now only checks for first generation children. - * - * @param stream - * @throws StreamCompilationFailedException - * if stream is default subtype of OneToOneElement - */ - private void checkforDefaultOneToOneElement( - OneToOneElement stream) { - - if (stream.getClass() == Pipeline.class - || stream.getClass() == Splitjoin.class - || stream.getClass() == Filter.class) { - throw new StreamCompilationFailedException( - "Default subtypes of OneToOneElement are not accepted for compilation by this compiler. OneToOneElement that passed should be unique"); + private void tuneOrVerify(Reconfigurer configurer, boolean needTermination) { + Runnable r; + if (Options.tune == 1) { + r = new OnlineTuner(configurer, needTermination); + new Thread(r, "OnlineTuner").start(); + } else if (Options.tune == 2) { + r = new Verifier(configurer); + new Thread(r, "Verifier").start(); + } else { + throw new IllegalStateException( + "Neither OnlineTuner nor Verifer has been started."); } } @@ -297,11 +342,6 @@ public DistributedCompiledStream(AbstractDrainer drainer) { this.drainer = drainer; } - @Override - public boolean isDrained() { - return drainer.isDrained(); - } - @Override public void awaitDrained() throws InterruptedException { drainer.awaitDrained(); @@ -313,5 +353,25 @@ public void awaitDrained(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException { drainer.awaitDrained(timeout, unit); } + + @Override + public boolean isDrained() { + return drainer.isDrained(); + } + } + + private class IllegalConfigurationException extends RuntimeException { + + private static final long serialVersionUID = 1L; + + private static final String tag = "IllegalConfigurationException"; + + private IllegalConfigurationException() { + super(tag); + } + + private IllegalConfigurationException(String msg) { + super(String.format("%s : %s", tag, msg)); + } } } \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/HeadChannel.java 
b/src/edu/mit/streamjit/impl/distributed/HeadChannel.java index b21665dc..4dd5a1b1 100644 --- a/src/edu/mit/streamjit/impl/distributed/HeadChannel.java +++ b/src/edu/mit/streamjit/impl/distributed/HeadChannel.java @@ -25,10 +25,11 @@ import edu.mit.streamjit.impl.blob.AbstractReadOnlyBuffer; import edu.mit.streamjit.impl.blob.Buffer; -import edu.mit.streamjit.impl.common.AbstractDrainer; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; -import edu.mit.streamjit.impl.distributed.node.TCPOutputChannel; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; +import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel; /** * Head Channel is just a wrapper to TCPOutputChannel that skips @@ -37,15 +38,79 @@ * @author Sumanan sumanan@mit.edu * @since Oct 21, 2013 */ -public class HeadChannel extends TCPOutputChannel { +public class HeadChannel { - public HeadChannel(Buffer buffer, TCPConnectionProvider conProvider, - TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint) { - super(buffer, conProvider, conInfo, bufferTokenName, debugPrint); + public static class TCPHeadChannel extends BlockingOutputChannel { + + public TCPHeadChannel(Buffer buffer, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { + super(buffer, conProvider, conInfo, bufferTokenName, debugLevel); + } + + protected void fillUnprocessedData() { + this.unProcessedData = ImmutableList.of(); + } } - protected void fillUnprocessedData() { - this.unProcessedData = ImmutableList.of(); + public static class AsyncHeadChannel extends AsyncOutputChannel { + + final Buffer readBuffer; + private 
volatile boolean stopCalled; + private volatile boolean isFinal; + + public AsyncHeadChannel(Buffer buffer, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { + super(conProvider, conInfo, bufferTokenName, debugLevel); + readBuffer = buffer; + stopCalled = false; + } + + @Override + public Runnable getRunnable() { + final Runnable supperRunnable = super.getRunnable(); + return new Runnable() { + @Override + public void run() { + supperRunnable.run(); + final Buffer writeBuffer = getBuffer(); + final int dataLength = 10000; + final Object[] data = new Object[dataLength]; + int read = 1; + int written = 0; + while (!stopCalled) { + read = readBuffer.read(data, 0, data.length); + written = 0; + while (written < read) { + written += writeBuffer.write(data, written, read + - written); + if (written == 0) { + try { + // TODO: Verify this sleep time. + Thread.sleep(500); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + } + stopSuper(isFinal); + } + }; + } + + protected void fillUnprocessedData() { + throw new Error("Method not implemented"); + } + + @Override + public void stop(boolean isFinal) { + this.isFinal = isFinal; + this.stopCalled = true; + } + + private void stopSuper(boolean isFinal) { + super.stop(isFinal); + } } /** @@ -77,6 +142,25 @@ public Object read() { return o; } + @Override + public int read(Object[] data, int offset, int length) { + int read = buffer.read(data, offset, length); + if (read == 0) { + new DrainerThread().start(); + } + return read; + } + + @Override + public boolean readAll(Object[] data) { + return buffer.readAll(data); + } + + @Override + public boolean readAll(Object[] data, int offset) { + return buffer.readAll(data, offset); + } + @Override public int size() { return buffer.size(); @@ -89,7 +173,8 @@ class DrainerThread extends Thread { public void run() { System.out.println("Input data finished"); - drainer.startDraining(2); + // drainer.startDraining(2); + 
drainer.drainFinal(false); } } } diff --git a/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java b/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java index 5e82260d..67c4403e 100644 --- a/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java +++ b/src/edu/mit/streamjit/impl/distributed/HotSpotTuning.java @@ -21,6 +21,8 @@ */ package edu.mit.streamjit.impl.distributed; +import static com.google.common.base.Preconditions.checkArgument; + import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -34,18 +36,16 @@ import edu.mit.streamjit.api.Pipeline; import edu.mit.streamjit.api.Splitjoin; import edu.mit.streamjit.api.Splitter; -import edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.StreamVisitor; import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.common.Configuration; import edu.mit.streamjit.impl.common.Configuration.IntParameter; +import edu.mit.streamjit.impl.common.Configuration.Parameter; import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; import edu.mit.streamjit.impl.common.Workers; -import edu.mit.streamjit.impl.common.Configuration.Parameter; -import edu.mit.streamjit.impl.distributed.ConfigurationManager.AbstractConfigurationManager; -import edu.mit.streamjit.tuner.OfflineTuner; +import edu.mit.streamjit.impl.distributed.PartitionManager.AbstractPartitionManager; -public final class HotSpotTuning extends AbstractConfigurationManager { +public final class HotSpotTuning extends AbstractPartitionManager { Map>> partitionGroup; Map, Set>> skippedSplitters; @@ -57,36 +57,14 @@ public HotSpotTuning(StreamJitApp app) { @Override public Configuration getDefaultConfiguration(Set> workers, int noOfMachines) { + checkArgument(noOfMachines > 0, String.format( + "noOfMachines = %d, It must be > 0", noOfMachines)); PickHotSpots visitor = new PickHotSpots(noOfMachines); app.streamGraph.visit(visitor); return visitor.builder.build(); } - @Override - public 
boolean newConfiguration(Configuration config) { - - for (Parameter p : config.getParametersMap().values()) { - if (p instanceof IntParameter) { - IntParameter ip = (IntParameter) p; - System.out.println(ip.getName() + " - " + ip.getValue()); - } else if (p instanceof SwitchParameter) { - SwitchParameter sp = (SwitchParameter) p; - System.out.println(sp.getName() + " - " + sp.getValue()); - } else - System.out.println(p.getName() + " - Unknown type"); - } - - Map>>> partitionsMachineMap = getMachineWorkerMap(config); - try { - app.varifyConfiguration(partitionsMachineMap); - } catch (StreamCompilationFailedException ex) { - return false; - } - app.blobConfiguration = config; - return true; - } - - private Map>>> getMachineWorkerMap( + public Map>>> partitionMap( Configuration config) { Map>> partition = new HashMap<>(); @@ -186,11 +164,10 @@ private class PickHotSpots extends StreamVisitor { private Joiner skipJoiner; - private int minSplitjoinSize = 20; + private int minSplitjoinSize = 8; /** - * Workers those are going to be part {@link OfflineTuner} - * {@link #currentHotSpot}. + * Workers those are going to be part of {@link #currentHotSpot}. 
*/ List> workerGropups; diff --git a/src/edu/mit/streamjit/impl/distributed/PartitionManager.java b/src/edu/mit/streamjit/impl/distributed/PartitionManager.java new file mode 100644 index 00000000..235c1b91 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/PartitionManager.java @@ -0,0 +1,296 @@ +package edu.mit.streamjit.impl.distributed; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; + +import edu.mit.streamjit.api.Filter; +import edu.mit.streamjit.api.Joiner; +import edu.mit.streamjit.api.Splitter; +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.partitioner.AbstractPartitioner; + +/** + * PartitionManager is responsible to partition a stream graph for a cluster. + * Partitioning process can be tuned. Implementations of this interface must + * provide the following two tasks. + *

      + *
    1. Generate configuration with appropriate tuning parameters (based on the + * search space design strategy) for tuning. + *
    2. Dispatch a new configuration given by OpenTuner and generate the + * partition machine map. + *
    + * + * @author Sumanan sumanan@mit.edu + * @since Jan 16, 2014 + * + */ +public interface PartitionManager { + + /** + * Generates default configuration with all tuning parameters for tuning. + * + * @param streamGraph + * @param source + * @param sink + * @param noOfMachines + * @return + */ + public Configuration getDefaultConfiguration(Set> workers, + int noOfMachines); + + /** + * Reads the configuration and returns a map of nodeID to list of set of + * workers (list of blob workers). Value of the returned map is list of + * worker set where each worker set is an individual blob. + * + * @param config + * @return map of nodeID to list of set of workers which are assigned to the + * node. + */ + public Map>>> partitionMap( + Configuration config); + + /** + * Implements the functions those can be called by runtimer to send + * configuration information to streamnodes. + * + * @author Sumanan sumanan@mit.edu + * @since Jan 17, 2014 + */ + public static abstract class AbstractPartitionManager implements + PartitionManager { + + protected final StreamJitApp app; + + AbstractPartitionManager(StreamJitApp app) { + this.app = app; + } + + /** + * Copied form {@link AbstractPartitioner} class. But modified to + * support nested splitjoiners.

    Returns all {@link Worker}s in a + * splitjoin. + * + * @param splitter + * @return Returns all {@link Filter}s in a splitjoin. + */ + protected void getAllChildWorkers(Splitter splitter, + Set> childWorkers) { + childWorkers.add(splitter); + Joiner joiner = getJoiner(splitter); + Worker cur; + for (Worker childWorker : Workers.getSuccessors(splitter)) { + cur = childWorker; + while (cur != joiner) { + if (cur instanceof Filter) + childWorkers.add(cur); + else if (cur instanceof Splitter) { + getAllChildWorkers((Splitter) cur, childWorkers); + cur = getJoiner((Splitter) cur); + } else + throw new IllegalStateException( + "Some thing wrong in the algorithm."); + + assert Workers.getSuccessors(cur).size() == 1 : "Illegal State encounted : cur can only be either a filter or a joner"; + cur = Workers.getSuccessors(cur).get(0); + } + } + childWorkers.add(joiner); + } + + /** + * Find and returns the corresponding {@link Joiner} for the passed + * {@link Splitter}. + * + * @param splitter + * : {@link Splitter} that needs it's {@link Joiner}. + * @return Corresponding {@link Joiner} of the passed {@link Splitter}. + */ + protected Joiner getJoiner(Splitter splitter) { + Worker cur = Workers.getSuccessors(splitter).get(0); + int innerSplitjoinCount = 0; + while (!(cur instanceof Joiner) || innerSplitjoinCount != 0) { + if (cur instanceof Splitter) + innerSplitjoinCount++; + if (cur instanceof Joiner) + innerSplitjoinCount--; + assert innerSplitjoinCount >= 0 : "Joiner Count is more than splitter count. Check the algorithm"; + cur = Workers.getSuccessors(cur).get(0); + } + assert cur instanceof Joiner : "Error in algorithm. 
Not returning a Joiner"; + return (Joiner) cur; + } + + protected String getParamName(Integer id) { + assert id > -1 : "Worker id cannot be negative"; + return String.format("worker%dtomachine", id); + } + + /** + * Goes through all workers in workerset which is passed as argument, + * find the workers which are interconnected and group them as a blob + * workers. i.e., Group the workers which are connected. + *

    + * TODO: If any dynamic edges exists then should create interpreter + * blob. + * + * @param workerset + * @return list of workers set which contains interconnected workers. + * Each worker set in the list is supposed to run in an + * individual blob. + */ + protected List>> getConnectedComponents( + Set> workerset) { + List>> ret = new ArrayList>>(); + while (!workerset.isEmpty()) { + Deque> queue = new ArrayDeque<>(); + Set> blobworkers = new HashSet<>(); + Worker w = workerset.iterator().next(); + blobworkers.add(w); + workerset.remove(w); + queue.offer(w); + while (!queue.isEmpty()) { + Worker wrkr = queue.poll(); + for (Worker succ : Workers.getSuccessors(wrkr)) { + if (workerset.contains(succ)) { + blobworkers.add(succ); + workerset.remove(succ); + queue.offer(succ); + } + } + + for (Worker pred : Workers.getPredecessors(wrkr)) { + if (workerset.contains(pred)) { + blobworkers.add(pred); + workerset.remove(pred); + queue.offer(pred); + } + } + } + ret.add(blobworkers); + } + return ret; + } + + /** + * Cycles can occur iff splitter and joiner happened to fall into a blob + * while some workers of that splitjoin falls into other blob. Here, we + * check for the above mention condition. If cycles exists, split then + * in to several blobs. 
+ * + * @param blobworkers + * @return + */ + protected List>> breakCycles( + Set> blobworkers) { + Map, Joiner> rfctrSplitJoin = new HashMap<>(); + Set> splitterSet = getSplitters(blobworkers); + for (Splitter s : splitterSet) { + Joiner j = getJoiner(s); + if (blobworkers.contains(j)) { + Set> childWorkers = new HashSet<>(); + getAllChildWorkers(s, childWorkers); + if (!blobworkers.containsAll(childWorkers)) { + rfctrSplitJoin.put(s, j); + } + } + } + + List>> ret = new ArrayList<>(); + + for (Splitter s : rfctrSplitJoin.keySet()) { + if (blobworkers.contains(s)) { + ret.add(getSplitterReachables(s, blobworkers, + rfctrSplitJoin)); + } + } + ret.addAll(getConnectedComponents(blobworkers)); + return ret; + } + + /** + * Goes through the passed set of workers, add workers those are + * reachable from the splitter s, but not any conflicting splitter or + * joiner. + *

    + * This function has side effect. Modifies the argument. + * + * @param s + * @param blobworkers + * @return + */ + protected Set> getSplitterReachables(Splitter s, + Set> blobworkers, + Map, Joiner> rfctrSplitJoin) { + assert blobworkers.contains(s) : "Splitter s in not in blobworkers"; + Set> ret = new HashSet<>(); + Set> exclude = new HashSet<>(); + Deque> queue = new ArrayDeque<>(); + ret.add(s); + exclude.add(rfctrSplitJoin.get(s)); + blobworkers.remove(s); + queue.offer(s); + while (!queue.isEmpty()) { + Worker wrkr = queue.poll(); + for (Worker succ : Workers.getSuccessors(wrkr)) { + process(succ, blobworkers, rfctrSplitJoin, exclude, queue, + ret); + } + + for (Worker pred : Workers.getPredecessors(wrkr)) { + process(pred, blobworkers, rfctrSplitJoin, exclude, queue, + ret); + } + } + return ret; + } + + /** + * Since the code in this method repeated in two places in + * getSplitterReachables() method, It is re-factored into a private + * method to avoid code duplication. + */ + protected void process(Worker wrkr, + Set> blobworkers, + Map, Joiner> rfctrSplitJoin, + Set> exclude, Deque> queue, + Set> ret) { + if (blobworkers.contains(wrkr) && !exclude.contains(wrkr)) { + ret.add(wrkr); + blobworkers.remove(wrkr); + queue.offer(wrkr); + + for (Entry, Joiner> e : rfctrSplitJoin + .entrySet()) { + if (e.getValue().equals(wrkr)) { + exclude.add(e.getKey()); + break; + } else if (e.getKey().equals(wrkr)) { + exclude.add(e.getValue()); + break; + } + } + } + } + + protected Set> getSplitters(Set> blobworkers) { + Set> splitterSet = new HashSet<>(); + for (Worker w : blobworkers) { + if (w instanceof Splitter) { + splitterSet.add((Splitter) w); + } + } + return splitterSet; + } + + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java b/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java index 389c8cd6..6d52abe3 100644 --- a/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java +++ 
b/src/edu/mit/streamjit/impl/distributed/StreamJitApp.java @@ -22,26 +22,51 @@ package edu.mit.streamjit.impl.distributed; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import com.google.common.collect.ImmutableMap; +import edu.mit.streamjit.api.Filter; import edu.mit.streamjit.api.OneToOneElement; +import edu.mit.streamjit.api.Pipeline; +import edu.mit.streamjit.api.Portal; +import edu.mit.streamjit.api.Splitjoin; import edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.AbstractReadOnlyBuffer; +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.blob.BlobFactory; import edu.mit.streamjit.impl.blob.Buffer; import edu.mit.streamjit.impl.blob.DrainData; -import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph; import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Configuration.IntParameter; +import edu.mit.streamjit.impl.common.Configuration.PartitionParameter; +import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; +import edu.mit.streamjit.impl.common.ConnectWorkersVisitor; import edu.mit.streamjit.impl.common.MessageConstraint; +import edu.mit.streamjit.impl.common.Portals; +import edu.mit.streamjit.impl.common.VerifyStreamGraph; import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.common.drainer.BlobGraph; +import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory; +import edu.mit.streamjit.impl.concurrent.ConcurrentChannelFactory; +import edu.mit.streamjit.impl.distributed.common.GlobalConstants; +import edu.mit.streamjit.impl.distributed.common.Options; +import 
edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.Controller; -import edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner; +import edu.mit.streamjit.impl.interp.ChannelFactory; +import edu.mit.streamjit.impl.interp.Interpreter; +import edu.mit.streamjit.tuner.OnlineTuner; +import edu.mit.streamjit.util.Pair; /** * This class contains all information about the current streamJit application @@ -57,22 +82,22 @@ * @author Sumanan sumanan@mit.edu * @since Oct 8, 2013 */ -public class StreamJitApp { +public class StreamJitApp { /** * Since this is final, lets make public */ public final String topLevelClass; - public final Worker source; + public final Worker source; - public final Worker sink; + public final Worker sink; public final String jarFilePath; public final String name; - final OneToOneElement streamGraph; + final OneToOneElement streamGraph; public BlobGraph blobGraph; @@ -80,10 +105,12 @@ public class StreamJitApp { public ImmutableMap bufferMap; - public List constraints; + public final List constraints; public DrainData drainData = null; + public final Visualizer visualizer; + /** * Keeps track of assigned machine Ids of each blob. This information is * need for draining. TODO: If possible use a better solution. @@ -91,21 +118,24 @@ public class StreamJitApp { public Map blobtoMachineMap; /** - * blobConfiguration contains decision variables that are tuned by - * opentuner. Specifically, a {@link Configuration} that is generated by a - * {@link BlobFactory#getDefaultConfiguration(java.util.Set)}. + * The latest valid {@link Configuration} that is received from OpenTuner. + * {@link BlobFactory#getDefaultConfiguration(java.util.Set) generates the + * initial configuration}. 
*/ - public Configuration blobConfiguration = null; + private Configuration configuration = null; - public StreamJitApp(OneToOneElement streamGraph, Worker source, - Worker sink) { + public StreamJitApp(OneToOneElement streamGraph) { this.streamGraph = streamGraph; + Pair, Worker> srcSink = visit(streamGraph); this.name = streamGraph.getClass().getSimpleName(); this.topLevelClass = streamGraph.getClass().getName(); - this.source = source; - this.sink = sink; + this.source = srcSink.first; + this.sink = srcSink.second; this.jarFilePath = this.getClass().getProtectionDomain() .getCodeSource().getLocation().getPath(); + this.constraints = getConstrains(); + Utils.newApp(name); + visualizer = new Visualizer.DotVisualizer(streamGraph); } /** @@ -123,7 +153,7 @@ public StreamJitApp(OneToOneElement streamGraph, Worker source, public boolean newPartitionMap( Map>>> partitionsMachineMap) { try { - varifyConfiguration(partitionsMachineMap); + verifyConfiguration(partitionsMachineMap); } catch (StreamCompilationFailedException ex) { return false; } @@ -143,18 +173,13 @@ public boolean newPartitionMap( * @throws StreamCompilationFailedException * if any cycles found among blobs. 
*/ - public void varifyConfiguration( + public void verifyConfiguration( Map>>> partitionsMachineMap) { - for (int machine : partitionsMachineMap.keySet()) { - System.err.print("\nMachine - " + machine); - for (Set> blobworkers : partitionsMachineMap - .get(machine)) { - System.err.print("\n\tBlob worker set : "); - for (Worker w : blobworkers) { - System.err.print(Workers.getIdentifier(w) + " "); - } - } + + if (!Options.singleNodeOnline) { + // printPartition(partitionsMachineMap); } + List>> partitionList = new ArrayList<>(); for (List>> lst : partitionsMachineMap.values()) { partitionList.addAll(lst); @@ -164,24 +189,29 @@ public void varifyConfiguration( try { bg = new BlobGraph(partitionList); } catch (StreamCompilationFailedException ex) { - System.err.print("Cycles found in the worker->blob assignment"); - // for (int machine : partitionsMachineMap.keySet()) { - // System.err.print("\nMachine - " + machine); - // for (Set> blobworkers : partitionsMachineMap - // .get(machine)) { - // System.err.print("\n\tBlob worker set : "); - // for (Worker w : blobworkers) { - // System.err.print(Workers.getIdentifier(w) + " "); - // } - // } - // } - System.err.println(); + System.err.println("Cycles found in the worker->blob assignment"); + printPartition(partitionsMachineMap); throw ex; } this.blobGraph = bg; this.partitionsMachineMap = partitionsMachineMap; } + private void printPartition( + Map>>> partitionsMachineMap) { + for (int machine : partitionsMachineMap.keySet()) { + System.err.print("\nMachine - " + machine); + for (Set> blobworkers : partitionsMachineMap + .get(machine)) { + System.err.print("\n\tBlob worker set : "); + for (Worker w : blobworkers) { + System.err.print(Workers.getIdentifier(w) + " "); + } + } + } + System.err.println(); + } + /** * From aggregated drain data, get subset of it which is relevant to a * particular machine. Builds and returns machineID to DrainData map. 
@@ -203,6 +233,64 @@ public ImmutableMap getDrainData() { return builder.build(); } + /** + * Uses an {@link Interpreter} blob to clear or minimize a {@link DrainData} + * . This method can be called after a final draining to clear the data in + * the intermediate buffers. + * + * @param drainData + * : {@link DrainData} that is received after a draining. + * @return : A {@link DrainData} that remains after running an + * {@link Interpreter} blob. + */ + public DrainData minimizeDrainData(DrainData drainData) { + Interpreter.InterpreterBlobFactory interpFactory = new Interpreter.InterpreterBlobFactory(); + Blob interp = interpFactory.makeBlob(Workers + .getAllWorkersInGraph(source), interpFactory + .getDefaultConfiguration(Workers.getAllWorkersInGraph(source)), + 1, drainData); + interp.installBuffers(bufferMapWithEmptyHead()); + Runnable interpCode = interp.getCoreCode(0); + final AtomicBoolean interpFinished = new AtomicBoolean(); + interp.drain(new Runnable() { + @Override + public void run() { + interpFinished.set(true); + } + }); + while (!interpFinished.get()) + interpCode.run(); + return interp.getDrainData(); + } + + /** + * Remove the original headbuffer and replace it with a new empty buffer. 
+ */ + private ImmutableMap bufferMapWithEmptyHead() { + ImmutableMap.Builder bufMapBuilder = ImmutableMap + .builder(); + Buffer head = new AbstractReadOnlyBuffer() { + @Override + public int size() { + return 0; + } + + @Override + public Object read() { + return null; + } + }; + + Token headToken = Token.createOverallInputToken(source); + for (Map.Entry en : bufferMap.entrySet()) { + if (en.getKey().equals(headToken)) + bufMapBuilder.put(headToken, head); + else + bufMapBuilder.put(en); + } + return bufMapBuilder.build(); + } + private Set getWorkerIds(List>> blobList) { Set workerIds = new HashSet<>(); for (Set> blobworkers : blobList) { @@ -212,4 +300,163 @@ private Set getWorkerIds(List>> blobList) { } return workerIds; } + + /** + * @return the configuration + */ + public Configuration getConfiguration() { + return configuration; + } + + /** + * @param configuration + * the configuration to set + */ + public void setConfiguration(Configuration configuration) { + this.configuration = configuration; + visualizer.newConfiguration(configuration); + visualizer.newPartitionMachineMap(partitionsMachineMap); + } + + private Pair, Worker> visit(OneToOneElement stream) { + checkforDefaultOneToOneElement(stream); + ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); + stream.visit(primitiveConnector); + Worker source = (Worker) primitiveConnector.getSource(); + Worker sink = (Worker) primitiveConnector.getSink(); + + VerifyStreamGraph verifier = new VerifyStreamGraph(); + stream.visit(verifier); + return new Pair, Worker>(source, sink); + } + + /** + * TODO: Need to check for other default subtypes of {@link OneToOneElement} + * s. Now only checks for first generation children. 
+ * + * @param stream + * @throws StreamCompilationFailedException + * if stream is default subtype of OneToOneElement + */ + private void checkforDefaultOneToOneElement(OneToOneElement stream) { + if (stream.getClass() == Pipeline.class + || stream.getClass() == Splitjoin.class + || stream.getClass() == Filter.class) { + throw new StreamCompilationFailedException( + "Default subtypes of OneToOneElement are not accepted for" + + " compilation by this compiler. OneToOneElement" + + " that passed should be unique"); + } + } + + private List getConstrains() { + // TODO: Copied form DebugStreamCompiler. Need to be verified for this + // context. + List constraints = MessageConstraint + .findConstraints(source); + Set> portals = new HashSet<>(); + for (MessageConstraint mc : constraints) + portals.add(mc.getPortal()); + for (Portal portal : portals) + Portals.setConstraints(portal, constraints); + return constraints; + } + + /** + * Uses {@link StreamPathBuilder} to generate all paths in the streamGraph + * of this {@link StreamJitApp}. Check {@link StreamPathBuilder} for more + * information. + * + * @return Set of all paths in the streamGraph of this {@link StreamJitApp}. + */ + public Set> paths() { + return StreamPathBuilder.paths(streamGraph); + } + + /** + * Static information of the {@link StreamJitApp} that is essential for + * {@link StreamNode}s to set up. This configuration will be sent to + * {@link StreamNode}s when setting up a new app (Only once). + * + * @return static information of the app that is needed by steramnodes. 
+ */ + public Configuration getStaticConfiguration() { + Configuration.Builder builder = Configuration.builder(); + builder.putExtraData(GlobalConstants.JARFILE_PATH, jarFilePath) + .putExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME, + topLevelClass); + return builder.build(); + } + + /** + * For every reconfiguration, this method may be called by an appropriate + * class to get new configuration information that can be sent to all + * participating {@link StreamNode}s. Mainly this configuration contains + * partition information. + * + * @return new partition information + */ + public Configuration getDynamicConfiguration() { + Configuration.Builder builder = Configuration.builder(); + + int maxCores = maxCores(); + + Map machineCoreMap = new HashMap<>(); + for (Entry>>> machine : partitionsMachineMap + .entrySet()) { + machineCoreMap.put(machine.getKey(), machine.getValue().size() + * maxCores); + } + + PartitionParameter.Builder partParam = PartitionParameter.builder( + GlobalConstants.PARTITION, machineCoreMap); + + BlobFactory intFactory = new Interpreter.InterpreterBlobFactory(); + BlobFactory comp2Factory = new Compiler2BlobFactory(); + partParam.addBlobFactory(intFactory); + partParam.addBlobFactory(comp2Factory); + blobtoMachineMap = new HashMap<>(); + + BlobFactory bf = Options.useCompilerBlob ? comp2Factory + : intFactory; + for (Integer machineID : partitionsMachineMap.keySet()) { + List>> blobList = partitionsMachineMap + .get(machineID); + for (Set> blobWorkers : blobList) { + // TODO: One core per blob. Need to change this. + partParam.addBlob(machineID, maxCores, bf, blobWorkers); + + // TODO: Temp fix to build. 
+ Token t = Utils.getblobID(blobWorkers); + blobtoMachineMap.put(t, machineID); + } + } + + builder.addParameter(partParam.build()); + if (Options.useCompilerBlob) + builder.addSubconfiguration("blobConfigs", getConfiguration()); + else + builder.addSubconfiguration("blobConfigs", getInterpreterConfg()); + return builder.build(); + } + + private Configuration getInterpreterConfg() { + Configuration.Builder builder = Configuration.builder(); + List universe = Arrays + . asList(new ConcurrentChannelFactory()); + SwitchParameter cfParameter = new SwitchParameter( + "channelFactory", ChannelFactory.class, universe.get(0), + universe); + + builder.addParameter(cfParameter); + return builder.build(); + } + + private int maxCores() { + IntParameter maxCoreParam = configuration.getParameter("maxNumCores", + IntParameter.class); + if (maxCoreParam != null) + return maxCoreParam.getValue(); + return Options.maxNumCores; + } } diff --git a/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java b/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java index 690b1ef3..d2ee2cbf 100644 --- a/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java +++ b/src/edu/mit/streamjit/impl/distributed/StreamJitAppManager.java @@ -21,59 +21,75 @@ */ package edu.mit.streamjit.impl.distributed; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableMap; import edu.mit.streamjit.api.CompiledStream; import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.blob.Buffer; import edu.mit.streamjit.impl.blob.DrainData; -import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.common.AbstractDrainer; import 
edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; import edu.mit.streamjit.impl.distributed.common.AppStatus; +import edu.mit.streamjit.impl.distributed.common.AppStatus.AppStatusProcessor; +import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; import edu.mit.streamjit.impl.distributed.common.CTRLRMessageElement; import edu.mit.streamjit.impl.distributed.common.Command; import edu.mit.streamjit.impl.distributed.common.ConfigurationString; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.distributed.common.AppStatus.AppStatusProcessor; -import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; -import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; -import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor.ConfigType; +import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor.ConfigType; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; import edu.mit.streamjit.impl.distributed.common.Error.ErrorProcessor; +import edu.mit.streamjit.impl.distributed.common.GlobalConstants; import edu.mit.streamjit.impl.distributed.common.MiscCtrlElements.NewConInfo; +import edu.mit.streamjit.impl.distributed.common.Options; import edu.mit.streamjit.impl.distributed.common.SNDrainElement.Drained; -import 
edu.mit.streamjit.impl.distributed.common.SNDrainElement.DrainedData; import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainProcessor; +import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData; import edu.mit.streamjit.impl.distributed.common.SNException; import edu.mit.streamjit.impl.distributed.common.SNException.AddressBindException; import edu.mit.streamjit.impl.distributed.common.SNException.SNExceptionProcessor; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.SNTimeInfoProcessor; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfoProcessorImpl; import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.impl.distributed.profiler.MasterProfiler; +import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand; import edu.mit.streamjit.impl.distributed.runtimer.Controller; +import edu.mit.streamjit.util.ConfigurationUtils; public class StreamJitAppManager { - private SNDrainProcessorImpl dp = null; + private final StreamJitApp app; - private SNExceptionProcessorImpl exP = null; + private AppStatusProcessorImpl apStsPro = null; - private ErrorProcessor ep = null; + private Map conInfoMap; - private AppStatusProcessorImpl apStsPro = null; + private final ConnectionManager conManager; private final Controller controller; - private final StreamJitApp app; + private SNDrainProcessorImpl dp = null; - private final ConfigurationManager cfgManager; + private ErrorProcessor ep = null; - private boolean isRunning; + private SNExceptionProcessorImpl exP = null; + + private final MasterProfiler profiler; /** * A {@link BoundaryOutputChannel} for the head of the stream graph. 
If the @@ -83,6 +99,14 @@ public class StreamJitAppManager { */ private BoundaryOutputChannel headChannel; + private Thread headThread; + + private final Token headToken; + + private boolean isRunning; + + private volatile AppStatus status; + /** * A {@link BoundaryInputChannel} for the tail of the whole stream graph. If * the sink {@link Worker} happened to fall outside the {@link Controller}, @@ -91,44 +115,149 @@ public class StreamJitAppManager { */ private TailChannel tailChannel; - private Thread headThread; - private Thread tailThread; - private volatile AppStatus status; + private final Token tailToken; + + /** + * [2014-03-15] Just to measure the draining time + */ + AtomicReference stopwatchRef = new AtomicReference<>(); - Map conInfoMap; + private final TimeLogger logger; - public StreamJitAppManager(Controller controller, StreamJitApp app, - ConfigurationManager cfgManager) { + private final SNTimeInfoProcessor timeInfoProcessor; + + public StreamJitAppManager(Controller controller, StreamJitApp app, + ConnectionManager conManager, TimeLogger logger) { this.controller = controller; this.app = app; - this.cfgManager = cfgManager; + this.conManager = conManager; + this.logger = logger; + this.timeInfoProcessor = new SNTimeInfoProcessorImpl(logger); this.status = AppStatus.NOT_STARTED; this.exP = new SNExceptionProcessorImpl(); this.ep = new ErrorProcessorImpl(); this.apStsPro = new AppStatusProcessorImpl(controller.getAllNodeIDs() .size()); controller.registerManager(this); - controller.newApp(cfgManager.getStaticConfiguration()); // TODO: Find a - // good calling - // place. + controller.newApp(app.getStaticConfiguration()); // TODO: Find a + // good calling + // place. 
isRunning = false; + + headToken = Token.createOverallInputToken(app.source); + tailToken = Token.createOverallOutputToken(app.sink); + profiler = setupProfiler(); } - public boolean reconfigure() { - reset(); - Configuration.Builder builder = Configuration.builder(cfgManager - .getDynamicConfiguration()); + public AppStatusProcessor appStatusProcessor() { + return apStsPro; + } + + public void drain(Token blobID, DrainType drainType) { + // System.out.println("Drain requested to blob " + blobID); + if (!app.blobtoMachineMap.containsKey(blobID)) + throw new IllegalArgumentException(blobID + + " not found in the blobtoMachineMap"); + int nodeID = app.blobtoMachineMap.get(blobID); + controller.send(nodeID, + new CTRLRDrainElement.DoDrain(blobID, drainType)); + } + + public void drainingFinished(boolean isFinal) { + System.out.println("App Manager : Draining Finished..."); + + if (headChannel != null) { + try { + headThread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + if (tailChannel != null) { + if (Options.useDrainData) + if (isFinal) + tailChannel.stop(DrainType.FINAL); + else + tailChannel.stop(DrainType.INTERMEDIATE); + else + tailChannel.stop(DrainType.DISCARD); + + try { + tailThread.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + if (isFinal) + stop(); + + isRunning = false; - Map> tokenMachineMap = new HashMap<>(); - Map portIdMap = new HashMap<>(); + Stopwatch sw = stopwatchRef.get(); + if (sw != null && sw.isRunning()) { + sw.stop(); + long time = sw.elapsed(TimeUnit.MILLISECONDS); + System.out.println("Draining time is " + time + " milli seconds"); + } + } + + public void drainingStarted(boolean isFinal) { + stopwatchRef.set(Stopwatch.createStarted()); + if (headChannel != null) { + headChannel.stop(isFinal); + // [2014-03-16] Moved to drainingFinished. In any case if headThread + // blocked at tcp write, draining will also blocked. 
+ // try { + // headThread.join(); + // } catch (InterruptedException e) { + // e.printStackTrace(); + // } + } + } - conInfoMap = controller.buildConInfoMap(app.partitionsMachineMap, - app.source, app.sink); + public SNDrainProcessor drainProcessor() { + return dp; + } - builder.putExtraData(GlobalConstants.TOKEN_MACHINE_MAP, tokenMachineMap) - .putExtraData(GlobalConstants.PORTID_MAP, portIdMap); + public ErrorProcessor errorProcessor() { + return ep; + } + + public SNExceptionProcessor exceptionProcessor() { + return exP; + } + + public SNTimeInfoProcessor timeInfoProcessor() { + return timeInfoProcessor; + } + + public long getFixedOutputTime(long timeout) throws InterruptedException { + long time = tailChannel.getFixedOutputTime(timeout); + if (apStsPro.error) { + return -1l; + } + return time; + } + + public AppStatus getStatus() { + return status; + } + + public boolean isRunning() { + return isRunning; + } + + public boolean reconfigure(int multiplier) { + reset(); + Configuration.Builder builder = Configuration.builder(app + .getDynamicConfiguration()); + + conInfoMap = conManager.conInfoMap(app.getConfiguration(), + app.partitionsMachineMap, app.source, app.sink); builder.putExtraData(GlobalConstants.CONINFOMAP, conInfoMap); @@ -137,17 +266,17 @@ public boolean reconfigure() { ImmutableMap drainDataMap = app.getDrainData(); + logger.compilationStarted(); for (int nodeID : controller.getAllNodeIDs()) { ConfigurationString json = new ConfigurationString(jsonStirng, ConfigType.DYNAMIC, drainDataMap.get(nodeID)); controller.send(nodeID, json); } - setupHeadTail(conInfoMap, app.bufferMap, - Token.createOverallInputToken(app.source), - Token.createOverallOutputToken(app.sink)); + setupHeadTail(conInfoMap, app.bufferMap, multiplier); boolean isCompiled = apStsPro.waitForCompilation(); + logger.compilationFinished(isCompiled, ""); if (isCompiled) { start(); @@ -156,22 +285,52 @@ public boolean reconfigure() { isRunning = false; } + if (profiler != null) { + 
String cfgPrefix = ConfigurationUtils.getConfigPrefix(app + .getConfiguration()); + profiler.logger().newConfiguration(cfgPrefix); + } + + System.out.println("StraemJit app is running..."); + Utils.printMemoryStatus(); return isRunning; } + public void setDrainer(AbstractDrainer drainer) { + assert dp == null : "SNDrainProcessor has already been set"; + this.dp = new SNDrainProcessorImpl(drainer); + } + + public void stop() { + this.status = AppStatus.STOPPED; + tailChannel.reset(); + controller.closeAll(); + dp.drainer.stop(); + } + + private void reset() { + exP.exConInfos = new HashSet<>(); + apStsPro.reset(); + } + + private MasterProfiler setupProfiler() { + MasterProfiler p = null; + if (Options.needProfiler) { + p = new MasterProfiler(app.name); + controller.sendToAll(ProfilerCommand.START); + } + return p; + } /** * Setup the headchannel and tailchannel. * * @param cfg * @param bufferMap - * @param headToken - * @param tailToken */ - private void setupHeadTail(Map conInfoMap, - ImmutableMap bufferMap, Token headToken, - Token tailToken) { + private void setupHeadTail(Map conInfoMap, + ImmutableMap bufferMap, int multiplier) { - TCPConnectionInfo headconInfo = conInfoMap.get(headToken); + ConnectionInfo headconInfo = conInfoMap.get(headToken); assert headconInfo != null : "No head connection info exists in conInfoMap"; assert headconInfo.getSrcID() == controller.controllerNodeID || headconInfo.getDstID() == controller.controllerNodeID : "Head channel should start from the controller. 
" @@ -181,11 +340,18 @@ private void setupHeadTail(Map conInfoMap, throw new IllegalArgumentException( "No head buffer in the passed bufferMap."); - headChannel = new HeadChannel(bufferMap.get(headToken), - controller.getConProvider(), headconInfo, "headChannel - " - + headToken.toString(), 0); - - TCPConnectionInfo tailconInfo = conInfoMap.get(tailToken); + if (headconInfo instanceof TCPConnectionInfo) + headChannel = new HeadChannel.TCPHeadChannel( + bufferMap.get(headToken), controller.getConProvider(), + headconInfo, "headChannel - " + headToken.toString(), 0); + else if (headconInfo instanceof AsyncTCPConnectionInfo) + headChannel = new HeadChannel.AsyncHeadChannel( + bufferMap.get(headToken), controller.getConProvider(), + headconInfo, "headChannel - " + headToken.toString(), 0); + else + throw new IllegalStateException("Head ConnectionInfo doesn't match"); + + ConnectionInfo tailconInfo = conInfoMap.get(tailToken); assert tailconInfo != null : "No tail connection info exists in conInfoMap"; assert tailconInfo.getSrcID() == controller.controllerNodeID || tailconInfo.getDstID() == controller.controllerNodeID : "Tail channel should ends at the controller. 
" @@ -195,9 +361,30 @@ private void setupHeadTail(Map conInfoMap, throw new IllegalArgumentException( "No tail buffer in the passed bufferMap."); - tailChannel = new TailChannel(bufferMap.get(tailToken), - controller.getConProvider(), tailconInfo, "tailChannel - " - + tailToken.toString(), 0, 1000); + int skipCount = Math.max(Options.outputCount, multiplier * 5); + tailChannel = tailChannel(bufferMap.get(tailToken), tailconInfo, + skipCount); + } + + private TailChannel tailChannel(Buffer buffer, ConnectionInfo conInfo, + int skipCount) { + String appName = app.name; + int steadyCount = Options.outputCount; + int debugLevel = 0; + String bufferTokenName = "tailChannel - " + tailToken.toString(); + ConnectionProvider conProvider = controller.getConProvider(); + String cfgPrefix = ConfigurationUtils.getConfigPrefix(app + .getConfiguration()); + switch (Options.tailChannel) { + case 1 : + return new TailChannels.BlockingTailChannel1(buffer, + conProvider, conInfo, bufferTokenName, debugLevel, + skipCount, steadyCount, appName, cfgPrefix); + default : + return new TailChannels.BlockingTailChannel2(buffer, + conProvider, conInfo, bufferTokenName, debugLevel, + skipCount, steadyCount, appName, cfgPrefix); + } } /** @@ -216,101 +403,109 @@ private void start() { controller.sendToAll(Command.START); if (tailChannel != null) { - tailChannel.reset(); tailThread = new Thread(tailChannel.getRunnable(), tailChannel.name()); tailThread.start(); } } - public boolean isRunning() { - return isRunning; + public MasterProfiler getProfiler() { + return profiler; } - public void drainingStarted(boolean isFinal) { - if (headChannel != null) { - headChannel.stop(isFinal); - try { - headThread.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } + /** + * {@link AppStatusProcessor} at {@link Controller} side. 
+ * + * @author Sumanan sumanan@mit.edu + * @since Aug 11, 2013 + */ + private class AppStatusProcessorImpl implements AppStatusProcessor { + + private boolean compilationError; + + private CountDownLatch compileLatch; + + private volatile boolean error; + + private final int noOfnodes; + + private AppStatusProcessorImpl(int noOfnodes) { + this.noOfnodes = noOfnodes; } - } - public void drain(Token blobID, boolean isFinal) { - // System.out.println("Drain requested to blob " + blobID); - if (!app.blobtoMachineMap.containsKey(blobID)) - throw new IllegalArgumentException(blobID - + " not found in the blobtoMachineMap"); - int nodeID = app.blobtoMachineMap.get(blobID); - controller - .send(nodeID, new CTRLRDrainElement.DoDrain(blobID, !isFinal)); - } + @Override + public void processCOMPILATION_ERROR() { + System.err.println("Compilation error"); + this.compilationError = true; + compileLatch.countDown(); + } - public void drainingFinished(boolean isFinal) { - System.out.println("App Manager : Draining Finished..."); - if (tailChannel != null) { - if (isFinal) - tailChannel.stop(1); - else if (GlobalConstants.useDrainData) - tailChannel.stop(2); - else - tailChannel.stop(3); - try { - tailThread.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } + @Override + public void processCOMPILED() { + compileLatch.countDown(); } - if (isFinal) { - this.status = AppStatus.STOPPED; + @Override + public void processERROR() { + this.error = true; + // This will release the OpenTuner thread which is waiting for fixed + // output. 
tailChannel.reset(); - controller.closeAll(); } - isRunning = false; - } - public void awaitForFixInput() throws InterruptedException { - tailChannel.awaitForFixInput(); - } + @Override + public void processNO_APP() { + } - public void setDrainer(AbstractDrainer drainer) { - assert dp == null : "SNDrainProcessor has already been set"; - this.dp = new SNDrainProcessorImpl(drainer); - } + @Override + public void processNOT_STARTED() { + } - public SNDrainProcessor drainProcessor() { - return dp; - } + @Override + public void processRUNNING() { + } - public SNExceptionProcessor exceptionProcessor() { - return exP; - } + @Override + public void processSTOPPED() { + } - public ErrorProcessor errorProcessor() { - return ep; - } + private void reset() { + compileLatch = new CountDownLatch(noOfnodes); + this.compilationError = false; + this.error = false; + } - public AppStatusProcessor appStatusProcessor() { - return apStsPro; + private boolean waitForCompilation() { + try { + compileLatch.await(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + return !this.compilationError; + } } - public AppStatus getStatus() { - return status; - } + /** + * {@link ErrorProcessor} at {@link Controller} side. + * + * @author Sumanan sumanan@mit.edu + * @since Aug 11, 2013 + */ + private class ErrorProcessorImpl implements ErrorProcessor { - private void reset() { - exP.exConInfos = new HashSet<>(); - apStsPro.reset(); - } + @Override + public void processFILE_NOT_FOUND() { + System.err + .println("No application jar file in streamNode. Terminating..."); + stop(); + } - public void stop() { - this.status = AppStatus.STOPPED; - tailChannel.reset(); - controller.closeAll(); - dp.drainer.stop(); + @Override + public void processWORKER_NOT_FOUND() { + System.err + .println("No top level class in the jar file. 
Terminating..."); + stop(); + } } /** @@ -333,9 +528,9 @@ public void process(Drained drained) { } @Override - public void process(DrainedData drainedData) { - if (GlobalConstants.useDrainData) - drainer.newDrainData(drainedData); + public void process(SNDrainedData snDrainedData) { + if (Options.useDrainData) + drainer.newSNDrainData(snDrainedData); } } @@ -343,16 +538,12 @@ private class SNExceptionProcessorImpl implements SNExceptionProcessor { private final Object abExLock = new Object(); - private Set exConInfos; + private Set exConInfos; private SNExceptionProcessorImpl() { exConInfos = new HashSet<>(); } - @Override - public void process(SNException ex) { - } - @Override public void process(AddressBindException abEx) { synchronized (abExLock) { @@ -363,7 +554,7 @@ public void process(AddressBindException abEx) { } Token t = null; - for (Map.Entry entry : conInfoMap + for (Map.Entry entry : conInfoMap .entrySet()) { if (abEx.conInfo.equals(entry.getValue())) { t = entry.getKey(); @@ -376,8 +567,8 @@ public void process(AddressBindException abEx) { "Illegal TCP connection - " + abEx.conInfo); } - TCPConnectionInfo coninfo = controller - .getNewTCPConInfo(abEx.conInfo); + ConnectionInfo coninfo = conManager + .replaceConInfo(abEx.conInfo); exConInfos.add(abEx.conInfo); @@ -386,93 +577,9 @@ public void process(AddressBindException abEx) { controller.send(coninfo.getDstID(), msg); } } - } - - /** - * {@link ErrorProcessor} at {@link Controller} side. - * - * @author Sumanan sumanan@mit.edu - * @since Aug 11, 2013 - */ - private class ErrorProcessorImpl implements ErrorProcessor { - - @Override - public void processFILE_NOT_FOUND() { - System.err - .println("No application jar file in streamNode. Terminating..."); - stop(); - } - - @Override - public void processWORKER_NOT_FOUND() { - System.err - .println("No top level class in the jar file. Terminating..."); - stop(); - } - } - - /** - * {@link AppStatusProcessor} at {@link Controller} side. 
- * - * @author Sumanan sumanan@mit.edu - * @since Aug 11, 2013 - */ - private class AppStatusProcessorImpl implements AppStatusProcessor { - - private CountDownLatch compileLatch; - - private boolean compilationError; - - private final int noOfnodes; - - private AppStatusProcessorImpl(int noOfnodes) { - this.noOfnodes = noOfnodes; - } @Override - public void processRUNNING() { - } - - @Override - public void processSTOPPED() { - } - - @Override - public void processERROR() { - } - - @Override - public void processNOT_STARTED() { - } - - @Override - public void processNO_APP() { - } - - @Override - public void processCOMPILED() { - compileLatch.countDown(); - } - - @Override - public void processCOMPILATION_ERROR() { - System.err.println("Compilation error"); - this.compilationError = true; - compileLatch.countDown(); - } - - private void reset() { - compileLatch = new CountDownLatch(noOfnodes); - this.compilationError = false; - } - - private boolean waitForCompilation() { - try { - compileLatch.await(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - return !this.compilationError; + public void process(SNException ex) { } } } diff --git a/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java b/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java new file mode 100644 index 00000000..dbef4b31 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/StreamPathBuilder.java @@ -0,0 +1,170 @@ +package edu.mit.streamjit.impl.distributed; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import com.google.common.collect.ImmutableSet; + +import edu.mit.streamjit.api.Filter; +import edu.mit.streamjit.api.Joiner; +import edu.mit.streamjit.api.OneToOneElement; +import edu.mit.streamjit.api.Pipeline; +import edu.mit.streamjit.api.Splitjoin; +import edu.mit.streamjit.api.Splitter; +import edu.mit.streamjit.api.StreamVisitor; 
+import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.common.ConnectWorkersVisitor; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.test.apps.filterbank6.FilterBank6; + +/** + * Generate all stream paths in a stream graph. + * + * @author sumanan + * @since 13 Jan, 2015 + */ +public class StreamPathBuilder { + + /** + * streamGraph must be connected before requesting for paths. Use + * {@link ConnectWorkersVisitor} to connect the streamGraph. + * + * @param streamGraph + * @return Set of all paths in the streamGraph. + */ + public static Set> paths(OneToOneElement streamGraph) { + PathVisitor v = new PathVisitor(); + streamGraph.visit(v); + return v.currentUnfinishedPathSet; + } + + private static class PathVisitor extends StreamVisitor { + + /** + * Paths those are currently being built. These paths will get extended + * as StreamPathBuilder visits through the stream graph. + */ + private Set> currentUnfinishedPathSet; + + /** + * Keeps track of paths to all {@link Splitter} encountered in a stack. + * Once corresponding {@link Joiner} is visited, path set will be popped + * from this stack. + */ + private Deque>> pathToSplitterStack; + + /** + * Unfinished path sets which are waiting for a correct joiner. Path set + * will be popped from this stack once correct joiner is reached. 
+ */ + private Deque>> waitingForJoinerStack; + + private PathVisitor() { + currentUnfinishedPathSet = new HashSet<>(); + pathToSplitterStack = new ArrayDeque<>(); + waitingForJoinerStack = new ArrayDeque<>(); + } + + @Override + public void beginVisit() { + currentUnfinishedPathSet.clear(); + pathToSplitterStack.clear(); + waitingForJoinerStack.clear(); + currentUnfinishedPathSet.add(new LinkedList()); + } + + @Override + public void visitFilter(Filter filter) { + addToCurrentPath(filter); + } + + @Override + public boolean enterPipeline(Pipeline pipeline) { + return true; + } + + @Override + public void exitPipeline(Pipeline pipeline) { + } + + @Override + public boolean enterSplitjoin(Splitjoin splitjoin) { + return true; + } + + @Override + public void visitSplitter(Splitter splitter) { + addToCurrentPath(splitter); + pathToSplitterStack.push(currentUnfinishedPathSet); + waitingForJoinerStack.push(new HashSet>()); + } + + @Override + public boolean enterSplitjoinBranch(OneToOneElement element) { + currentUnfinishedPathSet = new HashSet>(); + for (List splitterPath : pathToSplitterStack.peek()) { + currentUnfinishedPathSet.add(new LinkedList( + splitterPath)); + } + return true; + } + + @Override + public void exitSplitjoinBranch(OneToOneElement element) { + waitingForJoinerStack.peek().addAll(currentUnfinishedPathSet); + } + + @Override + public void visitJoiner(Joiner joiner) { + currentUnfinishedPathSet = waitingForJoinerStack.pop(); + addToCurrentPath(joiner); + pathToSplitterStack.pop(); + } + + @Override + public void exitSplitjoin(Splitjoin splitjoin) { + } + + @Override + public void endVisit() { + if (!waitingForJoinerStack.isEmpty()) + throw new IllegalStateException("waitingForJoiner not empty"); + if (!pathToSplitterStack.isEmpty()) + throw new IllegalStateException("pathToSplitter not empty"); + // printPaths(); + } + + /** + * Extends all current unfinished path set with the {@link Worker} w. 
+ * + * @param w + */ + private void addToCurrentPath(Worker w) { + int id = Workers.getIdentifier(w); + for (List path : currentUnfinishedPathSet) + path.add(id); + } + + /** + * Prints all paths in the stream graph. + */ + private void printPaths() { + for (List path : currentUnfinishedPathSet) { + for (Integer i : path) { + System.out.print(i.toString() + "->"); + } + System.out.println(); + } + } + } + + public static void main(String[] args) { + OneToOneElement stream = new FilterBank6.FilterBankPipeline(); + new StreamJitApp<>(stream); // Connects the stream into stream graph. + StreamPathBuilder.paths(stream); + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/TailChannel.java b/src/edu/mit/streamjit/impl/distributed/TailChannel.java index 791e6a3e..dd077533 100644 --- a/src/edu/mit/streamjit/impl/distributed/TailChannel.java +++ b/src/edu/mit/streamjit/impl/distributed/TailChannel.java @@ -21,93 +21,67 @@ */ package edu.mit.streamjit.impl.distributed; -import java.io.FileWriter; -import java.io.IOException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import com.google.common.base.Stopwatch; - -import edu.mit.streamjit.impl.blob.Buffer; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; -import edu.mit.streamjit.impl.distributed.node.TCPInputChannel; - -public class TailChannel extends TCPInputChannel { - - int limit; - - int count; - - private volatile CountDownLatch latch; - - public TailChannel(Buffer buffer, TCPConnectionProvider conProvider, - TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint, - int limit) { - super(buffer, conProvider, conInfo, bufferTokenName, debugPrint); - this.limit = limit; - count = 0; - latch = new CountDownLatch(1); - if (!GlobalConstants.tune) - new 
performanceLogger().start(); - } - - @Override - public void receiveData() { - super.receiveData(); - count++; - // System.err.println(count); - if (count > limit) - latch.countDown(); - } - - public void awaitForFixInput() throws InterruptedException { - latch.await(); - } - - public void reset() { - latch.countDown(); - latch = new CountDownLatch(1); - count = 0; - } - - private class performanceLogger extends Thread { - - public void run() { - int i = 0; - FileWriter writer; - try { - writer = new FileWriter("FixedOutPut.txt"); - } catch (IOException e1) { - e1.printStackTrace(); - return; - } - while (++i < 30) { - try { - Stopwatch stopwatch = Stopwatch.createStarted(); - latch.await(); - stopwatch.stop(); - Long time = stopwatch.elapsed(TimeUnit.MILLISECONDS); - - System.out.println("Execution time is " + time - + " milli seconds"); - - writer.write(time.toString()); - writer.write('\n'); - - reset(); - - } catch (InterruptedException | IOException e) { - e.printStackTrace(); - } - } - try { - writer.close(); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - } -} \ No newline at end of file +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; + +/** + * This is a {@link BoundaryInputChannel} with counting facility. + * Implementations need to count the number of elements received and provide + * other services based on the count. + * + * @author sumanan + * @since 24 Jan, 2015 + */ +public interface TailChannel extends BoundaryInputChannel { + + /** + * @return Number of elements received after the last reset() + */ + public int count(); + + /** + * Returns the time to receive fixed number of outputs. The fixed number can + * be hard coded in side an implementation or passed as a constructor + * argument. + *

    + * The caller will be blocked until the fixed number of outputs are + * received. + * + * @return the time(ms) to receive fixed number of outputs. + * + * @throws InterruptedException + */ + public long getFixedOutputTime() throws InterruptedException; + + /** + * Returns the time to receive fixed number of outputs. The fixed number can + * be hard coded in side an implementation or passed as a constructor + * argument. Waits at most timeout time to receive fixed number + * of output. Returns -1 if timeout occurred. + * + *

    + * If timeout < 1, then the behavior this method is equivalent to calling + * {@link #getFixedOutputTime()}. + *

    + * + *

    + * The caller will be blocked until the fixed number of output is received + * or timeout occurred, whatever happens early. + * + * @param timeout + * Wait at most timeout time to receive fixed number of output. + * + * @return the time(ms) to receive fixed number of outputs or -1 if timeout + * occurred. + * + * @throws InterruptedException + */ + public long getFixedOutputTime(long timeout) throws InterruptedException; + + /** + * Resets all counters and other resources. Any thread blocked on either + * {@link #getFixedOutputTime()} or {@link #getFixedOutputTime(long)} should + * be released after this call. + */ + public void reset(); + +} diff --git a/src/edu/mit/streamjit/impl/distributed/TailChannels.java b/src/edu/mit/streamjit/impl/distributed/TailChannels.java new file mode 100644 index 00000000..b4135d82 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/TailChannels.java @@ -0,0 +1,521 @@ +package edu.mit.streamjit.impl.distributed; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.google.common.base.Stopwatch; + +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel; + +public class TailChannels { + + private static class PerformanceLogger extends Thread { + + private 
AtomicBoolean stopFlag; + + private final String appName; + + private final TailChannel tailChannel; + + private PerformanceLogger(TailChannel tailChannel, String appName) { + super("PerformanceLogger"); + stopFlag = new AtomicBoolean(false); + this.appName = appName; + this.tailChannel = tailChannel; + } + + public void run() { + int i = 0; + FileWriter writer; + try { + writer = new FileWriter(String.format("%s%sFixedOutPut.txt", + appName, File.separator)); + } catch (IOException e1) { + e1.printStackTrace(); + return; + } + + writeInitialInfo(writer); + + Long sum = 0l; + + while (++i < 10 && !stopFlag.get()) { + try { + Long time = tailChannel.getFixedOutputTime(); + + sum += time; + System.out.println("Execution time is " + time + + " milli seconds"); + + writer.write(time.toString()); + writer.write('\n'); + writer.flush(); + } catch (InterruptedException | IOException e) { + e.printStackTrace(); + } + } + try { + writer.write("Average = " + sum / (i - 1)); + writer.write('\n'); + writer.flush(); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + + System.out.println("PerformanceLogger exits. App will run till " + + "inputdata exhausted."); + } + + private void writeInitialInfo(FileWriter writer) { + System.out.println(String.format( + "PerformanceLogger starts to log the time to" + + " produce %d number of outputs", + Options.outputCount)); + + try { + writer.write(String.format("GlobalConstants.outputCount = %d", + Options.outputCount)); + writer.write('\n'); + writer.flush(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + public void stopLogging() { + stopFlag.set(true); + } + } + + /** + * Periodically prints the number of outputs received by a + * {@link TailChannel}. + */ + private static class OutputCountPrinter { + + private final String appName; + + private final TailChannel tailChannel; + + /** + * The no of outputs received at the end of last period. 
+ */ + private int lastCount; + + private FileWriter writer; + + private RuntimeMXBean rb = ManagementFactory.getRuntimeMXBean(); + + private ScheduledExecutorService scheduledExecutorService; + + OutputCountPrinter(TailChannel tailChannel, String appName) { + this.tailChannel = tailChannel; + this.appName = appName; + printOutputCount(); + } + + private void printOutputCount() { + if (Options.printOutputCountPeriod < 1) + return; + writer = Utils.fileWriter(appName, "outputCount.txt", true); + lastCount = 0; + scheduledExecutorService = Executors + .newSingleThreadScheduledExecutor(); + scheduledExecutorService.scheduleAtFixedRate( + new Runnable() { + @Override + public void run() { + int currentCount = tailChannel.count(); + int newOutputs = currentCount - lastCount; + lastCount = currentCount; + String msg = String.format("%d\t\t%d\t\t%d\n", + rb.getUptime(), currentCount, newOutputs); + try { + writer.write(msg); + writer.flush(); + } catch (IOException e) { + e.printStackTrace(); + } + } + }, Options.printOutputCountPeriod, + Options.printOutputCountPeriod, + TimeUnit.MILLISECONDS); + } + + private void stop() { + if (scheduledExecutorService != null) + scheduledExecutorService.shutdown(); + if (writer != null) + try { + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * This method writes to the file in a non thread safe way. But this is + * enough to serve the purpose. + *

    + * TODO: This method is just for debugging purpose, Remove this method + * and its usage later. + */ + private boolean write(String msg) { + if (writer != null) + try { + writer.write(msg); + return true; + } catch (Exception e) { + } + return false; + } + } + + private static abstract class AbstractBlockingTailChannel + extends + BlockingInputChannel implements TailChannel { + + protected final int skipCount; + + protected final int totalCount; + + protected int count; + + private PerformanceLogger pLogger = null; + + private OutputCountPrinter outputCountPrinter = null; + + private final String cfgPrefix; + + protected abstract void releaseAndInitilize(); + + /** + * @param buffer + * @param conProvider + * @param conInfo + * @param bufferTokenName + * @param debugLevel + * @param skipCount + * : Skips this amount of output before evaluating the + * running time. This is added to avoid the noise from init + * schedule and the drain data. ( i.e., In order to get real + * steady state execution time) + * @param steadyCount + * : {@link #getFixedOutputTime()} calculates the time taken + * to get this amount of outputs ( after skipping skipCount + * number of outputs at the beginning). + */ + public AbstractBlockingTailChannel(Buffer buffer, + ConnectionProvider conProvider, ConnectionInfo conInfo, + String bufferTokenName, int debugLevel, int skipCount, + int steadyCount, String appName, String cfgPrefix) { + super(buffer, conProvider, conInfo, bufferTokenName, debugLevel); + this.skipCount = skipCount; + this.totalCount = steadyCount + skipCount; + count = 0; + this.cfgPrefix = cfgPrefix; + if (Options.tune == 0) { + // TODO: Leaks this object from the constructor. May cause + // subtle bugs. Re-factor it. 
+ pLogger = new PerformanceLogger(this, appName); + pLogger.start(); + } + if (Options.printOutputCountPeriod > 0) + outputCountPrinter = new OutputCountPrinter(this, appName); + } + + @Override + public void stop(DrainType type) { + super.stop(type); + if (pLogger != null) { + releaseAndInitilize(); + pLogger.stopLogging(); + } + if (outputCountPrinter != null) + outputCountPrinter.stop(); + } + + @Override + public int count() { + return count; + } + + protected long normalizedTime(long time) { + return (Options.outputCount * time) / (totalCount - skipCount); + } + + /** + * Opposite to the {@link #normalizedTime(long)}'s equation. + * time=unnormalizedTime(normalizedTime(time)) + */ + protected long unnormalizedTime(long time) { + return (time * (totalCount - skipCount)) / Options.outputCount; + } + + /** + * Logs the time reporting event. + * + * TODO: This method is just for debugging purpose, Remove this method + * and its usage later. + */ + protected void reportingTime(long time) { + if (outputCountPrinter != null) { + String msg = String.format( + "Reporting-%s.cfg,time=%d,TotalCount=%d\n", cfgPrefix, + time, count); + outputCountPrinter.write(msg); + } + } + } + + public static final class BlockingTailChannel1 + extends + AbstractBlockingTailChannel { + + private volatile CountDownLatch steadyLatch; + + private volatile CountDownLatch skipLatch; + + private boolean skipLatchUp; + + private boolean steadyLatchUp; + + /** + * @param buffer + * @param conProvider + * @param conInfo + * @param bufferTokenName + * @param debugLevel + * For all above 5 parameters, see + * {@link BlockingInputChannel#BlockingInputChannel(Buffer, ConnectionProvider, ConnectionInfo, String, int)} + * @param skipCount + * : Skips this amount of output before evaluating the + * running time. This is added to avoid the noise from init + * schedule and the drain data. 
( i.e., In order to get real + * steady state execution time) + * @param steadyCount + * : {@link #getFixedOutputTime()} calculates the time taken + * to get this amount of outputs ( after skipping skipCount + * number of outputs at the beginning). + */ + public BlockingTailChannel1(Buffer buffer, + ConnectionProvider conProvider, ConnectionInfo conInfo, + String bufferTokenName, int debugLevel, int skipCount, + int steadyCount, String appName, String cfgPrefix) { + super(buffer, conProvider, conInfo, bufferTokenName, debugLevel, + skipCount, steadyCount, appName, cfgPrefix); + steadyLatch = new CountDownLatch(1); + skipLatch = new CountDownLatch(1); + this.skipLatchUp = true; + this.steadyLatchUp = true; + } + + @Override + public void receiveData() { + super.receiveData(); + count++; + + if (skipLatchUp && count > skipCount) { + skipLatch.countDown(); + skipLatchUp = false; + } + + if (steadyLatchUp && count > totalCount) { + steadyLatch.countDown(); + steadyLatchUp = false; + } + } + + /** + * Skips skipCount amount of output at the beginning and then calculates + * the time taken to get steadyCount amount of outputs. skipCount is + * added to avoid the noise from init schedule and the drain data. ( + * i.e., In order to get real steady state execution time). + * + * @return time in MILLISECONDS. 
+ * @throws InterruptedException + */ + public long getFixedOutputTime() throws InterruptedException { + releaseAndInitilize(); + skipLatch.await(); + Stopwatch stopwatch = Stopwatch.createStarted(); + steadyLatch.await(); + stopwatch.stop(); + long time = stopwatch.elapsed(TimeUnit.MILLISECONDS); + reportingTime(time); + return normalizedTime(time); + } + + @Override + public long getFixedOutputTime(long timeout) + throws InterruptedException { + if (timeout < 1) + return getFixedOutputTime(); + + timeout = unnormalizedTime(timeout); + releaseAndInitilize(); + skipLatch.await(); + Stopwatch stopwatch = Stopwatch.createStarted(); + while (steadyLatch.getCount() > 0 + && stopwatch.elapsed(TimeUnit.MILLISECONDS) < timeout) { + Thread.sleep(100); + } + + stopwatch.stop(); + long time = stopwatch.elapsed(TimeUnit.MILLISECONDS); + reportingTime(time); + if (time > timeout) + return -1; + return normalizedTime(time); + } + + /** + * Releases all latches, and re-initializes the latches and counters. + */ + protected void releaseAndInitilize() { + count = 0; + skipLatch.countDown(); + skipLatch = new CountDownLatch(1); + skipLatchUp = true; + steadyLatch.countDown(); + steadyLatch = new CountDownLatch(1); + steadyLatchUp = true; + } + + public void reset() { + steadyLatch.countDown(); + skipLatch.countDown(); + count = 0; + } + } + + public static final class BlockingTailChannel2 + extends + AbstractBlockingTailChannel { + + private volatile CountDownLatch skipLatch; + + private boolean skipLatchUp; + + private final Stopwatch stopWatch; + + /** + * @param buffer + * @param conProvider + * @param conInfo + * @param bufferTokenName + * @param debugLevel + * For all above 5 parameters, see + * {@link BlockingInputChannel#BlockingInputChannel(Buffer, ConnectionProvider, ConnectionInfo, String, int)} + * @param skipCount + * : Skips this amount of output before evaluating the + * running time. This is added to avoid the noise from init + * schedule and the drain data. 
( i.e., In order to get real + * steady state execution time) + * @param steadyCount + * : {@link #getFixedOutputTime()} calculates the time taken + * to get this amount of outputs ( after skipping skipCount + * number of outputs at the beginning). + */ + public BlockingTailChannel2(Buffer buffer, + ConnectionProvider conProvider, ConnectionInfo conInfo, + String bufferTokenName, int debugLevel, int skipCount, + int steadyCount, String appName, String cfgPrefix) { + super(buffer, conProvider, conInfo, bufferTokenName, debugLevel, + skipCount, steadyCount, appName, cfgPrefix); + stopWatch = Stopwatch.createUnstarted(); + skipLatch = new CountDownLatch(1); + this.skipLatchUp = true; + } + + @Override + public void receiveData() { + super.receiveData(); + count++; + + if (skipLatchUp && count > skipCount) { + skipLatch.countDown(); + skipLatchUp = false; + } + + if (stopWatch.isRunning() && count > totalCount) { + stopWatch.stop(); + } + } + + /** + * Skips skipCount amount of output at the beginning and then calculates + * the time taken to get steadyCount amount of outputs. skipCount is + * added to avoid the noise from init schedule and the drain data. ( + * i.e., In order to get real steady state execution time). + * + * @return time in MILLISECONDS. 
+ * @throws InterruptedException + */ + public long getFixedOutputTime() throws InterruptedException { + releaseAndInitilize(); + skipLatch.await(); + stopWatch.start(); + while (stopWatch.isRunning()) + Thread.sleep(250); + long time = stopWatch.elapsed(TimeUnit.MILLISECONDS); + reportingTime(time); + return normalizedTime(time); + } + + @Override + public long getFixedOutputTime(long timeout) + throws InterruptedException { + if (timeout < 1) + return getFixedOutputTime(); + + timeout = unnormalizedTime(timeout); + releaseAndInitilize(); + skipLatch.await(); + stopWatch.start(); + while (stopWatch.isRunning() + && stopWatch.elapsed(TimeUnit.MILLISECONDS) < timeout) { + Thread.sleep(250); + } + + long time = stopWatch.elapsed(TimeUnit.MILLISECONDS); + reportingTime(time); + if (time > timeout) + return -1; + else + return normalizedTime(time); + } + + /** + * Releases all latches, and re-initializes the latches and counters. + */ + protected void releaseAndInitilize() { + count = 0; + skipLatch.countDown(); + skipLatch = new CountDownLatch(1); + skipLatchUp = true; + stopWatch.reset(); + } + + public void reset() { + stopWatch.reset(); + skipLatch.countDown(); + count = 0; + } + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java b/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java new file mode 100644 index 00000000..593a91e4 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/TimeLoggers.java @@ -0,0 +1,297 @@ +package edu.mit.streamjit.impl.distributed; + +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; + +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.distributed.common.Utils; + +/** + * Collection of various {@link TimeLogger} implementations. 
+ * + * @author Sumanan sumanan@mit.edu + * @since Nov 24, 2014 + * + */ +public class TimeLoggers { + + /** + * Creates three files named compileTime.txt, runTime.txt and drainTime.txt + * inside app.name directory, and logs the time information. + * + * @author sumanan + * @since Nov 25, 2014 + */ + public static class FileTimeLogger extends TimeLoggerImpl { + + public FileTimeLogger(String appName) { + super(Utils.fileWriter(appName, "compileTime.txt"), Utils + .fileWriter(appName, "runTime.txt"), Utils.fileWriter( + appName, "drainTime.txt"), Utils.fileWriter(appName, + "searchTime.txt")); + } + } + + /** + * Logs nothing. + */ + public static class NoTimeLogger implements TimeLogger { + + @Override + public void compilationFinished(boolean isCompiled, String msg) { + } + + @Override + public void compilationStarted() { + + } + + @Override + public void drainingFinished(String msg) { + } + + @Override + public void drainingStarted() { + } + + @Override + public void logCompileTime(long time) { + } + + @Override + public void logCompileTime(String msg) { + } + + @Override + public void logDrainDataCollectionTime(long time) { + } + + @Override + public void logDrainTime(long time) { + } + + @Override + public void logDrainTime(String msg) { + } + + @Override + public void logRunTime(long time) { + } + + @Override + public void logRunTime(String msg) { + } + + @Override + public void newConfiguration(String cfgPrefix) { + } + + @Override + public void drainDataCollectionStarted() { + } + + @Override + public void drainDataCollectionFinished(String msg) { + } + + @Override + public void logSearchTime(long time) { + } + } + + /** + * Prints the values to the StdOut. 
+ * + */ + public static class PrintTimeLogger extends TimeLoggerImpl { + + public PrintTimeLogger() { + super(System.out, System.out, System.out, System.out); + } + } + + private static class TimeLoggerImpl implements TimeLogger { + + private final OutputStreamWriter compileTimeWriter; + + private final OutputStreamWriter drainTimeWriter; + + private final OutputStreamWriter runTimeWriter; + + private final OutputStreamWriter searchTimeWriter; + + private int reconfigNo = 0; + + private Stopwatch compileTimeSW = null; + + private Stopwatch drainTimeSW = null; + + private Stopwatch drainDataCollectionTimeSW = null; + + private Stopwatch tuningRoundSW = null; + + TimeLoggerImpl(OutputStream compileOS, OutputStream runOs, + OutputStream drainOs, OutputStream searchOs) { + this(getOSWriter(compileOS), getOSWriter(runOs), + getOSWriter(drainOs), getOSWriter(searchOs)); + } + + TimeLoggerImpl(OutputStreamWriter compileW, OutputStreamWriter runW, + OutputStreamWriter drainW, OutputStreamWriter searchW) { + compileTimeWriter = compileW; + runTimeWriter = runW; + drainTimeWriter = drainW; + searchTimeWriter = searchW; + } + + @Override + public void compilationFinished(boolean isCompiled, String msg) { + if (compileTimeSW != null) { + compileTimeSW.stop(); + long time = compileTimeSW.elapsed(TimeUnit.MILLISECONDS); + logCompileTime(time); + } + } + + @Override + public void compilationStarted() { + compileTimeSW = Stopwatch.createStarted(); + } + + @Override + public void drainingFinished(String msg) { + if (drainTimeSW != null && drainTimeSW.isRunning()) { + drainTimeSW.stop(); + long time = drainTimeSW.elapsed(TimeUnit.MILLISECONDS); + logDrainTime(time); + } + } + + @Override + public void drainingStarted() { + drainTimeSW = Stopwatch.createStarted(); + } + + @Override + public void drainDataCollectionStarted() { + drainDataCollectionTimeSW = Stopwatch.createStarted(); + } + + @Override + public void drainDataCollectionFinished(String msg) { + if 
(drainDataCollectionTimeSW != null) { + drainDataCollectionTimeSW.stop(); + long time = drainDataCollectionTimeSW + .elapsed(TimeUnit.MILLISECONDS); + logDrainDataCollectionTime(time); + } + } + + @Override + public void logCompileTime(long time) { + write(compileTimeWriter, + String.format("Total compile time %dms\n", time)); + } + + @Override + public void logCompileTime(String msg) { + write(compileTimeWriter, msg); + } + + @Override + public void logDrainDataCollectionTime(long time) { + write(drainTimeWriter, + String.format("Drain data collection time is %dms\n", time)); + } + + @Override + public void logDrainTime(long time) { + write(drainTimeWriter, String.format("Drain time is %dms\n", time)); + } + + @Override + public void logDrainTime(String msg) { + write(drainTimeWriter, msg); + } + + @Override + public void logRunTime(long time) { + write(runTimeWriter, + String.format("Execution time is %dms\n", time)); + } + + @Override + public void logRunTime(String msg) { + write(runTimeWriter, msg); + } + + /** + * [24-02-2015] When a new configuration come from the OpenTuner, we + * drain previous configuration. So the drainTime file should be updated + * with previous configuration prefix. 
+ */ + String prevcfgPrefix = ""; + + @Override + public void newConfiguration(String cfgPrefix) { + reconfigNo++; + if (cfgPrefix == null || cfgPrefix.isEmpty()) + cfgPrefix = new Integer(reconfigNo).toString(); + + updateTuningRoundTime(); + + String msg = String + .format("----------------------------%s----------------------------\n", + cfgPrefix); + String msg1 = String + .format("----------------------------%s----------------------------\n", + prevcfgPrefix); + write(compileTimeWriter, msg); + write(runTimeWriter, msg); + write(searchTimeWriter, msg); + write(drainTimeWriter, msg1); + prevcfgPrefix = cfgPrefix; + } + + private void updateTuningRoundTime() { + long time = 0; + if (tuningRoundSW == null) + tuningRoundSW = Stopwatch.createStarted(); + else { + tuningRoundSW.stop(); + time = tuningRoundSW.elapsed(TimeUnit.SECONDS); + tuningRoundSW.reset(); + tuningRoundSW.start(); + write(runTimeWriter, + String.format("Tuning round time %dS\n", time)); + } + } + + private static OutputStreamWriter getOSWriter(OutputStream os) { + if (os == null) + return null; + return new OutputStreamWriter(os); + } + + private void write(OutputStreamWriter osWriter, String msg) { + if (osWriter != null) { + try { + osWriter.write(msg); + osWriter.flush(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + @Override + public void logSearchTime(long time) { + write(searchTimeWriter, + String.format("Search time is %dms\n", time)); + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/Visualizer.java b/src/edu/mit/streamjit/impl/distributed/Visualizer.java new file mode 100644 index 00000000..a52e5096 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/Visualizer.java @@ -0,0 +1,303 @@ +package edu.mit.streamjit.impl.distributed; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import 
java.util.Set; + +import edu.mit.streamjit.api.Filter; +import edu.mit.streamjit.api.Joiner; +import edu.mit.streamjit.api.OneToOneElement; +import edu.mit.streamjit.api.Pipeline; +import edu.mit.streamjit.api.Splitjoin; +import edu.mit.streamjit.api.Splitter; +import edu.mit.streamjit.api.StreamVisitor; +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.util.ConfigurationUtils; + +/** + * Interface to visualize a stream graph and it's configurations. Use the + * constructor to get the stream graph. + * + * @author Sumanan + * @since 29 Dec, 2014 + */ +public interface Visualizer { + + /** + * Call this method with new configuration, whenever the configuration + * changes. + * + * @param cfg + */ + public void newConfiguration(Configuration cfg); + + /** + * Partitions Machine Map of the current configuration. Only the + * {@link PartitionManager} has the information to generate this map. + * Visualizer has no glue to generate this partitionsMachineMap. + * + * @param partitionsMachineMap + */ + public void newPartitionMachineMap( + Map>>> partitionsMachineMap); + + /** + * Use this class to have no visualization. + * + */ + public static class NoVisualizer implements Visualizer { + + @Override + public void newConfiguration(Configuration cfg) { + return; + } + + @Override + public void newPartitionMachineMap( + Map>>> partitionsMachineMap) { + return; + } + } + + /** + * Generates dot file and then from the dot file generates graph. Before + * using this class, ensure that Graphviz is properly installed in the + * system. + */ + public static class DotVisualizer implements Visualizer { + + protected final OneToOneElement streamGraph; + + private final String appName; + + /** + * namePrefix of the current configuration. 
+ */ + private String namePrefix = ""; + + /** + * Tells whether the dot tool is installed in the system or not. + */ + private boolean hasDot; + + public DotVisualizer(OneToOneElement streamGraph) { + this.streamGraph = streamGraph; + this.appName = streamGraph.getClass().getSimpleName(); + hasDot = true; + DOTstreamVisitor dotSV = new DOTstreamVisitor(); + streamGraph.visit(dotSV); + } + + /** + * Visits through the Stream graph and generates dot file. + * + * @author sumanan + * @since 29 Dec, 2014 + */ + private class DOTstreamVisitor extends StreamVisitor { + + private final FileWriter writer; + + DOTstreamVisitor() { + writer = Utils.fileWriter(appName, "streamgraph.dot"); + } + + private void initilizeDot() { + try { + writer.write(String.format("digraph %s {\n", appName)); + writer.write("\trankdir=TD;\n"); + writer.write("\tnodesep=0.5;\n"); + writer.write("\tranksep=equally;\n"); + // writer.write("\tnode [shape = circle];\n"); + } catch (IOException e) { + e.printStackTrace(); + } + } + + private void closeDot() { + try { + writer.write("}"); + writer.flush(); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + @Override + public void beginVisit() { + initilizeDot(); + } + + @Override + public void visitFilter(Filter filter) { + updateDot(filter); + } + + @Override + public boolean enterPipeline(Pipeline pipeline) { + return true; + } + + @Override + public void exitPipeline(Pipeline pipeline) { + + } + + @Override + public boolean enterSplitjoin(Splitjoin splitjoin) { + return true; + } + + @Override + public void visitSplitter(Splitter splitter) { + updateDot(splitter); + } + + @Override + public boolean enterSplitjoinBranch(OneToOneElement element) { + return true; + } + + @Override + public void exitSplitjoinBranch(OneToOneElement element) { + } + + @Override + public void visitJoiner(Joiner joiner) { + updateDot(joiner); + } + + @Override + public void exitSplitjoin(Splitjoin splitjoin) { + } + + @Override + public void 
endVisit() { + closeDot(); + runDot("streamgraph"); + } + + private void updateDot(Worker w) { + for (Worker suc : Workers.getSuccessors(w)) { + String first = w.getClass().getSimpleName(); + String second = suc.getClass().getSimpleName(); + int id = Workers.getIdentifier(w); + int sucID = Workers.getIdentifier(suc); + try { + writer.write(String.format("\t%d -> %d;\n", id, sucID)); + // writer.write(String.format("\t%s -> %s;\n", first, + // second)); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } + + @Override + public void newConfiguration(Configuration cfg) { + namePrefix = ConfigurationUtils.getConfigPrefix(cfg); + } + + private void runDot(String file) { + String outFileFormat = "svg"; + String fileName = String.format("./%s%s%s.dot", appName, + File.separator, file); + String outFileName = String.format("./%s%s%s%s%s_%s.%s", appName, + File.separator, ConfigurationUtils.configDir, + File.separator, namePrefix, file, outFileFormat); + ProcessBuilder pb = new ProcessBuilder("dot", "-T" + outFileFormat, + fileName, "-o", outFileName); + try { + Process p = pb.start(); + // TODO: [20-2-2015]. I am commenting the following line for + // some performance improvement. Look for bugs. 
+ // p.waitFor(); + } catch (IOException e) { + System.err + .println("DotVisualizer: dot(Graphviz) tool is not properly installed in the system"); + hasDot = false; + // e.printStackTrace(); + } + } + + @Override + public void newPartitionMachineMap( + Map>>> partitionsMachineMap) { + if (!hasDot) + return; + FileWriter writer; + try { + writer = blobGraphWriter(); + for (int machine : partitionsMachineMap.keySet()) { + for (Set> blobworkers : partitionsMachineMap + .get(machine)) { + Token blobID = Utils.getblobID(blobworkers); + writer.write(String + .format("\tsubgraph \"cluster_%s\" { color=" + + "royalblue1; label = \"Blob-%s:Machine-%d\";", + blobID, blobID, machine)); + Set workerIDs = getWorkerIds(blobworkers); + for (Integer id : workerIDs) + writer.write(String.format(" %d;", id)); + writer.write("}\n"); + } + } + writer.write("}\n"); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + runDot("blobgraph"); + } + private Set getWorkerIds(Set> blobworkers) { + Set workerIds = new HashSet<>(); + for (Worker w : blobworkers) { + workerIds.add(Workers.getIdentifier(w)); + } + return workerIds; + } + + /** + * Copies all lines except the final closing bracket from + * streamgraph.dot to blobgraph.dot. 
+ * + * @return + * @throws IOException + */ + private FileWriter blobGraphWriter() throws IOException { + File streamGraph = new File(String.format("./%s%sstreamgraph.dot", + appName, File.separator)); + File blobGraph = new File(String.format("./%s%sblobgraph.dot", + appName, File.separator)); + BufferedReader reader = new BufferedReader(new FileReader( + streamGraph)); + FileWriter writer = new FileWriter(blobGraph, false); + String line; + int unclosedParenthesis = 0; + while ((line = reader.readLine()) != null) { + if (line.contains("{")) + unclosedParenthesis++; + if (line.contains("}")) + unclosedParenthesis--; + if (unclosedParenthesis > 0) { + writer.write(line); + writer.write("\n"); + } + } + reader.close(); + return writer; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java b/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java index 3d97b6cb..bcdc3d1d 100644 --- a/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java +++ b/src/edu/mit/streamjit/impl/distributed/WorkerMachine.java @@ -28,17 +28,15 @@ import java.util.Map; import java.util.Set; -import com.google.common.collect.ImmutableSet; +import static com.google.common.base.Preconditions.*; -import edu.mit.streamjit.api.StreamCompilationFailedException; import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.Workers; -import edu.mit.streamjit.impl.common.AbstractDrainer.BlobGraph; import edu.mit.streamjit.impl.common.Configuration.IntParameter; import edu.mit.streamjit.impl.common.Configuration.Parameter; import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; -import edu.mit.streamjit.impl.distributed.ConfigurationManager.AbstractConfigurationManager; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.PartitionManager.AbstractPartitionManager; /** * This class implements one type of search space. 
Adds "worker to machine" @@ -58,15 +56,20 @@ * @since Jan 16, 2014 * */ -public final class WorkerMachine extends AbstractConfigurationManager { +public final class WorkerMachine extends AbstractPartitionManager { + + private final Set> workerset; - WorkerMachine(StreamJitApp app) { + public WorkerMachine(StreamJitApp app) { super(app); + this.workerset = Workers.getAllWorkersInGraph(app.source); } @Override public Configuration getDefaultConfiguration(Set> workers, int noOfMachines) { + checkArgument(noOfMachines > 0, String.format( + "noOfMachines = %d, It must be > 0", noOfMachines)); Configuration.Builder builder = Configuration.builder(); List machinelist = new ArrayList<>(noOfMachines); for (int i = 1; i <= noOfMachines; i++) @@ -90,49 +93,8 @@ public Configuration getDefaultConfiguration(Set> workers, return builder.build(); } - /** - * Builds partitionsMachineMap and {@link BlobGraph} from the new - * Configuration, and verifies for any cycles among blobs. If it is a valid - * configuration, (i.e., no cycles among the blobs), then {@link #app} - * object's member variables {@link StreamJitApp#blobConfiguration}, - * {@link StreamJitApp#blobGraph} and - * {@link StreamJitApp#partitionsMachineMap} will be assigned according to - * reflect the new configuration, no changes otherwise. - * - * @param config - * New configuration form Opentuer. - * @return true iff no cycles among blobs - */ - @Override - public boolean newConfiguration(Configuration config) { - - Map>>> partitionsMachineMap = getMachineWorkerMap( - config, app.source); - try { - app.varifyConfiguration(partitionsMachineMap); - } catch (StreamCompilationFailedException ex) { - return false; - } - app.blobConfiguration = config; - return true; - } - - /** - * Reads the configuration and returns a map of nodeID to list of set of - * workers (list of blob workers) which are assigned to the node. Value of - * the returned map is list of worker set where each worker set is an - * individual blob. 
- * - * @param config - * @param workerset - * @return map of nodeID to list of set of workers which are assigned to the - * node. - */ - private Map>>> getMachineWorkerMap( - Configuration config, Worker source) { - - ImmutableSet> workerset = Workers - .getAllWorkersInGraph(source); + public Map>>> partitionMap( + Configuration config) { Map>> partition = new HashMap<>(); for (Worker w : workerset) { diff --git a/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java new file mode 100644 index 00000000..bf3c569c --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/AsyncTCPConnection.java @@ -0,0 +1,821 @@ +package edu.mit.streamjit.impl.distributed.common; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectOutputStream; +import java.io.OutputStream; +import java.io.UnsupportedEncodingException; +import java.net.InetAddress; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +import edu.mit.streamjit.impl.blob.AbstractWriteOnlyBuffer; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel; +import edu.mit.streamjit.impl.distributed.node.StreamNode; + +/** + * Uses {@link AsynchronousSocketChannel} from Java's NIO.2 to send data. This + * class only supports bulk asynchronous write. Reads ({@link #readObject()}) or + * single object writes ({@link #writeObject(Object)}) are not supported. 
+ * Serialises object array into {@link ByteBuffer} and sends it over a + * {@link AsynchronousSocketChannel}. Further, for the performance purposes, in + * oder to parallelise serialisation task and sending task, multiple + * {@link ByteBuffer}s are used. So that while user thread is serialising the + * data into a {@link ByteBuffer}, Java threads can send the already written + * bytebuffers. + * + * @author Sumanan sumanan@mit.edu + * @since May 05, 2014 + * + */ +public class AsyncTCPConnection implements Connection { + /** + * Backed by {@link ByteBufferArrayOutputStream}. + */ + private ObjectOutputStream ooStream = null; + + private AsynchronousSocketChannel asyncSktChannel; + + private ByteBufferArrayOutputStream bBAos; + + private boolean isconnected = false; + + public AsyncTCPConnection(AsynchronousSocketChannel asyncSktChannel) { + this(asyncSktChannel, 5000); + } + + /** + * @param socket + * @param resetCount + * reset the {@link ObjectOutputStream} after this no of sends. + * To avoid out of memory error. 
+ */ + public AsyncTCPConnection(AsynchronousSocketChannel asyncSktChannel, + int resetCount) { + try { + this.asyncSktChannel = asyncSktChannel; + + bBAos = new ByteBufferArrayOutputStream(2); + ooStream = new ObjectOutputStream(bBAos); + isconnected = true; + } catch (IOException iex) { + isconnected = false; + iex.printStackTrace(); + } + } + + @Override + public void writeObject(Object obj) throws IOException { + throw new java.lang.Error("Method not Implemented"); + /* + * if (isStillConnected()) { + * + * while (!canWrite.get()) ; + * + * try { ooStream.writeObject(obj); send(); } catch (IOException ix) { + * isconnected = false; throw ix; } } else { throw new + * IOException("TCPConnection: Socket is not connected"); } + */ + } + + public int write(Object[] data, int offset, int length) throws IOException { + + final ObjectOutputStream objOS = this.ooStream; + final ByteBufferArrayOutputStream bBAos = this.bBAos; + + int written = 0; + if (bBAos.newWrite()) { + while (written < length) { + objOS.writeObject(data[offset++]); + ++written; + } + objOS.reset(); + bBAos.writeCompleted(); + } + + send(); + return written; + } + + private void send() { + final ByteBufferOutputStream bBos; + final ByteBufferArrayOutputStream bBAos; + + bBAos = this.bBAos; + + bBos = bBAos.newRead(); + if (bBos == null) + return; + + ByteBuffer bb = bBos.getByteBuffer(); + bb.flip(); + asyncSktChannel.write(bb, bb, + new CompletionHandler() { + @Override + public void completed(Integer result, ByteBuffer attachment) { + + if (attachment.hasRemaining()) { + asyncSktChannel.write(attachment, attachment, this); + } else { + bBAos.readCompleted(); + send(); + } + } + + @Override + public void failed(Throwable exc, ByteBuffer attachment) { + isconnected = false; + exc.printStackTrace(); + } + }); + } + + public final void closeConnection() { + isconnected = false; + try { + if (ooStream != null) + this.ooStream.close(); + if (asyncSktChannel != null) + this.asyncSktChannel.close(); + } 
catch (IOException ex) { + ex.printStackTrace(); + } + } + + @Override + public final boolean isStillConnected() { + // return (this.socket.isConnected() && !this.socket.isClosed()); + return isconnected; + } + + @Override + public T readObject() throws IOException, ClassNotFoundException { + throw new java.lang.Error( + "Reading object is not supported in asynchronous tcp mode"); + } + + @Override + public void softClose() throws IOException { + while (!bBAos.newWrite()) { + try { + // TODO : Find correct time for sleep. + Thread.sleep(100); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + this.ooStream.write('\u001a'); + this.ooStream.flush(); + bBAos.writeCompleted(); + send(); + // System.err.println("Softclose is called"); + } + + /** + * This class implements an output stream in which the data is written into + * a byte array. The buffer automatically grows as data is written to it. + * The data can be retrieved using toByteArray() and + * toString(). + *

    + * Closing a ByteArrayOutputStream has no effect. The methods in + * this class can be called after the stream has been closed without + * generating an IOException. + * + * @author Arthur van Hoff + * @since JDK1.0 + * + * This is a copy of {@link ByteArrayOutputStream} and byte array in + * ByteArrayOutputStream is replaced by {@link ByteBuffer} for + * performance. + * @author sumanan + * @since May 10, 2014 + */ + public class ByteBufferOutputStream extends OutputStream { + + /** + * The buffer where data is stored. + */ + protected ByteBuffer bb; + + /** + * The number of valid bytes in the buffer. + */ + protected int count; + + /** + * Creates a new byte array output stream. The buffer capacity is + * initially 32 bytes, though its size increases if necessary. + */ + public ByteBufferOutputStream() { + this(10 * 1024 * 1024); + } + + public int getCount() { + return count; + } + + /** + * Creates a new byte array output stream, with a buffer capacity of the + * specified size, in bytes. + * + * @param size + * the initial size. + * @exception IllegalArgumentException + * if size is negative. + */ + public ByteBufferOutputStream(int size) { + if (size < 0) { + throw new IllegalArgumentException("Negative initial size: " + + size); + } + bb = ByteBuffer.allocateDirect(size); + } + + /** + * Increases the capacity if necessary to ensure that it can hold at + * least the number of elements specified by the minimum capacity + * argument. + * + * @param minCapacity + * the desired minimum capacity + * @throws OutOfMemoryError + * if {@code minCapacity < 0}. This is interpreted as a + * request for the unsatisfiably large capacity + * {@code (long) Integer.MAX_VALUE + (minCapacity - Integer.MAX_VALUE)} + * . 
+ */ + private void ensureCapacity(int minCapacity) { + // overflow-conscious code + if (minCapacity - bb.capacity() > 0) + grow(minCapacity); + } + + /** + * Increases the capacity to ensure that it can hold at least the number + * of elements specified by the minimum capacity argument. + * + * @param minCapacity + * the desired minimum capacity + */ + private void grow(int minCapacity) { + // overflow-conscious code + int oldCapacity = bb.capacity(); + int newCapacity = oldCapacity << 1; + if (newCapacity - minCapacity < 0) + newCapacity = minCapacity; + if (newCapacity < 0) { + if (minCapacity < 0) // overflow + throw new OutOfMemoryError(); + newCapacity = Integer.MAX_VALUE; + } + ByteBuffer newBb = ByteBuffer.allocateDirect(newCapacity); + newBb.clear(); + bb.flip(); + newBb.put(bb); + bb = newBb; + System.out.println("Growing bytebuffer. newCapacity = " + + newCapacity); + } + + /** + * Writes len bytes from the specified byte array starting + * at offset off to this output stream. The general + * contract for write(b, off, len) is that some of the + * bytes in the array b are written to the output stream in + * order; element b[off] is the first byte written and + * b[off+len-1] is the last byte written by this operation. + *

    + * The write method of OutputStream calls the + * write method of one argument on each of the bytes to be written out. + * Subclasses are encouraged to override this method and provide a more + * efficient implementation. + *

    + * If b is null, a + * NullPointerException is thrown. + *

    + * If off is negative, or len is negative, or + * off+len is greater than the length of the array + * b, then an IndexOutOfBoundsException is thrown. + * + * @param b + * the data. + * @param off + * the start offset in the data. + * @param len + * the number of bytes to write. + * @exception IOException + * if an I/O error occurs. In particular, an + * IOException is thrown if the output + * stream is closed. + */ + public void write(byte b[], int off, int len) throws IOException { + if (b == null) { + throw new NullPointerException(); + } else if ((off < 0) || (off > b.length) || (len < 0) + || ((off + len) > b.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + ensureCapacity(count + len); + bb.put(b, off, len); + count += len; + assert count == bb.position() : "count != bb.position()"; + } + + /** + * Writes the specified byte to this byte array output stream. + * + * @param b + * the byte to be written. + */ + public synchronized void write(int b) { + ensureCapacity(count + 1); + bb.put((byte) b); + count += 1; + assert count == bb.position() : "count != bb.position()"; + } + + /** + * Writes len bytes from the specified byte array starting + * at offset off to this byte array output stream. + * + * @param b + * the data. + * @param off + * the start offset in the data. + * @param len + * the number of bytes to write. + * + * public synchronized void write(byte b[], int off, int len) + * { if ((off < 0) || (off > b.length) || (len < 0) || ((off + * + len) - b.length > 0)) { throw new + * IndexOutOfBoundsException(); } ensureCapacity(count + + * len); System.arraycopy(b, off, buf, count, len); count += + * len; } + */ + + /** + * Writes the complete contents of this byte array output stream to the + * specified output stream argument, as if by calling the output + * stream's write method using out.write(buf, 0, count). + * + * @param out + * the output stream to which to write the data. 
+ * @exception IOException + * if an I/O error occurs. + */ + public synchronized void writeTo(OutputStream out) throws IOException { + out.write(getByteArray(), 0, count); + } + + /** + * Resets the count field of this byte array output stream + * to zero, so that all currently accumulated output in the output + * stream is discarded. The output stream can be used again, reusing the + * already allocated buffer space. + * + * @see java.io.ByteArrayInputStream#count + */ + public synchronized void reset() { + bb.position(0); + bb.limit(bb.capacity()); + count = 0; + } + + /** + * Creates a newly allocated byte array. Its size is the current size of + * this output stream and the valid contents of the buffer have been + * copied into it. + * + * @return the current contents of this output stream, as a byte array. + * @see java.io.ByteArrayOutputStream#size() + */ + public synchronized byte toByteArray()[] { + return getByteArray(); + } + + /** + * Returns the current size of the buffer. + * + * @return the value of the count field, which is the + * number of valid bytes in this output stream. + * @see java.io.ByteArrayOutputStream#count + */ + public synchronized int size() { + assert count == bb.position() : "count != bb.position()"; + return count; + } + + /** + * Converts the buffer's contents into a string decoding bytes using the + * platform's default character set. The length of the new + * String is a function of the character set, and hence may not + * be equal to the size of the buffer. + * + *

    + * This method always replaces malformed-input and unmappable-character + * sequences with the default replacement string for the platform's + * default character set. The + * {@linkplain java.nio.charset.CharsetDecoder} class should be used + * when more control over the decoding process is required. + * + * @return String decoded from the buffer's contents. + * @since JDK1.1 + */ + public synchronized String toString() { + return new String(getByteArray(), 0, count); + } + + /** + * Converts the buffer's contents into a string by decoding the bytes + * using the specified {@link java.nio.charset.Charset charsetName}. The + * length of the new String is a function of the charset, and + * hence may not be equal to the length of the byte array. + * + *

    + * This method always replaces malformed-input and unmappable-character + * sequences with this charset's default replacement string. The + * {@link java.nio.charset.CharsetDecoder} class should be used when + * more control over the decoding process is required. + * + * @param charsetName + * the name of a supported + * {@linkplain java.nio.charset.Charset charset} + * @return String decoded from the buffer's contents. + * @exception UnsupportedEncodingException + * If the named charset is not supported + * @since JDK1.1 + */ + public synchronized String toString(String charsetName) + throws UnsupportedEncodingException { + return new String(getByteArray(), 0, count, charsetName); + } + + /** + * Creates a newly allocated string. Its size is the current size of the + * output stream and the valid contents of the buffer have been copied + * into it. Each character c in the resulting string is + * constructed from the corresponding element b in the byte array + * such that:

    + * + *
    +		 * c == (char) (((hibyte & 0xff) << 8) | (b & 0xff))
    +		 * 
    + * + *
    + * + * @deprecated This method does not properly convert bytes into + * characters. As of JDK 1.1, the preferred way to do + * this is via the toString(String enc) method, + * which takes an encoding-name argument, or the + * toString() method, which uses the platform's + * default character encoding. + * + * @param hibyte + * the high byte of each resulting Unicode character. + * @return the current contents of the output stream, as a string. + * @see java.io.ByteArrayOutputStream#size() + * @see java.io.ByteArrayOutputStream#toString(String) + * @see java.io.ByteArrayOutputStream#toString() + */ + @Deprecated + public synchronized String toString(int hibyte) { + return new String(getByteArray(), hibyte, 0, count); + } + + /** + * Closing a ByteArrayOutputStream has no effect. The methods + * in this class can be called after the stream has been closed without + * generating an IOException. + *

    + * + */ + public void close() throws IOException { + } + + private byte[] getByteArray() { + bb.flip(); + final int size = bb.remaining(); + byte[] buf = new byte[size]; + bb.get(buf, 0, size); + assert count == bb.position() : "count != bb.position()"; + return buf; + } + + public ByteBuffer getByteBuffer() { + return bb; + } + } + + /** + * A {@link ByteBufferOutputStream} ( implicitly {@link ByteBuffer} ) can be + * in one of following 4 state. State of a {@link ByteBufferOutputStream} + * expected to change in a cyclic manner, from canWrite -> beingWritten -> + * canRead -> beingRead -> canWrite. + * + * @author sumanan + */ + private enum Status { + canWrite, beingWritten, canRead, beingRead + } + + /** + * Writers must call {@link #newWrite()} before begins the write process and + * call {@link #writeCompleted()} after the end of write process. Whatever + * written in between these two calls will be captured into single + * {@link ByteBufferOutputStream}. + * + * Like writers, readers also call {@link #newRead()} to get the current + * {@link ByteBufferOutputStream} to read and must call + * {@link #readCompleted()} after the end of read process. + * + * @author sumanan + * + */ + public class ByteBufferArrayOutputStream extends OutputStream { + + private final int debugLevel; + + /** + * Read index of {@link #bytebufferArray}. + */ + private int readIndex; + + /** + * Write index of {@link #bytebufferArray}. 
+ */ + private int writeIndex; + + private final ByteBufferOutputStream[] bytebufferArray; + + /** + * Keeps the {@link Status} of each element in the + * {@link #bytebufferArray} + */ + private Map> bufferStatus; + + public ByteBufferArrayOutputStream(int listSize) { + debugLevel = 0; + writeIndex = 0; + readIndex = 0; + bytebufferArray = new ByteBufferOutputStream[listSize]; + bufferStatus = new HashMap<>(listSize); + for (int i = 0; i < bytebufferArray.length; i++) { + bytebufferArray[i] = new ByteBufferOutputStream(); + bufferStatus.put(i, + new AtomicReference(Status.canWrite)); + } + } + + @Override + public void write(int b) throws IOException { + bytebufferArray[writeIndex].write(b); + } + + public void write(byte b[], int off, int len) throws IOException { + bytebufferArray[writeIndex].write(b, off, len); + } + + /** + * Do not forget to call {@link #writeCompleted()} after every + * successful bulk writes. Whatever written in between these two calls + * will be captured into single {@link ByteBufferOutputStream}. + * + * @return true iff the next buffer is free to write. + */ + public boolean newWrite() { + if (bufferStatus.get(writeIndex).compareAndSet(Status.canWrite, + Status.beingWritten)) { + + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : newWrite-canWrite : " + "writeIndex - " + + writeIndex + ", readIndex - " + readIndex); + return true; + } else { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : newWrite-failed : " + "writeIndex - " + + writeIndex + ", readIndex - " + readIndex); + return false; + } + } + + /** + * Writer must call this method right after the writing of an collection + * of objects is completed. 
+ */ + public void writeCompleted() { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : writeCompleted : " + "writeIndex - " + writeIndex + + ", readIndex - " + readIndex); + int w = writeIndex; + writeIndex = (writeIndex + 1) % bytebufferArray.length; + boolean ret = bufferStatus.get(w).compareAndSet( + Status.beingWritten, Status.canRead); + if (!ret) { + String msg = String.format("BufferState conflict : " + + "writeIndex - " + writeIndex + ", readIndex - " + + readIndex + " - Status of the writeBuffer is " + + bufferStatus.get(w).get()); + throw new IllegalStateException(msg); + } + } + + /** + * Do not forget to call {@link #readCompleted()} after every successful + * read of a {@link ByteBufferOutputStream}. + * + * @return Next available {@link ByteBufferOutputStream} if available or + * null if no {@link ByteBufferOutputStream} is + * available to read. + */ + public synchronized ByteBufferOutputStream newRead() { + if (bufferStatus.get(readIndex).get() == Status.beingRead) { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : newRead-beingRead : " + "writeIndex - " + + writeIndex + ", readIndex - " + readIndex); + return null; + } + + if (bufferStatus.get(readIndex).compareAndSet(Status.canRead, + Status.beingRead)) { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : newRead-canRead : " + "writeIndex - " + + writeIndex + ", readIndex - " + readIndex); + if (bytebufferArray[readIndex].getCount() == 0) { + throw new IllegalStateException( + "bytebufferArray[a].getCount() != 0 is expected."); + } + return bytebufferArray[readIndex]; + } else { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : newRead - not can read " + readIndex); + return null; + } + } + + /** + * Reader must call this method right after the reading process is + * completed. 
+ */ + public void readCompleted() { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " : readCompleted : " + "writeIndex - " + writeIndex + + ", readIndex - " + readIndex); + bytebufferArray[readIndex].reset(); + int r = readIndex; + readIndex = (readIndex + 1) % bytebufferArray.length; + boolean ret = bufferStatus.get(r).compareAndSet(Status.beingRead, + Status.canWrite); + if (!ret) + throw new IllegalStateException("bufferStatus conflict"); + } + } + + /** + * Uniquely identifies a Asynchronous TCP connection among all connected + * machines. + * + *

    + * NOTE: IPAddress is not included for the moment to avoid re-sending same + * information again and again for every reconfiguration. machineId to + * {@link NodeInfo} map will be sent initially. So {@link StreamNode}s can + * get ipAddress of a machine from that map. + */ + public static class AsyncTCPConnectionInfo extends ConnectionInfo { + + private static final long serialVersionUID = 1L; + + private final int portNo; + + public AsyncTCPConnectionInfo(int srcID, int dstID, int portNo) { + super(srcID, dstID, false); + Ipv4Validator validator = Ipv4Validator.getInstance(); + if (!validator.isValid(portNo)) + throw new IllegalArgumentException("Invalid port No"); + this.portNo = portNo; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + portNo; + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!super.equals(obj)) + return false; + if (getClass() != obj.getClass()) + return false; + AsyncTCPConnectionInfo other = (AsyncTCPConnectionInfo) obj; + if (portNo != other.portNo) + return false; + return true; + } + + @Override + public String toString() { + return "AsyncTCPConnectionInfo [srcID=" + getSrcID() + ", dstID=" + + getDstID() + ", portID=" + portNo + "]"; + } + + @Override + public Connection makeConnection(int nodeID, NetworkInfo networkInfo, + int timeOut) { + Connection con = null; + if (srcID == nodeID) { + try { + con = ConnectionFactory.getAsyncConnection(portNo); + } catch (IOException e) { + e.printStackTrace(); + } + } + + else if (dstID == nodeID) { + InetAddress ipAddress = networkInfo.getInetAddress(srcID); + try { + con = ConnectionFactory.getConnection( + ipAddress.getHostAddress(), portNo, false); + } catch (IOException e) { + e.printStackTrace(); + } + } else { + throw new IllegalArgumentException( + "Neither srcID nor dstID matches with nodeID"); + } + return con; + } + + @Override + public 
BoundaryInputChannel inputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + return new BlockingInputChannel(bufSize, conProvider, this, + t.toString(), 0); + } + + @Override + public BoundaryOutputChannel outputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + return new AsyncOutputChannel(conProvider, this, t.toString(), 0); + } + } + + public static class AsyncTCPBuffer extends AbstractWriteOnlyBuffer { + + private final AsyncTCPConnection con; + + public AsyncTCPBuffer(AsyncTCPConnection con) { + this.con = con; + } + + @Override + public boolean write(Object t) { + try { + con.writeObject(t); + return true; + } catch (IOException e) { + e.printStackTrace(); + } + return false; + } + + public int write(Object[] data, int offset, int length) { + try { + return con.write(data, offset, length); + } catch (IOException e) { + e.printStackTrace(); + } + return 0; + } + + @Override + public int size() { + return 0; + } + + @Override + public int capacity() { + return Integer.MAX_VALUE; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java index a2f51f56..68af5fd3 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java +++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannel.java @@ -21,11 +21,11 @@ */ package edu.mit.streamjit.impl.distributed.common; -import java.io.IOException; - import com.google.common.collect.ImmutableList; import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; /** * {@link BoundaryChannel} wraps a {@link Buffer} that crosses over the @@ -41,30 +41,19 @@ public interface BoundaryChannel { String name(); - /** - * Close the connection. 
- * - * @throws IOException - */ - void closeConnection() throws IOException; - - /** - * @return true iff the connection with the other node is still valid. - */ - boolean isStillConnected(); - /** * @return {@link Runnable} that does all IO communication and send * data(stream tuples) to other node (or receive from other node). */ Runnable getRunnable(); - /** - * @return Other end of the node's ID. - */ - int getOtherNodeID(); + ImmutableList getUnprocessedData(); + + Connection getConnection(); + + ConnectionInfo getConnectionInfo(); - public ImmutableList getUnprocessedData(); + Buffer getBuffer(); /** * Interface that represents input channels. @@ -88,20 +77,10 @@ public interface BoundaryInputChannel extends BoundaryChannel { *

    * Based on the type argument, implementation may treat uncounsumed data * differently - *

      - *
    1. 1 - No extraBuffer. Wait and push all received data in to the - * actual buffer. May be used at final draining. - *
    2. 2 - Create extra buffer and put all unconsumed data. This can be - * send to the controller as draindata. May be used at intermediate - * draining. - *
    3. 3 - Discard all unconsumed data. This is useful, if we don't care - * about the data while tuning for performance. *

      * - * @param type - * : Can be 1, 2 or 3. rest are illegal. */ - void stop(int type); + void stop(DrainType type); /** * Receive data from other node. diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java new file mode 100644 index 00000000..28a282d5 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelFactory.java @@ -0,0 +1,101 @@ +package edu.mit.streamjit.impl.distributed.common; + +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; +import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel; + +/** + * {@link BoundaryChannel} maker. + * + * @author Sumanan sumanan@mit.edu + * @since May 28, 2014 + */ +public interface BoundaryChannelFactory { + + BoundaryInputChannel makeInputChannel(Token t, Buffer buffer, + ConnectionInfo conInfo); + + BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer, + ConnectionInfo conInfo); + + BoundaryInputChannel makeInputChannel(Token t, int bufSize, + ConnectionInfo conInfo); + + BoundaryOutputChannel makeOutputChannel(Token t, int bufSize, + ConnectionInfo conInfo); + + /** + * Makes blocking {@link BlockingInputChannel} and {@link BlockingOutputChannel}. 
+ * + */ + public static class TCPBoundaryChannelFactory + implements + BoundaryChannelFactory { + + protected final ConnectionProvider conProvider; + + public TCPBoundaryChannelFactory(ConnectionProvider conProvider) { + this.conProvider = conProvider; + } + + @Override + public BoundaryInputChannel makeInputChannel(Token t, Buffer buffer, + ConnectionInfo conInfo) { + return new BlockingInputChannel(buffer, conProvider, conInfo, + t.toString(), 0); + } + + @Override + public BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer, + ConnectionInfo conInfo) { + return new BlockingOutputChannel(buffer, conProvider, conInfo, + t.toString(), 0); + } + + @Override + public BoundaryInputChannel makeInputChannel(Token t, int bufSize, + ConnectionInfo conInfo) { + return new BlockingInputChannel(bufSize, conProvider, conInfo, + t.toString(), 0); + } + + @Override + public BoundaryOutputChannel makeOutputChannel(Token t, int bufSize, + ConnectionInfo conInfo) { + return new BlockingOutputChannel(bufSize, conProvider, conInfo, + t.toString(), 0); + } + } + + /** + * Makes blocking {@link BlockingInputChannel} and asynchronous + * {@link AsyncOutputChannel}. 
+ * + */ + public class AsyncBoundaryChannelFactory extends TCPBoundaryChannelFactory { + + public AsyncBoundaryChannelFactory(ConnectionProvider conProvider) { + super(conProvider); + } + + @Override + public BoundaryOutputChannel makeOutputChannel(Token t, Buffer buffer, + ConnectionInfo conInfo) { + return new AsyncOutputChannel(conProvider, conInfo, + t.toString(), 0); + } + + @Override + public BoundaryOutputChannel makeOutputChannel(Token t, int bufSize, + ConnectionInfo conInfo) { + return new AsyncOutputChannel(conProvider, conInfo, + t.toString(), 0); + } + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java new file mode 100644 index 00000000..951b77cb --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/BoundaryChannelManager.java @@ -0,0 +1,176 @@ +package edu.mit.streamjit.impl.distributed.common; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import com.google.common.collect.ImmutableMap; + +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.node.AsyncOutputChannel; + +/** + * Manages set of {@link BoundaryChannel}s. + * + * @author Sumanan sumanan@mit.edu + * @since May 28, 2014 + */ +public interface BoundaryChannelManager { + + void start(); + + void waitToStart(); + + void waitToStop(); + + public interface BoundaryInputChannelManager extends BoundaryChannelManager { + + /** + * In streamJit, a channel can be identified by a {@link Token}. + * + * @return map of channel {@link Token}, {@link BoundaryInputChannel} + * handled by this manager. 
+ */ + ImmutableMap inputChannelsMap(); + + /** + * @param stopType + * See {@link BoundaryInputChannel#stop(int)} + */ + void stop(DrainType stopType); + } + + public interface BoundaryOutputChannelManager + extends + BoundaryChannelManager { + + /** + * In streamJit, a channel can be identified by a {@link Token}. + * + * @return map of channel {@link Token}, {@link BoundaryOutputChannel} + * handled by this manager. + */ + ImmutableMap outputChannelsMap(); + + /** + * @param stopType + * See {@link BoundaryOutputChannel#stop(boolean)} + */ + void stop(boolean stopType); + } + + public static class InputChannelManager + implements + BoundaryInputChannelManager { + + private final ImmutableMap inputChannels; + + private final Set inputChannelThreads; + + public InputChannelManager( + final ImmutableMap inputChannels) { + this.inputChannels = inputChannels; + inputChannelThreads = new HashSet<>(inputChannels.values().size()); + } + + @Override + public void start() { + for (BoundaryInputChannel bc : inputChannels.values()) { + Thread t = new Thread(bc.getRunnable(), bc.name()); + t.start(); + inputChannelThreads.add(t); + } + } + + @Override + public void waitToStart() { + } + + @Override + public void waitToStop() { + for (Thread t : inputChannelThreads) { + try { + t.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + + @Override + public void stop(DrainType stopType) { + for (BoundaryInputChannel bc : inputChannels.values()) { + bc.stop(stopType); + } + } + + @Override + public ImmutableMap inputChannelsMap() { + return inputChannels; + } + } + + public static class OutputChannelManager + implements + BoundaryOutputChannelManager { + + protected final ImmutableMap outputChannels; + protected final Map outputChannelThreads; + + public OutputChannelManager( + ImmutableMap outputChannels) { + this.outputChannels = outputChannels; + outputChannelThreads = new HashMap<>(outputChannels.values().size()); + } + + @Override + public void 
start() { + for (BoundaryOutputChannel bc : outputChannels.values()) { + Thread t = new Thread(bc.getRunnable(), bc.name()); + t.start(); + outputChannelThreads.put(bc, t); + } + } + + @Override + public void waitToStart() { + for (Map.Entry en : outputChannelThreads + .entrySet()) { + if (en.getKey() instanceof AsyncOutputChannel) { + try { + en.getValue().join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + } + + @Override + public void stop(boolean stopType) { + for (BoundaryOutputChannel bc : outputChannels.values()) { + bc.stop(stopType); + } + } + + @Override + public void waitToStop() { + for (Thread t : outputChannelThreads.values()) { + try { + t.join(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + + @Override + public ImmutableMap outputChannelsMap() { + return outputChannels; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java b/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java index 478cee6c..9b024e9b 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java +++ b/src/edu/mit/streamjit/impl/distributed/common/CTRLRDrainElement.java @@ -25,7 +25,7 @@ import edu.mit.streamjit.impl.blob.Blob; import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.Controller; @@ -81,12 +81,7 @@ public void process(CTRLRDrainProcessor dp) { public static final class DoDrain extends CTRLRDrainElement { private static final long serialVersionUID = 1L; - /** - * Instead of sending another object to get the {@link DrainData}, - * {@link Controller} can set this flag to get the drain data once - * draining is done. 
- */ - public final boolean reqDrainData; + public final DrainType drainType; /** * Identifies the blob. Since {@link Blob}s do not have an unique @@ -95,9 +90,9 @@ public static final class DoDrain extends CTRLRDrainElement { */ public final Token blobID; - public DoDrain(Token blobID, boolean reqDrainData) { + public DoDrain(Token blobID, DrainType drainType) { this.blobID = blobID; - this.reqDrainData = reqDrainData; + this.drainType = drainType; } @Override @@ -119,4 +114,35 @@ public interface CTRLRDrainProcessor { public void process(DoDrain drain); } + + /** + * Three types of draining are possible. + */ + public enum DrainType { + /** + * Final draining. No drain data. All {@link Blob}s are expected to run + * and finish data in input buffers buffers. + */ + FINAL(1), /** + * Intermediate draining. Drain data is required in this mode. + * {@link BoundaryInputChannel}s may create extra buffer and put all + * unconsumed data, and finally send this drain data to the + * {@link Controller} for reconfiguration. + */ + INTERMEDIATE(2), /** + * Discard all unconsumed data. This is useful, if we + * don't care about the data while tuning for performance. + * + */ + DISCARD(3); + private final int code; + + DrainType(int code) { + this.code = code; + } + + public int toint() { + return code; + } + } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java b/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java index 3deb0a17..4cdf8e81 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java +++ b/src/edu/mit/streamjit/impl/distributed/common/CTRLRMessageVisitor.java @@ -21,6 +21,8 @@ */ package edu.mit.streamjit.impl.distributed.common; +import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand; + /** * Visitor pattern. We have to have overloaded visit method to all sub type of * {@link MessageElement}s. See the {@link MessageElement}. 
@@ -39,4 +41,6 @@ public interface CTRLRMessageVisitor { public void visit(CTRLRDrainElement ctrlrDrainElement); public void visit(MiscCtrlElements miscCtrlElements); + + public void visit(ProfilerCommand command); } diff --git a/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java b/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java index 5395559c..8880af68 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java +++ b/src/edu/mit/streamjit/impl/distributed/common/ConfigurationString.java @@ -23,7 +23,7 @@ import edu.mit.streamjit.impl.blob.DrainData; import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor.ConfigType; +import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor.ConfigType; import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.Controller; @@ -55,7 +55,7 @@ public void accept(CTRLRMessageVisitor visitor) { visitor.visit(this); } - public void process(ConfigurationStringProcessor jp) { + public void process(ConfigurationProcessor jp) { jp.process(jsonString, type, drainData); } @@ -66,7 +66,7 @@ public void process(ConfigurationStringProcessor jp) { * @author Sumanan sumanan@mit.edu * @since May 27, 2013 */ - public interface ConfigurationStringProcessor { + public interface ConfigurationProcessor { public void process(String cfg, ConfigType type, DrainData drainData); diff --git a/src/edu/mit/streamjit/impl/distributed/common/Connection.java b/src/edu/mit/streamjit/impl/distributed/common/Connection.java index 22b2fa91..f2091c82 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/Connection.java +++ b/src/edu/mit/streamjit/impl/distributed/common/Connection.java @@ -21,16 +21,23 @@ */ package edu.mit.streamjit.impl.distributed.common; +import static com.google.common.base.Preconditions.checkNotNull; 
+ import java.io.IOException; import java.io.ObjectInputStream; import java.io.Serializable; +import java.net.SocketTimeoutException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; import edu.mit.streamjit.impl.distributed.node.StreamNode; -import edu.mit.streamjit.impl.distributed.runtimer.Controller; /** - * Communication interface for both {@link StreamNode} and {@link Controller} - * side. This interface is for an IO connection that is already created, i.e., + * Communication interface for an IO connection that is already created, i.e., * creating a connections is not handled at here. Consider * {@link ConnectionFactory} to create a connection.

      For the moment, * communicates at object granularity level. We may need to add primitive @@ -77,7 +84,7 @@ public interface Connection { * when the thread is blocked at {@link ObjectInputStream#readObject()} * method call. *

      - * + * * @throws IOException */ public void softClose() throws IOException; @@ -90,10 +97,16 @@ public interface Connection { public boolean isStillConnected(); /** - * Describes a connection between two machines. ConnectionInfo is considered + * Describes a connection between two machines. + *
        + *
      1. if isSymmetric is true, ConnectionInfo is considered * symmetric for equal() and hashCode() calculation. As long as same * machineIDs are involved, irrespect of srcID and dstID positions, these * methods return same result. + *
      2. + * if isSymmetric is false srcID and dstID will be treated as + * not interchangeable entities. + *
      * *

      * Note : All instances of ConnectionInfo, including subclass @@ -101,17 +114,27 @@ public interface Connection { * hashCode() and equals() methods. The whole point of this class is to * identify a connection between two machines. */ - public class ConnectionInfo implements Serializable { + public abstract class ConnectionInfo implements Serializable { private static final long serialVersionUID = 1L; - private int srcID; + protected final int srcID; - private int dstID; + protected final int dstID; + + /** + * Tells whether this connection is symmetric or not. + */ + protected final boolean isSymmetric; public ConnectionInfo(int srcID, int dstID) { + this(srcID, dstID, true); + } + + protected ConnectionInfo(int srcID, int dstID, boolean isSymmetric) { this.srcID = srcID; this.dstID = dstID; + this.isSymmetric = isSymmetric; } public int getSrcID() { @@ -122,17 +145,36 @@ public int getDstID() { return dstID; } + public boolean isSymmetric() { + return isSymmetric; + } + @Override public int hashCode() { final int prime = 31; int result = 1; - int min = Math.min(srcID, dstID); - int max = Math.max(srcID, dstID); - result = prime * result + min; - result = prime * result + max; + if (isSymmetric) { + int min = Math.min(srcID, dstID); + int max = Math.max(srcID, dstID); + result = prime * result + min; + result = prime * result + max; + } else { + result = prime * result + srcID; + result = prime * result + dstID; + } + result = prime * result + (isSymmetric ? 1231 : 1237); return result; } + /* + * (non-Javadoc) + * + * @see java.lang.Object#equals(java.lang.Object) equals() overwritten + * here breaks the reflexive(), symmetric() and transitive() properties, + * especially when subclasses involves. The purpose of this overwriting + * is to check whether an already established connection could be + * reused. 
+ */ @Override public boolean equals(Object obj) { if (this == obj) @@ -142,20 +184,199 @@ public boolean equals(Object obj) { if (!(obj instanceof ConnectionInfo)) return false; ConnectionInfo other = (ConnectionInfo) obj; - int myMin = Math.min(srcID, dstID); - int myMax = Math.max(srcID, dstID); - int otherMin = Math.min(other.srcID, other.dstID); - int otherMax = Math.max(other.srcID, other.dstID); - if (myMin != otherMin) - return false; - if (myMax != otherMax) - return false; + if (other.isSymmetric) { + int myMin = Math.min(srcID, dstID); + int myMax = Math.max(srcID, dstID); + int otherMin = Math.min(other.srcID, other.dstID); + int otherMax = Math.max(other.srcID, other.dstID); + if (myMin != otherMin) + return false; + if (myMax != otherMax) + return false; + } else { + if (srcID != other.srcID) + return false; + if (dstID != other.dstID) + return false; + } return true; } @Override public String toString() { - return "ConnectionInfo [srcID=" + srcID + ", dstID=" + dstID + "]"; + return String.format( + "ConnectionInfo [srcID=%d, dstID=%d, isSymmetric=%s]", + srcID, dstID, isSymmetric); + } + + /** + * This function will establish a new connection according to the + * connection info. + * + * @param nodeID + * : nodeID of the {@link StreamNode} that invokes this + * method. + * @param networkInfo + * : network info of the system. + * @return {@link Connection} that is described by this + * {@link ConnectionInfo}. + */ + public abstract Connection makeConnection(int nodeID, + NetworkInfo networkInfo, int timeOut); + + public abstract BoundaryInputChannel inputChannel(Token t, int bufSize, + ConnectionProvider conProvider); + + public abstract BoundaryOutputChannel outputChannel(Token t, + int bufSize, ConnectionProvider conProvider); + } + + /** + * We need an instance of {@link ConnectionInfo} to compare and get a + * concrete {@link ConnectionInfo} from the list of already created + * {@link ConnectionInfo}s. This class is added for that purpose. 
+ */ + public static class GenericConnectionInfo extends ConnectionInfo { + + private static final long serialVersionUID = 1L; + + public GenericConnectionInfo(int srcID, int dstID) { + super(srcID, dstID); + } + + public GenericConnectionInfo(int srcID, int dstID, boolean isSymmetric) { + super(srcID, dstID, isSymmetric); + } + + @Override + public Connection makeConnection(int nodeID, NetworkInfo networkInfo, + int timeOut) { + throw new java.lang.Error("This method is not supposed to call"); + } + + @Override + public BoundaryInputChannel inputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + throw new java.lang.Error("This method is not supposed to call"); + } + + @Override + public BoundaryOutputChannel outputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + throw new java.lang.Error("This method is not supposed to call"); + } + } + + /** + * ConnectionType serves two purposes + *

        + *
      1. Tune the connections. This will be passed to opentuner. + *
      2. Indicate the {@link StreamNode} to create appropriate + * {@link BoundaryChannel}. This will be bound with {@link ConnectionInfo}. + *
      + */ + public enum ConnectionType { + /** + * Blocking TCP socket connection + */ + BTCP, /** + * Non-Blocking TCP socket connection + * + * NBTCP, + */ + /** + * Asynchronous TCP socket connection + */ + ATCP, + /** + * Blocking InfiniBand + * + * BIB, + */ + /** + * Non-Blocking InfiniBand + * + * NBIB + */ + } + + /** + * Keeps all opened {@link TCPConnection}s for a machine. Each machine + * should have a single instance of this class and use this class to make + * new connections. + * + *

      + * TODO: Need to make this class singleton. I didn't do it now because in + * current way, controller and a local {@link StreamNode} are running in a + * same JVM. So first, local {@link StreamNode} should be made to run on a + * different JVM and then make this class singleton. + */ + public static class ConnectionProvider { + + private ConcurrentMap allConnections; + + private final int myNodeID; + + private final NetworkInfo networkInfo; + + public ConnectionProvider(int myNodeID, NetworkInfo networkInfo) { + checkNotNull(networkInfo, "networkInfo is null"); + this.myNodeID = myNodeID; + this.networkInfo = networkInfo; + this.allConnections = new ConcurrentHashMap<>(); + } + + /** + * See {@link #getConnection(TCPConnectionInfo, int)}. + * + * @param conInfo + * @return + * @throws IOException + */ + public Connection getConnection(ConnectionInfo conInfo) + throws IOException { + return getConnection(conInfo, 0); + } + +/** + * If the connection corresponds to conInfo is already established + * returns the connection. Try to make a new connection otherwise. + * + * @param conInfo - Information that uniquely identifies a {@link TCPConnection + * @param timeOut - Time out only valid if making connection needs to be + * done through a listener socket. i.e, conInfo.getSrcID() == myNodeID. 
+ * @return + * @throws SocketTimeoutException + * @throws IOException + */ + public Connection getConnection(ConnectionInfo conInfo, int timeOut) + throws SocketTimeoutException, IOException { + Connection con = allConnections.get(conInfo); + if (con != null) { + if (con.isStillConnected()) { + return con; + } else { + throw new AssertionError("con.closeConnection()"); + // con.closeConnection(); + } + } + + con = conInfo.makeConnection(myNodeID, networkInfo, timeOut); + if (con == null) + throw new IOException("Connection making process failed."); + + allConnections.put(conInfo, con); + return con; + } + + public void closeAllConnections() { + for (Connection con : allConnections.values()) { + try { + con.closeConnection(); + } catch (IOException e) { + e.printStackTrace(); + } + } } } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java b/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java index 5aaee42b..b3382cc7 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java +++ b/src/edu/mit/streamjit/impl/distributed/common/ConnectionFactory.java @@ -22,8 +22,13 @@ package edu.mit.streamjit.impl.distributed.common; import java.io.IOException; +import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.nio.channels.AsynchronousServerSocketChannel; +import java.nio.channels.AsynchronousSocketChannel; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import edu.mit.streamjit.impl.distributed.runtimer.ListenerSocket; @@ -100,4 +105,25 @@ public static Connection getConnection(Socket socket, boolean needSync) else return new TCPConnection(socket); } + + public static AsyncTCPConnection getAsyncConnection(int portNo) + throws IOException { + AsynchronousServerSocketChannel ssc; + AsynchronousSocketChannel sc2; + InetSocketAddress isa = new InetSocketAddress("0.0.0.0", portNo); + + ssc = 
AsynchronousServerSocketChannel.open().bind(isa); + Future accepted = ssc.accept(); + System.out.println("Waiting for asynchronous socket connection @ port " + + portNo); + try { + sc2 = accepted.get(); + } catch (InterruptedException | ExecutionException ex) { + ex.printStackTrace(); + return null; + } + + ssc.close(); + return new AsyncTCPConnection(sc2); + } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java b/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java index b31a1ec9..2eb039fa 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java +++ b/src/edu/mit/streamjit/impl/distributed/common/GlobalConstants.java @@ -21,11 +21,8 @@ */ package edu.mit.streamjit.impl.distributed.common; -import edu.mit.streamjit.impl.common.AbstractDrainer; -import edu.mit.streamjit.impl.distributed.TailChannel; import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.StreamNodeAgent; -import edu.mit.streamjit.tuner.TCPTuner; /** * This class is to keep track of all application level constants. So we can @@ -79,48 +76,4 @@ private GlobalConstants() { public static final String PORTID_MAP = "portIdMap"; public static final String PARTITION = "partition"; public static final String CONINFOMAP = "ConInfoMap"; - - /** - * Whether to start the tuner automatically or not. - *

        - *
      1. 0 - Controller will start the tuner automatically. - *
      2. 1 - User has to manually start the tuner with correct portNo as - * argument. Port no 12563 is used in this case. But it can be changed at - * {@link TCPTuner#startTuner(String)}. We need this option to run the - * tuning on remote machines. - *
      - */ - public static int tunerMode = 0; - - /** - * To turn on or turn off the drain data. If this is false, drain data will - * be ignored and every new reconfiguration will run with fresh inputs. - */ - public static final boolean useDrainData = false; - - /** - * To turn on or off the dead lock handler. see {@link AbstractDrainer} for - * it's usage. - */ - public static final boolean needDrainDeadlockHandler = true; - - /** - * Enables tuning. Tuner will be started iff this flag is set true. - * Otherwise, just use the fixed configuration file to run the program. No - * tuning, no intermediate draining. In this mode (tune = false), time taken - * to pass fixed number of input will be measured for 30 rounds and logged - * into FixedOutPut.txt. See {@link TailChannel} for the file logging - * details. - */ - public static final boolean tune = false; - - /** - * Save all configurations tired by open tuner in to - * "configurations//app.name" directory. - */ - public static final boolean saveAllConfigurations = true; - - static { - - } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java b/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java index d9a7fc35..316be158 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java +++ b/src/edu/mit/streamjit/impl/distributed/common/MiscCtrlElements.java @@ -22,7 +22,7 @@ package edu.mit.streamjit.impl.distributed.common; import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; public abstract class MiscCtrlElements implements CTRLRMessageElement { @@ -38,10 +38,10 @@ public void accept(CTRLRMessageVisitor visitor) { public static final class NewConInfo extends MiscCtrlElements { private static final long serialVersionUID = 1L; - public final TCPConnectionInfo conInfo; + public final ConnectionInfo 
conInfo; public final Token token; - public NewConInfo(TCPConnectionInfo conInfo, Token token) { + public NewConInfo(ConnectionInfo conInfo, Token token) { this.conInfo = conInfo; this.token = token; } diff --git a/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java b/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java new file mode 100644 index 00000000..a7456228 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/NetworkInfo.java @@ -0,0 +1,28 @@ +package edu.mit.streamjit.impl.distributed.common; + +import java.net.InetAddress; +import java.util.Map; + +/** + * Keeps network information of all nodes in the system. + * + * @author Sumanan sumanan@mit.edu + * @since May 23, 2014 + */ +public class NetworkInfo { + + private final Map iNetAddressMap; + + public NetworkInfo(Map iNetAddressMap) { + this.iNetAddressMap = iNetAddressMap; + } + + public InetAddress getInetAddress(int nodeID) { + if (this.iNetAddressMap == null) + return null; + InetAddress ipAddress = iNetAddressMap.get(nodeID); + if (ipAddress.isLoopbackAddress()) + ipAddress = iNetAddressMap.get(0); + return ipAddress; + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/common/Options.java b/src/edu/mit/streamjit/impl/distributed/common/Options.java new file mode 100644 index 00000000..ed028e2e --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/Options.java @@ -0,0 +1,278 @@ +package edu.mit.streamjit.impl.distributed.common; + +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Properties; + +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.distributed.ConnectionManager.AllConnectionParams; +import edu.mit.streamjit.impl.distributed.ConnectionManager.AsyncTCPNoParams; +import edu.mit.streamjit.impl.distributed.ConnectionManager.BlockingTCPNoParams; +import 
edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; +import edu.mit.streamjit.impl.distributed.TailChannels; +import edu.mit.streamjit.impl.distributed.TailChannels.BlockingTailChannel1; +import edu.mit.streamjit.impl.distributed.TailChannels.BlockingTailChannel2; +import edu.mit.streamjit.tuner.OnlineTuner; +import edu.mit.streamjit.tuner.TCPTuner; + +/** + * Program options. Loads the values from "options.properties". + * + * @author sumanan + * @since 1 Mar, 2015 + */ +public final class Options { + + /** + * We can set this value at class loading time also as follows. + * + * maxThreadCount = Math.max(Runtime.getntime().availableProcessors() / 2, + * 1); + * + * Lets hard code this for the moment. + */ + public static final int maxNumCores; + + /** + * To turn on or off the dead lock handler. see {@link AbstractDrainer} for + * it's usage. + */ + public static final boolean needDrainDeadlockHandler; + + /** + * Turn On/Off the profiling. + */ + public static final boolean needProfiler; + + /** + * Output count for tuning. Tuner measures the running time for this number + * of outputs. + */ + public static final int outputCount; + + /** + * Period to print output count periodically. This printing feature get + * turned off if this value is less than 1. Time unit is ms. See + * {@link TailChannels}. + */ + public static final int printOutputCountPeriod; + + /** + * Save all configurations tired by open tuner in to + * "configurations//app.name" directory. + */ + public static final boolean saveAllConfigurations; + + /** + * Enables {@link DistributedStreamCompiler} to run on a single node. When + * this is enabled, noOfNodes passed as compiler argument has no effect. + */ + public static final boolean singleNodeOnline; + + /** + * Enables tuning. Tuner will be started iff this flag is set true. + * Otherwise, just use the fixed configuration file to run the program. No + * tuning, no intermediate draining. 
In this mode (tune = false), time taken + * to pass a fixed number of inputs will be measured for 30 rounds and logged + * into FixedOutPut.txt. See {@link TailChannels} for the file logging + * details. + *
        + * 0 - No tuning, uses configuration file to run. + *
          + * 1 - Tuning. + *
            + * 2 - Evaluate configuration files. ( compares final cfg with hand tuned + * cfg. Both file should be presented in the running directory. + */ + public static final int tune; + + /** + * Decides how to start the opentuner. In first 2 cases, controller starts + * opentuner and establishes connection with it on a random port no range + * from 5000-65536. User can provide port no in 3 case. + * + *
              + *
            1. 0 - Controller starts the tuner automatically on a terminal. User can + * see Opentuner related outputs in the new terminal. + *
            2. 1 - Controller starts the tuner automatically as a Python process. No + * explicit window will be opened. Suitable for remote running through SSH + * terminal. + *
            3. 2 - User has to manually start the tuner with correct portNo as + * argument. Port no 12563 is used in this case. But it can be changed at + * {@link TCPTuner#startTuner(String)}. We need this option to run the + * tuning on remote machines. + *
            + */ + public static final int tunerStartMode; + + /** + * if true uses Compiler2, interpreter otherwise. + */ + public static final boolean useCompilerBlob; + + /** + * To turn on or turn off the drain data. If this is false, drain data will + * be ignored and every new reconfiguration will run with fresh inputs. + */ + public static final boolean useDrainData; + + // Following are miscellaneous options to avoid rebuilding jar files every + // time to change some class selections. You may decide to remove these + // variables in a stable release. + // TODO: Fix all design pattern related issues. + + /** + *
              + *
            1. 0 - {@link AllConnectionParams} + *
            2. 1 - {@link BlockingTCPNoParams} + *
            3. 2 - {@link AsyncTCPNoParams} + *
            4. default: {@link AsyncTCPNoParams} + *
            + */ + public static final int connectionManager; + + /** + *
              + *
            1. 1 - {@link BlockingTailChannel1} + *
            2. 2 - {@link BlockingTailChannel2} + *
            3. default: {@link BlockingTailChannel2} + *
            + */ + public static final int tailChannel; + + /** + * {@link OnlineTuner}'s verifier verifies the configurations if + * {@link #tune}==2. evaluationCount determines the number of re runs for a + * configuration. Default value is 2. + */ + public static final int evaluationCount; + + /** + * {@link OnlineTuner}'s verifier verifies the configurations if + * {@link #tune}==2. verificationCount determines the number of re runs for + * a set of configurations in the verify.txt. Default value is 1. + */ + public static final int verificationCount; + + /** + * Large multiplier -> Large compilation time and Large waiting time. + */ + public static final int multiplierMaxValue; + + public static final boolean prognosticate; + + public static final int bigToSmallBlobRatio; + + public static final int loadRatio; + + public static final int blobToNodeRatio; + + public static final int boundaryChannelRatio; + + public static final boolean timeOut; + + static { + Properties prop = loadProperties(); + printOutputCountPeriod = Integer.parseInt(prop + .getProperty("printOutputCountPeriod"));; + maxNumCores = Integer.parseInt(prop.getProperty("maxNumCores")); + useCompilerBlob = Boolean.parseBoolean(prop + .getProperty("useCompilerBlob")); + needDrainDeadlockHandler = Boolean.parseBoolean(prop + .getProperty("needDrainDeadlockHandler")); + needProfiler = Boolean.parseBoolean(prop.getProperty("needProfiler")); + outputCount = Integer.parseInt(prop.getProperty("outputCount")); + tune = Integer.parseInt(prop.getProperty("tune")); + tunerStartMode = Integer.parseInt(prop.getProperty("tunerStartMode")); + saveAllConfigurations = Boolean.parseBoolean(prop + .getProperty("saveAllConfigurations")); + singleNodeOnline = Boolean.parseBoolean(prop + .getProperty("singleNodeOnline")); + useDrainData = Boolean.parseBoolean(prop.getProperty("useDrainData")); + connectionManager = Integer.parseInt(prop + .getProperty("connectionManager")); + tailChannel = 
Integer.parseInt(prop.getProperty("tailChannel")); + evaluationCount = Integer.parseInt(prop.getProperty("evaluationCount")); + verificationCount = Integer.parseInt(prop + .getProperty("verificationCount")); + multiplierMaxValue = Integer.parseInt(prop + .getProperty("multiplierMaxValue")); + prognosticate = Boolean.parseBoolean(prop.getProperty("prognosticate")); + bigToSmallBlobRatio = Integer.parseInt(prop + .getProperty("bigToSmallBlobRatio")); + loadRatio = Integer.parseInt(prop.getProperty("loadRatio")); + blobToNodeRatio = Integer.parseInt(prop.getProperty("blobToNodeRatio")); + boundaryChannelRatio = Integer.parseInt(prop + .getProperty("boundaryChannelRatio")); + timeOut = Boolean.parseBoolean(prop.getProperty("timeOut")); + } + + public static Properties getProperties() { + Properties prop = new Properties(); + setProperty(prop, "tunerStartMode", tunerStartMode); + setProperty(prop, "useDrainData", useDrainData); + setProperty(prop, "needDrainDeadlockHandler", needDrainDeadlockHandler); + setProperty(prop, "tune", tune); + setProperty(prop, "saveAllConfigurations", saveAllConfigurations); + setProperty(prop, "outputCount", outputCount); + setProperty(prop, "useCompilerBlob", useCompilerBlob); + setProperty(prop, "printOutputCountPeriod", printOutputCountPeriod); + setProperty(prop, "singleNodeOnline", singleNodeOnline); + setProperty(prop, "maxNumCores", maxNumCores); + setProperty(prop, "needProfiler", needProfiler); + setProperty(prop, "connectionManager", connectionManager); + setProperty(prop, "tailChannel", tailChannel); + setProperty(prop, "evaluationCount", evaluationCount); + setProperty(prop, "verificationCount", verificationCount); + setProperty(prop, "multiplierMaxValue", multiplierMaxValue); + setProperty(prop, "prognosticate", prognosticate); + setProperty(prop, "bigToSmallBlobRatio", bigToSmallBlobRatio); + setProperty(prop, "loadRatio", loadRatio); + setProperty(prop, "blobToNodeRatio", blobToNodeRatio); + setProperty(prop, 
"boundaryChannelRatio", boundaryChannelRatio); + setProperty(prop, "timeOut", timeOut); + return prop; + } + + private static Properties loadProperties() { + Properties prop = new Properties(); + InputStream input = null; + try { + input = new FileInputStream("options.properties"); + prop.load(input); + } catch (IOException ex) { + System.err.println("Failed to load options.properties"); + } + return prop; + } + + private static void setProperty(Properties prop, String name, Boolean val) { + prop.setProperty(name, val.toString()); + } + + private static void setProperty(Properties prop, String name, Integer val) { + prop.setProperty(name, val.toString()); + } + + public static void storeProperties() { + OutputStream output = null; + try { + output = new FileOutputStream("options.properties"); + Properties prop = getProperties(); + prop.store(output, null); + } catch (IOException io) { + io.printStackTrace(); + } finally { + if (output != null) { + try { + output.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java b/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java index 6baa7944..84729821 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java +++ b/src/edu/mit/streamjit/impl/distributed/common/SNDrainElement.java @@ -79,7 +79,7 @@ public void process(SNDrainProcessor dp) { * the drain data of the blobs after the draining. See {@link DrainData} for * more information. 
*/ - public static final class DrainedData extends SNDrainElement { + public static final class SNDrainedData extends SNDrainElement { private static final long serialVersionUID = 1L; public final Token blobID; @@ -87,7 +87,7 @@ public static final class DrainedData extends SNDrainElement { public final ImmutableMap> inputData; public final ImmutableMap> outputData; - public DrainedData(Token blobID, DrainData drainData, + public SNDrainedData(Token blobID, DrainData drainData, ImmutableMap> inputData, ImmutableMap> outputData) { this.blobID = blobID; @@ -113,6 +113,6 @@ public interface SNDrainProcessor { public void process(Drained drained); - public void process(DrainedData drainedData); + public void process(SNDrainedData snDrainedData); } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNException.java b/src/edu/mit/streamjit/impl/distributed/common/SNException.java index 32da1a2e..d42e3ef3 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/SNException.java +++ b/src/edu/mit/streamjit/impl/distributed/common/SNException.java @@ -21,7 +21,7 @@ */ package edu.mit.streamjit.impl.distributed.common; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; public class SNException implements SNMessageElement { @@ -39,9 +39,9 @@ public void accept(SNMessageVisitor visitor) { public static final class AddressBindException extends SNException { private static final long serialVersionUID = 1L; - public final TCPConnectionInfo conInfo; + public final ConnectionInfo conInfo; - public AddressBindException(TCPConnectionInfo conInfo) { + public AddressBindException(ConnectionInfo conInfo) { this.conInfo = conInfo; } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java b/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java index ecdef2e3..5db2abc5 100644 --- 
a/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java +++ b/src/edu/mit/streamjit/impl/distributed/common/SNMessageVisitor.java @@ -21,6 +21,8 @@ */ package edu.mit.streamjit.impl.distributed.common; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement; + public interface SNMessageVisitor { void visit(Error error); @@ -34,4 +36,8 @@ public interface SNMessageVisitor { void visit(SNDrainElement snDrainElement); void visit(SNException snException); + + void visit(SNTimeInfo timeInfo); + + void visit(SNProfileElement snProfileElement); } \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java new file mode 100644 index 00000000..505c0b3c --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfo.java @@ -0,0 +1,71 @@ +package edu.mit.streamjit.impl.distributed.common; + +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.node.StreamNode; + +/** + * {@link StreamNode}s shall send the timing information such as compilation + * time of each blob, draining time, draindata collection time, Init schedule + * time and etc by sending {@link SNTimeInfo}. 
+ * + * @author Sumanan sumanan@mit.edu + * @since Nov 20, 2014 + * + */ +public abstract class SNTimeInfo implements SNMessageElement { + + private static final long serialVersionUID = 1L; + + public abstract void process(SNTimeInfoProcessor snTimeInfoProcessor); + + @Override + public void accept(SNMessageVisitor visitor) { + visitor.visit(this); + } + + public static final class CompilationTime extends SNTimeInfo { + + private static final long serialVersionUID = 1L; + + public final Token blobID; + + public final double milliSec; + + public CompilationTime(Token blobID, double milliSec) { + this.blobID = blobID; + this.milliSec = milliSec; + } + + @Override + public void process(SNTimeInfoProcessor snTimeInfoProcessor) { + snTimeInfoProcessor.process(this); + } + + } + + public static final class DrainingTime extends SNTimeInfo { + + private static final long serialVersionUID = 1L; + + public final Token blobID; + + public final double milliSec; + + public DrainingTime(Token blobID, double milliSec) { + this.blobID = blobID; + this.milliSec = milliSec; + } + + @Override + public void process(SNTimeInfoProcessor snTimeInfoProcessor) { + snTimeInfoProcessor.process(this); + } + + } + + public interface SNTimeInfoProcessor { + public void process(CompilationTime compilationTime); + + public void process(DrainingTime drainingTime); + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java new file mode 100644 index 00000000..94ff2a4a --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/common/SNTimeInfoProcessorImpl.java @@ -0,0 +1,35 @@ +package edu.mit.streamjit.impl.distributed.common; + +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.CompilationTime; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.DrainingTime; +import 
edu.mit.streamjit.impl.distributed.common.SNTimeInfo.SNTimeInfoProcessor; + +/** + * Uses {@link TimeLogger} to log timing information. + * + * @author sumanan + * @since Nov 24, 2014 + */ +public class SNTimeInfoProcessorImpl implements SNTimeInfoProcessor { + + private final TimeLogger logger; + + public SNTimeInfoProcessorImpl(TimeLogger logger) { + this.logger = logger; + } + + @Override + public void process(CompilationTime compilationTime) { + String msg = String.format("Blob-%s-%.0fms\n", compilationTime.blobID, + compilationTime.milliSec); + logger.logCompileTime(msg); + } + + @Override + public void process(DrainingTime drainingTime) { + String msg = String.format("Blob-%s-%.0fms\n", drainingTime.blobID, + drainingTime.milliSec); + logger.logDrainTime(msg); + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java index 024ccbfc..6d9431ff 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java +++ b/src/edu/mit/streamjit/impl/distributed/common/SynchronizedTCPConnection.java @@ -42,7 +42,7 @@ public class SynchronizedTCPConnection extends TCPConnection { * @param socket */ public SynchronizedTCPConnection(Socket socket) { - super(socket, 50); + super(socket, 5); } @Override diff --git a/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java b/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java index 9d1d0dc1..53ea1f23 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java +++ b/src/edu/mit/streamjit/impl/distributed/common/TCPConnection.java @@ -21,14 +21,19 @@ */ package edu.mit.streamjit.impl.distributed.common; -import java.io.*; -import java.net.*; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import static com.google.common.base.Preconditions.*; - +import 
java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.OptionalDataException; +import java.net.InetAddress; +import java.net.Socket; + +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingInputChannel; +import edu.mit.streamjit.impl.distributed.node.BlockingOutputChannel; import edu.mit.streamjit.impl.distributed.node.StreamNode; /** @@ -85,8 +90,9 @@ public void writeObject(Object obj) throws IOException { try { ooStream.writeObject(obj); + n++; // TODO: Any way to improve the performance? - if (n++ > resetCount) { + if (n > resetCount) { n = 0; ooStream.reset(); } @@ -113,6 +119,7 @@ public void writeObject(Object obj) throws IOException { } public final void closeConnection() { + isconnected = false; try { if (ooStream != null) this.ooStream.close(); @@ -121,7 +128,6 @@ public final void closeConnection() { if (socket != null) this.socket.close(); } catch (IOException ex) { - isconnected = false; ex.printStackTrace(); } } @@ -195,10 +201,10 @@ public static class TCPConnectionInfo extends ConnectionInfo { private static final long serialVersionUID = 1L; - int portNo; + private final int portNo; public TCPConnectionInfo(int srcID, int dstID, int portNo) { - super(srcID, dstID); + super(srcID, dstID, true); Ipv4Validator validator = Ipv4Validator.getInstance(); if (!validator.isValid(portNo)) throw new IllegalArgumentException("Invalid port No"); @@ -236,90 +242,47 @@ public String toString() { return "TCPConnectionInfo [srcID=" + getSrcID() + ", dstID=" + getDstID() + ", portID=" + portNo + "]"; } - } - /** - * Keeps all opened {@link TCPConnection}s for a machine. 
Each machine - * should have a single instance of this class and use this class to make - * new connections. - * - *

            - * TODO: Need to make this class singleton. I didn't do it now because in - * current way, controller and a local {@link StreamNode} are running in a - * same JVM. So first, local {@link StreamNode} should be made to run on a - * different JVM and then make this class singleton. - */ - public static class TCPConnectionProvider { - - private ConcurrentMap allConnections; - - private final int myNodeID; - - private final Map iNetAddressMap; - - public TCPConnectionProvider(int myNodeID, - Map iNetAddressMap) { - checkNotNull(iNetAddressMap, "nodeInfoMap is null"); - this.myNodeID = myNodeID; - this.iNetAddressMap = iNetAddressMap; - this.allConnections = new ConcurrentHashMap<>(); - } - - /** - * See {@link #getConnection(TCPConnectionInfo, int)}. - * - * @param conInfo - * @return - * @throws IOException - */ - public Connection getConnection(TCPConnectionInfo conInfo) - throws IOException { - return getConnection(conInfo, 0); - } - -/** - * If the connection corresponds to conInfo is already established - * returns the connection. Try to make a new connection otherwise. - * - * @param conInfo - Information that uniquely identifies a {@link TCPConnection - * @param timeOut - Time out only valid if making connection needs to be - * done through a listener socket. i.e, conInfo.getSrcID() == myNodeID. 
- * @return - * @throws SocketTimeoutException - * @throws IOException - */ - public Connection getConnection(TCPConnectionInfo conInfo, int timeOut) - throws SocketTimeoutException, IOException { - TCPConnection con = allConnections.get(conInfo); - if (con != null) { - if (con.isStillConnected()) { - return con; - } else { - throw new AssertionError("con.closeConnection()"); - // con.closeConnection(); + @Override + public Connection makeConnection(int nodeID, NetworkInfo networkInfo, + int timeOut) { + Connection con = null; + if (srcID == nodeID) { + try { + con = ConnectionFactory.getConnection(portNo, timeOut, + false); + } catch (IOException e) { + e.printStackTrace(); } } - if (conInfo.getSrcID() == myNodeID) { - con = ConnectionFactory.getConnection(conInfo.getPortNo(), - timeOut, false); - } else if (conInfo.getDstID() == myNodeID) { - InetAddress ipAddress = iNetAddressMap.get(conInfo.getSrcID()); - if (ipAddress.isLoopbackAddress()) - ipAddress = iNetAddressMap.get(0); - - int portNo = conInfo.getPortNo(); - con = ConnectionFactory.getConnection( - ipAddress.getHostAddress(), portNo, false); + else if (dstID == nodeID) { + InetAddress ipAddress = networkInfo.getInetAddress(srcID); + try { + con = ConnectionFactory.getConnection( + ipAddress.getHostAddress(), portNo, false); + } catch (IOException e) { + e.printStackTrace(); + } + } else { + throw new IllegalArgumentException( + "Neither srcID nor dstID matches with nodeID"); } - allConnections.put(conInfo, con); return con; } - public void closeAllConnections() { - for (TCPConnection con : allConnections.values()) { - con.closeConnection(); - } + @Override + public BoundaryInputChannel inputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + return new BlockingInputChannel(bufSize, conProvider, this, + t.toString(), 0); + } + + @Override + public BoundaryOutputChannel outputChannel(Token t, int bufSize, + ConnectionProvider conProvider) { + return new BlockingOutputChannel(bufSize, 
conProvider, this, + t.toString(), 0); } } } \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/common/Tester.java b/src/edu/mit/streamjit/impl/distributed/common/Tester.java index 781692b5..1f2171a7 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/Tester.java +++ b/src/edu/mit/streamjit/impl/distributed/common/Tester.java @@ -28,6 +28,18 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.ObjectOutputStream; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionType; +import edu.mit.streamjit.impl.distributed.common.Connection.GenericConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; + +; public class Tester { @@ -35,7 +47,15 @@ public class Tester { * @param args */ public static void main(String[] args) { + // test1(); + // test2(); + test3(); + } + /** + * Testing one - tests the size of an object. + */ + private static void test1() { Error er = Error.FILE_NOT_FOUND; AppStatus apSts = AppStatus.STOPPED; ByteArrayOutputStream byteAos = new ByteArrayOutputStream(); @@ -54,10 +74,108 @@ public static void main(String[] args) { e.printStackTrace(); } - /* - * try { os.writeInt(34345); } catch (IOException e) { e.printStackTrace(); } - */ + try { + os.writeInt(34345); + } catch (IOException e) { + e.printStackTrace(); + } System.out.println(byteAos.toByteArray().length); } + + /** + * Tests the equals and hascode. 
+ */ + private static void test2() { + + ConnectionInfo asyConInfo1 = new AsyncTCPConnectionInfo(1, 4, 8989); + ConnectionInfo asyConInfo2 = new AsyncTCPConnectionInfo(1, 4, 8980); + ConnectionInfo asyConInfo3 = new AsyncTCPConnectionInfo(4, 1, 8989); + ConnectionInfo asyConInfo4 = new AsyncTCPConnectionInfo(4, 1, 8980); + ConnectionInfo asyConInfo5 = new AsyncTCPConnectionInfo(1, 4, 8989); + + ConnectionInfo tcpConInfo1 = new TCPConnectionInfo(1, 4, 8989); + ConnectionInfo tcpConInfo2 = new TCPConnectionInfo(1, 4, 8980); + ConnectionInfo tcpConInfo3 = new TCPConnectionInfo(4, 1, 8989); + ConnectionInfo tcpConInfo4 = new TCPConnectionInfo(4, 1, 8980); + + ConnectionInfo conInfo1 = new GenericConnectionInfo(1, 4, true); + ConnectionInfo conInfo2 = new GenericConnectionInfo(1, 4, false); + ConnectionInfo conInfo3 = new GenericConnectionInfo(4, 1, true); + ConnectionInfo conInfo4 = new GenericConnectionInfo(4, 1, false); + + System.out.println("AsyncTCPConnectionInfo - AsyncTCPConnectionInfo"); + System.out.println(asyConInfo1.equals(asyConInfo2)); + System.out.println(asyConInfo1.equals(asyConInfo3)); + System.out.println(asyConInfo1.equals(asyConInfo4)); + System.out.println(asyConInfo2.equals(asyConInfo3)); + System.out.println(asyConInfo2.equals(asyConInfo4)); + System.out.println(asyConInfo3.equals(asyConInfo4)); + System.out.println(); + + System.out.println("ConnectionInfo - AsyncTCPConnectionInfo"); + System.out.println(conInfo1.equals(asyConInfo1)); + System.out.println(conInfo1.equals(asyConInfo2)); + System.out.println(conInfo1.equals(asyConInfo3)); + System.out.println(conInfo1.equals(asyConInfo4)); + System.out.println(conInfo2.equals(asyConInfo1)); + System.out.println(conInfo2.equals(asyConInfo2)); + System.out.println(conInfo2.equals(asyConInfo3)); + System.out.println(conInfo2.equals(asyConInfo4)); + System.out.println(conInfo3.equals(asyConInfo1)); + System.out.println(conInfo3.equals(asyConInfo2)); + System.out.println(conInfo3.equals(asyConInfo3)); 
+ System.out.println(conInfo3.equals(asyConInfo4)); + System.out.println(conInfo4.equals(asyConInfo1)); + System.out.println(conInfo4.equals(asyConInfo2)); + System.out.println(conInfo4.equals(asyConInfo3)); + System.out.println(conInfo4.equals(asyConInfo4)); + System.out.println(); + + System.out.println("ConnectionInfo - TCPConnectionInfo"); + System.out.println(conInfo1.equals(tcpConInfo1)); + System.out.println(conInfo1.equals(tcpConInfo2)); + System.out.println(conInfo1.equals(tcpConInfo3)); + System.out.println(conInfo1.equals(tcpConInfo4)); + System.out.println(conInfo2.equals(tcpConInfo1)); + System.out.println(conInfo2.equals(tcpConInfo2)); + System.out.println(conInfo2.equals(tcpConInfo3)); + System.out.println(conInfo2.equals(tcpConInfo4)); + System.out.println(conInfo3.equals(tcpConInfo1)); + System.out.println(conInfo3.equals(tcpConInfo2)); + System.out.println(conInfo3.equals(tcpConInfo3)); + System.out.println(conInfo3.equals(tcpConInfo4)); + System.out.println(conInfo4.equals(tcpConInfo1)); + System.out.println(conInfo4.equals(tcpConInfo2)); + System.out.println(conInfo4.equals(tcpConInfo3)); + System.out.println(conInfo4.equals(tcpConInfo4)); + System.out.println(); + + Map tesMap = new HashMap<>(); + tesMap.put(tcpConInfo1, 1); + tesMap.put(asyConInfo1, 2); + + System.out.println(tesMap.containsKey(tcpConInfo1)); + System.out.println(tesMap.containsKey(tcpConInfo2)); + System.out.println(tesMap.containsKey(tcpConInfo3)); + System.out.println(tesMap.containsKey(tcpConInfo4)); + + System.out.println(tesMap.containsKey(asyConInfo1)); + System.out.println(tesMap.containsKey(asyConInfo2)); + System.out.println(tesMap.containsKey(asyConInfo3)); + System.out.println(tesMap.containsKey(asyConInfo4)); + System.out.println(tesMap.containsKey(asyConInfo5)); + + System.out.println(tesMap.containsKey(conInfo1)); + System.out.println(tesMap.containsKey(conInfo2)); + System.out.println(tesMap.containsKey(conInfo3)); + 
System.out.println(tesMap.containsKey(conInfo4)); + } + + private static void test3() { + List conlist = Arrays.asList(ConnectionType.values()); + for (ConnectionType connectionType : conlist) { + System.out.println(connectionType); + } + } } diff --git a/src/edu/mit/streamjit/impl/distributed/common/Utils.java b/src/edu/mit/streamjit/impl/distributed/common/Utils.java index 292e3f9c..8ee4c41c 100644 --- a/src/edu/mit/streamjit/impl/distributed/common/Utils.java +++ b/src/edu/mit/streamjit/impl/distributed/common/Utils.java @@ -21,15 +21,41 @@ */ package edu.mit.streamjit.impl.distributed.common; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; + +import java.io.File; +import java.io.FileWriter; +import java.io.FilenameFilter; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.management.MemoryUsage; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Calendar; import java.util.Collections; +import java.util.Properties; import java.util.Set; +import java.util.concurrent.TimeUnit; +import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableSet; +import edu.mit.streamjit.api.OneToOneElement; import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.blob.Blob; import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.common.Configuration; import edu.mit.streamjit.impl.common.IOInfo; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.ConfigurationManager; +import edu.mit.streamjit.impl.distributed.HotSpotTuning; +import edu.mit.streamjit.impl.distributed.PartitionManager; +import edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.test.apps.fmradio.FMRadio; +import edu.mit.streamjit.util.ConfigurationUtils; /** * @author Sumanan sumanan@mit.edu @@ -50,4 +76,269 @@ 
public static Token getblobID(Set> workers) { return Collections.min(inputBuilder.build()); } + + /** + * Prints heapMaxSize, current heapSize and heapFreeSize. + */ + public static void printMemoryStatus() { + long heapMaxSize = Runtime.getRuntime().maxMemory(); + long heapSize = Runtime.getRuntime().totalMemory(); + long heapFreeSize = Runtime.getRuntime().freeMemory(); + int MEGABYTE = 1024 * 1024; + System.out.println("#########################"); + printCurrentDateTime(); + System.out.println(String.format("heapMaxSize = %dMB", heapMaxSize + / MEGABYTE)); + System.out.println(String + .format("heapSize = %dMB", heapSize / MEGABYTE)); + System.out.println(String.format("heapFreeSize = %dMB", heapFreeSize + / MEGABYTE)); + System.out.println("#########################"); + } + + /** + * Prints current date and time in "yyyy/MM/dd HH:mm:ss" format. + */ + public static void printCurrentDateTime() { + DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss"); + Calendar cal = Calendar.getInstance(); + System.out.println(dateFormat.format(cal.getTime())); + } + + public static void printOutOfMemory() { + MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean(); + System.out.println("******OutOfMemoryError******"); + MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage(); + int MEGABYTE = 1024 * 1024; + long maxMemory = heapUsage.getMax() / MEGABYTE; + long usedMemory = heapUsage.getUsed() / MEGABYTE; + System.out + .println("Memory Use :" + usedMemory + "M/" + maxMemory + "M"); + } + + /** + * @param name + * name of the directory. + * @return true if and only if the directory was created; false + * otherwise. 
+ */ + public static boolean createDir(String name) { + File dir = new File(name); + if (dir.exists()) { + if (dir.isDirectory()) + return true; + else { + System.err.println("A file exists in the name of dir-" + name); + return false; + } + } else + return dir.mkdirs(); + } + + /** + * Creates app directory with the name of appName, and creates a sub + * directory "configurations". + * + * @param name + * name of the directory. + * @return true if and only if the directories were created; + * false otherwise. + */ + public static boolean createAppDir(String appName) { + if (createDir(appName)) + return createDir(String.format("%s%s%s", appName, File.separator, + ConfigurationUtils.configDir)); + else + return false; + } + + /** + * Writes README.txt. Mainly saves GlobalConstant values. + * + * @param appName + */ + public static void writeReadMeTxt(String appName) { + try { + // rename(appName, "README.txt"); + FileWriter writer = new FileWriter(String.format("%s%sREADME.txt", + appName, File.separator)); + DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss"); + Calendar cal = Calendar.getInstance(); + writer.write(dateFormat.format(cal.getTime()) + "\n"); + writer.write(appName + "\n"); + Properties prop = Options.getProperties(); + prop.store(writer, "GlobalConstants.Properties"); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * @return true iff renaming is success. + */ + public static boolean rename(String appName, String fileName) { + File file = new File(String.format("%s%s%s", appName, File.separator, + fileName)); + File fileOrig = new File(String.format("%s%s%s.orig", appName, + File.separator, fileName)); + if (fileOrig.exists()) + return false; + if (file.exists()) + file.renameTo(fileOrig); + return true; + } + + /** + * Returns a {@link FileWriter} of the file "dirName/fileName" with append = + * false. Creates the file if it not exists. 
Suppresses {@link IOException} + * and returns null if exception occurred. This method is added to keep + * other classes clean. + * + * @return {@link FileWriter} or null. + */ + public static FileWriter fileWriter(String dirName, String fileName) { + return fileWriter(dirName, fileName, false); + } + + /** + * Returns a {@link FileWriter} of the file "dirName/fileName". Creates the + * file if it not exists. Suppresses {@link IOException} and returns null if + * exception occurred. This method is added to keep other classes clean. + * + * @return {@link FileWriter} or null. + */ + public static FileWriter fileWriter(String dirName, String fileName, + boolean append) { + String fullFileName = String.format("%s%s%s", dirName, File.separator, + fileName); + return fileWriter(fullFileName, append); + } + /** + * Creates and returns a {@link FileWriter} with append = false. Suppresses + * {@link IOException} and returns null if exception occurred. This method + * is added to keep other classes clean. + * + * @return {@link FileWriter} or null. + */ + public static FileWriter fileWriter(String name) { + return fileWriter(name, false); + } + + /** + * Creates and returns a {@link FileWriter}. Suppresses {@link IOException} + * and returns null if exception occurred. This method is added to keep + * other classes clean. + * + * @return {@link FileWriter} or null. + */ + public static FileWriter fileWriter(String name, boolean append) { + FileWriter fw = null; + try { + fw = new FileWriter(name, append); + } catch (IOException e) { + e.printStackTrace(); + } + return fw; + } + + /** + * [16-02-2015] - I couldn't run dot tools in Lanka cluster. So as a hack, i + * implemented this method to generate blob graph for each configuration. + * TODO: This generation process is damn slow. Takes 40 mins to process 5000 + * cfgs. 
+ * + * @param stream + * @throws IOException + */ + public static void generateBlobGraphs(OneToOneElement stream) + throws IOException { + StreamJitApp app = new StreamJitApp<>(stream); + PartitionManager partitionManager = new HotSpotTuning(app); + partitionManager.getDefaultConfiguration( + Workers.getAllWorkersInGraph(app.source), 2); + ConfigurationManager cfgManager = new ConfigurationManager(app, + partitionManager); + Stopwatch sw = Stopwatch.createStarted(); + for (Integer i = 1; i < 5010; i++) { + String prefix = i.toString(); + Configuration cfg = ConfigurationUtils.readConfiguration(app.name, + prefix); + if (cfg != null) { + cfg = ConfigurationUtils.addConfigPrefix(cfg, prefix); + cfgManager.newConfiguration(cfg); + } + } + + Configuration cfg = ConfigurationUtils.readConfiguration(app.name, + "final"); + if (cfg != null) { + cfg = ConfigurationUtils.addConfigPrefix(cfg, "final"); + cfgManager.newConfiguration(cfg); + } + sw.stop(); + System.out.println(sw.elapsed(TimeUnit.SECONDS)); + } + + public static void main(String[] args) throws IOException { + generateBlobGraphs(new FMRadio.FMRadioCore()); + } + + /** + * Backups the files generated during tuning. + */ + public static void backup(String appName) { + rename(appName, "summary"); + rename(appName, "compileTime.txt"); + rename(appName, "runTime.txt"); + rename(appName, "drainTime.txt"); + rename(appName, "GraphProperty.txt"); + rename(appName, "profile.txt"); + } + + /** + * Move all files and directories, except the configuration directory, from + * appDir to appDir/tune directory. Does nothing if tune directory exists. 
+ * + * @param appName + */ + public static void backup1(String appName) { + File[] listOfFilesMove = listOfFilesMove(appName); + if (listOfFilesMove.length == 0) + return; + + File tuneDir = new File(String.format("%s%stune", appName, + File.separator)); + if (tuneDir.exists()) + return; + + if (!createDir(tuneDir.getPath())) + System.err.println(String.format("Creating %s dir failed.", + tuneDir.getPath())); + for (File f : listOfFilesMove) { + try { + Files.move(f.toPath(), + Paths.get(tuneDir.getPath(), f.getName()), + REPLACE_EXISTING); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + + public static File[] listOfFilesMove(final String appName) { + File dir = new File(appName); + File[] files = dir.listFiles(new FilenameFilter() { + public boolean accept(File dir, String name) { + return !name.equals(ConfigurationUtils.configDir); + } + }); + return files; + } + + public static void newApp(String appName) { + createAppDir(appName); + backup1(appName); + Utils.writeReadMeTxt(appName); + } } diff --git a/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java b/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java new file mode 100644 index 00000000..4a6ca579 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/AffinityManager.java @@ -0,0 +1,24 @@ +package edu.mit.streamjit.impl.distributed.node; + +import com.google.common.collect.ImmutableSet; + +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.distributed.node.BlobExecuter.BlobThread2; + +/** + * Assigns CPU cores to {@link BlobThread2}s. {@link BlobThread2}s are expected + * to set their processor affinity which is given by + * {@link AffinityManager#getAffinity(Blob, int)} before start running. + * + * @author sumanan + * @since 4 Feb, 2015 + */ +public interface AffinityManager { + + /** + * @param blob + * @param coreCode + * @return Set of CPU cores that is assigned the blob's coreCode. 
+ */ + ImmutableSet getAffinity(Blob blob, int coreCode); +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java b/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java new file mode 100644 index 00000000..7d19f705 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/AffinityManagers.java @@ -0,0 +1,29 @@ +package edu.mit.streamjit.impl.distributed.node; + +import com.google.common.collect.ImmutableSet; + +import edu.mit.streamjit.impl.blob.Blob; + +/** + * Various implementations of the interface {@link AffinityManager}. + * + * @author sumanan + * @since 4 Feb, 2015 + */ +public class AffinityManagers { + + /** + * This is an empty {@link AffinityManager}. {@link #getAffinity(Blob, int)} + * always returns null. + * + * @author sumanan + * @since 4 Feb, 2015 + */ + public static class EmptyAffinityManager implements AffinityManager { + + @Override + public ImmutableSet getAffinity(Blob blob, int coreCode) { + return null; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java b/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java new file mode 100644 index 00000000..c36f7369 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/AsyncOutputChannel.java @@ -0,0 +1,100 @@ +package edu.mit.streamjit.impl.distributed.node; + +import java.io.IOException; + +import com.google.common.collect.ImmutableList; + +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection; +import edu.mit.streamjit.impl.distributed.common.AsyncTCPConnection.AsyncTCPBuffer; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.common.Connection; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; + +public class AsyncOutputChannel implements 
BoundaryOutputChannel { + + private volatile Connection con; + + private final String name; + + private final ConnectionProvider conProvider; + + private final ConnectionInfo conInfo; + + private AsyncTCPBuffer buffer = null; + + private volatile boolean isFinal; + + private volatile boolean stopCalled; + + public AsyncOutputChannel(ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { + name = "AsyncTCPOutputChannel " + bufferTokenName; + this.conProvider = conProvider; + this.conInfo = conInfo; + isFinal = false; + stopCalled = false; + } + + @Override + public String name() { + return name; + } + + @Override + public Runnable getRunnable() { + return new Runnable() { + @Override + public void run() { + if (con == null || !con.isStillConnected()) { + try { + con = conProvider.getConnection(conInfo); + buffer = new AsyncTCPBuffer((AsyncTCPConnection) con); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + }; + } + + @Override + public ImmutableList getUnprocessedData() { + return ImmutableList.of(); + } + + @Override + public void stop(boolean isFinal) { + while (con == null); + this.isFinal = isFinal; + if (!stopCalled) { + try { + con.softClose(); + } catch (IOException e) { + e.printStackTrace(); + } + } + stopCalled = true; + } + + @Override + public void sendData() { + + } + + @Override + public Connection getConnection() { + return con; + } + + @Override + public ConnectionInfo getConnectionInfo() { + return conInfo; + } + + @Override + public Buffer getBuffer() { + return buffer; + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java b/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java new file mode 100644 index 00000000..95f27428 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/BlobExecuter.java @@ -0,0 +1,508 @@ +package edu.mit.streamjit.impl.distributed.node; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; 
+import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import com.google.common.base.Stopwatch; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableTable; + +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.blob.Buffers; +import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.common.AppStatus; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.BoundaryInputChannelManager; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.BoundaryOutputChannelManager; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.InputChannelManager; +import edu.mit.streamjit.impl.distributed.common.BoundaryChannelManager.OutputChannelManager; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.impl.distributed.common.Connection; +import edu.mit.streamjit.impl.distributed.common.SNDrainElement; +import edu.mit.streamjit.impl.distributed.common.SNDrainElement.SNDrainedData; +import edu.mit.streamjit.impl.distributed.common.SNMessageElement; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo; +import edu.mit.streamjit.impl.distributed.runtimer.Controller; +import edu.mit.streamjit.util.affinity.Affinity; + +/** + * This class was an inner class of {@link BlobsManagerImpl}. I have re factored + * {@link BlobsManagerImpl} and moved this class a new file. 
+ * + * @author sumanan + * @since 4 Feb, 2015 + */ +class BlobExecuter { + + /** + * + */ + private final BlobsManagerImpl blobsManagerImpl; + + Blob blob; + + final Token blobID; + + final private Set blobThreads; + + /** + * Buffers for all input and output edges of the {@link #blob}. + */ + ImmutableMap bufferMap; + + private ImmutableMap outputLocalBuffers; + + /** + * This flag will be set to true if an exception thrown by the core code of + * the {@link Blob}. Any exception occurred in a blob's corecode will be + * informed to {@link Controller} to halt the application. See the + * {@link BlobThread2}. + */ + AtomicBoolean crashed; + + volatile int drainState; + + final BoundaryInputChannelManager inChnlManager; + + final BoundaryOutputChannelManager outChnlManager; + + private DrainType drainType; + + BlobExecuter(BlobsManagerImpl blobsManagerImpl, Token t, Blob blob, + ImmutableMap inputChannels, + ImmutableMap outputChannels) { + this.blobsManagerImpl = blobsManagerImpl; + this.crashed = new AtomicBoolean(false); + this.blob = blob; + this.blobThreads = new HashSet<>(); + assert blob.getInputs().containsAll(inputChannels.keySet()); + assert blob.getOutputs().containsAll(outputChannels.keySet()); + this.inChnlManager = new InputChannelManager(inputChannels); + this.outChnlManager = new OutputChannelManager(outputChannels); + + String baseName = getName(blob); + for (int i = 0; i < blob.getCoreCount(); i++) { + String name = String.format("%s - %d", baseName, i); + blobThreads.add(new BlobThread2(blob.getCoreCode(i), this, name, + blobsManagerImpl.affinityManager.getAffinity(blob, i))); + } + + if (blobThreads.size() < 1) + throw new IllegalStateException("No blobs to execute"); + + drainState = 0; + this.blobID = t; + } + + public Token getBlobID() { + return blobID; + } + + /** + * Gets buffer from {@link BoundaryChannel}s and builds bufferMap. The + * bufferMap will contain all input and output edges of the {@link #blob}. 
+ * + * Note that, Some {@link BoundaryChannel}s (e.g., + * {@link AsyncOutputChannel}) create {@link Buffer}s after establishing + * {@link Connection} with other end. So this method must be called after + * establishing all IO connections. + * {@link InputChannelManager#waitToStart()} and + * {@link OutputChannelManager#waitToStart()} ensure that the IO connections + * are successfully established. + * + * @return Buffer map which contains {@link Buffers} for all input and + * output edges of the {@link #blob}. + */ + private ImmutableMap buildBufferMap() { + ImmutableMap.Builder bufferMapBuilder = ImmutableMap + .builder(); + ImmutableMap.Builder outputLocalBufferBuilder = ImmutableMap + .builder(); + ImmutableMap localBufferMap = this.blobsManagerImpl.bufferManager + .localBufferMap(); + ImmutableMap inputChannels = inChnlManager + .inputChannelsMap(); + ImmutableMap outputChannels = outChnlManager + .outputChannelsMap(); + + for (Token t : blob.getInputs()) { + if (localBufferMap.containsKey(t)) { + assert !inputChannels.containsKey(t) : "Same channels is exists in both localBuffer and inputChannel"; + bufferMapBuilder.put(t, localBufferMap.get(t)); + } else if (inputChannels.containsKey(t)) { + BoundaryInputChannel chnl = inputChannels.get(t); + bufferMapBuilder.put(t, chnl.getBuffer()); + } else { + throw new AssertionError(String.format( + "No Buffer for input channel %s ", t)); + } + } + + for (Token t : blob.getOutputs()) { + if (localBufferMap.containsKey(t)) { + assert !outputChannels.containsKey(t) : "Same channels is exists in both localBuffer and outputChannel"; + LocalBuffer buf = localBufferMap.get(t); + bufferMapBuilder.put(t, buf); + outputLocalBufferBuilder.put(t, buf); + } else if (outputChannels.containsKey(t)) { + BoundaryOutputChannel chnl = outputChannels.get(t); + bufferMapBuilder.put(t, chnl.getBuffer()); + } else { + throw new AssertionError(String.format( + "No Buffer for output channel %s ", t)); + } + } + outputLocalBuffers = 
outputLocalBufferBuilder.build(); + return bufferMapBuilder.build(); + } + + void doDrain(DrainType drainType) { + // System.out.println("Blob " + blobID + "is doDrain"); + this.drainType = drainType; + drainState = 1; + + inChnlManager.stop(drainType); + // TODO: [2014-03-14] I commented following line to avoid one dead + // lock case when draining. Deadlock 5 and 6. + // [2014-09-17] Lets waitToStop() if drain data is required. + if (drainType != DrainType.DISCARD) + inChnlManager.waitToStop(); + + for (LocalBuffer buf : outputLocalBuffers.values()) { + buf.drainingStarted(drainType); + } + + if (this.blob != null) { + DrainCallback dcb = new DrainCallback(this); + drainState = 2; + this.blob.drain(dcb); + } + // System.out.println("Blob " + blobID + + // "this.blob.drain(dcb); passed"); + + if (this.blobsManagerImpl.useBufferCleaner + && drainType != DrainType.FINAL) { + boolean isLastBlob = true; + for (BlobExecuter be : this.blobsManagerImpl.blobExecuters.values()) { + if (be.drainState == 0) { + isLastBlob = false; + break; + } + } + + if (isLastBlob && this.blobsManagerImpl.bufferCleaner == null) { + System.out.println("****Starting BufferCleaner***"); + this.blobsManagerImpl.bufferCleaner = this.blobsManagerImpl.new BufferCleaner( + drainType == DrainType.INTERMEDIATE); + this.blobsManagerImpl.bufferCleaner.start(); + } + } + } + + void drained() { + // System.out.println("Blob " + blobID + "drained at beg"); + if (drainState < 3) + drainState = 3; + else + return; + + for (BlobThread2 bt : blobThreads) { + bt.requestStop(); + } + + outChnlManager.stop(drainType == DrainType.FINAL); + outChnlManager.waitToStop(); + + if (drainState > 3) + return; + + drainState = 4; + SNMessageElement drained = new SNDrainElement.Drained(blobID); + try { + this.blobsManagerImpl.streamNode.controllerConnection + .writeObject(drained); + } catch (IOException e) { + e.printStackTrace(); + } + // System.out.println("Blob " + blobID + "is drained at mid"); + + if (drainType != 
DrainType.DISCARD) { + SNMessageElement me; + if (crashed.get()) + me = getEmptyDrainData(); + else + me = getSNDrainData(); + + try { + this.blobsManagerImpl.streamNode.controllerConnection + .writeObject(me); + // System.out.println(blobID + " DrainData has been sent"); + drainState = 6; + + } catch (IOException e) { + e.printStackTrace(); + } + // System.out.println("**********************************"); + } + + this.blob = null; + boolean isLastBlob = true; + for (BlobExecuter be : this.blobsManagerImpl.blobExecuters.values()) { + if (be.drainState < 4) { + isLastBlob = false; + break; + } + } + + if (isLastBlob) { + if (this.blobsManagerImpl.monBufs != null) + this.blobsManagerImpl.monBufs.stopMonitoring(); + + if (this.blobsManagerImpl.bufferCleaner != null) + this.blobsManagerImpl.bufferCleaner.stopit(); + + } + // printDrainedStatus(); + } + + private SNDrainedData getSNDrainData() { + if (this.blob == null) + return getEmptyDrainData(); + + DrainData dd = blob.getDrainData(); + drainState = 5; + // printDrainDataStats(dd); + + ImmutableMap.Builder> inputDataBuilder = new ImmutableMap.Builder<>(); + ImmutableMap.Builder> outputDataBuilder = new ImmutableMap.Builder<>(); + + ImmutableMap inputChannels = inChnlManager + .inputChannelsMap(); + + // In a proper system the following line should be called inside + // doDrain(), just after inChnlManager.stop(). Read the comment + // in doDrain(). 
+ inChnlManager.waitToStop(); + + for (Token t : blob.getInputs()) { + if (inputChannels.containsKey(t)) { + BoundaryChannel chanl = inputChannels.get(t); + ImmutableList draindata = chanl.getUnprocessedData(); + // if (draindata.size() > 0) + // System.out.println(String.format("From %s - %d", + // chanl.name(), draindata.size())); + inputDataBuilder.put(t, draindata); + } + + else { + unprocessedDataFromLocalBuffer(inputDataBuilder, t); + } + } + + ImmutableMap outputChannels = outChnlManager + .outputChannelsMap(); + for (Token t : blob.getOutputs()) { + if (outputChannels.containsKey(t)) { + BoundaryChannel chanl = outputChannels.get(t); + ImmutableList draindata = chanl.getUnprocessedData(); + // if (draindata.size() > 0) + // System.out.println(String.format("From %s - %d", + // chanl.name(), draindata.size())); + outputDataBuilder.put(t, draindata); + } + } + + return new SNDrainElement.SNDrainedData(blobID, dd, + inputDataBuilder.build(), outputDataBuilder.build()); + } + + // TODO: Unnecessary data copy. Optimise this. + private void unprocessedDataFromLocalBuffer( + ImmutableMap.Builder> inputDataBuilder, + Token t) { + Object[] bufArray; + if (this.blobsManagerImpl.bufferCleaner == null) { + Buffer buf = bufferMap.get(t); + bufArray = new Object[buf.size()]; + buf.readAll(bufArray); + assert buf.size() == 0 : String.format( + "buffer size is %d. 
But 0 is expected", buf.size()); + } else { + bufArray = this.blobsManagerImpl.bufferCleaner.copiedBuffer(t); + } + // if (bufArray.length > 0) + // System.out.println(String.format("From LocalBuffer: %s - %d", + // t, bufArray.length)); + inputDataBuilder.put(t, ImmutableList.copyOf(bufArray)); + } + + private SNDrainedData getEmptyDrainData() { + drainState = 5; + ImmutableMap.Builder> inputDataBuilder = new ImmutableMap.Builder<>(); + ImmutableMap.Builder> outputDataBuilder = new ImmutableMap.Builder<>(); + ImmutableMap.Builder> dataBuilder = ImmutableMap + .builder(); + ImmutableTable.Builder stateBuilder = ImmutableTable + .builder(); + DrainData dd = new DrainData(dataBuilder.build(), stateBuilder.build()); + return new SNDrainElement.SNDrainedData(blobID, dd, + inputDataBuilder.build(), outputDataBuilder.build()); + } + + /** + * Returns a name for thread. + * + * @param blob + * @return + */ + private String getName(Blob blob) { + StringBuilder sb = new StringBuilder("Workers-"); + int limit = 0; + for (Worker w : blob.getWorkers()) { + sb.append(Workers.getIdentifier(w)); + sb.append(","); + if (++limit > 5) + break; + } + return sb.toString(); + } + + private void printDrainDataStats(DrainData dd) { + System.out.println("**********printDrainDataStats*************"); + if (dd != null) { + for (Token t : dd.getData().keySet()) { + int size = dd.getData().get(t).size(); + if (size > 0) + System.out.println("From Blob: " + t.toString() + " - " + + size); + } + } + } + + void start() { + outChnlManager.waitToStart(); + inChnlManager.waitToStart(); + + bufferMap = buildBufferMap(); + blob.installBuffers(bufferMap); + + for (Thread t : blobThreads) + t.start(); + + System.out.println(blobID + " started"); + } + + void startChannels() { + outChnlManager.start(); + inChnlManager.start(); + } + + void stop() { + inChnlManager.stop(DrainType.FINAL); + outChnlManager.stop(true); + + for (Thread t : blobThreads) { + try { + t.join(); + } catch (InterruptedException e) 
{ + e.printStackTrace(); + } + } + + inChnlManager.waitToStop(); + outChnlManager.waitToStop(); + + if (this.blobsManagerImpl.monBufs != null) + this.blobsManagerImpl.monBufs.stopMonitoring(); + } + + final class BlobThread2 extends Thread { + + private final Set cores; + + private final BlobExecuter be; + + private final Runnable coreCode; + + private volatile boolean stopping = false; + + BlobThread2(Runnable coreCode, BlobExecuter be, String name, + Set cores) { + super(name); + this.coreCode = coreCode; + this.be = be; + this.cores = cores; + } + + public void requestStop() { + stopping = true; + } + + @Override + public void run() { + if (cores != null && cores.size() > 0) + Affinity.setThreadAffinity(cores); + + try { + while (!stopping) + coreCode.run(); + } catch (Error | Exception e) { + System.out.println(Thread.currentThread().getName() + + " crashed..."); + if (be.crashed.compareAndSet(false, true)) { + e.printStackTrace(); + if (be.drainState == 1 || be.drainState == 2) + be.drained(); + else if (be.drainState == 0) { + try { + blobsManagerImpl.streamNode.controllerConnection + .writeObject(AppStatus.ERROR); + } catch (IOException e1) { + e1.printStackTrace(); + } + } + } + } + } + } + + class DrainCallback implements Runnable { + + private final BlobExecuter blobExec; + + // TODO: [2014-03-17] Just to added for checking the drain time. Remove + // it later. 
+ private final Stopwatch sw; + + DrainCallback(BlobExecuter be) { + this.blobExec = be; + sw = Stopwatch.createStarted(); + } + + @Override + public void run() { + sw.stop(); + long time = sw.elapsed(TimeUnit.MILLISECONDS); + // System.out.println("Time taken to drain " + blobExec.blobID + + // " is " + time + " ms"); + try { + blobsManagerImpl.streamNode.controllerConnection + .writeObject(new SNTimeInfo.DrainingTime( + blobExec.blobID, time)); + } catch (IOException e) { + e.printStackTrace(); + } + blobExec.drained(); + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/BlobsManager.java b/src/edu/mit/streamjit/impl/distributed/node/BlobsManager.java index f32e3345..4a8c42d5 100644 --- a/src/edu/mit/streamjit/impl/distributed/node/BlobsManager.java +++ b/src/edu/mit/streamjit/impl/distributed/node/BlobsManager.java @@ -21,8 +21,12 @@ */ package edu.mit.streamjit.impl.distributed.node; +import java.util.Set; + +import edu.mit.streamjit.impl.blob.Blob; import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.CTRLRDrainProcessor; import edu.mit.streamjit.impl.distributed.common.Command.CommandProcessor; +import edu.mit.streamjit.impl.distributed.profiler.StreamNodeProfiler; /** * {@link BlobsManager} is the main dispatcher for all blobs. Received commands @@ -38,4 +42,18 @@ public interface BlobsManager { public CTRLRDrainProcessor getDrainProcessor(); public CommandProcessor getCommandProcessor(); + + /** + * For all final resource cleanup. Mainly all started threads must be + * stopped safely. + *

            + * TODO: [2014-03-05] I added this as a quick fix to clean up + * {@link BlobsManagerImpl}#MonitorBuffers thread. Revise this. + */ + public void stop(); + + /** + * @return Set of profilers in this BlobManager. + */ + public Set profilers(); } diff --git a/src/edu/mit/streamjit/impl/distributed/node/BlobsManagerImpl.java b/src/edu/mit/streamjit/impl/distributed/node/BlobsManagerImpl.java index 2c8632b7..3da5b29b 100644 --- a/src/edu/mit/streamjit/impl/distributed/node/BlobsManagerImpl.java +++ b/src/edu/mit/streamjit/impl/distributed/node/BlobsManagerImpl.java @@ -21,39 +21,41 @@ */ package edu.mit.streamjit.impl.distributed.node; +import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import edu.mit.streamjit.api.Worker; import edu.mit.streamjit.impl.blob.Blob; -import edu.mit.streamjit.impl.blob.Buffer; -import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.blob.DrainData; -import edu.mit.streamjit.impl.common.BlobThread; -import edu.mit.streamjit.impl.common.Workers; -import edu.mit.streamjit.impl.distributed.common.BoundaryChannel; +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.distributed.common.AppStatus; import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.CTRLRDrainProcessor; import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DoDrain; 
import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainDataRequest; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; import edu.mit.streamjit.impl.distributed.common.Command.CommandProcessor; -import edu.mit.streamjit.impl.distributed.common.AppStatus; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.impl.distributed.common.SNDrainElement; -import edu.mit.streamjit.impl.distributed.common.SNMessageElement; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.impl.distributed.node.BufferManager.SNLocalBufferManager; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData.BlobBufferStatus; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData.BufferStatus; +import edu.mit.streamjit.impl.distributed.profiler.StreamNodeProfiler; /** * {@link BlobsManagerImpl} responsible to run all {@link Blob}s those are @@ -64,43 +66,86 @@ */ public class BlobsManagerImpl implements BlobsManager { - private Set blobExecuters; - private final StreamNode streamNode; - private final TCPConnectionProvider conProvider; - private Map conInfoMap; + Map blobExecuters; - private final CTRLRDrainProcessor drainProcessor; + final BufferManager bufferManager; private final CommandProcessor cmdProcessor; - private final ImmutableMap bufferMap; + private final Map conInfoMap; + + private final ConnectionProvider conProvider; + + volatile 
BufferCleaner bufferCleaner = null; + + private final CTRLRDrainProcessor drainProcessor; + + MonitorBuffers monBufs = null; + + final StreamNode streamNode; + + /** + * if true {@link BufferCleaner} will be used to unlock the draining time + * dead lock. Otherwise dynamic buffer will be used for local buffers to + * handled drain time data growth. + */ + final boolean useBufferCleaner = false; + + /** + * if true {@link MonitorBuffers} will be started to log the buffer sizes + * periodically. + */ + private final boolean monitorBuffers = false; + + private final String appName; + + private ImmutableSet profilers; + + final AffinityManager affinityManager; public BlobsManagerImpl(ImmutableSet blobSet, - Map conInfoMap, StreamNode streamNode, - TCPConnectionProvider conProvider) { + Map conInfoMap, StreamNode streamNode, + ConnectionProvider conProvider, String appName) { this.conInfoMap = conInfoMap; this.streamNode = streamNode; this.conProvider = conProvider; this.cmdProcessor = new CommandProcessorImpl(); this.drainProcessor = new CTRLRDrainProcessorImpl(); + this.bufferManager = new SNLocalBufferManager(blobSet); + this.affinityManager = new AffinityManagers.EmptyAffinityManager(); - bufferMap = createBufferMap(blobSet); + this.appName = appName; + bufferManager.initialise(); + if (bufferManager.isbufferSizesReady()) + createBEs(blobSet); + } - for (Blob b : blobSet) { - b.installBuffers(bufferMap); + /** + * Drain the blob identified by the token. 
+ */ + public void drain(Token blobID, DrainType drainType) { + for (BlobExecuter be : blobExecuters.values()) { + if (be.getBlobID().equals(blobID)) { + be.doDrain(drainType); + return; + } } + throw new IllegalArgumentException(String.format( + "No blob with blobID %s", blobID)); + } - Set locaTokens = getLocalTokens(blobSet); - blobExecuters = new HashSet<>(); - for (Blob b : blobSet) { - ImmutableMap inputChannels = createInputChannels( - Sets.difference(b.getInputs(), locaTokens), bufferMap); - ImmutableMap outputChannels = createOutputChannels( - Sets.difference(b.getOutputs(), locaTokens), bufferMap); - blobExecuters - .add(new BlobExecuter(b, inputChannels, outputChannels)); - } + public CommandProcessor getCommandProcessor() { + return cmdProcessor; + } + + public CTRLRDrainProcessor getDrainProcessor() { + return drainProcessor; + } + + public void reqDrainedData(Set blobSet) { + throw new UnsupportedOperationException( + "Method reqDrainedData not implemented"); } /** @@ -108,8 +153,17 @@ public BlobsManagerImpl(ImmutableSet blobSet, * manage all CPU and I/O threads those are related to the {@link Blob}s. */ public void start() { - for (BlobExecuter be : blobExecuters) + for (BlobExecuter be : blobExecuters.values()) + be.startChannels(); + + for (BlobExecuter be : blobExecuters.values()) be.start(); + + if (monitorBuffers && monBufs == null) { + // System.out.println("Creating new MonitorBuffers"); + monBufs = new MonitorBuffers(); + monBufs.start(); + } } /** @@ -117,360 +171,70 @@ public void start() { * stopped. */ public void stop() { - for (BlobExecuter be : blobExecuters) + for (BlobExecuter be : blobExecuters.values()) be.stop(); - } - - // TODO: Buffer sizes, including head and tail buffers, must be optimized. - // consider adding some tuning factor - private ImmutableMap createBufferMap(Set blobSet) { - ImmutableMap.Builder bufferMapBuilder = ImmutableMap - . 
builder(); - - Map minInputBufCapaciy = new HashMap<>(); - Map minOutputBufCapaciy = new HashMap<>(); - for (Blob b : blobSet) { - Set inputs = b.getInputs(); - for (Token t : inputs) { - minInputBufCapaciy.put(t, b.getMinimumBufferCapacity(t)); - } + if (monBufs != null) + monBufs.stopMonitoring(); - Set outputs = b.getOutputs(); - for (Token t : outputs) { - minOutputBufCapaciy.put(t, b.getMinimumBufferCapacity(t)); - } - } - - Set localTokens = Sets.intersection(minInputBufCapaciy.keySet(), - minOutputBufCapaciy.keySet()); - Set globalInputTokens = Sets.difference( - minInputBufCapaciy.keySet(), localTokens); - Set globalOutputTokens = Sets.difference( - minOutputBufCapaciy.keySet(), localTokens); - - for (Token t : localTokens) { - int bufSize = lcm(minInputBufCapaciy.get(t), - minOutputBufCapaciy.get(t)); - addBuffer(t, bufSize, bufferMapBuilder); - } - - for (Token t : globalInputTokens) { - int bufSize = minInputBufCapaciy.get(t); - addBuffer(t, bufSize, bufferMapBuilder); - } - - for (Token t : globalOutputTokens) { - int bufSize = minOutputBufCapaciy.get(t); - addBuffer(t, bufSize, bufferMapBuilder); - } - return bufferMapBuilder.build(); + if (bufferCleaner != null) + bufferCleaner.stopit(); } - /** - * Just introduced to avoid code duplication. - * - * @param t - * @param minSize - * @param bufferMapBuilder - */ - private void addBuffer(Token t, int minSize, - ImmutableMap.Builder bufferMapBuilder) { - // TODO: Just to increase the performance. Change it later - int bufSize = Math.max(1000, minSize); - bufferMapBuilder.put(t, new ConcurrentArrayBuffer(bufSize)); - } - - private int gcd(int a, int b) { - while (true) { - if (a == 0) - return b; - b %= a; - if (b == 0) - return a; - a %= b; + @Override + public Set profilers() { + if (profilers == null) { + StreamNodeProfiler snp = new BufferProfiler(); + profilers = ImmutableSet.of(snp); } + return profilers; } - private int lcm(int a, int b) { - int val = gcd(a, b); - return val != 0 ? 
((a * b) / val) : 0; - } - - private Set getLocalTokens(Set blobSet) { - Set inputTokens = new HashSet<>(); - Set outputTokens = new HashSet<>(); - + private void createBEs(ImmutableSet blobSet) { + assert bufferManager.isbufferSizesReady() : "Buffer sizes must be available to create BlobExecuters."; + blobExecuters = new HashMap<>(); + Set locaTokens = bufferManager.localTokens(); + ImmutableMap bufferSizesMap = bufferManager + .bufferSizes(); for (Blob b : blobSet) { - Set inputs = b.getInputs(); - for (Token t : inputs) { - inputTokens.add(t); - } - - Set outputs = b.getOutputs(); - for (Token t : outputs) { - outputTokens.add(t); - } + Token t = Utils.getBlobID(b); + ImmutableMap inputChannels = createInputChannels( + Sets.difference(b.getInputs(), locaTokens), bufferSizesMap); + ImmutableMap outputChannels = createOutputChannels( + Sets.difference(b.getOutputs(), locaTokens), bufferSizesMap); + blobExecuters.put(t, new BlobExecuter(this, t, b, inputChannels, + outputChannels)); } - return Sets.intersection(inputTokens, outputTokens); } private ImmutableMap createInputChannels( - Set inputTokens, ImmutableMap bufferMap) { + Set inputTokens, ImmutableMap bufferMap) { ImmutableMap.Builder inputChannelMap = new ImmutableMap.Builder<>(); for (Token t : inputTokens) { - TCPConnectionInfo conInfo = conInfoMap.get(t); - inputChannelMap.put(t, new TCPInputChannel(bufferMap.get(t), - conProvider, conInfo, t.toString(), 0)); + ConnectionInfo conInfo = conInfoMap.get(t); + inputChannelMap.put(t, + conInfo.inputChannel(t, bufferMap.get(t), conProvider)); } return inputChannelMap.build(); } private ImmutableMap createOutputChannels( - Set outputTokens, ImmutableMap bufferMap) { + Set outputTokens, ImmutableMap bufferMap) { ImmutableMap.Builder outputChannelMap = new ImmutableMap.Builder<>(); for (Token t : outputTokens) { - TCPConnectionInfo conInfo = conInfoMap.get(t); - outputChannelMap.put(t, new TCPOutputChannel(bufferMap.get(t), - conProvider, conInfo, t.toString(), 
0)); + ConnectionInfo conInfo = conInfoMap.get(t); + outputChannelMap.put(t, + conInfo.outputChannel(t, bufferMap.get(t), conProvider)); } return outputChannelMap.build(); } - private class BlobExecuter { - - private volatile int drainState; - private final Token blobID; - - private final Blob blob; - private Set blobThreads; - - private final ImmutableMap inputChannels; - private final ImmutableMap outputChannels; - - Set inputChannelThreads; - Set outputChannelThreads; - - private boolean reqDrainData; - - private BlobExecuter(Blob blob, - ImmutableMap inputChannels, - ImmutableMap outputChannels) { - this.blob = blob; - this.blobThreads = new HashSet<>(); - assert blob.getInputs().containsAll(inputChannels.keySet()); - assert blob.getOutputs().containsAll(outputChannels.keySet()); - this.inputChannels = inputChannels; - this.outputChannels = outputChannels; - inputChannelThreads = new HashSet<>(inputChannels.values().size()); - outputChannelThreads = new HashSet<>(outputChannels.values().size()); - - for (int i = 0; i < blob.getCoreCount(); i++) { - StringBuilder sb = new StringBuilder("Workers-"); - for (Worker w : blob.getWorkers()) { - sb.append(Workers.getIdentifier(w)); - sb.append(","); - } - blobThreads.add(new BlobThread(blob.getCoreCode(i), sb - .toString())); - } - - drainState = 0; - this.blobID = Utils.getBlobID(blob); - } - - private void start() { - for (BoundaryInputChannel bc : inputChannels.values()) { - Thread t = new Thread(bc.getRunnable(), bc.name()); - t.start(); - inputChannelThreads.add(t); - } - - for (BoundaryOutputChannel bc : outputChannels.values()) { - Thread t = new Thread(bc.getRunnable(), bc.name()); - t.start(); - outputChannelThreads.add(t); - } - - for (Thread t : blobThreads) - t.start(); - } - - private void stop() { - - for (BoundaryInputChannel bc : inputChannels.values()) { - bc.stop(1); - } - - for (BoundaryOutputChannel bc : outputChannels.values()) { - bc.stop(true); - } - - for (Thread t : blobThreads) - try { - 
t.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - private void doDrain(boolean reqDrainData) { - this.reqDrainData = reqDrainData; - drainState = 1; - - for (BoundaryInputChannel bc : inputChannels.values()) { - if (!this.reqDrainData) - bc.stop(1); - else if (GlobalConstants.useDrainData) - bc.stop(2); - else - bc.stop(3); - } - - for (Thread t : inputChannelThreads) { - try { - t.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - DrainCallback dcb = new DrainCallback(this); - drainState = 2; - this.blob.drain(dcb); - } - - private void drained() { - drainState = 3; - for (BlobThread bt : blobThreads) { - bt.requestStop(); - } - - for (BoundaryOutputChannel bc : outputChannels.values()) { - bc.stop(!this.reqDrainData); - } - - for (Thread t : outputChannelThreads) { - try { - t.join(); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - - drainState = 4; - SNMessageElement drained = new SNDrainElement.Drained(blobID); - try { - streamNode.controllerConnection.writeObject(drained); - } catch (IOException e) { - e.printStackTrace(); - } - // System.out.println("Blob " + blobID + "is drained"); - - if (GlobalConstants.useDrainData && this.reqDrainData) { - // System.out.println("**********************************"); - DrainData dd = blob.getDrainData(); - drainState = 5; - - for (Token t : dd.getData().keySet()) { - System.out.println("From Blob: " + t.toString() + " - " - + dd.getData().get(t).size()); - } - - ImmutableMap.Builder> inputDataBuilder = new ImmutableMap.Builder<>(); - ImmutableMap.Builder> outputDataBuilder = new ImmutableMap.Builder<>(); - - for (Token t : blob.getInputs()) { - if (inputChannels.containsKey(t)) { - BoundaryChannel chanl = inputChannels.get(t); - ImmutableList draindata = chanl - .getUnprocessedData(); - System.out.println(String.format( - "No of unprocessed data of %s is %d", - chanl.name(), draindata.size())); - inputDataBuilder.put(t, draindata); - } - - 
// TODO: Unnecessary data copy. Optimise this. - else { - Buffer buf = bufferMap.get(t); - Object[] bufArray = new Object[buf.size()]; - buf.readAll(bufArray); - assert buf.size() == 0 : String.format( - "buffer size is %d. But 0 is expected", - buf.size()); - inputDataBuilder.put(t, ImmutableList.copyOf(bufArray)); - } - } - - for (Token t : blob.getOutputs()) { - if (outputChannels.containsKey(t)) { - BoundaryChannel chanl = outputChannels.get(t); - ImmutableList draindata = chanl - .getUnprocessedData(); - System.out.println(String.format( - "No of unprocessed data of %s is %d", - chanl.name(), draindata.size())); - outputDataBuilder.put(t, draindata); - } - } - - SNMessageElement me = new SNDrainElement.DrainedData(blobID, - dd, inputDataBuilder.build(), outputDataBuilder.build()); - try { - streamNode.controllerConnection.writeObject(me); - // System.out.println(blobID + " DrainData has been sent"); - drainState = 6; - - } catch (IOException e) { - e.printStackTrace(); - } - - // System.out.println("**********************************"); - } - - // printDrainedStatus(); - } - - public Token getBlobID() { - return Utils.getBlobID(blob); - } - } - - private static class DrainCallback implements Runnable { - - private final BlobExecuter blobExec; - - DrainCallback(BlobExecuter be) { - this.blobExec = be; - } - - @Override - public void run() { - blobExec.drained(); - } - } - - /** - * Drain the blob identified by the token. - */ - public void drain(Token blobID, boolean reqDrainData) { - for (BlobExecuter be : blobExecuters) { - if (be.getBlobID().equals(blobID)) { - be.doDrain(reqDrainData); - return; - } - } - throw new IllegalArgumentException(String.format( - "No blob with blobID %s", blobID)); - } - /** * Just to added for debugging purpose. 
*/ - private synchronized void printDrainedStatus() { + synchronized void printDrainedStatus() { System.out.println("****************************************"); - for (BlobExecuter be : blobExecuters) { + for (BlobExecuter be : blobExecuters.values()) { switch (be.drainState) { case 0 : System.out.println(String.format("%s - No Drain Called", @@ -506,30 +270,31 @@ private synchronized void printDrainedStatus() { System.out.println("****************************************"); } - public void reqDrainedData(Set blobSet) { - // ImmutableMap.Builder builder = new - // ImmutableMap.Builder<>(); - // for (BlobExecuter be : blobExecuters) { - // if (be.isDrained) { - // builder.put(be.blobID, be.blob.getDrainData()); - // } - // } - // - // try { - // streamNode.controllerConnection - // .writeObject(new SNDrainElement.DrainedData(builder.build())); - // } catch (IOException e) { - // // TODO Auto-generated catch block - // e.printStackTrace(); - // } - } + /** + * {@link CommandProcessor} at {@link StreamNode} side. 
+ * + * @author Sumanan sumanan@mit.edu + * @since May 27, 2013 + */ + private class CommandProcessorImpl implements CommandProcessor { - public CTRLRDrainProcessor getDrainProcessor() { - return drainProcessor; - } + @Override + public void processSTART() { + start(); + System.out.println("StraemJit app is running..."); + Utils.printMemoryStatus(); + } - public CommandProcessor getCommandProcessor() { - return cmdProcessor; + @Override + public void processSTOP() { + stop(); + System.out.println("StraemJit app stopped..."); + try { + streamNode.controllerConnection.writeObject(AppStatus.STOPPED); + } catch (IOException e) { + e.printStackTrace(); + } + } } /** @@ -542,54 +307,432 @@ public CommandProcessor getCommandProcessor() { */ private class CTRLRDrainProcessorImpl implements CTRLRDrainProcessor { + @Override + public void process(DoDrain drain) { + drain(drain.blobID, drain.drainType); + } + @Override public void process(DrainDataRequest drnDataReq) { System.err.println("Not expected in current situation"); // reqDrainedData(drnDataReq.blobsSet); } + } - @Override - public void process(DoDrain drain) { - drain(drain.blobID, drain.reqDrainData); + /** + * Handles another type of deadlock which occurs when draining. A Down blob, + * that has more than one upper blob, cannot progress because some of its + * upper blobs are drained and hence no input on the corresponding input + * channels, and other upper blobs blocked at their output channels as the + * down blob is no more consuming data. So those non-drained upper blobs are + * going to stuck forever at their output channels and the down blob will + * not receive DODrain command from the controller. + * + * This class just discard the buffer contents so that blocked blobs can + * progress. + * + * See the Deadlock 5. 
+ * + * @author sumanan + * + */ + class BufferCleaner extends Thread { + + final AtomicBoolean run; + + final boolean needToCopyDrainData; + + final Map> newlocalBufferMap; + + BufferCleaner(boolean needToCopyDrainData) { + super("BufferCleaner"); + System.out.println("Buffer Cleaner : needToCopyDrainData == " + + needToCopyDrainData); + this.run = new AtomicBoolean(true); + this.needToCopyDrainData = needToCopyDrainData; + if (needToCopyDrainData) + newlocalBufferMap = new HashMap<>(); + else + newlocalBufferMap = null; + } + + public void run() { + try { + Thread.sleep(60000); + } catch (InterruptedException e) { + return; + } + + System.out.println("BufferCleaner is going to clean buffers..."); + boolean areAllDrained = false; + + while (run.get()) { + if (needToCopyDrainData) + areAllDrained = copyLocalBuffers(); + else + areAllDrained = cleanAllBuffers(); + + if (areAllDrained) + break; + + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + break; + } + } + } + + /** + * Go through all blocked blobs and clean all input and output buffers. + * This method is useful when we don't care about the drain data. + * + * @return true iff there is no blocked blobs, i.e., all blobs have + * completed the draining. + */ + private boolean cleanAllBuffers() { + boolean areAllDrained = true; + for (BlobExecuter be : blobExecuters.values()) { + if (be.drainState == 1 || be.drainState == 2) { + // System.out.println(be.blobID + " is not drained"); + areAllDrained = false; + for (Token t : be.blob.getOutputs()) { + Buffer b = be.bufferMap.get(t); + clean(b, t); + } + + for (Token t : be.blob.getInputs()) { + Buffer b = be.bufferMap.get(t); + clean(b, t); + } + } + } + return areAllDrained; + } + + private void clean(Buffer b, Token t) { + int size = b.size(); + if (size == 0) + return; + System.out.println(String.format( + "Buffer %s has %d data. 
Going to clean it", t.toString(), + size)); + Object[] obArray = new Object[size]; + b.readAll(obArray); + } + + /** + * Copy only the local buffers into a new large buffer to make the + * blocked blob to progress. This copied buffer can be sent to + * controller as a drain data. + */ + private boolean copyLocalBuffers() { + ImmutableMap localBufferMap = bufferManager + .localBufferMap(); + boolean areAllDrained = true; + for (BlobExecuter be : blobExecuters.values()) { + if (be.drainState == 1 || be.drainState == 2) { + // System.out.println(be.blobID + " is not drained"); + areAllDrained = false; + for (Token t : be.blob.getOutputs()) { + if (localBufferMap.containsKey(t)) { + Buffer b = be.bufferMap.get(t); + copy(b, t); + } + } + } + } + return areAllDrained; + } + + private void copy(Buffer b, Token t) { + int size = b.size(); + if (size == 0) + return; + + if (!newlocalBufferMap.containsKey(t)) { + newlocalBufferMap.put(t, new LinkedList()); + } + + List list = newlocalBufferMap.get(t); + Object[] bufArray = new Object[size]; + b.readAll(bufArray); + assert b.size() == 0 : String.format( + "buffer size is %d. 
But 0 is expected", b.size()); + list.add(bufArray); + } + + public void stopit() { + this.run.set(false); + this.interrupt(); + } + + public Object[] copiedBuffer(Token t) { + assert needToCopyDrainData : "BufferCleaner is not in buffer copy mode"; + copy(bufferManager.localBufferMap().get(t), t); + List list = newlocalBufferMap.get(t); + if (list == null) + return new Object[0]; + else if (list.size() == 0) + return new Object[0]; + else if (list.size() == 1) + return list.get(0); + + int size = 0; + for (Object[] array : list) { + size += array.length; + } + + int destPos = 0; + Object[] mergedArray = new Object[size]; + for (Object[] array : list) { + System.arraycopy(array, 0, mergedArray, destPos, array.length); + destPos += array.length; + } + return mergedArray; } } + private class BlobsBufferStatus { + + /** + * @return Status of all buffers of all blobs of this + * {@link BlobsManager}. + */ + private SNBufferStatusData snBufferStatusData() { + Set blobBufferStatusSet = new HashSet<>(); + if (blobExecuters != null) { + for (BlobExecuter be : blobExecuters.values()) { + blobBufferStatusSet.add(blobBufferStatus(be)); + } + } + + return new SNBufferStatusData(streamNode.getNodeID(), + ImmutableSet.copyOf(blobBufferStatusSet)); + } + + /** + * Status of the all buffers of the blob represented by the @param be. + * + * @param be + * @return + */ + private BlobBufferStatus blobBufferStatus(BlobExecuter be) { + return new BlobBufferStatus(be.blobID, bufferStatusSet(be, true), + bufferStatusSet(be, false)); + } + + /** + * @param be + * @param isIn + * Decides whether a blob's inputbuffer's status or + * outputbuffers's status should be returned. + * @return Set of {@link BufferStatus} of a blob's set of input buffers + * or set of output buffers depends on isIn argument. 
+ */ + private ImmutableSet bufferStatusSet(BlobExecuter be, + boolean isIn) { + // TODO: [Feb 8, 2015] "be.blob == null" condition is added to + // avoid sending profile data after the blob has been drained. But + // we may need the "after draining buffer status" when analyzing + // dead lock situations. Remove "be.blob == null" at that time. + if (be.bufferMap == null || be.blob == null) + return ImmutableSet.of(); + + Set tokenSet = tokenSet(be, isIn); + Set bufferStatus = new HashSet<>(); + for (Token t : tokenSet) { + bufferStatus.add(bufferStatus(t, be, isIn)); + } + return ImmutableSet.copyOf(bufferStatus); + } + + private BufferStatus bufferStatus(Token bufferID, BlobExecuter be, + boolean isIn) { + int min = Integer.MAX_VALUE; + // BE sets blob to null after the drained(). + if (be.blob != null) + min = be.blob.getMinimumBufferCapacity(bufferID); + + int availableResource = min; + Buffer b = be.bufferMap.get(bufferID); + if (b != null) + availableResource = isIn ? b.size() : b.capacity() - b.size(); + + return new BufferStatus(bufferID, min, availableResource); + } + + /** + * Return a blob's either input or output buffer's token set. + * + * @param be + * @param isIn + * Decides whether a blob's inputbuffer's token set or + * outputbuffers's token set should be returned. + * @return Blob's inputbuffer's token set or outputbuffers's token set. + */ + private Set tokenSet(BlobExecuter be, boolean isIn) { + Set tokenSet; + // BE sets blob to null after the drained(). + if (be.blob == null) { + if (isIn) + tokenSet = be.inChnlManager.inputChannelsMap().keySet(); + else + tokenSet = be.outChnlManager.outputChannelsMap().keySet(); + } else { + if (isIn) + tokenSet = be.blob.getInputs(); + else + tokenSet = be.blob.getOutputs(); + } + return tokenSet; + } + } + + private static int count = 0; + /** - * {@link CommandProcessor} at {@link StreamNode} side. + * TODO: [27-01-2015] Use BufferProfiler to get buffer status and then write + * the status in to the file. 
I created BufferProfiler by copying most of + * the code from this class. + *

            + * Profiles the buffer sizes in a timely manner and log that information + * into a text file. This information may be useful to analyse and find out + * deadlock situations. + * + * @author sumanan * - * @author Sumanan sumanan@mit.edu - * @since May 27, 2013 */ - private class CommandProcessorImpl implements CommandProcessor { + class MonitorBuffers extends Thread { - @Override - public void processSTART() { - start(); - long heapMaxSize = Runtime.getRuntime().maxMemory(); - long heapSize = Runtime.getRuntime().totalMemory(); - long heapFreeSize = Runtime.getRuntime().freeMemory(); + private final int id; - System.out - .println("##############################################"); + private final AtomicBoolean stopFlag; - System.out.println("heapMaxSize = " + heapMaxSize / 1e6); - System.out.println("heapSize = " + heapSize / 1e6); - System.out.println("heapFreeSize = " + heapFreeSize / 1e6); - System.out.println("StraemJit app is running..."); - System.out - .println("##############################################"); + int sleepTime = 25000; + MonitorBuffers() { + super("MonitorBuffers"); + stopFlag = new AtomicBoolean(false); + id = count++; } - @Override - public void processSTOP() { - stop(); - System.out.println("StraemJit app stopped..."); + public void run() { + FileWriter writer = null; try { - streamNode.controllerConnection.writeObject(AppStatus.STOPPED); + String fileName = String.format("%s%sBufferStatus%d.txt", + appName, File.separator, streamNode.getNodeID()); + writer = new FileWriter(fileName, false); + + writer.write(String.format( + "********Started*************** - %d\n", id)); + while (!stopFlag.get()) { + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + break; + } + + if (stopFlag.get()) + break; + + if (blobExecuters == null) { + writer.write("blobExecuters are null...\n"); + continue; + } + + writer.write("----------------------------------\n"); + for (BlobExecuter be : blobExecuters.values()) { + 
writer.write("Status of blob " + be.blobID.toString() + + "\n"); + + if (be.bufferMap == null) { + writer.write("Buffer map is null...\n"); + continue; + } + + if (stopFlag.get()) + break; + + writer.write("Input channel details\n"); + write(be, writer, true); + + writer.write("Output channel details\n"); + write(be, writer, false); + } + writer.write("----------------------------------\n"); + writer.flush(); + } + + writer.write(String.format( + "********Stopped*************** - %d\n", id)); + } catch (IOException e1) { + e1.printStackTrace(); + return; + } + + try { + if (writer != null) + writer.close(); } catch (IOException e) { e.printStackTrace(); } } + + private void write(BlobExecuter be, FileWriter writer, boolean isIn) + throws IOException { + Set tokenSet = tokenSet(be, isIn); + for (Token t : tokenSet) { + Buffer b = be.bufferMap.get(t); + if (b == null) + continue; + int min = Integer.MAX_VALUE; + // BE sets blob to null after the drained(). + if (be.blob != null) + min = be.blob.getMinimumBufferCapacity(t); + + int availableResource = isIn ? b.size() : b.capacity() + - b.size(); + + String status = availableResource >= min ? "Firable" + : "NOT firable"; + writer.write(t.toString() + "\tMin - " + min + + ",\tAvailableResource - " + availableResource + "\t" + + status + "\n"); + } + } + + private Set tokenSet(BlobExecuter be, boolean isIn) { + Set tokenSet; + // BE sets blob to null after the drained(). 
+ if (be.blob == null) { + if (isIn) + tokenSet = be.inChnlManager.inputChannelsMap().keySet(); + else + tokenSet = be.outChnlManager.outputChannelsMap().keySet(); + } else { + if (isIn) + tokenSet = be.blob.getInputs(); + else + tokenSet = be.blob.getOutputs(); + } + return tokenSet; + } + + public void stopMonitoring() { + // System.out.println("MonitorBuffers: Stop monitoring"); + stopFlag.set(true); + this.interrupt(); + } + } + + public class BufferProfiler implements StreamNodeProfiler { + + BlobsBufferStatus bbs = new BlobsBufferStatus(); + + @Override + public SNProfileElement profile() { + return bbs.snBufferStatusData(); + } } } diff --git a/src/edu/mit/streamjit/impl/distributed/node/TCPInputChannel.java b/src/edu/mit/streamjit/impl/distributed/node/BlockingInputChannel.java similarity index 78% rename from src/edu/mit/streamjit/impl/distributed/node/TCPInputChannel.java rename to src/edu/mit/streamjit/impl/distributed/node/BlockingInputChannel.java index d1d8d524..b5cacbd6 100644 --- a/src/edu/mit/streamjit/impl/distributed/node/TCPInputChannel.java +++ b/src/edu/mit/streamjit/impl/distributed/node/BlockingInputChannel.java @@ -34,10 +34,12 @@ import edu.mit.streamjit.impl.blob.AbstractBuffer; import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryInputChannel; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; import edu.mit.streamjit.impl.distributed.common.Connection; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; /** * This is {@link BoundaryInputChannel} over TCP. 
Receive objects from TCP @@ -53,21 +55,21 @@ * @author Sumanan sumanan@mit.edu * @since May 29, 2013 */ -public class TCPInputChannel implements BoundaryInputChannel { +public class BlockingInputChannel implements BoundaryInputChannel { private final FileWriter writer; - private final int debugPrint; + private final int debugLevel; private final Buffer buffer; private Buffer extraBuffer; - private final TCPConnectionProvider conProvider; + private final ConnectionProvider conProvider; - private final TCPConnectionInfo conInfo; + private final ConnectionInfo conInfo; - private Connection tcpConnection; + private Connection connection; private final AtomicInteger stopType; @@ -81,22 +83,31 @@ public class TCPInputChannel implements BoundaryInputChannel { private ImmutableList unProcessedData; - public TCPInputChannel(Buffer buffer, TCPConnectionProvider conProvider, - TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint) { + public BlockingInputChannel(int bufSize, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { + this(new ConcurrentArrayBuffer(bufSize), conProvider, conInfo, + bufferTokenName, debugLevel); + } + + public BlockingInputChannel(Buffer buffer, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { this.buffer = buffer; this.conProvider = conProvider; this.conInfo = conInfo; this.name = "TCPInputChannel - " + bufferTokenName; - this.debugPrint = debugPrint; + this.debugLevel = debugLevel; this.softClosed = false; this.extraBuffer = null; this.unProcessedData = null; this.isClosed = false; this.stopType = new AtomicInteger(0); count = 0; + writer = fileWriter(); + } + private FileWriter fileWriter() { FileWriter w = null; - if (this.debugPrint == 5) { + if (this.debugLevel == 5) { try { w = new FileWriter(name, true); w.write("---------------------------------\n"); @@ -105,28 +116,22 @@ public TCPInputChannel(Buffer buffer, TCPConnectionProvider 
conProvider, e.printStackTrace(); } } - writer = w; + return w; } - @Override - public void closeConnection() throws IOException { + private void closeConnection() throws IOException { // tcpConnection.closeConnection(); this.isClosed = true; } - @Override - public boolean isStillConnected() { - return tcpConnection.isStillConnected(); - } - @Override public Runnable getRunnable() { return new Runnable() { @Override public void run() { - if (tcpConnection == null || !tcpConnection.isStillConnected()) { + if (connection == null || !connection.isStillConnected()) { try { - tcpConnection = conProvider.getConnection(conInfo); + connection = conProvider.getConnection(conInfo); } catch (IOException e) { // TODO: Need to handle this exception. e.printStackTrace(); @@ -165,10 +170,12 @@ public void run() { public void receiveData() { int bufFullCount = 0; try { - Object obj = tcpConnection.readObject(); + Object obj = connection.readObject(); + if (obj == null) // [2014-03-15] Sometimes null is received. 
+ return; count++; - if (debugPrint == 3) { + if (debugLevel == 3) { System.out.println(Thread.currentThread().getName() + " - " + obj.toString()); } @@ -179,7 +186,7 @@ public void receiveData() { } while (!this.buffer.write(obj)) { - if (debugPrint == 3) { + if (debugLevel == 3) { System.out.println(Thread.currentThread().getName() + " Buffer FULL - " + obj.toString()); } @@ -193,21 +200,20 @@ public void receiveData() { } catch (InterruptedException e) { e.printStackTrace(); } - if (stopType.get() > 1 && ++bufFullCount > 5) { + if (stopType.get() == 3) { + // System.err.println(name + " receiveData:DISCARDING...."); + break; + } else if (stopType.get() > 0 && ++bufFullCount > 20) { this.extraBuffer = new ExtraBuffer(); extraBuffer.write(obj); - System.err - .println("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); System.err .println(name + " receiveData:Writing extra data in to extra buffer"); - System.err - .println("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); break; } } - if (count % 1000 == 0 && debugPrint == 2) { + if (count % 1000 == 0 && debugLevel == 2) { System.out.println(Thread.currentThread().getName() + " - " + count + " no of items have been received"); } @@ -217,8 +223,8 @@ public void receiveData() { softClosed = true; } catch (EOFException e) { // Other side is closed. - System.out - .println("receiveData:Closing by EOFExp. Not by softClose"); + System.out.println(name + + " receiveData:Closing by EOFExp. Not by softClose"); stopType.set(2); } catch (IOException e) { // TODO: Verify the program quality. 
Try to reconnect until it @@ -248,10 +254,10 @@ private void finalReceive() { do { bufFullCount = 0; try { - Object obj = tcpConnection.readObject(); + Object obj = connection.readObject(); count++; - if (debugPrint == 2) { + if (debugLevel == 2) { System.out.println(Thread.currentThread().getName() + " finalReceive - " + obj.toString()); } @@ -264,7 +270,7 @@ private void finalReceive() { hasData = true; while (!buffer.write(obj)) { - if (debugPrint == 3) { + if (debugLevel == 3) { System.out.println(Thread.currentThread().getName() + " finalReceive:Buffer FULL - " + obj.toString()); @@ -281,23 +287,20 @@ private void finalReceive() { e.printStackTrace(); } - if (stopType.get() == 2 && ++bufFullCount > 5) { + if (++bufFullCount > 20) { assert buffer != this.extraBuffer : "ExtraBuffer is full. This shouldn't be the case."; assert this.extraBuffer == null : "Extra buffer has already been created."; this.extraBuffer = new ExtraBuffer(); extraBuffer.write(obj); buffer = extraBuffer; - System.err - .println("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); System.err .println(name + " finalReceive:Writing extra data in to extra buffer"); - System.err - .println("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + break; } } - if (count % 1000 == 0 && debugPrint == 2) { + if (count % 1000 == 0 && debugLevel == 2) { System.out.println(Thread.currentThread().getName() + " - " + count + " no of items have been received"); } @@ -309,8 +312,10 @@ private void finalReceive() { softClosed = true; hasData = false; } catch (IOException e) { + e.printStackTrace(); System.out - .println("finalReceive:Closing by IOException. Not by softClose."); + .println(name + + " finalReceive:Closing by IOException. Not by softClose."); hasData = false; } } while (hasData); @@ -321,11 +326,11 @@ private void finalReceive() { * care about the data and just tuning a app for performance. 
*/ private void discardAll() { - System.out.println("Discarding input data..."); + // System.out.println(name + " Discarding input data..."); boolean hasData; do { try { - Object obj = tcpConnection.readObject(); + Object obj = connection.readObject(); hasData = true; } catch (ClassNotFoundException e) { hasData = true; @@ -335,7 +340,8 @@ private void discardAll() { hasData = false; } catch (IOException e) { System.out - .println("finalReceive:Closing by IOException. Not by softClose."); + .println(name + + " discardAll:Closing by IOException. Not by softClose."); hasData = false; } } while (hasData); @@ -344,9 +350,9 @@ private void discardAll() { private void reConnect() { while (stopType.get() == 0) { try { - System.out.println("TCPInputChannel : Reconnecting..."); - this.tcpConnection.closeConnection(); - tcpConnection = conProvider.getConnection(conInfo); + System.out.println(name + " Reconnecting..."); + this.connection.closeConnection(); + connection = conProvider.getConnection(conInfo); return; } catch (IOException e) { try { @@ -359,14 +365,12 @@ private void reConnect() { } @Override - public int getOtherNodeID() { - return 0; - } - - @Override - public void stop(int type) { - assert 0 < type && type < 4 : "Undefined stop type"; - this.stopType.set(type); + public void stop(DrainType type) { + if (this.stopType.get() == 0) { + stopType.set(type.toint()); + } else if (debugLevel > 0) { + System.err.println(name + " Stop has already been called."); + } } @Override @@ -460,4 +464,21 @@ public ImmutableList getUnprocessedData() { return unProcessedData; } + + @Override + public Connection getConnection() { + // TODO Auto-generated method stub + return null; + } + + @Override + public ConnectionInfo getConnectionInfo() { + // TODO Auto-generated method stub + return null; + } + + @Override + public Buffer getBuffer() { + return buffer; + } } diff --git a/src/edu/mit/streamjit/impl/distributed/node/TCPOutputChannel.java 
b/src/edu/mit/streamjit/impl/distributed/node/BlockingOutputChannel.java similarity index 68% rename from src/edu/mit/streamjit/impl/distributed/node/TCPOutputChannel.java rename to src/edu/mit/streamjit/impl/distributed/node/BlockingOutputChannel.java index b52c84fc..7a972a78 100644 --- a/src/edu/mit/streamjit/impl/distributed/node/TCPOutputChannel.java +++ b/src/edu/mit/streamjit/impl/distributed/node/BlockingOutputChannel.java @@ -29,10 +29,11 @@ import com.google.common.collect.ImmutableList; import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; import edu.mit.streamjit.impl.distributed.common.BoundaryChannel.BoundaryOutputChannel; import edu.mit.streamjit.impl.distributed.common.Connection; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; /** * This is {@link BoundaryOutputChannel} over TCP. 
Reads data from the given @@ -46,19 +47,19 @@ * @author Sumanan sumanan@mit.edu * @since May 29, 2013 */ -public class TCPOutputChannel implements BoundaryOutputChannel { +public class BlockingOutputChannel implements BoundaryOutputChannel { - FileWriter writer; + private final FileWriter writer; - private final int debugPrint; + private final int debugLevel; private final Buffer buffer; - private final TCPConnectionProvider conProvider; + private final ConnectionProvider conProvider; - private final TCPConnectionInfo conInfo; + private final ConnectionInfo conInfo; - private Connection tcpConnection; + private Connection connection; private final AtomicBoolean stopFlag; @@ -70,20 +71,29 @@ public class TCPOutputChannel implements BoundaryOutputChannel { protected ImmutableList unProcessedData; - public TCPOutputChannel(Buffer buffer, TCPConnectionProvider conProvider, - TCPConnectionInfo conInfo, String bufferTokenName, int debugPrint) { + public BlockingOutputChannel(int bufSize, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { + this(new ConcurrentArrayBuffer(bufSize), conProvider, conInfo, + bufferTokenName, debugLevel); + } + + public BlockingOutputChannel(Buffer buffer, ConnectionProvider conProvider, + ConnectionInfo conInfo, String bufferTokenName, int debugLevel) { this.buffer = buffer; this.conProvider = conProvider; this.conInfo = conInfo; this.stopFlag = new AtomicBoolean(false); this.isFinal = false; this.name = "TCPOutputChannel - " + bufferTokenName; - this.debugPrint = debugPrint; + this.debugLevel = debugLevel; this.unProcessedData = null; count = 0; + writer = fileWriter(); + } + private FileWriter fileWriter() { FileWriter w = null; - if (this.debugPrint == 5) { + if (this.debugLevel == 5) { try { w = new FileWriter(name, true); w.write("---------------------------------\n"); @@ -92,19 +102,12 @@ public TCPOutputChannel(Buffer buffer, TCPConnectionProvider conProvider, e.printStackTrace(); } } - 
writer = w; + return w; } - @Override - public final void closeConnection() throws IOException { + private void closeConnection() throws IOException { // tcpConnection.closeConnection(); - tcpConnection.softClose(); - } - - @Override - public final boolean isStillConnected() { - return (tcpConnection == null) ? false : tcpConnection - .isStillConnected(); + connection.softClose(); } @Override @@ -112,9 +115,9 @@ public final Runnable getRunnable() { return new Runnable() { @Override public void run() { - if (tcpConnection == null || !tcpConnection.isStillConnected()) { + if (connection == null || !connection.isStillConnected()) { try { - tcpConnection = conProvider.getConnection(conInfo); + connection = conProvider.getConnection(conInfo); } catch (IOException e) { e.printStackTrace(); } @@ -141,7 +144,7 @@ public void run() { } } - if (debugPrint > 0) { + if (debugLevel > 0) { System.err.println(Thread.currentThread().getName() + " - Exiting..."); System.out.println("isFinal " + isFinal); @@ -153,86 +156,40 @@ public void run() { public final void sendData() { while (this.buffer.size() > 0 && !stopFlag.get()) { - try { - Object obj = buffer.read(); - tcpConnection.writeObject(obj); - count++; - - if (debugPrint == 3) { - System.out.println(Thread.currentThread().getName() + " - " - + obj.toString()); - } - - if (writer != null) { - writer.write(obj.toString()); - writer.write('\n'); - } - } catch (IOException e) { - System.err - .println("TCP Output Channel. 
WriteObject exception."); - reConnect(); - } - if (count % 1000 == 0 && debugPrint == 2) { - System.out.println(Thread.currentThread().getName() + " - " - + count + " items have been sent"); - } + send(); } } - @Override - public final int getOtherNodeID() { - return 0; - } - - @Override - public final void stop(boolean isFinal) { - if (debugPrint > 0) - System.out.println(Thread.currentThread().getName() - + " - stop request"); - this.isFinal = isFinal; - this.stopFlag.set(true); - } - /** * This can be called when running the application with the final scheduling * configurations. Shouldn't be called when autotuner tunes. */ private void finalSend() { while (this.buffer.size() > 0) { - try { - Object o = buffer.read(); - tcpConnection.writeObject(o); - count++; - - if (debugPrint == 3) { - System.out.println(Thread.currentThread().getName() - + " FinalSend - " + o.toString()); - } - - if (writer != null) { - writer.write(o.toString()); - writer.write('\n'); - } + send(); + } + } - } catch (IOException e) { - System.err.println("TCP Output Channel. 
finalSend exception."); - } - if (count % 1000 == 0 && debugPrint == 2) { - System.out.println(Thread.currentThread().getName() - + " FinalSend - " + count - + " no of items have been sent"); - } + @Override + public final void stop(boolean isFinal) { + if (debugLevel > 0) + System.out.println(Thread.currentThread().getName() + + " - stop request"); + if (!this.stopFlag.get()) { + this.isFinal = isFinal; + this.stopFlag.set(true); + } else if (debugLevel > 0) { + System.err.println("Stop has already been called."); } } private void reConnect() { try { - this.tcpConnection.closeConnection(); + this.connection.closeConnection(); while (!stopFlag.get()) { System.out.println("TCPOutputChannel : Reconnecting..."); try { - this.tcpConnection = conProvider.getConnection(conInfo, - 1000); + this.connection = conProvider.getConnection(conInfo, 1000); return; } catch (SocketTimeoutException stex) { // We make this exception to recheck the stopFlag. Otherwise @@ -244,6 +201,33 @@ private void reConnect() { } } + private void send() { + try { + Object o = buffer.read(); + connection.writeObject(o); + count++; + + if (debugLevel == 3) { + System.out.println(Thread.currentThread().getName() + + " Send - " + o.toString()); + } + + if (writer != null) { + writer.write(o.toString()); + writer.write('\n'); + } + + } catch (IOException e) { + e.printStackTrace(); + System.err.println("TCP Output Channel. Send exception."); + reConnect(); + } + if (count % 1000 == 0 && debugLevel == 2) { + System.out.println(Thread.currentThread().getName() + " Send - " + + count + " no of items have been sent"); + } + } + @Override public final String name() { return name; @@ -266,4 +250,19 @@ public ImmutableList getUnprocessedData() { "Still processing... 
No unprocessed data"); return unProcessedData; } + + @Override + public Connection getConnection() { + return connection; + } + + @Override + public ConnectionInfo getConnectionInfo() { + return conInfo; + } + + @Override + public Buffer getBuffer() { + return buffer; + } } diff --git a/src/edu/mit/streamjit/impl/distributed/node/BufferManager.java b/src/edu/mit/streamjit/impl/distributed/node/BufferManager.java new file mode 100644 index 00000000..827cb80f --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/BufferManager.java @@ -0,0 +1,244 @@ +package edu.mit.streamjit.impl.distributed.node; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Sets; + +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; +import edu.mit.streamjit.impl.distributed.node.LocalBuffer.ConcurrentArrayLocalBuffer; +import edu.mit.streamjit.impl.distributed.node.LocalBuffer.LocalBuffer1; + +/** + * {@link BlobsManager} will use the services from {@link BufferManager}. + * Implementation of this interface is expected to do two tasks + *
              + *
            1. Calculates buffer sizes. + *
            2. Create local buffers. + *
            + * + * @author Sumanan sumanan@mit.edu + * @since May 28, 2014 + * + */ +public interface BufferManager { + + void initialise(); + + /** + * Second initialisation. If the buffer sizes are computed by controller and + * send back to the {@link StreamNode}s, this method can be called with the + * minimum input buffer size requirement. + * + * @param minInputBufSizes + */ + void initialise2(Map minInputBufSizes); + + ImmutableSet localTokens(); + + ImmutableSet outputTokens(); + + ImmutableSet inputTokens(); + + /** + * @return buffer sizes of each local and boundary channels. Returns + * null if the buffer sizes are not calculated yet. + * {@link #isbufferSizesReady()} tells whether the buffer sizes are + * calculated or not. + */ + ImmutableMap bufferSizes(); + + /** + * @return true iff buffer sizes are calculated. + */ + boolean isbufferSizesReady(); + + /** + * @return local buffers if buffer sizes are calculated. Otherwise returns + * null. + */ + ImmutableMap localBufferMap(); + + public static abstract class AbstractBufferManager implements BufferManager { + + protected final Set blobSet; + + protected final ImmutableSet localTokens; + + protected final ImmutableSet globalInputTokens; + + protected final ImmutableSet globalOutputTokens; + + protected boolean isbufferSizesReady; + + protected ImmutableMap bufferSizes; + + ImmutableMap localBufferMap; + + public AbstractBufferManager(Set blobSet) { + this.blobSet = blobSet; + + Set inputTokens = new HashSet<>(); + Set outputTokens = new HashSet<>(); + for (Blob b : blobSet) { + inputTokens.addAll(b.getInputs()); + outputTokens.addAll(b.getOutputs()); + } + + localTokens = ImmutableSet.copyOf(Sets.intersection(inputTokens, + outputTokens)); + globalInputTokens = ImmutableSet.copyOf(Sets.difference( + inputTokens, localTokens)); + globalOutputTokens = ImmutableSet.copyOf(Sets.difference( + outputTokens, localTokens)); + + isbufferSizesReady = false; + bufferSizes = null; + localBufferMap = null; + } + 
+ @Override + public ImmutableSet localTokens() { + return localTokens; + } + + @Override + public ImmutableSet outputTokens() { + return globalOutputTokens; + } + + @Override + public ImmutableSet inputTokens() { + return globalInputTokens; + } + + @Override + public ImmutableMap bufferSizes() { + return bufferSizes; + } + + @Override + public boolean isbufferSizesReady() { + return isbufferSizesReady; + } + + @Override + public ImmutableMap localBufferMap() { + return localBufferMap; + } + + protected final void createLocalBuffers() { + ImmutableMap.Builder bufferMapBuilder = ImmutableMap + . builder(); + for (Token t : localTokens) { + int bufSize = bufferSizes.get(t); + bufferMapBuilder + .put(t, concurrentArrayLocalBuffer1(t, bufSize)); + } + localBufferMap = bufferMapBuilder.build(); + } + + protected final LocalBuffer1 concurrentArrayLocalBuffer1(Token t, + int bufSize) { + List args = new ArrayList<>(1); + args.add(bufSize); + return new LocalBuffer1(t.toString(), ConcurrentArrayBuffer.class, + args, bufSize, 0); + } + + protected final LocalBuffer concurrentArrayLocalBuffer(Token t, + int bufSize) { + return new ConcurrentArrayLocalBuffer(bufSize); + } + + /** + * Just introduced to avoid code duplication. + */ + protected void addBufferSize(Token t, int minSize, + ImmutableMap.Builder bufferSizeMapBuilder) { + // TODO: Just to increase the performance. Change it later + int bufSize = Math.max(1000, minSize); + // System.out.println("Buffer size of " + t.toString() + " is " + + // bufSize); + bufferSizeMapBuilder.put(t, bufSize); + } + } + + /** + * Calculates buffer sizes locally at {@link StreamNode} side. No central + * calculation involved. 
+ */ + public static class SNLocalBufferManager extends AbstractBufferManager { + public SNLocalBufferManager(Set blobSet) { + super(blobSet); + } + + @Override + public void initialise() { + bufferSizes = calculateBufferSizes(blobSet); + createLocalBuffers(); + isbufferSizesReady = true; + } + + @Override + public void initialise2(Map minInputBufSizes) { + throw new java.lang.Error( + "initialise2() is not supposed to be called"); + } + + // TODO: Buffer sizes, including head and tail buffers, must be + // optimised. consider adding some tuning factor + private ImmutableMap calculateBufferSizes( + Set blobSet) { + ImmutableMap.Builder bufferSizeMapBuilder = ImmutableMap + . builder(); + + Map minInputBufCapaciy = new HashMap<>(); + Map minOutputBufCapaciy = new HashMap<>(); + + for (Blob b : blobSet) { + Set inputs = b.getInputs(); + for (Token t : inputs) { + minInputBufCapaciy.put(t, b.getMinimumBufferCapacity(t)); + } + + Set outputs = b.getOutputs(); + for (Token t : outputs) { + minOutputBufCapaciy.put(t, b.getMinimumBufferCapacity(t)); + } + } + + Set localTokens = Sets.intersection( + minInputBufCapaciy.keySet(), minOutputBufCapaciy.keySet()); + Set globalInputTokens = Sets.difference( + minInputBufCapaciy.keySet(), localTokens); + Set globalOutputTokens = Sets.difference( + minOutputBufCapaciy.keySet(), localTokens); + + for (Token t : localTokens) { + int bufSize = Math.max(minInputBufCapaciy.get(t), + minOutputBufCapaciy.get(t)); + addBufferSize(t, bufSize, bufferSizeMapBuilder); + } + + for (Token t : globalInputTokens) { + int bufSize = minInputBufCapaciy.get(t); + addBufferSize(t, bufSize, bufferSizeMapBuilder); + } + + for (Token t : globalOutputTokens) { + int bufSize = minOutputBufCapaciy.get(t); + addBufferSize(t, bufSize, bufferSizeMapBuilder); + } + return bufferSizeMapBuilder.build(); + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/CTRLRMessageVisitorImpl.java 
b/src/edu/mit/streamjit/impl/distributed/node/CTRLRMessageVisitorImpl.java index 28c63ca4..98a13775 100644 --- a/src/edu/mit/streamjit/impl/distributed/node/CTRLRMessageVisitorImpl.java +++ b/src/edu/mit/streamjit/impl/distributed/node/CTRLRMessageVisitorImpl.java @@ -22,20 +22,25 @@ package edu.mit.streamjit.impl.distributed.node; import java.io.IOException; +import java.util.HashSet; import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.CTRLRDrainProcessor; import edu.mit.streamjit.impl.distributed.common.CTRLRMessageVisitor; import edu.mit.streamjit.impl.distributed.common.Command; +import edu.mit.streamjit.impl.distributed.common.Command.CommandProcessor; import edu.mit.streamjit.impl.distributed.common.ConfigurationString; +import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor; import edu.mit.streamjit.impl.distributed.common.MiscCtrlElements; -import edu.mit.streamjit.impl.distributed.common.NodeInfo; import edu.mit.streamjit.impl.distributed.common.MiscCtrlElements.MiscCtrlElementProcessor; import edu.mit.streamjit.impl.distributed.common.MiscCtrlElements.NewConInfo; +import edu.mit.streamjit.impl.distributed.common.NodeInfo; import edu.mit.streamjit.impl.distributed.common.Request; -import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.CTRLRDrainProcessor; -import edu.mit.streamjit.impl.distributed.common.Command.CommandProcessor; -import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor; import edu.mit.streamjit.impl.distributed.common.Request.RequestProcessor; +import edu.mit.streamjit.impl.distributed.profiler.Profiler; +import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand; +import edu.mit.streamjit.impl.distributed.profiler.ProfilerCommand.ProfilerCommandProcessor; +import edu.mit.streamjit.impl.distributed.profiler.StreamNodeProfiler; /** * @author Sumanan 
sumanan@mit.edu @@ -45,14 +50,16 @@ public class CTRLRMessageVisitorImpl implements CTRLRMessageVisitor { private final StreamNode streamNode; private final RequestProcessor rp; - private final ConfigurationStringProcessor jp; + private final ConfigurationProcessor jp; private final MiscCtrlElementProcessor miscProcessor; + private final ProfilerCommandProcessorImpl pm; public CTRLRMessageVisitorImpl(StreamNode streamNode) { this.streamNode = streamNode; this.rp = new RequestProcessorImpl(); - this.jp = new CfgStringProcessorImpl(streamNode); + this.jp = new ConfigurationProcessorImpl(streamNode); this.miscProcessor = new MiscCtrlElementProcessorImpl(); + this.pm = new ProfilerCommandProcessorImpl(); } @Override @@ -93,9 +100,13 @@ public void visit(MiscCtrlElements miscCtrlElements) { miscCtrlElements.process(miscProcessor); } - public class MiscCtrlElementProcessorImpl - implements - MiscCtrlElementProcessor { + @Override + public void visit(ProfilerCommand command) { + command.process(pm); + } + + public class MiscCtrlElementProcessorImpl implements + MiscCtrlElementProcessor { @Override public void process(NewConInfo newConInfo) { @@ -150,4 +161,57 @@ public void processEXIT() { streamNode.exit(); } } + + public class ProfilerCommandProcessorImpl implements + ProfilerCommandProcessor { + + ProfilerCommandProcessorImpl() { + + } + + @Override + public void processSTART() { + createProfiler(); + if (streamNode.profiler.getState() == Thread.State.NEW) + streamNode.profiler.start(); + } + + /** + * Creates a new profiler only if + *
              + *
            1. no any profiler has already been created OR + *
            2. the previously created profiler has been terminated. + *
            + */ + private void createProfiler() { + if (streamNode.profiler == null) { + streamNode.profiler = new Profiler( + new HashSet(), + streamNode.controllerConnection); + } else if (streamNode.profiler.getState() == Thread.State.TERMINATED) { + System.err + .println("A profiler has already been created and terminated. Creating another new profiler."); + streamNode.profiler = new Profiler( + new HashSet(), + streamNode.controllerConnection); + + } else + System.err.println("A profiler has already been started."); + } + + @Override + public void processSTOP() { + streamNode.profiler.stopProfiling(); + } + + @Override + public void processPAUSE() { + streamNode.profiler.pauseProfiling(); + } + + @Override + public void processRESUME() { + streamNode.profiler.resumeProfiling(); + } + } } diff --git a/src/edu/mit/streamjit/impl/distributed/node/CfgStringProcessorImpl.java b/src/edu/mit/streamjit/impl/distributed/node/CfgStringProcessorImpl.java deleted file mode 100644 index 70fc5857..00000000 --- a/src/edu/mit/streamjit/impl/distributed/node/CfgStringProcessorImpl.java +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright (c) 2013-2014 Massachusetts Institute of Technology - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -package edu.mit.streamjit.impl.distributed.node; - -import static com.google.common.base.Preconditions.checkNotNull; - -import java.io.File; -import java.io.IOException; -import java.net.InetAddress; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.URLClassLoader; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import com.google.common.base.Strings; -import com.google.common.collect.ImmutableSet; - -import edu.mit.streamjit.api.OneToOneElement; -import edu.mit.streamjit.api.Worker; -import edu.mit.streamjit.impl.blob.Blob; -import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.blob.BlobFactory; -import edu.mit.streamjit.impl.blob.DrainData; -import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.Configuration.PartitionParameter; -import edu.mit.streamjit.impl.common.Configuration.PartitionParameter.BlobSpecifier; -import edu.mit.streamjit.impl.common.ConnectWorkersVisitor; -import edu.mit.streamjit.impl.compiler2.Compiler2BlobFactory; -import edu.mit.streamjit.impl.distributed.common.AppStatus; -import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.Error; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import 
edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; -import edu.mit.streamjit.impl.interp.Interpreter; -import edu.mit.streamjit.util.json.Jsonifiers; - -/** - * {@link ConfigurationStringProcessor} at {@link StreamNode} side. - * - * @author Sumanan sumanan@mit.edu - * @since May 27, 2013 - */ -public class CfgStringProcessorImpl implements ConfigurationStringProcessor { - - private StreamNode streamNode; - - private Configuration staticConfig = null; - - private TCPConnectionProvider conProvider; - - public CfgStringProcessorImpl(StreamNode streamNode) { - this.streamNode = streamNode; - } - - @Override - public void process(String json, ConfigType type, DrainData drainData) { - if (type == ConfigType.STATIC) { - if (this.staticConfig == null) { - this.staticConfig = Jsonifiers.fromJson(json, - Configuration.class); - - Map iNetAddressMap = (Map) staticConfig - .getExtraData(GlobalConstants.INETADDRESS_MAP); - - this.conProvider = new TCPConnectionProvider( - streamNode.getNodeID(), iNetAddressMap); - } else - System.err - .println("New static configuration received...But Ignored..."); - } else { - System.out.println("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); - System.out.println("New Configuration....."); - Configuration cfg = Jsonifiers.fromJson(json, Configuration.class); - ImmutableSet blobSet = getBlobs(cfg, staticConfig, drainData); - if (blobSet != null) { - try { - streamNode.controllerConnection - .writeObject(AppStatus.COMPILED); - } catch (IOException e) { - e.printStackTrace(); - } - - Map conInfoMap = (Map) cfg - .getExtraData(GlobalConstants.CONINFOMAP); - - streamNode.setBlobsManager(new BlobsManagerImpl(blobSet, - conInfoMap, streamNode, conProvider)); - } else { - try { - streamNode.controllerConnection - .writeObject(AppStatus.COMPILATION_ERROR); - } catch (IOException e) { - e.printStackTrace(); - } - - System.out.println("Couldn't get the blobset...."); - } - } - } - - private ImmutableSet getBlobs(Configuration 
dyncfg, - Configuration stccfg, DrainData drainData) { - - PartitionParameter partParam = dyncfg.getParameter( - GlobalConstants.PARTITION, PartitionParameter.class); - if (partParam == null) - throw new IllegalArgumentException( - "Partition parameter is not available in the received configuraion"); - - String topLevelWorkerName = (String) stccfg - .getExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME); - String jarFilePath = (String) stccfg - .getExtraData(GlobalConstants.JARFILE_PATH); - - OneToOneElement streamGraph = getStreamGraph(jarFilePath, - topLevelWorkerName); - if (streamGraph != null) { - ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); - streamGraph.visit(primitiveConnector); - Worker source = primitiveConnector.getSource(); - - List blobList = partParam - .getBlobsOnMachine(streamNode.getNodeID()); - - ImmutableSet.Builder blobSet = ImmutableSet.builder(); - - if (blobList == null) - return blobSet.build(); - - BlobFactory bf; - Configuration blobConfigs = dyncfg - .getSubconfiguration("blobConfigs"); - if (blobConfigs == null) { - blobConfigs = staticConfig; - bf = new Interpreter.InterpreterBlobFactory(); - } else { - bf = new Compiler2BlobFactory(); - } - - for (BlobSpecifier bs : blobList) { - Set workIdentifiers = bs.getWorkerIdentifiers(); - // DEBUG - System.out.println(String.format( - "A new blob with workers %s has been created.", - workIdentifiers.toString())); - ImmutableSet> workerset = bs.getWorkers(source); - - try { - Blob b = bf.makeBlob(workerset, blobConfigs, 1, drainData); - blobSet.add(b); - } catch (Exception ex) { - return null; - } - } - - return blobSet.build(); - } else - return null; - } - - /** - * Gets a Stream Graph from a jar file. 
- * - * @param jarFilePath - * @param topStreamClassName - * @return : StreamGraph - */ - private OneToOneElement getStreamGraph(String jarFilePath, - String topStreamClassName) { - checkNotNull(jarFilePath); - checkNotNull(topStreamClassName); - jarFilePath = this.getClass().getProtectionDomain().getCodeSource() - .getLocation().getPath(); - File jarFile = new java.io.File(jarFilePath); - if (!jarFile.exists()) { - System.out.println("Jar file not found...."); - try { - streamNode.controllerConnection - .writeObject(Error.FILE_NOT_FOUND); - } catch (IOException e) { - e.printStackTrace(); - } - return null; - } - - // In some benchmarks, top level stream class is written as an static - // inner class. So in that case, we have to find the outer - // class first. Java's Class.getName() returns "OutterClass$InnerClass" - // format. So if $ exists in the method argument - // topStreamClassName then the actual top level stream class is lies - // inside another class. - String outterClassName = null; - if (topStreamClassName.contains("$")) { - int pos = topStreamClassName.indexOf("$"); - outterClassName = (String) topStreamClassName.subSequence(0, pos); - topStreamClassName = topStreamClassName.substring(pos + 1); - } - - URL url; - try { - url = jarFile.toURI().toURL(); - URL[] urls = new URL[] { url }; - - ClassLoader loader = new URLClassLoader(urls); - Class topStreamClass; - if (!Strings.isNullOrEmpty(outterClassName)) { - Class clazz1 = loader.loadClass(outterClassName); - topStreamClass = getInngerClass(clazz1, topStreamClassName); - } else { - topStreamClass = loader.loadClass(topStreamClassName); - } - System.out.println(topStreamClass.getSimpleName()); - return (OneToOneElement) topStreamClass.newInstance(); - - } catch (MalformedURLException e) { - e.printStackTrace(); - System.out.println("Couldn't find the toplevel worker...Exiting"); - - // TODO: Try catch inside a catch block. Good practice??? 
- try { - streamNode.controllerConnection - .writeObject(Error.WORKER_NOT_FOUND); - } catch (IOException e1) { - // TODO Auto-generated catch block - e1.printStackTrace(); - } - // System.exit(0); - } catch (InstantiationException iex) { - System.err.println("InstantiationException exception."); - System.err - .println("Please ensure the top level StreamJit application" - + " class is public and have no argument constructor."); - iex.printStackTrace(); - } catch (Exception e) { - System.out.println("Couldn't find the toplevel worker."); - e.printStackTrace(); - - // TODO: Try catch inside a catch block. Good practice??? - try { - streamNode.controllerConnection - .writeObject(Error.WORKER_NOT_FOUND); - } catch (IOException e1) { - e1.printStackTrace(); - } - } - return null; - } - - private static Class getInngerClass(Class OutterClass, - String InnterClassName) throws ClassNotFoundException { - Class[] kl = OutterClass.getClasses(); - for (Class k : kl) { - if (InnterClassName.equals(k.getSimpleName())) { - return k; - } - } - throw new ClassNotFoundException( - String.format( - "Innter class %s is not found in the outter class %s. 
Check the accessibility/visibility of the inner class", - InnterClassName, OutterClass.getName())); - } -} diff --git a/src/edu/mit/streamjit/impl/distributed/node/ConfigurationProcessorImpl.java b/src/edu/mit/streamjit/impl/distributed/node/ConfigurationProcessorImpl.java new file mode 100644 index 00000000..814e83f6 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/ConfigurationProcessorImpl.java @@ -0,0 +1,386 @@ +package edu.mit.streamjit.impl.distributed.node; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.MalformedURLException; +import java.net.URL; +import java.net.URLClassLoader; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; +import com.google.common.base.Strings; +import com.google.common.collect.ImmutableSet; + +import edu.mit.streamjit.api.OneToOneElement; +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.blob.BlobFactory; +import edu.mit.streamjit.impl.blob.DrainData; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Configuration.PartitionParameter; +import edu.mit.streamjit.impl.common.Configuration.PartitionParameter.BlobSpecifier; +import edu.mit.streamjit.impl.common.ConnectWorkersVisitor; +import edu.mit.streamjit.impl.distributed.common.AppStatus; +import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; +import 
edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.Error; +import edu.mit.streamjit.impl.distributed.common.GlobalConstants; +import edu.mit.streamjit.impl.distributed.common.NetworkInfo; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.CompilationTime; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.util.json.Jsonifiers; + +/** + * {@link ConfigurationProcessor} at {@link StreamNode} side. + * + * @author Sumanan sumanan@mit.edu + * @since May 27, 2013 + */ +public class ConfigurationProcessorImpl implements ConfigurationProcessor { + + private StreamNode streamNode; + + private Configuration staticConfig = null; + + private ConnectionProvider conProvider; + + public ConfigurationProcessorImpl(StreamNode streamNode) { + this.streamNode = streamNode; + } + + @Override + public void process(String json, ConfigType type, DrainData drainData) { + if (type == ConfigType.STATIC) { + processStaticCfg(json); + } else { + processDynamicCfg(json, drainData); + } + } + + private void processStaticCfg(String json) { + if (this.staticConfig == null) { + this.staticConfig = Jsonifiers.fromJson(json, Configuration.class); + + Map iNetAddressMap = (Map) staticConfig + .getExtraData(GlobalConstants.INETADDRESS_MAP); + + NetworkInfo networkInfo = new NetworkInfo(iNetAddressMap); + + this.conProvider = new ConnectionProvider(streamNode.getNodeID(), + networkInfo); + } else + System.err + .println("New static configuration received...But Ignored..."); + } + + private void processDynamicCfg(String json, DrainData drainData) { + System.out + .println("------------------------------------------------------------"); + System.out.println("New Configuration....."); + streamNode.releaseOldBM(); + Configuration cfg = Jsonifiers.fromJson(json, Configuration.class); + ImmutableSet blobSet = getBlobs(cfg, drainData); + if (blobSet != null) { + try { + 
streamNode.controllerConnection.writeObject(AppStatus.COMPILED); + } catch (IOException e) { + e.printStackTrace(); + } + + Map conInfoMap = (Map) cfg + .getExtraData(GlobalConstants.CONINFOMAP); + + String appName = (String) staticConfig + .getExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME); + streamNode.setBlobsManager(new BlobsManagerImpl(blobSet, + conInfoMap, streamNode, conProvider, appName)); + } else { + try { + streamNode.controllerConnection + .writeObject(AppStatus.COMPILATION_ERROR); + } catch (IOException e) { + e.printStackTrace(); + } + + System.out.println("Couldn't get the blobset...."); + } + } + + private ImmutableSet getBlobs(Configuration dyncfg, + DrainData drainData) { + + PartitionParameter partParam = dyncfg.getParameter( + GlobalConstants.PARTITION, PartitionParameter.class); + if (partParam == null) + throw new IllegalArgumentException( + "Partition parameter is not available in the received configuraion"); + + OneToOneElement streamGraph = getStreamGraph(); + if (streamGraph != null) { + ConnectWorkersVisitor primitiveConnector = new ConnectWorkersVisitor(); + streamGraph.visit(primitiveConnector); + Worker source = primitiveConnector.getSource(); + + List blobList = partParam + .getBlobsOnMachine(streamNode.getNodeID()); + + ImmutableSet.Builder blobSet = ImmutableSet.builder(); + + if (blobList == null) + return blobSet.build(); + + Configuration blobConfigs = dyncfg + .getSubconfiguration("blobConfigs"); + return blobset1(blobSet, blobList, drainData, blobConfigs, source); + + } else + return null; + } + + private void sendCompilationTime(Stopwatch sw, Token blobID) { + sw.stop(); + CompilationTime ct = new CompilationTime(blobID, + sw.elapsed(TimeUnit.MILLISECONDS)); + try { + streamNode.controllerConnection.writeObject(ct); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * Gets a Stream Graph from a jar file. 
+ * + * @return : StreamGraph + */ + private OneToOneElement getStreamGraph() { + String topStreamClassName = (String) staticConfig + .getExtraData(GlobalConstants.TOPLEVEL_WORKER_NAME); + String jarFilePath = (String) staticConfig + .getExtraData(GlobalConstants.JARFILE_PATH); + + checkNotNull(jarFilePath); + checkNotNull(topStreamClassName); + jarFilePath = this.getClass().getProtectionDomain().getCodeSource() + .getLocation().getPath(); + File jarFile = new java.io.File(jarFilePath); + if (!jarFile.exists()) { + System.out.println("Jar file not found...."); + try { + streamNode.controllerConnection + .writeObject(Error.FILE_NOT_FOUND); + } catch (IOException e) { + e.printStackTrace(); + } + return null; + } + + // In some benchmarks, top level stream class is written as an static + // inner class. So in that case, we have to find the outer + // class first. Java's Class.getName() returns "OutterClass$InnerClass" + // format. So if $ exists in the method argument + // topStreamClassName then the actual top level stream class is lies + // inside another class. 
+ String outterClassName = null; + if (topStreamClassName.contains("$")) { + int pos = topStreamClassName.indexOf("$"); + outterClassName = (String) topStreamClassName.subSequence(0, pos); + topStreamClassName = topStreamClassName.substring(pos + 1); + } + + URL url; + try { + url = jarFile.toURI().toURL(); + URL[] urls = new URL[] { url }; + + ClassLoader loader = new URLClassLoader(urls); + Class topStreamClass; + if (!Strings.isNullOrEmpty(outterClassName)) { + Class clazz1 = loader.loadClass(outterClassName); + topStreamClass = getInngerClass(clazz1, topStreamClassName); + } else { + topStreamClass = loader.loadClass(topStreamClassName); + } + System.out.println(topStreamClass.getSimpleName()); + return (OneToOneElement) topStreamClass.newInstance(); + + } catch (MalformedURLException e) { + e.printStackTrace(); + System.out.println("Couldn't find the toplevel worker...Exiting"); + + // TODO: Try catch inside a catch block. Good practice??? + try { + streamNode.controllerConnection + .writeObject(Error.WORKER_NOT_FOUND); + } catch (IOException e1) { + // TODO Auto-generated catch block + e1.printStackTrace(); + } + // System.exit(0); + } catch (InstantiationException iex) { + System.err.println("InstantiationException exception."); + System.err + .println("Please ensure the top level StreamJit application" + + " class is public and have no argument constructor."); + iex.printStackTrace(); + } catch (Exception e) { + System.out.println("Couldn't find the toplevel worker."); + e.printStackTrace(); + + // TODO: Try catch inside a catch block. Good practice??? 
+ try { + streamNode.controllerConnection + .writeObject(Error.WORKER_NOT_FOUND); + } catch (IOException e1) { + e1.printStackTrace(); + } + } + return null; + } + + private static Class getInngerClass(Class OutterClass, + String InnterClassName) throws ClassNotFoundException { + Class[] kl = OutterClass.getClasses(); + for (Class k : kl) { + if (InnterClassName.equals(k.getSimpleName())) { + return k; + } + } + throw new ClassNotFoundException( + String.format( + "Innter class %s is not found in the outter class %s. Check the accessibility/visibility of the inner class", + InnterClassName, OutterClass.getName())); + } + + /** + * Compiles the blobs in serial. + */ + private ImmutableSet blobset(ImmutableSet.Builder blobSet, + List blobList, DrainData drainData, + Configuration blobConfigs, Worker source) { + for (BlobSpecifier bs : blobList) { + Set workIdentifiers = bs.getWorkerIdentifiers(); + ImmutableSet> workerset = bs.getWorkers(source); + try { + BlobFactory bf = bs.getBlobFactory(); + int maxCores = bs.getCores(); + Stopwatch sw = Stopwatch.createStarted(); + DrainData dd = drainData == null ? null : drainData + .subset(workIdentifiers); + Blob b = bf.makeBlob(workerset, blobConfigs, maxCores, dd); + sendCompilationTime(sw, Utils.getblobID(workerset)); + blobSet.add(b); + } catch (Exception ex) { + ex.printStackTrace(); + return null; + } catch (OutOfMemoryError er) { + Utils.printOutOfMemory(); + return null; + } + // // DEBUG MSG + // if (!GlobalConstants.singleNodeOnline) + // System.out.println(String.format( + // "A new blob with workers %s has been created.", + // workIdentifiers.toString())); + } + System.out.println("All blobs have been created"); + return blobSet.build(); + } + + /** + * Compiles the blobs in parallel. 
+ */ + private ImmutableSet blobset1(ImmutableSet.Builder blobSet, + List blobList, DrainData drainData, + Configuration blobConfigs, Worker source) { + Set> futures = new HashSet<>(); + ExecutorService executerSevce = Executors.newFixedThreadPool(blobList + .size()); + + for (BlobSpecifier bs : blobList) { + MakeBlob mb = new MakeBlob(bs, drainData, blobConfigs, source); + Future f = executerSevce.submit(mb); + futures.add(f); + } + + executerSevce.shutdown(); + + while (!executerSevce.isTerminated()) { + try { + Thread.sleep(200); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + for (Future f : futures) { + Blob b; + try { + b = f.get(); + if (b == null) + return null; + blobSet.add(b); + } catch (InterruptedException | ExecutionException e) { + e.printStackTrace(); + } + } + + System.out.println("All blobs have been created"); + return blobSet.build(); + } + + private class MakeBlob implements Callable { + private final BlobSpecifier bs; + private final DrainData drainData; + private final Configuration blobConfigs; + private final Worker source; + + private MakeBlob(BlobSpecifier bs, DrainData drainData, + Configuration blobConfigs, Worker source) { + this.bs = bs; + this.drainData = drainData; + this.blobConfigs = blobConfigs; + this.source = source; + } + + @Override + public Blob call() throws Exception { + Blob b = null; + Set workIdentifiers = bs.getWorkerIdentifiers(); + ImmutableSet> workerset = bs.getWorkers(source); + try { + BlobFactory bf = bs.getBlobFactory(); + int maxCores = bs.getCores(); + Stopwatch sw = Stopwatch.createStarted(); + DrainData dd = drainData == null ? 
null : drainData + .subset(workIdentifiers); + b = bf.makeBlob(workerset, blobConfigs, maxCores, dd); + sendCompilationTime(sw, Utils.getblobID(workerset)); + } catch (Exception ex) { + ex.printStackTrace(); + } catch (OutOfMemoryError er) { + Utils.printOutOfMemory(); + } + // DEBUG MSG + // if (!GlobalConstants.singleNodeOnline && b != null) + // System.out.println(String.format( + // "A new blob with workers %s has been created.", + // workIdentifiers.toString())); + return b; + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/DynamicBufferManager.java b/src/edu/mit/streamjit/impl/distributed/node/DynamicBufferManager.java new file mode 100644 index 00000000..7c308403 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/DynamicBufferManager.java @@ -0,0 +1,309 @@ +package edu.mit.streamjit.impl.distributed.node; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import edu.mit.streamjit.impl.blob.Blob; +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.util.ConstructorSupplier; +import edu.mit.streamjit.util.ReflectionUtils; + +/** + * Provides dynamic buffer backed by a buffer implementation which is passed as + * an argument to {@link #getBuffer(Class, List, int, int)} method. Use a + * {@link DynamicBufferManager} per blob ( i.e., one to one mapping between a + * blob and a DynamicBufferManager). Do not use same instance of + * {@link DynamicBufferManager} to create buffers for multiple blobs or multiple + * {@link DynamicBufferManager}s to make buffer for a single blob. Doing this + * will affect the deadlock detection algorithm and ultimately results either + * infinite buffer growth or unsolvable deadlock. 
+ * + * @author sumanan + * @since Mar 10, 2014 + * + */ +public final class DynamicBufferManager { + + /** + * inputs of the blob in which this object is bound to. + */ + private final Set inputs; + + /** + * outputs of the blob in which this object is bound to. + */ + private final Set outputs; + + /** + * keeps track of all buffers created for a particular blob. We need to + * track this list to determine whether there is an actual deadlock or this + * blob is faster than all down blobs. + */ + private List buffers; + + public DynamicBufferManager(Blob blob) { + inputs = blob.getInputs(); + outputs = blob.getOutputs(); + buffers = new ArrayList<>(); + } + + /** + * Makes and returns a dynamic buffer backed by an instance of bufferClass. + * Passed bufferClass ( a concrete implementation of {@link Buffer}) must + * have an constructor which takes the capacity of the new buffer as an + * argument. + * + * @param Name + * : Name for this buffer. Just for documentation purpose. + * Token.toString() may be passed where the token is a token of + * the input/output edge of a blob. + * @param bufferClass + * : Any concrete implementation of {@link Buffer}. + * @param initialArguments + * : Constructor arguments. : Initial capacity of the buffer. + * @param capacityPos + * : the position of size parameter in the bufferClass. + * @return : A dynamic buffer backed by an instance of bufferClass. + */ + public Buffer getBuffer(Token t, Class bufferClass, + List initialArguments, int initialCapacity, int capacityPos) { + + if (!(outputs.contains(t) || inputs.contains(t))) + throw new IllegalAccessError( + String.format( + "%s is not related to the blob in which this dynamic buffer manager is bound to", + t.toString()));; + + Buffer buf = new DynamicBuffer(t.toString(), bufferClass, + initialArguments, initialCapacity, capacityPos); + buffers.add(buf); + return buf; + } + /** + * Dynamically increases supplied buffer capacity in order to avoid dead + * locks. 
Actually creates a new instance of the supplied buffer and copy + * the data from old buffer to new buffer. A decorator pattern for + * {@link Buffer}. + * + *

            + * Determining whether buffer fullness is due to deadlock situation or the + * current blob is executing on a faster node than the down stream blob is + * little tricky. + *

            + * + *

            + * TODO: {@link ConstructorSupplier} can be reused here to instantiate the + * buffer instances if we make {@link ConstructorSupplier}.arguments not + * final. + *

            + * + *

            + * TODO: Possible performance bug during read() due to volatile buffer + * variable and the need for acquire readLock for every single reading. Any + * way to improve this???. splitjoin1 show 30-40% performance overhead when + * uses {@link DynamicBuffer}. + * + * @author sumanan + * @since Mar 10, 2014 + * + */ + private class DynamicBuffer implements Buffer { + + private final String name; + + /** + * Minimum time gap between the last successful write and the current + * time in order to consider the option of doubling the buffer + */ + private final long gap; + + /** + * Every successful write operation should update this time. + */ + private long lastWrittenTime; + + /** + * When the algorithm detects there are some progress ( May be after + * some expansions), this flag is set to stop any future expansions. + * This is to prevent infinity buffer growth. + */ + private boolean expandable; + + /** + * Read lock should be acquired at every single read where as write lock + * only when switching the buffer from old to new. + */ + private ReadWriteLock rwlock; + + private final List initialArguments; + private final int initialCapacity; + private final int capacityPos; + private final Constructor cons; + + /** + * TODO: Volatileness may severely affects the reading performance. 
+ */ + private volatile Buffer buffer; + + public DynamicBuffer(String name, Class bufferClass, + List initialArguments, int initialCapacity, int capacityPos) { + this.name = name; + this.initialArguments = initialArguments; + this.initialCapacity = initialCapacity; + this.capacityPos = capacityPos; + Constructor con = null; + try { + con = ReflectionUtils.findConstructor(bufferClass, + initialArguments); + } catch (NoSuchMethodException e1) { + e1.printStackTrace(); + } + this.cons = con; + this.buffer = getNewBuffer(initialCapacity); + this.gap = 200000000; // 200ms + expandable = true; + rwlock = new ReentrantReadWriteLock(); + lastWrittenTime = 0; + } + + private List getArguments(int newCapacity) { + List newArgs = new ArrayList<>(initialArguments.size()); + for (int i = 0; i < initialArguments.size(); i++) { + if (i == capacityPos) + newArgs.add(newCapacity); + else + newArgs.add(initialArguments.get(i)); + } + return newArgs; + } + + private Buffer getNewBuffer(int newCapacity) { + Buffer buffer; + try { + buffer = cons.newInstance(getArguments(newCapacity).toArray()); + return buffer; + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException e) { + e.printStackTrace(); + return null; + } + } + + @Override + public Object read() { + rwlock.readLock().lock(); + Object o = buffer.read(); + rwlock.readLock().unlock(); + return o; + } + + @Override + public int read(Object[] data, int offset, int length) { + rwlock.readLock().lock(); + int ret = buffer.read(data, offset, length); + rwlock.readLock().unlock(); + return ret; + } + + @Override + public boolean readAll(Object[] data) { + rwlock.readLock().lock(); + boolean ret = buffer.readAll(data); + rwlock.readLock().unlock(); + return ret; + } + + @Override + public boolean readAll(Object[] data, int offset) { + rwlock.readLock().lock(); + boolean ret = buffer.readAll(data, offset); + rwlock.readLock().unlock(); + return ret; + } + + @Override + public 
boolean write(Object t) { + boolean ret = buffer.write(t); + if (!ret) + writeFailed(); + else if (lastWrittenTime != 0) + lastWrittenTime = 0; + return ret; + } + + @Override + public int write(Object[] data, int offset, int length) { + int written = buffer.write(data, offset, length); + if (written == 0) + writeFailed(); + else if (lastWrittenTime != 0) + lastWrittenTime = 0; + return written; + } + + @Override + public int size() { + return buffer.size(); + } + + @Override + public int capacity() { + return buffer.capacity(); + } + + private void writeFailed() { + if (areAllFull() || !expandable) + return; + + if (lastWrittenTime == 0) { + lastWrittenTime = System.nanoTime(); + return; + } + + if (System.nanoTime() - lastWrittenTime > gap && expandable) { + doubleBuffer(); + } + } + + private boolean areAllFull() { + for (Buffer b : buffers) { + if (b.size() != b.capacity()) + return false; + } + return true; + } + + private void doubleBuffer() { + int newCapacity = 2 * buffer.capacity(); + if (newCapacity > 1024 * initialCapacity) { + expandable = false; + return; + } + System.out + .println(String + .format("%s : Doubling the buffer: initialCapacity - %d, newCapacity - %d", + name, initialCapacity, newCapacity)); + Buffer newBuf = getNewBuffer(newCapacity); + rwlock.writeLock().lock(); + final int size = buffer.size(); + // TODO: copying is done one by one. Any block level copying? 
+ for (int i = 0; i < size; i++) { + newBuf.write(buffer.read()); + } + + if (buffer.size() != 0) { + throw new IllegalStateException( + "Buffter is not empty after copying all data"); + } + this.buffer = newBuf; + lastWrittenTime = 0; + rwlock.writeLock().unlock(); + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/LocalBuffer.java b/src/edu/mit/streamjit/impl/distributed/node/LocalBuffer.java new file mode 100644 index 00000000..55dfb06c --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/node/LocalBuffer.java @@ -0,0 +1,310 @@ +package edu.mit.streamjit.impl.distributed.node; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; + +import edu.mit.streamjit.impl.blob.Buffer; +import edu.mit.streamjit.impl.blob.ConcurrentArrayBuffer; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; +import edu.mit.streamjit.util.ConstructorSupplier; +import edu.mit.streamjit.util.ReflectionUtils; + +/** + * {@link LocalBuffer} connects up blob and down blob where both are running at + * same StreamNode. In this case, we can simply use any {@link Buffer} + * implementation to connect up blob and down blob. But at the draining time + * blobs write large amount of data, which blobs buffered inside during the init + * schedule, and limited buffer size causes deadlock. Implementations of the + * {@link LocalBuffer} are supposed to understand drainingStarted event and + * increase the buffer size smartly to avoid deadlock situation. see Deadlock 5. 
+ * + * @author Sumanan sumanan@mit.edu + * @since Sept 23, 2014 + */ +public interface LocalBuffer extends Buffer { + + public void drainingStarted(DrainType drainType); + + /** + * Just a wrapper for {@link ConcurrentArrayBuffer} + */ + public class ConcurrentArrayLocalBuffer implements LocalBuffer { + private final ConcurrentArrayBuffer buffer; + + public ConcurrentArrayLocalBuffer(int capacity) { + buffer = new ConcurrentArrayBuffer(capacity); + } + + @Override + public Object read() { + return buffer.read(); + } + + @Override + public int read(Object[] data, int offset, int length) { + return buffer.read(data, offset, length); + } + + @Override + public boolean readAll(Object[] data) { + return buffer.readAll(data); + } + + @Override + public boolean readAll(Object[] data, int offset) { + return buffer.readAll(data, offset); + } + + @Override + public boolean write(Object t) { + return buffer.write(t); + } + + @Override + public int write(Object[] data, int offset, int length) { + return buffer.write(data, offset, length); + } + + @Override + public int size() { + return buffer.size(); + } + + @Override + public int capacity() { + return buffer.capacity(); + } + + @Override + public void drainingStarted(DrainType drainType) { + System.out.println("drainingStarted: Not supported"); + } + } + + /** + * Modified version of {@link DynamicBufferManager#DynamicBuffer}. Instead + * of dynamically increase the buffer sizes, this implementation creates an + * additional buffer to unlock the draining time deadlock. + * + * + * Blobs write more than expected amount of data during the draining time + * and, sometimes the output buffers become full forever at the draining + * time and blobs spin on write() forever. This implementation creates new a + * supplied buffer in order to avoid dead locks during draining time. + * + *

            + * Determining whether buffer fullness is due to deadlock situation or the + * current blob is executing on a faster node than the down stream blob is + * little tricky. + *

            + * + *

            + * TODO: {@link ConstructorSupplier} can be reused here to instantiate the + * buffer instances if we make {@link ConstructorSupplier}.arguments not + * final. + *

            + * + */ + public class LocalBuffer1 implements LocalBuffer { + + private final int capacityPos; + + private final Constructor cons; + + private final Buffer defaultBuffer; + + private volatile Buffer drainBuffer; + + /** + * Minimum time gap between the last successful write and the current + * time in order to consider the option of doubling the buffer + */ + private final long gap; + + private volatile boolean hasDrainingStarted; + + private final List initialArguments; + + private final int initialCapacity; + + /** + * Every successful write operation should update this time. + */ + private long lastWrittenTime; + + private final String name; + + private Buffer writeBuffer; + + public LocalBuffer1(String name, Class bufferClass, + List initialArguments, int initialCapacity, int capacityPos) { + this.name = name; + this.initialArguments = initialArguments; + this.initialCapacity = initialCapacity; + this.capacityPos = capacityPos; + Constructor con = null; + try { + con = ReflectionUtils.findConstructor(bufferClass, + initialArguments); + } catch (NoSuchMethodException e1) { + e1.printStackTrace(); + } + this.cons = con; + this.defaultBuffer = getNewBuffer(initialCapacity); + this.writeBuffer = defaultBuffer; + this.gap = 10_000_000_000l; // 10s + hasDrainingStarted = false; + } + + /* + * (non-Javadoc) + * + * @see edu.mit.streamjit.impl.blob.Buffer#capacity() + * + * if hasDrainingStarted == true sends defaultBuffer.capacity() * 2 as + * capacity because if the room is 0 blobs never try to call write and + * hence wirteFailed() will never be called. Check + * Interpreter#pushOutputs(). + */ + @Override + public int capacity() { + int cap; + if (hasDrainingStarted) { + cap = drainBuffer == null ? 
defaultBuffer.capacity() * 2 + : drainBuffer.capacity() + defaultBuffer.capacity(); + } else + cap = defaultBuffer.capacity(); + return cap; + } + + @Override + public void drainingStarted(DrainType drainType) { + hasDrainingStarted = true; + } + + @Override + public Object read() { + Object o; + if (drainBuffer == null || defaultBuffer.size() > 0) + o = defaultBuffer.read(); + else + o = drainBuffer.read(); + return o; + } + + @Override + public int read(Object[] data, int offset, int length) { + int ret; + if (drainBuffer == null) + ret = defaultBuffer.read(data, offset, length); + else if (defaultBuffer.size() == 0) + ret = drainBuffer.read(data, offset, length); + else { + ret = defaultBuffer.read(data, offset, length); + ret += drainBuffer.read(data, offset + ret, length - ret); + } + return ret; + } + + @Override + public boolean readAll(Object[] data) { + return readAll(data, 0); + } + + @Override + public boolean readAll(Object[] data, int offset) { + boolean ret; + int need = data.length - offset; + if (drainBuffer == null) + ret = defaultBuffer.readAll(data, offset); + else if (defaultBuffer.size() == 0) + ret = drainBuffer.readAll(data, offset); + else if (need > defaultBuffer.size() + drainBuffer.size()) + ret = false; + else { + int read = defaultBuffer.read(data, offset, need); + read += drainBuffer.read(data, offset + read, need - read); + ret = true; + if (read != need) + throw new IllegalStateException( + "data buffer is not full. Check the logic"); + } + return ret; + } + + @Override + public int size() { + int size = drainBuffer == null ? 
defaultBuffer.size() : drainBuffer + .size() + defaultBuffer.size(); + return size; + } + + @Override + public boolean write(Object t) { + boolean ret = writeBuffer.write(t); + if (!ret) + writeFailed(); + return ret; + } + + @Override + public int write(Object[] data, int offset, int length) { + int written = writeBuffer.write(data, offset, length); + if (written != length) { + writeFailed(); + written += writeBuffer.write(data, offset + written, length + - written); + } + return written; + } + + private void createDrainBuffer() { + assert drainBuffer == null : "drainBuffer has already been created."; + int newCapacity = 4 * defaultBuffer.capacity(); + System.out + .println(String + .format("%s : Creating drain buffer: defaultBufferCapacity - %d, drainBufferCapacity - %d", + name, initialCapacity, newCapacity)); + drainBuffer = getNewBuffer(newCapacity); + this.writeBuffer = drainBuffer; + } + + private List getArguments(int newCapacity) { + List newArgs = new ArrayList<>(initialArguments.size()); + for (int i = 0; i < initialArguments.size(); i++) { + if (i == capacityPos) + newArgs.add(newCapacity); + else + newArgs.add(initialArguments.get(i)); + } + return newArgs; + } + + private Buffer getNewBuffer(int newCapacity) { + Buffer buffer; + try { + buffer = cons.newInstance(getArguments(newCapacity).toArray()); + return buffer; + } catch (InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException e) { + e.printStackTrace(); + return null; + } + } + + private void writeFailed() { + if (!hasDrainingStarted) + return; + + if (drainBuffer != null) + throw new IllegalStateException( + String.format( + "drainBuffer is full " + + "drainBuffer.size() = %d, defaultBuffer.size() = %d ", + drainBuffer.size(), defaultBuffer.size())); + createDrainBuffer(); + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/node/StreamNode.java b/src/edu/mit/streamjit/impl/distributed/node/StreamNode.java index 2db91c60..76ba1b0c 
100644 --- a/src/edu/mit/streamjit/impl/distributed/node/StreamNode.java +++ b/src/edu/mit/streamjit/impl/distributed/node/StreamNode.java @@ -32,6 +32,7 @@ import edu.mit.streamjit.impl.distributed.common.ConnectionFactory; import edu.mit.streamjit.impl.distributed.common.GlobalConstants; import edu.mit.streamjit.impl.distributed.common.Ipv4Validator; +import edu.mit.streamjit.impl.distributed.profiler.Profiler; import edu.mit.streamjit.impl.distributed.runtimer.Controller; /** @@ -59,6 +60,8 @@ public class StreamNode extends Thread { private volatile BlobsManager blobsManager; + Profiler profiler; + private boolean run; // As we assume that all controller communication and // the MessageElement processing is managed by // single @@ -110,13 +113,7 @@ public void run() { run = false; } } - - try { - this.controllerConnection.closeConnection(); - } catch (IOException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } + safelyCloseResources(); } public int getNodeID() { @@ -141,7 +138,10 @@ public BlobsManager getBlobsManager() { * the blobsManager to set */ public void setBlobsManager(BlobsManager blobsManager) { + releaseOldBM(); this.blobsManager = blobsManager; + if (profiler != null && blobsManager != null) + profiler.addAll(blobsManager.profilers()); } public void exit() { @@ -152,6 +152,42 @@ public String tostString() { return "StreamNode-" + myNodeID; } + private void safelyCloseResources() { + if (blobsManager != null) + blobsManager.stop(); + + if (profiler != null) { + profiler.stopProfiling(); + + try { + profiler.join(); + } catch (InterruptedException e1) { + e1.printStackTrace(); + } + } + + if (controllerConnection.isStillConnected()) + try { + this.controllerConnection.closeConnection(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + /** + * Un-references old BlobManager object before creating new one. + */ + void releaseOldBM() { + // [2014-3-20] We need to release blobsmanager to release the + // memory. 
Otherwise, Blobthread2.corecode will occupy the memory. + if (blobsManager != null) { + blobsManager.stop(); + if (profiler != null) + profiler.removeAll(blobsManager.profilers()); + blobsManager = null; + } + } + /** * @param args */ diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/MasterProfiler.java b/src/edu/mit/streamjit/impl/distributed/profiler/MasterProfiler.java new file mode 100644 index 00000000..6886471e --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/MasterProfiler.java @@ -0,0 +1,37 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import edu.mit.streamjit.impl.distributed.profiler.ProfileElementLoggers.FileProfileElementLogger; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNProfileElementProcessor; + +/** + * Profiling data from all StreamNodes come to this central point. 
+ * + * @author sumanan + * @since 27 Jan, 2015 + */ +public class MasterProfiler implements SNProfileElementProcessor { + + // private final Map BufferStatusDataMap; + + private final ProfileElementLogger logger; + + public ProfileElementLogger logger() { + return logger; + } + + public MasterProfiler(String appName) { + // BufferStatusDataMap = new ConcurrentHashMap<>(); + logger = new FileProfileElementLogger(appName); + } + + @Override + public void process(SNBufferStatusData bufferStatusData) { + // BufferStatusDataMap.put(bufferStatusData.machineID, + // bufferStatusData); + logger.process(bufferStatusData); + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLogger.java b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLogger.java new file mode 100644 index 00000000..6bffbc79 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLogger.java @@ -0,0 +1,24 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNProfileElementProcessor; + +/** + * Logs the {@link SNProfileElement}s. This interface extends + * {@link SNProfileElementProcessor} so that there is no need to manually check + * and add the process methods whenever a new {@link SNProfileElement} is added. + * + * @author sumanan + * @since 29 Jan, 2015 + */ +public interface ProfileElementLogger extends SNProfileElementProcessor { + + /** + * This method shall be called to indicate to the logger that the + * configuration has been changed. + * + * @param cfgName + * The name of the new configuration. Pass an empty String if the + * cfgName is not available (unknown) to the caller. 
+ */ + public void newConfiguration(String cfgName); +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLoggers.java b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLoggers.java new file mode 100755 index 00000000..e99d2d3f --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileElementLoggers.java @@ -0,0 +1,101 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.io.OutputStreamWriter; + +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData.BlobBufferStatus; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNBufferStatusData.BufferStatus; + +/** + * Collection of various {@link ProfileElementLogger} implementations. + * + * @author sumanan + * @since 29 Jan, 2015 + */ +public class ProfileElementLoggers { + + public static class FileProfileElementLogger + extends + ProfileElementLoggerImpl { + + public FileProfileElementLogger(String appName) { + super(Utils.fileWriter(appName, "profile.txt")); + } + } + + /** + * Prints the SNProfileElements to the StdOut. 
+ * + */ + public static class PrintProfileElementLogger + extends + ProfileElementLoggerImpl { + public PrintProfileElementLogger() { + super(System.out); + } + } + + private static class ProfileElementLoggerImpl implements + ProfileElementLogger { + + private final OutputStreamWriter writer; + + private final Object lock = new Object(); + + ProfileElementLoggerImpl(OutputStream writer) { + this(getOSWriter(writer)); + } + + ProfileElementLoggerImpl(OutputStreamWriter writer) { + this.writer = writer; + } + + @Override + public void process(SNBufferStatusData bufferStatusData) { + if (writer == null) + return; + + synchronized (lock) { + try { + writer.write(String.format("MachineID=%d\n", + bufferStatusData.machineID)); + for (BlobBufferStatus bbs : bufferStatusData.blobsBufferStatusSet) { + writer.write(String.format("\tBlob=%s\n", bbs.blobID)); + writer.write("\t\tInput...\n"); + for (BufferStatus bs : bbs.inputSet) + writer.write(String.format("\t\t\t%s\n", bs)); + writer.write("\t\tOutput...\n"); + for (BufferStatus bs : bbs.outputSet) + writer.write(String.format("\t\t\t%s\n", bs)); + writer.flush(); + } + + } catch (IOException ex) { + } + } + } + + private static OutputStreamWriter getOSWriter(OutputStream os) { + if (os == null) + return null; + return new OutputStreamWriter(os); + } + + @Override + public void newConfiguration(String cfgName) { + synchronized (lock) { + try { + writer.write(String + .format("--------------------------------%s--------------------------------\n", + cfgName)); + } catch (IOException e) { + e.printStackTrace(); + } + } + } + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/ProfileLogProcessor.java b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileLogProcessor.java new file mode 100644 index 00000000..fcaca93c --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/ProfileLogProcessor.java @@ -0,0 +1,114 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import 
java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Just an utility that processes profile.txt. + * + * @author sumanan + * @since 4 Feb, 2015 + */ +public class ProfileLogProcessor { + + public static void main(String[] args) throws IOException { + String appName = "FilterBankPipeline"; + // process1(appName); + process2(appName); + } + + private static List process1(String appName) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%sprofile.txt", appName, File.separator))); + FileWriter writer = new FileWriter(String.format( + "%s%sProcessedProfile.txt", appName, File.separator)); + String line; + int i = 0; + List ret = new ArrayList(3000); + while ((line = reader.readLine()) != null) { + if (line.startsWith("--------------------------------")) { + writer.write(line); + writer.write("\n"); + } + if (line.contains("Not firable")) { + writer.write(line); + writer.write("\n"); + } + } + writer.flush(); + reader.close(); + writer.close(); + return ret; + } + + private static List process2(String appName) throws IOException { + Map inputNotFirable = new HashMap<>(); + Map outputNotFirable = new HashMap<>(); + Map notFirable = inputNotFirable; + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%sprofile.txt", appName, File.separator))); + FileWriter writer = new FileWriter(String.format( + "%s%sProcessedProfile.txt", appName, File.separator)); + String line; + int i = 0; + List ret = new ArrayList(3000); + while ((line = reader.readLine()) != null) { + if (line.startsWith("--------------------------------")) { + writer.write(line); + writer.write("\n"); + printStats(inputNotFirable, outputNotFirable); + System.out.println(line); + } else if (line.contains("Input...")) + notFirable = inputNotFirable; 
+ else if (line.contains("Output...")) + notFirable = outputNotFirable; + if (line.contains("Not firable")) { + String t = token(line); + add(notFirable, t); + writer.write(line); + writer.write("\n"); + } + } + printStats(inputNotFirable, outputNotFirable); + writer.flush(); + reader.close(); + writer.close(); + return ret; + } + + private static void add(Map notFirable, String t) { + if (!notFirable.containsKey(t)) + notFirable.put(t, 0); + int val = notFirable.get(t); + notFirable.put(t, ++val); + } + + private static void printStats(Map inputNotFirable, + Map outputNotFirable) { + System.out.println("Input..."); + printStats(inputNotFirable); + System.out.println("Output..."); + printStats(outputNotFirable); + } + + private static void printStats(Map notFirable) { + for (Map.Entry en : notFirable.entrySet()) { + System.out.println(String.format("\t%s-%d", en.getKey(), + en.getValue())); + } + notFirable.clear(); + } + + private static String token(String line) { + int start = line.indexOf('('); + int end = line.indexOf(')'); + return String.format("Token%s", line.substring(start, end + 1)); + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/Profiler.java b/src/edu/mit/streamjit/impl/distributed/profiler/Profiler.java new file mode 100644 index 00000000..78507d06 --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/Profiler.java @@ -0,0 +1,117 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import edu.mit.streamjit.impl.distributed.common.Connection; + +public final class Profiler extends Thread { + + /** + * Sampling interval in ms. 
+ */ + private final int sampleInterval = 2000; + + private final Set profilers; + + private final Connection controllerConnection; + + private final AtomicBoolean stopFlag; + + public Profiler(Set profilers, + Connection controllerConnection) { + super("Profiler"); + this.profilers = new HashSet<>(); + checkNotNull(profilers); + for (StreamNodeProfiler p : profilers) + if (p != null) + profilers.add(p); + this.controllerConnection = checkNotNull(controllerConnection); + stopFlag = new AtomicBoolean(false); + } + + public void run() { + while (true) { + sleep(); + + if (stopFlag.get()) + break; + + for (StreamNodeProfiler p : profilers) { + try { + controllerConnection.writeObject(p.profile()); + } catch (IOException e) { + // e.printStackTrace(); + stopFlag.set(true); + } + } + } + System.err.println("Profiler is exiting"); + } + + /** + * Sleeps for t mills where (sampleInterval - 1000) < t < + * (sampleInterval+1000). Because, sampling based profilers must sample at + * random intervals. Not at fixed periods. + */ + private void sleep() { + int min = (sampleInterval - 1000); + int sleepTime = min + (int) (Math.random() * 2000); + try { + Thread.sleep(sleepTime); + } catch (InterruptedException e) { + } + } + + public void stopProfiling() { + stopFlag.set(true); + this.interrupt(); + } + + public void pauseProfiling() { + } + + public void resumeProfiling() { + } + + public void add(StreamNodeProfiler p) { + checkNotNull(p, "StreamNodeProfiler is null"); + profilers.add(p); + } + + /** + * Removes the specified StreamNodeProfiler p from profiling. + * + * @param p + * StreamNodeProfiler that need to be removed from profiling. + * @return true iff p existed in the profiler set and has been + * removed successfully. + */ + public boolean remove(StreamNodeProfiler p) { + return profilers.remove(p); + } + + /** + * Removes all profilers from profiling. 
+ * + * @param profilers + */ + public void removeAll(Set profilers) { + for (StreamNodeProfiler p : profilers) + remove(p); + } + + /** + * Add all profilers for profiling. + * + * @param profilers + */ + public void addAll(Set profilers) { + for (StreamNodeProfiler p : profilers) + add(p); + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/ProfilerCommand.java b/src/edu/mit/streamjit/impl/distributed/profiler/ProfilerCommand.java new file mode 100644 index 00000000..eb454b0d --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/ProfilerCommand.java @@ -0,0 +1,73 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import edu.mit.streamjit.impl.distributed.common.CTRLRMessageElement; +import edu.mit.streamjit.impl.distributed.common.CTRLRMessageVisitor; +import edu.mit.streamjit.impl.distributed.node.StreamNode; +import edu.mit.streamjit.impl.distributed.runtimer.Controller; + +/** + * ProfilerCommand can be send by a {@link Controller} to {@link StreamNode} to + * carry profiling related actions. + * + * @author sumanan + * @since 27 Jan, 2015 + */ +public enum ProfilerCommand implements CTRLRMessageElement { + /** + * Starts the profiler. + */ + START { + @Override + public void process(ProfilerCommandProcessor commandProcessor) { + commandProcessor.processSTART(); + } + }, + /** + * Stops the profiler. 
+ */ + STOP { + @Override + public void process(ProfilerCommandProcessor commandProcessor) { + commandProcessor.processSTOP(); + } + }, + + /** + * Pause the profiler + */ + PAUSE { + @Override + public void process(ProfilerCommandProcessor commandProcessor) { + commandProcessor.processPAUSE(); + } + + }, + /** + * Resume the profiler + */ + RESUME { + @Override + public void process(ProfilerCommandProcessor commandProcessor) { + commandProcessor.processRESUME(); + } + }; + + @Override + public void accept(CTRLRMessageVisitor visitor) { + visitor.visit(this); + } + + public abstract void process(ProfilerCommandProcessor commandProcessor); + + public interface ProfilerCommandProcessor { + + public void processSTART(); + + public void processSTOP(); + + public void processPAUSE(); + + public void processRESUME(); + + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/SNProfileElement.java b/src/edu/mit/streamjit/impl/distributed/profiler/SNProfileElement.java new file mode 100644 index 00000000..9308674e --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/SNProfileElement.java @@ -0,0 +1,126 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +import java.io.Serializable; + +import com.google.common.collect.ImmutableSet; + +import edu.mit.streamjit.impl.blob.Blob.Token; +import edu.mit.streamjit.impl.distributed.common.SNMessageElement; +import edu.mit.streamjit.impl.distributed.common.SNMessageVisitor; +import edu.mit.streamjit.impl.distributed.node.BlobsManager; + +public abstract class SNProfileElement implements SNMessageElement { + + private static final long serialVersionUID = 1L; + + public abstract void process(SNProfileElementProcessor dp); + + @Override + public void accept(SNMessageVisitor visitor) { + visitor.visit(this); + } + + /** + * Status for all buffers from a {@link BlobsManager}. 
+ */ + public static final class SNBufferStatusData extends SNProfileElement { + + private static final long serialVersionUID = 1L; + + public final int machineID; + + public final ImmutableSet blobsBufferStatusSet; + + public SNBufferStatusData(int machineID, + ImmutableSet blobsBufferStatusSet) { + this.machineID = machineID; + this.blobsBufferStatusSet = blobsBufferStatusSet; + } + + /** + * Status of all buffers of a blob. + */ + public static class BlobBufferStatus implements Serializable { + + private static final long serialVersionUID = 1L; + + /** + * Identifier of the blob. blobID can be get through + * Utils#getBlobID(). + */ + public final Token blobID; + + /** + * BufferStatus of all input channels of the blob. + */ + public final ImmutableSet inputSet; + + /** + * BufferStatus of all output channels of the blob. + */ + public final ImmutableSet outputSet; + + public BlobBufferStatus(Token blobID, + ImmutableSet inputSet, + ImmutableSet outputSet) { + this.blobID = blobID; + this.inputSet = inputSet; + this.outputSet = outputSet; + } + + @Override + public String toString() { + return String.format("BlobBufferStatus:blob=%s", blobID); + } + } + + /** + * Status of a single buffer. + */ + public static class BufferStatus implements Serializable { + + private static final long serialVersionUID = 1L; + + /** + * Token of the buffer. + */ + public final Token ID; + + /** + * Minimum buffer requirement. Blob.getMinimumBufferCapacity() gives + * this information. + */ + public final int min; + + /** + * Available resources in the buffer. If it is a input buffer then + * buffer.size(). If it is a output buffer then buffer.capacity() - + * buffer.size(). + */ + public final int availableResource; + + public BufferStatus(Token ID, int min, int availableResource) { + this.ID = ID; + this.min = min; + this.availableResource = availableResource; + } + + @Override + public String toString() { + String status = availableResource >= min ? 
"Firable" + : "Not firable"; + return String.format("Buffer=%s, min=%d, available=%d, %s", ID, + min, availableResource, status); + } + } + + @Override + public void process(SNProfileElementProcessor dp) { + dp.process(this); + } + } + + public interface SNProfileElementProcessor { + public void process(SNBufferStatusData bufferStatusData); + } +} diff --git a/src/edu/mit/streamjit/impl/distributed/profiler/StreamNodeProfiler.java b/src/edu/mit/streamjit/impl/distributed/profiler/StreamNodeProfiler.java new file mode 100644 index 00000000..f8faf9da --- /dev/null +++ b/src/edu/mit/streamjit/impl/distributed/profiler/StreamNodeProfiler.java @@ -0,0 +1,20 @@ +package edu.mit.streamjit.impl.distributed.profiler; + +/** + * Profiles a specific resources (e.g, buffer status of all blobs) of a + * StreamNode. + * + * @author sumanan + * @since 26 Jan, 2015 + */ +public interface StreamNodeProfiler { + + /** + * A profiler thread will call this method to get the current status of the + * resource that is being profiled. Implementation must be thread safe. + * + * @return Current status of the resource that is being profiled. + */ + public SNProfileElement profile(); + +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/BlockingCommunicationManager.java b/src/edu/mit/streamjit/impl/distributed/runtimer/BlockingCommunicationManager.java index b173cacd..2cd0e486 100644 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/BlockingCommunicationManager.java +++ b/src/edu/mit/streamjit/impl/distributed/runtimer/BlockingCommunicationManager.java @@ -198,12 +198,12 @@ public InetAddress getAddress() { /** * IO thread that runs a {@link StreamNodeAgent}. Since this is blocking IO * context, each {@link StreamNodeAgent} agent will be running on individual - * threaed. + * thread. 
* */ private static class SNAgentRunner extends Thread { - StreamNodeAgent SNAgent; - Connection connection; + final StreamNodeAgent SNAgent; + final Connection connection; private SNAgentRunner(StreamNodeAgent SNAgent, Connection connection) { super(String.format("SNAgentRunner - %d", SNAgent.getNodeID())); @@ -229,7 +229,8 @@ public void run() { public void close() { try { SNAgent.stopRequest(); - connection.writeObject(Request.EXIT); + if (connection.isStillConnected()) + connection.writeObject(Request.EXIT); connection.closeConnection(); } catch (IOException e) { // TODO Auto-generated catch block diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/CommunicationManager.java b/src/edu/mit/streamjit/impl/distributed/runtimer/CommunicationManager.java index 8bae2a51..ddc47835 100644 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/CommunicationManager.java +++ b/src/edu/mit/streamjit/impl/distributed/runtimer/CommunicationManager.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; + import edu.mit.streamjit.impl.distributed.node.StreamNode; /** diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/Controller.java b/src/edu/mit/streamjit/impl/distributed/runtimer/Controller.java index 32e3c46f..29d56de4 100644 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/Controller.java +++ b/src/edu/mit/streamjit/impl/distributed/runtimer/Controller.java @@ -23,34 +23,22 @@ import java.io.IOException; import java.net.InetAddress; -import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; -import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; -import edu.mit.streamjit.api.Worker; -import edu.mit.streamjit.impl.blob.Blob.Token; import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; -import edu.mit.streamjit.impl.common.Workers; -import 
edu.mit.streamjit.impl.concurrent.ConcurrentChannelFactory; import edu.mit.streamjit.impl.distributed.StreamJitAppManager; import edu.mit.streamjit.impl.distributed.common.CTRLRMessageElement; -import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationStringProcessor.ConfigType; -import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; import edu.mit.streamjit.impl.distributed.common.ConfigurationString; +import edu.mit.streamjit.impl.distributed.common.ConfigurationString.ConfigurationProcessor.ConfigType; +import edu.mit.streamjit.impl.distributed.common.Connection.ConnectionProvider; +import edu.mit.streamjit.impl.distributed.common.GlobalConstants; +import edu.mit.streamjit.impl.distributed.common.NetworkInfo; import edu.mit.streamjit.impl.distributed.common.NodeInfo; import edu.mit.streamjit.impl.distributed.common.Request; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionInfo; -import edu.mit.streamjit.impl.distributed.common.TCPConnection.TCPConnectionProvider; import edu.mit.streamjit.impl.distributed.node.StreamNode; import edu.mit.streamjit.impl.distributed.runtimer.CommunicationManager.CommunicationType; -import edu.mit.streamjit.impl.distributed.runtimer.StreamNodeAgent; -import edu.mit.streamjit.impl.interp.ChannelFactory; /** * {@link Controller} controls all {@link StreamNode}s in runtime. It has @@ -64,9 +52,7 @@ */ public class Controller { - private TCPConnectionProvider conProvider; - - private int startPortNo = 24896; // Just a random magic number. 
+ private ConnectionProvider conProvider; private CommunicationManager comManager; @@ -83,12 +69,9 @@ public class Controller { */ public final int controllerNodeID; - private Set currentConInfos; - public Controller() { this.comManager = new BlockingCommunicationManager(); this.controllerNodeID = 0; - this.currentConInfos = new HashSet<>(); } /** @@ -147,116 +130,20 @@ public Map getCoreCount() { public void newApp(Configuration staticCfg) { Configuration.Builder builder = Configuration.builder(staticCfg); - Map inetMap = new HashMap<>(); for (StreamNodeAgent agent : StreamNodeMap.values()) inetMap.put(agent.getNodeID(), agent.getAddress()); inetMap.put(controllerNodeID, comManager.getLocalAddress()); - - // TODO: Ensure the need of this switch parameter. - List universe = Arrays - . asList(new ConcurrentChannelFactory()); - SwitchParameter cfParameter = new SwitchParameter( - "channelFactory", ChannelFactory.class, universe.get(0), - universe); - - builder.addParameter(cfParameter).putExtraData( - GlobalConstants.INETADDRESS_MAP, inetMap); - - this.conProvider = new TCPConnectionProvider(controllerNodeID, inetMap); - + builder.putExtraData(GlobalConstants.INETADDRESS_MAP, inetMap); + NetworkInfo networkinfo = new NetworkInfo(inetMap); + this.conProvider = new ConnectionProvider(controllerNodeID, + networkinfo); ConfigurationString json = new ConfigurationString(builder.build() .toJson(), ConfigType.STATIC, null); sendToAll(json); } - public Map buildConInfoMap( - Map>>> partitionsMachineMap, - Worker source, Worker sink) { - - assert partitionsMachineMap != null : "partitionsMachineMap is null"; - - Set usedConInfos = new HashSet<>(); - Map conInfoMap = new HashMap<>(); - - for (Integer machineID : partitionsMachineMap.keySet()) { - List>> blobList = partitionsMachineMap - .get(machineID); - Set> allWorkers = new HashSet<>(); // Contains all - // workers those are - // assigned to the - // current machineID - // machine. 
- for (Set> blobWorkers : blobList) { - allWorkers.addAll(blobWorkers); - } - - for (Worker w : allWorkers) { - for (Worker succ : Workers.getSuccessors(w)) { - if (allWorkers.contains(succ)) - continue; - int dstMachineID = getAssignedMachine(succ, - partitionsMachineMap); - Token t = new Token(w, succ); - addtoconInfoMap(machineID, dstMachineID, t, usedConInfos, - conInfoMap); - } - } - } - - Token headToken = Token.createOverallInputToken(source); - int dstMachineID = getAssignedMachine(source, partitionsMachineMap); - addtoconInfoMap(controllerNodeID, dstMachineID, headToken, - usedConInfos, conInfoMap); - - Token tailToken = Token.createOverallOutputToken(sink); - int srcMahineID = getAssignedMachine(sink, partitionsMachineMap); - addtoconInfoMap(srcMahineID, controllerNodeID, tailToken, usedConInfos, - conInfoMap); - - return conInfoMap; - } - - /** - * Just extracted from {@link #buildConInfoMap(Map, Worker, Worker)} because - * the code snippet in this method happened to repeat three times inside the - * {@link #buildConInfoMap(Map, Worker, Worker)} method. 
- */ - private void addtoconInfoMap(int srcID, int dstID, Token t, - Set usedConInfos, - Map conInfoMap) { - - ConnectionInfo conInfo = new ConnectionInfo(srcID, dstID); - - List conSet = getTcpConInfo(conInfo); - TCPConnectionInfo tcpConInfo = null; - - for (TCPConnectionInfo con : conSet) { - if (!usedConInfos.contains(con)) { - tcpConInfo = con; - break; - } - } - - if (tcpConInfo == null) { - tcpConInfo = new TCPConnectionInfo(srcID, dstID, startPortNo++); - this.currentConInfos.add(tcpConInfo); - } - - conInfoMap.put(t, tcpConInfo); - usedConInfos.add(tcpConInfo); - } - - private List getTcpConInfo(ConnectionInfo conInfo) { - List conList = new ArrayList<>(); - for (TCPConnectionInfo tcpconInfo : currentConInfos) { - if (conInfo.equals(tcpconInfo)) - conList.add(tcpconInfo); - } - return conList; - } - public Set getAllNodeIDs() { return StreamNodeMap.keySet(); } @@ -272,24 +159,6 @@ public void send(int nodeID, CTRLRMessageElement message) { } } - /** - * @param worker - * @return the machineID where on which the passed worker is assigned. 
- */ - private int getAssignedMachine(Worker worker, - Map>>> partitionsMachineMap) { - for (Integer machineID : partitionsMachineMap.keySet()) { - for (Set> workers : partitionsMachineMap - .get(machineID)) { - if (workers.contains(worker)) - return machineID; - } - } - - throw new IllegalArgumentException(String.format( - "%s is not assigned to anyof the machines", worker)); - } - public void sendToAll(Object object) { for (StreamNodeAgent node : StreamNodeMap.values()) { try { @@ -300,7 +169,7 @@ public void sendToAll(Object object) { } } - public TCPConnectionProvider getConProvider() { + public ConnectionProvider getConProvider() { return conProvider; } @@ -317,14 +186,4 @@ public void registerManager(StreamJitAppManager manager) { node.registerManager(manager); } } - - public TCPConnectionInfo getNewTCPConInfo(TCPConnectionInfo conInfo) { - if (currentConInfos.contains(conInfo)) - currentConInfos.remove(conInfo); - TCPConnectionInfo newConinfo = new TCPConnectionInfo( - conInfo.getSrcID(), conInfo.getDstID(), startPortNo++); - currentConInfos.add(newConinfo); - - return newConinfo; - } } diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/DistributedDrainer.java b/src/edu/mit/streamjit/impl/distributed/runtimer/DistributedDrainer.java index 22bea8f2..f2c8a4de 100644 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/DistributedDrainer.java +++ b/src/edu/mit/streamjit/impl/distributed/runtimer/DistributedDrainer.java @@ -22,8 +22,11 @@ package edu.mit.streamjit.impl.distributed.runtimer; import edu.mit.streamjit.impl.blob.Blob.Token; -import edu.mit.streamjit.impl.common.AbstractDrainer; +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.distributed.StreamJitApp; import edu.mit.streamjit.impl.distributed.StreamJitAppManager; +import edu.mit.streamjit.impl.distributed.common.CTRLRDrainElement.DrainType; /** * @author Sumanan sumanan@mit.edu @@ -33,8 +36,11 
@@ public class DistributedDrainer extends AbstractDrainer { StreamJitAppManager manager; - public DistributedDrainer(StreamJitAppManager manager) { + public DistributedDrainer(StreamJitApp app, TimeLogger logger, + StreamJitAppManager manager) { + super(app, logger); this.manager = manager; + // TODO: // Read this. Don't let the "this" reference escape during construction // http://www.ibm.com/developerworks/java/library/j-jtp0618/ manager.setDrainer(this); @@ -46,8 +52,8 @@ protected void drainingDone(boolean isFinal) { } @Override - protected void drain(Token blobID, boolean isFinal) { - manager.drain(blobID, isFinal); + protected void drain(Token blobID, DrainType drainType) { + manager.drain(blobID, drainType); } @Override diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/OnlineTuner.java b/src/edu/mit/streamjit/impl/distributed/runtimer/OnlineTuner.java deleted file mode 100644 index 0c86b0ad..00000000 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/OnlineTuner.java +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Copyright (c) 2013-2014 Massachusetts Institute of Technology - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ -package edu.mit.streamjit.impl.distributed.runtimer; - -import static com.google.common.base.Preconditions.checkNotNull; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.util.concurrent.TimeUnit; - -import com.google.common.base.Splitter; -import com.google.common.base.Stopwatch; - -import edu.mit.streamjit.impl.blob.DrainData; -import edu.mit.streamjit.impl.common.AbstractDrainer; -import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.common.Configuration.IntParameter; -import edu.mit.streamjit.impl.common.Configuration.Parameter; -import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; -import edu.mit.streamjit.impl.distributed.ConfigurationManager; -import edu.mit.streamjit.impl.distributed.StreamJitApp; -import edu.mit.streamjit.impl.distributed.StreamJitAppManager; -import edu.mit.streamjit.impl.distributed.common.AppStatus; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; -import edu.mit.streamjit.tuner.OpenTuner; -import edu.mit.streamjit.tuner.TCPTuner; -import edu.mit.streamjit.util.json.Jsonifiers; - -/** - * Online tuner does continues learning. 
- * - * @author Sumanan sumanan@mit.edu - * @since Oct 8, 2013 - */ -public class OnlineTuner implements Runnable { - private final AbstractDrainer drainer; - private final StreamJitAppManager manager; - private final OpenTuner tuner; - private final StreamJitApp app; - private final ConfigurationManager cfgManager; - private final boolean needTermination; - - public OnlineTuner(AbstractDrainer drainer, StreamJitAppManager manager, - StreamJitApp app, ConfigurationManager cfgManager, - boolean needTermination) { - this.drainer = drainer; - this.manager = manager; - this.app = app; - this.cfgManager = cfgManager; - this.tuner = new TCPTuner(); - this.needTermination = needTermination; - } - - @Override - public void run() { - int tryCount = 0; - try { - tuner.startTuner(String.format( - "lib%sopentuner%sstreamjit%sstreamjit2.py", File.separator, - File.separator, File.separator)); - - tuner.writeLine("program"); - tuner.writeLine(app.name); - - tuner.writeLine("confg"); - String s = getConfigurationString(app.blobConfiguration); - tuner.writeLine(s); - - System.out.println("New tune run............."); - while (manager.getStatus() != AppStatus.STOPPED) { - String pythonDict = tuner.readLine(); - if (pythonDict == null) - break; - - // At the end of the tuning, Opentuner will send "Completed" - // msg. This means no more tuning. - if (pythonDict.equals("Completed")) { - handleTermination(); - break; - } - - System.out - .println("----------------------------------------------"); - System.out.println(tryCount++); - Configuration config = rebuildConfiguration(pythonDict, - app.blobConfiguration); - - if (GlobalConstants.saveAllConfigurations) - saveConfg(config, tryCount); - - try { - if (!cfgManager.newConfiguration(config)) { - tuner.writeLine("-1"); - continue; - } - - if (manager.isRunning()) { - boolean state = drainer.startDraining(0); - if (!state) { - System.err - .println("Final drain has already been called. 
no more tuning."); - tuner.writeLine("exit"); - break; - } - - System.err.println("awaitDrainedIntrmdiate"); - drainer.awaitDrainedIntrmdiate(); - - if (GlobalConstants.useDrainData) { - System.err.println("awaitDrainData..."); - drainer.awaitDrainData(); - DrainData drainData = drainer.getDrainData(); - app.drainData = drainData; - } - } - - drainer.setBlobGraph(app.blobGraph); - System.err.println("Reconfiguring..."); - if (manager.reconfigure()) { - Stopwatch stopwatch = Stopwatch.createStarted(); - manager.awaitForFixInput(); - stopwatch.stop(); - // TODO: need to check the manager's status before - // passing - // the time. Exceptions, final drain, etc may causes app - // to - // stop executing. - long time = stopwatch.elapsed(TimeUnit.MILLISECONDS); - - System.out.println("Execution time is " + time - + " milli seconds"); - tuner.writeLine(new Double(time).toString()); - } else { - tuner.writeLine("-1"); - continue; - } - } catch (Exception ex) { - System.err - .println("Couldn't compile the stream graph with this configuration"); - tuner.writeLine("-1"); - } - } - - } catch (IOException e) { - e.printStackTrace(); - } - - try { - drainer.dumpDraindataStatistics(); - } catch (IOException e) { - e.printStackTrace(); - } - } - - /** - * Just excerpted from run() method for better readability. - * - * @throws IOException - */ - private void handleTermination() throws IOException { - String finalConfg = tuner.readLine(); - System.out.println("Tuning finished"); - saveConfg(rebuildConfiguration(finalConfg, app.blobConfiguration), 0); - if (needTermination) { - if (manager.isRunning()) { - drainer.startDraining(1); - } else { - manager.stop(); - } - } else { - runForever(finalConfg); - } - } - - /** - * TODO: Just copied from the run method. Code duplication between this - * method and the run() method. Try to avoid duplicate code. 
- * - * @param pythonDict - */ - private void runForever(String pythonDict) { - System.out.println("runForever"); - Configuration config = rebuildConfiguration(pythonDict, - app.blobConfiguration); - try { - if (!cfgManager.newConfiguration(config)) { - System.err.println("Invalid final configuration."); - return; - } - - if (manager.isRunning()) { - boolean state = drainer.startDraining(0); - if (!state) { - System.err - .println("Final drain has already been called. no more tuning."); - return; - } - - System.err.println("awaitDrainedIntrmdiate"); - drainer.awaitDrainedIntrmdiate(); - - if (GlobalConstants.useDrainData) { - System.err.println("awaitDrainData..."); - drainer.awaitDrainData(); - DrainData drainData = drainer.getDrainData(); - app.drainData = drainData; - } - - drainer.setBlobGraph(app.blobGraph); - } - - System.err.println("Reconfiguring..."); - boolean var = manager.reconfigure(); - if (var) { - System.out - .println("Application is running with the final configuration."); - } else { - System.err.println("Invalid final configuration."); - } - } catch (Exception ex) { - System.err - .println("Couldn't compile the stream graph with this configuration"); - } - } - - /** - * Creates a new {@link Configuration} from the received python dictionary - * string. This is not a good way to do. - *

            - * TODO: Need to add a method to {@link Configuration} so that the - * configuration object can be updated from the python dict string. Now we - * are destructing the old confg object and recreating a new one every time. - * Not a appreciatable way. - * - * @param pythonDict - * Python dictionary string. Autotuner gives a dictionary of - * features with trial values. - * @param config - * Old configuration object. - * @return New configuration object with updated values from the pythonDict. - */ - private Configuration rebuildConfiguration(String pythonDict, - Configuration config) { - // System.out.println(pythonDict); - checkNotNull(pythonDict, "Received Python dictionary is null"); - pythonDict = pythonDict.replaceAll("u'", ""); - pythonDict = pythonDict.replaceAll("':", ""); - pythonDict = pythonDict.replaceAll("\\{", ""); - pythonDict = pythonDict.replaceAll("\\}", ""); - Splitter dictSplitter = Splitter.on(", ").omitEmptyStrings() - .trimResults(); - Configuration.Builder builder = Configuration.builder(); - System.out.println("New parameter values from Opentuner..."); - for (String s : dictSplitter.split(pythonDict)) { - String[] str = s.split(" "); - if (str.length != 2) - throw new AssertionError("Wrong python dictionary..."); - Parameter p = config.getParameter(str[0]); - if (p == null) - continue; - // System.out.println(String.format("\t%s = %s", str[0], str[1])); - if (p instanceof IntParameter) { - IntParameter ip = (IntParameter) p; - builder.addParameter(new IntParameter(ip.getName(), - ip.getMin(), ip.getMax(), Integer.parseInt(str[1]))); - - } else if (p instanceof SwitchParameter) { - SwitchParameter sp = (SwitchParameter) p; - Class type = sp.getGenericParameter(); - int val = Integer.parseInt(str[1]); - SwitchParameter sp1 = new SwitchParameter(sp.getName(), - type, sp.getUniverse().get(val), sp.getUniverse()); - builder.addParameter(sp1); - } - - } - return builder.build(); - } - - /** - * TODO: This method is totally unnecessary 
if we remove the usage of the - * name "class" in side {@link Configuration}. - * - * @param cfg - * @return - */ - private String getConfigurationString(Configuration cfg) { - String s = Jsonifiers.toJson(cfg).toString(); - String s1 = s.replaceAll("__class__", "ttttt"); - String s2 = s1.replaceAll("class", "javaClassPath"); - String s3 = s2.replaceAll("ttttt", "__class__"); - return s3; - } - - /** - * Save the configuration. - */ - private void saveConfg(Configuration config, int round) { - String json = config.toJson(); - try { - - File dir = new File(String.format("configurations%s%s", - File.separator, app.name)); - if (!dir.exists()) - if (!dir.mkdirs()) { - System.err.println("Make directory failed"); - return; - } - - File file = new File(dir, - String.format("%d%s.cfg", round, app.name)); - FileWriter writer = new FileWriter(file, false); - writer.write(json); - writer.flush(); - writer.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } -} \ No newline at end of file diff --git a/src/edu/mit/streamjit/impl/distributed/runtimer/StreamNodeAgent.java b/src/edu/mit/streamjit/impl/distributed/runtimer/StreamNodeAgent.java index 942d39b6..9073005a 100644 --- a/src/edu/mit/streamjit/impl/distributed/runtimer/StreamNodeAgent.java +++ b/src/edu/mit/streamjit/impl/distributed/runtimer/StreamNodeAgent.java @@ -38,9 +38,13 @@ import edu.mit.streamjit.impl.distributed.common.SNException; import edu.mit.streamjit.impl.distributed.common.SNException.SNExceptionProcessor; import edu.mit.streamjit.impl.distributed.common.SNMessageVisitor; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo; +import edu.mit.streamjit.impl.distributed.common.SNTimeInfo.SNTimeInfoProcessor; import edu.mit.streamjit.impl.distributed.common.SystemInfo; import edu.mit.streamjit.impl.distributed.common.SystemInfo.SystemInfoProcessor; import edu.mit.streamjit.impl.distributed.node.StreamNode; +import edu.mit.streamjit.impl.distributed.profiler.SNProfileElement; +import 
edu.mit.streamjit.impl.distributed.profiler.SNProfileElement.SNProfileElementProcessor; /** * StreamNodeAgent represents a {@link StreamNode} at {@link Controller} side. @@ -69,9 +73,9 @@ public abstract class StreamNodeAgent { private final SystemInfoProcessor sp; - // TODO: How to avoid volatile here. Because we set only once and read - // forever later. So if it is volatile, every read will need to access - // memory. Is there any way to avoid this? + // TODO: How to avoid the volatileness here. Because we set only once and + // read forever later. So if it is a volatile, every read will need to + // access the memory. Is there any way to avoid this? // Will removing volatile modifier be OK in this context? consider // using piggybacking sync or atomicreferenc with compareandset. This is // actually effectively immutable/safe publication case. But how to @@ -146,9 +150,11 @@ public NodeInfo getNodeInfo() { } catch (IOException e) { e.printStackTrace(); } - // TODO: If in any chance the IO thread call this function then - // it will get blocked on this loop forever. Need to handle - // this. + // TODO: By any chance, if the IO thread (SNAgentRunner) calls this + // function then that thread will get blocked at this loop forever. + // Because that thread is responsible to read the nodeInfo from the + // StreamNode and set the nodeInfo variable of this class. Need to + // handle this issue. 
while (nodeInfo == null) { try { Thread.sleep(10); @@ -306,5 +312,19 @@ public void visit(SNException snException) { SNExceptionProcessor snExP = manager.exceptionProcessor(); snException.process(snExP); } + + @Override + public void visit(SNTimeInfo timeInfo) { + assert manager != null : "StreamJitAppManager has not been set"; + SNTimeInfoProcessor snTimeP = manager.timeInfoProcessor(); + timeInfo.process(snTimeP); + } + + @Override + public void visit(SNProfileElement snProfileElement) { + assert manager != null : "StreamJitAppManager has not been set"; + SNProfileElementProcessor snProfileP = manager.getProfiler(); + snProfileElement.process(snProfileP); + } } } \ No newline at end of file diff --git a/src/edu/mit/streamjit/test/Datasets.java b/src/edu/mit/streamjit/test/Datasets.java index 48e49af0..5342e5d1 100644 --- a/src/edu/mit/streamjit/test/Datasets.java +++ b/src/edu/mit/streamjit/test/Datasets.java @@ -92,6 +92,44 @@ public Object read() { increment(1); return ret; } + + @Override + public int read(Object[] data, int offset, int length) { + int read = 0; + int min = Math.min(size(), length); + Object obj; + while (read < min && (obj = peek(read)) != null) { + data[offset++] = obj; + ++read; + } + increment(read); + return read; + } + + @Override + public boolean readAll(Object[] data) { + return readAll(data, 0); + } + + @Override + public boolean readAll(Object[] data, int offset) { + int required = data.length - offset; + if (required > size()) + return false; + int read; + for (read = 0; read < required; ++offset, ++read) { + Object e = peek(read); + // We checked size() above, so we should never fail here, except + // in + // case of concurrent modification by another reader. 
+ assert e != null; + data[offset] = e; + } + increment(read); + assert (data.length == offset) : "data.length != offset - Check the arithmetic"; + return true; + } + @Override public int size() { if (cycles < 0) return 0; diff --git a/src/edu/mit/streamjit/test/DistAppRunner.java b/src/edu/mit/streamjit/test/DistAppRunner.java new file mode 100644 index 00000000..d8acd181 --- /dev/null +++ b/src/edu/mit/streamjit/test/DistAppRunner.java @@ -0,0 +1,86 @@ +package edu.mit.streamjit.test; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; + +import edu.mit.streamjit.api.CompiledStream; +import edu.mit.streamjit.api.Input; +import edu.mit.streamjit.api.OneToOneElement; +import edu.mit.streamjit.api.Output; +import edu.mit.streamjit.api.StreamCompiler; +import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; +import edu.mit.streamjit.test.Benchmark.Dataset; +import edu.mit.streamjit.test.apps.fmradio.FMRadio.FMRadioBenchmarkProvider; + +/** + * @author sumanan + * @since 25 Feb, 2015 + */ +public final class DistAppRunner { + + public static void main(String[] args) throws InterruptedException, + IOException { + int noOfNodes; + Stopwatch sw = Stopwatch.createStarted(); + try { + noOfNodes = Integer.parseInt(args[0]); + } catch (Exception ex) { + noOfNodes = 3; + } + + // startSNs(noOfNodes); + StreamCompiler compiler = new DistributedStreamCompiler(noOfNodes); + + Benchmark benchmark = new FMRadioBenchmarkProvider().iterator().next(); + Dataset dataset = benchmark.inputs().get(0); + Input input = Datasets.cycle(dataset.input()); + + OneToOneElement streamGraph = benchmark.instantiate(); + CompiledStream stream = compiler.compile(streamGraph, input, + Output.blackHole()); + stream.awaitDrained(); + + sw.stop(); + long elapsedMills = sw.elapsed(TimeUnit.MILLISECONDS); + String appName = streamGraph.getClass().getSimpleName(); + 
updateReadMeTxt(appName, benchmark.toString(), elapsedMills); + } + + private static void startSNs(int noOfNodes) throws IOException { + for (int i = 1; i < noOfNodes; i++) + new ProcessBuilder("xterm", "-e", "java", "-jar", "StreamNode.jar") + .start(); + // new ProcessBuilder("java", "-jar", "StreamNode.jar").start(); + } + + /** + * [25 Feb, 2015] TODO: This is a temporary fix to update the benchmark name + * ( that is more descriptive than plain appName) to the README.txt. + * Consider passing the benchmarkName to the + * {@link DistributedStreamCompiler} and let it to update the README.txt. + */ + private static void updateReadMeTxt(String appName, String benchmarkName, + long elapsedMills) throws IOException { + FileWriter writer = new FileWriter(String.format("%s%sREADME.txt", + appName, File.separator), true); + writer.write(String.format("benchmarkName=%s\n", benchmarkName)); + writer.write(String.format("TotalRunningTime=%s\n", + elapsedTime(elapsedMills))); + writer.close(); + } + + private static String elapsedTime(long mills) { + String hms = String.format( + "%02dH:%02dM:%02dS", + TimeUnit.MILLISECONDS.toHours(mills), + TimeUnit.MILLISECONDS.toMinutes(mills) + % TimeUnit.HOURS.toMinutes(1), + TimeUnit.MILLISECONDS.toSeconds(mills) + % TimeUnit.MINUTES.toSeconds(1)); + return hms; + } +} diff --git a/src/edu/mit/streamjit/test/apps/beamformer1/BeamFormer1.java b/src/edu/mit/streamjit/test/apps/beamformer1/BeamFormer1.java index d24142b5..3deec410 100644 --- a/src/edu/mit/streamjit/test/apps/beamformer1/BeamFormer1.java +++ b/src/edu/mit/streamjit/test/apps/beamformer1/BeamFormer1.java @@ -24,9 +24,12 @@ import com.google.common.base.Supplier; import com.google.common.base.Suppliers; import com.jeffreybosboom.serviceproviderprocessor.ServiceProvider; + +import edu.mit.streamjit.api.CompiledStream; import edu.mit.streamjit.api.DuplicateSplitter; import edu.mit.streamjit.api.Filter; import edu.mit.streamjit.api.Input; +import edu.mit.streamjit.api.Output; 
import edu.mit.streamjit.api.Pipeline; import edu.mit.streamjit.api.RoundrobinJoiner; import edu.mit.streamjit.api.RoundrobinSplitter; @@ -34,12 +37,16 @@ import edu.mit.streamjit.api.StatefulFilter; import edu.mit.streamjit.api.StreamCompiler; import edu.mit.streamjit.impl.compiler2.Compiler2StreamCompiler; +import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; +import edu.mit.streamjit.impl.distributed.common.GlobalConstants; import edu.mit.streamjit.impl.interp.DebugStreamCompiler; import edu.mit.streamjit.test.Benchmark; import edu.mit.streamjit.test.Benchmark.Dataset; import edu.mit.streamjit.test.Benchmarker; +import edu.mit.streamjit.test.Datasets; import edu.mit.streamjit.test.SuppliedBenchmark; import java.nio.ByteOrder; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collections; diff --git a/src/edu/mit/streamjit/test/apps/channelvocoder7/ChannelVocoder7.java b/src/edu/mit/streamjit/test/apps/channelvocoder7/ChannelVocoder7.java index 5e7aa61d..eb1926ef 100644 --- a/src/edu/mit/streamjit/test/apps/channelvocoder7/ChannelVocoder7.java +++ b/src/edu/mit/streamjit/test/apps/channelvocoder7/ChannelVocoder7.java @@ -110,6 +110,10 @@ public ChannelVocoder7Kernel(int numFilters, int numTaps) { add(new LowPassFilter(1, (float) ((2 * Math.PI * 5000) / 8000), 64)); add(new MainSplitjoin(numFilters, numTaps)); } + + public ChannelVocoder7Kernel() { + this(16, 64); + } } /** diff --git a/src/edu/mit/streamjit/test/apps/filterbank6/FilterBank6.java b/src/edu/mit/streamjit/test/apps/filterbank6/FilterBank6.java index 2f7a7e03..647da1e5 100644 --- a/src/edu/mit/streamjit/test/apps/filterbank6/FilterBank6.java +++ b/src/edu/mit/streamjit/test/apps/filterbank6/FilterBank6.java @@ -75,18 +75,22 @@ private static Dataset dataset() { @Override @SuppressWarnings("unchecked") public OneToOneElement instantiate() { - return (OneToOneElement)new FilterBankPipeline(8); + return (OneToOneElement)new FilterBankPipeline(32); } } /** * 
Top-level filterbank structure. **/ - private static final class FilterBankPipeline extends Pipeline { + public static final class FilterBankPipeline extends Pipeline { private FilterBankPipeline(int M) { add(new FilterBankSplitJoin(M)); add(new Adder(M)); } + + public FilterBankPipeline() { + this(32); + } } /** diff --git a/src/edu/mit/streamjit/test/apps/fmradio/FMRadio.java b/src/edu/mit/streamjit/test/apps/fmradio/FMRadio.java index 837ab6a6..f797da95 100644 --- a/src/edu/mit/streamjit/test/apps/fmradio/FMRadio.java +++ b/src/edu/mit/streamjit/test/apps/fmradio/FMRadio.java @@ -248,7 +248,7 @@ public static final class FMRadioCore extends Pipeline { private static final float high = 1760; public FMRadioCore() { - this(11, 64); + this(7, 128); } public FMRadioCore(int bands, int taps) { diff --git a/src/edu/mit/streamjit/tuner/ConfigGenerator.java b/src/edu/mit/streamjit/tuner/ConfigGenerator.java index 435acba3..23ecf04f 100644 --- a/src/edu/mit/streamjit/tuner/ConfigGenerator.java +++ b/src/edu/mit/streamjit/tuner/ConfigGenerator.java @@ -25,12 +25,6 @@ import java.io.File; import java.io.IOException; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; import com.google.common.collect.ImmutableSet; @@ -45,7 +39,6 @@ import edu.mit.streamjit.test.Benchmark; import edu.mit.streamjit.test.BenchmarkProvider; import edu.mit.streamjit.test.apps.fmradio.FMRadio; -import edu.mit.streamjit.util.json.Jsonifiers; /** * ConfigGenerator generates {@link Configuration} of an application and stores @@ -53,31 +46,16 @@ * In this way, Opentuner can start and stop the StreamJit app for each tuning * try so that Opentuner can tune JVM parameters such as heapsize, * inlinethreshold, GCpausetime, etc as well. 
- * + * * @author Sumanan sumanan@mit.edu * @since Sep 10, 2013 */ public class ConfigGenerator { - /** - * TODO: Need to remove the string "class" from the {@link Configuration} - * jsonifiers. Once it is done, this method can be removed. - * - * @param cfg - * @return - */ - private String getConfigurationString(Configuration cfg) { - String s = Jsonifiers.toJson(cfg).toString(); - String s1 = s.replaceAll("__class__", "ttttt"); - String s2 = s1.replaceAll("class", "javaClassPath"); - String s3 = s2.replaceAll("ttttt", "__class__"); - return s3; - } - /** * Generates configuration for the passed provider. - * + * * @param provider * Only first benchmark is used to generate configuration. i.e., * only first benchmark will be tuned. @@ -86,16 +64,8 @@ private String getConfigurationString(Configuration cfg) { public void generate(BenchmarkProvider provider, BlobFactory factory) { checkNotNull(provider); - sqliteAdapter sqlite; - try { - sqlite = new sqliteAdapter(); - } catch (ClassNotFoundException e) { - System.err - .println("Sqlite3 database not found...couldn't update the database with the configutaion."); - e.printStackTrace(); - return; - } - + SqliteAdapter sqlite; + sqlite = new SqliteAdapter(); String dbPath = "streamjit.db"; sqlite.connectDB(dbPath); sqlite.createTable( @@ -123,7 +93,7 @@ public void generate(BenchmarkProvider provider, BlobFactory factory) { Configuration cfg = factory.getDefaultConfiguration(workers); String name = app.toString(); - String confString = getConfigurationString(cfg); + String confString = cfg.toJson(); try { sqlite.executeUpdate(String.format( @@ -143,78 +113,6 @@ public void generate(BenchmarkProvider provider, BlobFactory factory) { // new ProcessBuilder("xterm", "-e", "python", tunerPath).start(); } - public static class sqliteAdapter { - - private Statement statement; - private Connection con = null; - - public sqliteAdapter() throws ClassNotFoundException { - Class.forName("org.sqlite.JDBC"); - } - - public void 
connectDB(String path) { - try { - con = DriverManager.getConnection(String.format( - "jdbc:sqlite:%s", path)); - statement = con.createStatement(); - statement.setQueryTimeout(30); // set timeout to 30 sec. - - } catch (SQLException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - - /** - * Creates table iff it is not exists - * - * @param table - * Name of the table - * @param signature - * Column format of the table - * @throws SQLException - */ - public void createTable(String table, String signature) { - checkNotNull(con); - DatabaseMetaData dbm; - try { - dbm = con.getMetaData(); - - ResultSet tables = dbm.getTables(null, null, table, null); - if (!tables.next()) { - // "create table %s ()" - statement.executeUpdate(String.format( - "create table %s (%s)", table, signature)); - } - } catch (SQLException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - - public ResultSet executeQuery(String sql) { - try { - ResultSet rs = statement.executeQuery(sql); - // con.commit(); - return rs; - } catch (SQLException e) { - e.printStackTrace(); - } - return null; - } - - public int executeUpdate(String sql) { - try { - int ret = statement.executeUpdate(sql); - // con.commit(); - return ret; - } catch (SQLException e) { - e.printStackTrace(); - } - return -1; - } - } - /** * @param args * [0] - NoofMachines that will be connected in distributed case. diff --git a/src/edu/mit/streamjit/tuner/ConfigurationPrognosticator.java b/src/edu/mit/streamjit/tuner/ConfigurationPrognosticator.java new file mode 100644 index 00000000..123cdf76 --- /dev/null +++ b/src/edu/mit/streamjit/tuner/ConfigurationPrognosticator.java @@ -0,0 +1,54 @@ +package edu.mit.streamjit.tuner; + +import edu.mit.streamjit.impl.common.Configuration; + +/** + * Prognosticates the {@link Configuration}s given by the OpenTuner and tell + * whether a {@link Configuration} is more likely to give a better search + * objective improvement or not. 
Depends on the prognosticated information, + * {@link OnlineTuner} may reconfigure the application or reject the + * configuration. Currently, the search objective is performance optimization. + * In future, some other resource optimization objectives may be added (e.g., + * Energy minimization). + * + * @author sumanan + * @since 6 Jan, 2015 + */ +public interface ConfigurationPrognosticator { + + /** + * Prognosticate a {@link Configuration} and tell whether a + * {@link Configuration} is more likely to give a better search objective + * improvement or not. + * + * @param config + * @return {@code true} iff the config is more likely to give a better + * search objective improvement. + */ + public boolean prognosticate(Configuration config); + + /** + * An auxiliary method that can be used to update a configuration's running + * time. Has been added for data analysis purpose. + * + * @param time + */ + public void time(double time); + + /** + * No Prognostication. The method {@link #prognosticate(Configuration)} + * always returns {@code true} + */ + public static final class NoPrognostication implements + ConfigurationPrognosticator { + + @Override + public boolean prognosticate(Configuration config) { + return true; + } + + @Override + public void time(double time) { + } + } +} diff --git a/src/edu/mit/streamjit/tuner/GraphPropertyPrognosticator.java b/src/edu/mit/streamjit/tuner/GraphPropertyPrognosticator.java new file mode 100644 index 00000000..34e4c9b6 --- /dev/null +++ b/src/edu/mit/streamjit/tuner/GraphPropertyPrognosticator.java @@ -0,0 +1,266 @@ +package edu.mit.streamjit.tuner; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import edu.mit.streamjit.api.Worker; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Workers; +import edu.mit.streamjit.impl.distributed.StreamJitApp; 
+import edu.mit.streamjit.impl.distributed.common.BoundaryChannel; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.util.ConfigurationUtils; + +/** + * Assumes the cluster environment is homogeneous. + * + * @author sumanan + * @since 7 Jan, 2015 + */ +public class GraphPropertyPrognosticator implements ConfigurationPrognosticator { + + private final StreamJitApp app; + + private final FileWriter writer; + + private final Set> paths; + + public GraphPropertyPrognosticator(StreamJitApp app) { + this.app = app; + this.writer = Utils.fileWriter(app.name, "GraphProperty.txt"); + writeHeader(writer); + paths = app.paths(); + } + + @Override + public boolean prognosticate(Configuration config) { + String cfgPrefix = ConfigurationUtils.getConfigPrefix(config); + float bigToSmallBlobRatio = bigToSmallBlobRatio(); + float loadRatio = loadRatio(); + float blobToNodeRatio = blobToNodeRatio(); + float boundaryChannelRatio = totalToBoundaryChannelRatio(); + boolean hasCycle = hasCycle(); + try { + writer.write(String.format("\n%6s\t\t", cfgPrefix)); + writer.write(String.format("%.2f\t\t", bigToSmallBlobRatio)); + writer.write(String.format("%.2f\t\t", loadRatio)); + writer.write(String.format("%.2f\t\t", blobToNodeRatio)); + writer.write(String.format("%.2f\t\t", boundaryChannelRatio)); + writer.write(String.format("%s\t\t", hasCycle ? 
"True" : "False")); + } catch (Exception e) { + e.printStackTrace(); + } + return decide(bigToSmallBlobRatio, loadRatio, blobToNodeRatio, + boundaryChannelRatio, hasCycle); + } + + private boolean decide(float bigToSmallBlobRatio, float loadRatio, + float blobToNodeRatio, float boundaryChannelRatio, boolean hasCycle) { + StringBuilder s = new StringBuilder(); + boolean accept = true; + if (Options.prognosticate) { + if (Options.bigToSmallBlobRatio > 0 + && bigToSmallBlobRatio > Options.bigToSmallBlobRatio) { + s.append("1,"); + accept = false; + } + if (Options.loadRatio > 0 && loadRatio > Options.loadRatio) { + s.append("2,"); + accept = false; + } + if (Options.blobToNodeRatio > 0 + && blobToNodeRatio > Options.blobToNodeRatio) { + s.append("3,"); + accept = false; + } + if (Options.boundaryChannelRatio > 0 + && boundaryChannelRatio < Options.boundaryChannelRatio) { + s.append("4,"); + accept = false; + } + if (hasCycle) { + s.append("5,"); + accept = false; + } + } + + try { + writer.write(String.format("%s\t\t", + accept ? "Acptd" : s.toString())); + } catch (IOException e) { + + } + return accept; + } + + /** + * @return The ratio between the number of workers in the largest blob and + * the number of workers in the smallest blob. + */ + private float bigToSmallBlobRatio() { + int min = Integer.MAX_VALUE; + int max = Integer.MIN_VALUE; + int currentBlobSize; + for (List>> blobList : app.partitionsMachineMap + .values()) { + for (Set> blobWorkers : blobList) { + currentBlobSize = blobWorkers.size(); + min = Math.min(min, currentBlobSize); + max = Math.max(max, currentBlobSize); + } + } + float blobRatio = ((float) max) / min; + return blobRatio; + } + + /** + * @return The ratio between the highest number of workers assigned to a + * machine and the lowest number of workers assigned to a machine. 
+ */ + private float loadRatio() { + int min = Integer.MAX_VALUE; + int max = Integer.MIN_VALUE; + int workersInCurrentNode; + for (List>> blobList : app.partitionsMachineMap + .values()) { + workersInCurrentNode = 0; + for (Set> blobWorkers : blobList) { + workersInCurrentNode += blobWorkers.size(); + } + min = Math.min(min, workersInCurrentNode); + max = Math.max(max, workersInCurrentNode); + } + float loadRatio = ((float) max) / min; + return loadRatio; + } + + /** + * @return The ratio between the total number of blobs to the total nodes. + */ + private float blobToNodeRatio() { + int nodes = 0; + int blobs = 0; + for (List>> blobList : app.partitionsMachineMap + .values()) { + nodes++; + blobs += blobList.size(); + } + float blobNodeRatio = ((float) blobs) / nodes; + return blobNodeRatio; + } + + /** + * @return The ratio between the total channels in the stream graph to the + * {@link BoundaryChannel} in the current configuration. + */ + private float totalToBoundaryChannelRatio() { + int totalChannels = 0; + int boundaryChannels = 0; + for (Integer machineID : app.partitionsMachineMap.keySet()) { + List>> blobList = app.partitionsMachineMap + .get(machineID); + Set> allWorkers = new HashSet<>(); + for (Set> blobWorkers : blobList) { + allWorkers.addAll(blobWorkers); + } + + for (Worker w : allWorkers) { + for (Worker succ : Workers.getSuccessors(w)) { + totalChannels++; + if (!allWorkers.contains(succ)) + boundaryChannels++; + } + } + } + float boundaryChannelRatio = ((float) totalChannels) / boundaryChannels; + return boundaryChannelRatio; + } + + private static void writeHeader(FileWriter writer) { + try { + writer.write(String.format("%.7s", "cfgID")); + writer.write("\t\t"); + writer.write(String.format("%.7s", "bigToSmallBlobRatio")); + writer.write("\t\t"); + writer.write(String.format("%.7s", "loadRatio")); + writer.write("\t\t"); + writer.write(String.format("%.7s", "blobToNodeRatio")); + writer.write("\t\t"); + writer.write(String.format("%.7s", 
"BoundaryChannelRatio")); + writer.write("\t\t"); + writer.write(String.format("%.7s", "hasCycles")); + writer.write("\t\t"); + writer.write(String.format("%.7s", "A/R")); // Accepted or Rejected. + writer.write("\t\t"); + writer.write(String.format("%.7s", "time")); + // writer.write("\t\t"); + writer.flush(); + } catch (IOException e) { + + } + } + + @Override + public void time(double time) { + try { + writer.write(String.format("%.0f", time)); + writer.flush(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + private boolean hasCycle() { + Set> machinePaths = buildMachinePaths(); + for (List path : machinePaths) { + Set machines = new HashSet(); + for (int i = 0; i < path.size() - 1; i++) { + int machine = path.get(i); + if (machines.contains(machine)) + return true; + machines.add(machine); + + } + } + return false; + } + + private Set> buildMachinePaths() { + Set> machinePaths = new HashSet>(); + List machinePath; + for (List path : paths) { + machinePath = new LinkedList(); + int curMachine = -1; + for (Integer worker : path) { + int machine = getAssignedMachine(worker); + if (curMachine != machine) { + machinePath.add(machine); + curMachine = machine; + } + } + machinePaths.add(machinePath); + } + return machinePaths; + } + + private int getAssignedMachine(int workerID) { + for (Integer machineID : app.partitionsMachineMap.keySet()) { + for (Set> blobWorkers : app.partitionsMachineMap + .get(machineID)) { + for (Worker w : blobWorkers) { + if (Workers.getIdentifier(w) == workerID) + return machineID; + } + } + } + + throw new IllegalArgumentException(String.format( + "Worker-%d is not assigned to anyof the machines", workerID)); + } +} diff --git a/src/edu/mit/streamjit/tuner/MethodTimeLogger.java b/src/edu/mit/streamjit/tuner/MethodTimeLogger.java new file mode 100644 index 00000000..eb4351cf --- /dev/null +++ b/src/edu/mit/streamjit/tuner/MethodTimeLogger.java @@ -0,0 +1,369 @@ +package edu.mit.streamjit.tuner; + +import java.io.IOException; 
+import java.io.OutputStream; +import java.io.OutputStreamWriter; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; + +import edu.mit.streamjit.impl.distributed.common.Utils; + +/** + * Logs the opentuner's method call times for debugging purpose. + * + * @author sumanan + * @since 6 Mar, 2015 + */ +public interface MethodTimeLogger { + + void bStartTuner(); + void eStartTuner(); + + void bHandleTermination(); + void eHandleTermination(); + + void bNewCfg(); + void eNewCfg(int round); + + void bReconfigure(); + void eReconfigure(); + + void bTuningFinished(); + void eTuningFinished(); + + void bTerminate(); + void eTerminate(); + + void bIntermediateDraining(); + void eIntermediateDraining(); + + void bManagerReconfigure(); + void eManagerReconfigure(); + + void bGetFixedOutputTime(); + void eGetFixedOutputTime(); + + void bTuningRound(); + void eTuningRound(); + + void bCfgManagerNewcfg(); + void eCfgManagerNewcfg(); + + void bPrognosticate(); + void ePrognosticate(); + + /** + * Logs nothing. 
+ */ + public static class NoMethodTimeLogger implements MethodTimeLogger { + + @Override + public void bStartTuner() { + } + + @Override + public void eStartTuner() { + } + + @Override + public void bHandleTermination() { + } + + @Override + public void eHandleTermination() { + } + + @Override + public void bNewCfg() { + } + + @Override + public void eNewCfg(int round) { + } + + @Override + public void bReconfigure() { + } + + @Override + public void eReconfigure() { + } + + @Override + public void bTuningFinished() { + } + + @Override + public void eTuningFinished() { + } + + @Override + public void bTerminate() { + } + + @Override + public void eTerminate() { + } + + @Override + public void bIntermediateDraining() { + } + + @Override + public void eIntermediateDraining() { + } + + @Override + public void bManagerReconfigure() { + } + + @Override + public void eManagerReconfigure() { + } + + @Override + public void bGetFixedOutputTime() { + } + + @Override + public void eGetFixedOutputTime() { + } + + @Override + public void bTuningRound() { + } + + @Override + public void eTuningRound() { + } + + @Override + public void bCfgManagerNewcfg() { + } + + @Override + public void eCfgManagerNewcfg() { + } + + @Override + public void bPrognosticate() { + } + + @Override + public void ePrognosticate() { + } + } + + public static class MethodTimeLoggerImpl implements MethodTimeLogger { + + private final OutputStreamWriter osWriter; + + private final Stopwatch startTuner; + private final Stopwatch handleTermination; + private final Stopwatch newCfg; + private final Stopwatch reconfigure; + private final Stopwatch tuningFinished; + private final Stopwatch terminate; + private final Stopwatch intermediateDraining; + private final Stopwatch managerReconfigure; + private final Stopwatch getFixedOutputTime; + private final Stopwatch tuningRound; + private final Stopwatch cfgManagerNewcfg; + private final Stopwatch prognosticate; + + private RuntimeMXBean rb = 
ManagementFactory.getRuntimeMXBean(); + + public MethodTimeLoggerImpl(OutputStream os) { + this(getOSWriter(os)); + } + + public MethodTimeLoggerImpl(OutputStreamWriter osWriter) { + this.osWriter = osWriter; + this.startTuner = Stopwatch.createUnstarted(); + this.handleTermination = Stopwatch.createUnstarted(); + this.newCfg = Stopwatch.createUnstarted(); + this.reconfigure = Stopwatch.createUnstarted(); + this.tuningFinished = Stopwatch.createUnstarted(); + this.terminate = Stopwatch.createUnstarted(); + this.intermediateDraining = Stopwatch.createUnstarted(); + this.managerReconfigure = Stopwatch.createUnstarted(); + this.getFixedOutputTime = Stopwatch.createUnstarted(); + this.tuningRound = Stopwatch.createUnstarted(); + this.cfgManagerNewcfg = Stopwatch.createUnstarted(); + this.prognosticate = Stopwatch.createUnstarted(); + write("Method\t\t\tUptime\t\telapsedtime\n"); + write("====================================================\n"); + } + + @Override + public void bStartTuner() { + begin(startTuner); + } + + @Override + public void eStartTuner() { + end(startTuner, "startTuner"); + } + + @Override + public void bHandleTermination() { + begin(handleTermination); + } + + @Override + public void eHandleTermination() { + end(handleTermination, "handleTermination"); + } + + @Override + public void bNewCfg() { + begin(newCfg); + } + + @Override + public void eNewCfg(int round) { + end(newCfg, String.format("newCfg-%d", round)); + } + + @Override + public void bReconfigure() { + begin(reconfigure); + } + + @Override + public void eReconfigure() { + end(reconfigure, "reconfigure"); + } + + @Override + public void bTuningFinished() { + begin(tuningFinished); + } + + @Override + public void eTuningFinished() { + end(tuningFinished, "tuningFinished"); + } + + @Override + public void bTerminate() { + begin(terminate); + } + + @Override + public void eTerminate() { + end(terminate, "terminate"); + } + + @Override + public void bIntermediateDraining() { + 
begin(intermediateDraining); + } + + @Override + public void eIntermediateDraining() { + end(intermediateDraining, "intermediateDraining"); + } + + @Override + public void bManagerReconfigure() { + begin(managerReconfigure); + } + + @Override + public void eManagerReconfigure() { + end(managerReconfigure, "managerReconfigure"); + } + + @Override + public void bGetFixedOutputTime() { + begin(getFixedOutputTime); + } + + @Override + public void eGetFixedOutputTime() { + end(getFixedOutputTime, "getFixedOutputTime"); + } + + @Override + public void bTuningRound() { + begin(tuningRound); + } + + @Override + public void eTuningRound() { + end(tuningRound, "tuningRound"); + write("--------------------------------------------------\n"); + } + + @Override + public void bCfgManagerNewcfg() { + begin(cfgManagerNewcfg); + } + + @Override + public void eCfgManagerNewcfg() { + end(cfgManagerNewcfg, "CfgManagerNewcfg"); + } + + @Override + public void bPrognosticate() { + begin(prognosticate); + } + + @Override + public void ePrognosticate() { + end(prognosticate, "prognosticate"); + } + + private void begin(Stopwatch sw) { + sw.reset(); + sw.start(); + } + + private void end(Stopwatch sw, String methodName) { + sw.stop(); + long uptime = rb.getUptime(); + long elapsedtime = sw.elapsed(TimeUnit.MILLISECONDS); + write(String.format("%-22s\t%-12d\t%d\n", methodName, uptime, + elapsedtime)); + } + + private void write(String msg) { + if (osWriter != null) + try { + osWriter.write(msg); + osWriter.flush(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + private static OutputStreamWriter getOSWriter(OutputStream os) { + if (os == null) + return null; + return new OutputStreamWriter(os); + } + } + + /** + * Writes the method call time info to appName/onlineTuner.txt file. 
+ */ + public static class FileMethodTimeLogger extends MethodTimeLoggerImpl { + public FileMethodTimeLogger(String appName) { + super(Utils.fileWriter(appName, "onlineTuner.txt")); + } + } + + /** + * Prints the method call time info to the standard out. + */ + public static class PrintMethodTimeLogger extends MethodTimeLoggerImpl { + public PrintMethodTimeLogger() { + super(System.out); + } + } +} diff --git a/src/edu/mit/streamjit/tuner/OfflineTuner.java b/src/edu/mit/streamjit/tuner/OfflineTuner.java index dbd44e23..27585068 100644 --- a/src/edu/mit/streamjit/tuner/OfflineTuner.java +++ b/src/edu/mit/streamjit/tuner/OfflineTuner.java @@ -50,7 +50,6 @@ import edu.mit.streamjit.test.Benchmark.Dataset; import edu.mit.streamjit.test.BenchmarkProvider; import edu.mit.streamjit.test.Datasets; -import edu.mit.streamjit.test.apps.bitonicsort.BitonicSort; import edu.mit.streamjit.test.apps.channelvocoder7.ChannelVocoder7; import edu.mit.streamjit.util.json.Jsonifiers; @@ -112,7 +111,8 @@ public void tune(Benchmark app) throws InterruptedException { try { autoTuner.startTuner(String.format( "lib%sopentuner%sstreamjit%sstreamjit.py", File.separator, - File.separator, File.separator)); + File.separator, File.separator), + new File(System.getProperty("user.dir"))); autoTuner.writeLine("program"); autoTuner.writeLine(app.toString()); diff --git a/src/edu/mit/streamjit/tuner/OnlineTuner.java b/src/edu/mit/streamjit/tuner/OnlineTuner.java new file mode 100644 index 00000000..cf4e3a57 --- /dev/null +++ b/src/edu/mit/streamjit/tuner/OnlineTuner.java @@ -0,0 +1,203 @@ +package edu.mit.streamjit.tuner; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import com.google.common.base.Stopwatch; + +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.distributed.ConfigurationManager; +import 
edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.impl.distributed.common.AppStatus; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.util.ConfigurationUtils; +import edu.mit.streamjit.util.Pair; +import edu.mit.streamjit.util.TimeLogProcessor; +import edu.mit.streamjit.util.json.Jsonifiers; + +/** + * Online tuner does continues learning. + * + * @author Sumanan sumanan@mit.edu + * @since Oct 8, 2013 + */ +public class OnlineTuner implements Runnable { + private final OpenTuner tuner; + private final StreamJitApp app; + private final ConfigurationManager cfgManager; + private final boolean needTermination; + private final TimeLogger logger; + private final ConfigurationPrognosticator prognosticator; + private final MethodTimeLogger mLogger; + private final Reconfigurer configurer; + + public OnlineTuner(Reconfigurer configurer, boolean needTermination) { + this.configurer = configurer; + this.app = configurer.app; + this.cfgManager = configurer.cfgManager; + this.tuner = new TCPTuner(); + this.needTermination = needTermination; + this.logger = configurer.logger; + this.prognosticator = configurer.prognosticator; + this.mLogger = configurer.mLogger; + } + + @Override + public void run() { + if (Options.tune == 1) + tune(); + else + System.err.println("Options.tune is not in tune mode."); + } + + private void tune() { + int round = 0; + // Keeps track of the current best time. Uses this to discard bad cfgs + // early. 
+ long currentBestTime; + if (Options.timeOut) + currentBestTime = Long.MAX_VALUE; + else + currentBestTime = 0; + Stopwatch searchTimeSW = Stopwatch.createStarted(); + try { + mLogger.bStartTuner(); + startTuner(); + mLogger.eStartTuner(); + Pair ret; + + System.out.println("New tune run............."); + while (configurer.manager.getStatus() != AppStatus.STOPPED) { + mLogger.bTuningRound(); + String cfgJson = tuner.readLine(); + logger.logSearchTime(searchTimeSW + .elapsed(TimeUnit.MILLISECONDS)); + if (cfgJson == null) { + System.err.println("OpenTuner closed unexpectly."); + break; + } + + // At the end of the tuning, Opentuner will send "Completed" + // msg. This means no more tuning. + if (cfgJson.equals("Completed")) { + mLogger.bHandleTermination(); + handleTermination(); + mLogger.eHandleTermination(); + break; + } + + mLogger.bNewCfg(); + Configuration config = newCfg(++round, cfgJson); + mLogger.eNewCfg(round); + mLogger.bReconfigure(); + ret = configurer.reconfigure(config, 2 * currentBestTime); + mLogger.eReconfigure(); + if (ret.first) { + long time = ret.second; + currentBestTime = (time > 0 && currentBestTime > time) + ? 
time : currentBestTime; + prognosticator.time(ret.second); + tuner.writeLine(new Double(ret.second).toString()); + searchTimeSW.reset(); + searchTimeSW.start(); + } else { + tuner.writeLine("exit"); + break; + } + mLogger.eTuningRound(); + } + + } catch (IOException e) { + e.printStackTrace(); + mLogger.bTerminate(); + configurer.terminate(); + mLogger.eTerminate(); + } + mLogger.bTuningFinished(); + tuningFinished(); + mLogger.eTuningFinished(); + } + + private void startTuner() throws IOException { + String relativeTunerPath = String.format( + "lib%sopentuner%sstreamjit%sstreamjit2.py", File.separator, + File.separator, File.separator); + + String absoluteTunerPath = String.format("%s%s%s", + System.getProperty("user.dir"), File.separator, + relativeTunerPath); + + tuner.startTuner(absoluteTunerPath, new File(app.name)); + + tuner.writeLine("program"); + tuner.writeLine(app.name); + + tuner.writeLine("confg"); + tuner.writeLine(Jsonifiers.toJson(app.getConfiguration()).toString()); + } + + /** + * Just excerpted from run() method for better readability. 
+ * + * @throws IOException + */ + private void handleTermination() throws IOException { + String finalConfg = tuner.readLine(); + System.out.println("Tuning finished"); + ConfigurationUtils.saveConfg(finalConfg, "final", app.name); + Configuration finalcfg = Configuration.fromJson(finalConfg); + finalcfg = ConfigurationUtils.addConfigPrefix(finalcfg, "final"); + verify(); + if (needTermination) { + configurer.terminate(); + } else { + Pair ret = configurer.reconfigure(finalcfg, 0); + if (ret.first && ret.second > 0) + System.out + .println("Application is running forever with the final configuration."); + else + System.err.println("Invalid final configuration."); + } + } + + private void verify() { + Map cfgPrefixes = new HashMap<>(); + cfgPrefixes.put("final", 0); + cfgPrefixes.put("hand", 0); + new Verifier(configurer).verifyTuningTimes(cfgPrefixes); + } + + private Configuration newCfg(int round, String cfgJson) { + String cfgPrefix = new Integer(round).toString(); + System.out.println(String.format( + "---------------------%s-------------------------", cfgPrefix)); + logger.newConfiguration(cfgPrefix); + Configuration config = Configuration.fromJson(cfgJson); + config = ConfigurationUtils.addConfigPrefix(config, cfgPrefix); + + if (Options.saveAllConfigurations) + ConfigurationUtils.saveConfg(cfgJson, cfgPrefix, app.name); + return config; + } + + private void tuningFinished() { + try { + configurer.drainer.dumpDraindataStatistics(); + } catch (IOException e) { + e.printStackTrace(); + } + + if (needTermination) + configurer.terminate(); + + try { + TimeLogProcessor.summarize(app.name); + } catch (IOException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/tuner/OpenTuner.java b/src/edu/mit/streamjit/tuner/OpenTuner.java index 9161fff1..60df0d2f 100644 --- a/src/edu/mit/streamjit/tuner/OpenTuner.java +++ b/src/edu/mit/streamjit/tuner/OpenTuner.java @@ -21,6 +21,7 @@ */ package edu.mit.streamjit.tuner; 
+import java.io.File; import java.io.IOException; /** @@ -58,7 +59,8 @@ public interface OpenTuner { * * @throws IOException */ - public void startTuner(String tunerPath) throws IOException; + public void startTuner(String tunerPath, File workingDir) + throws IOException; /** * Stop the Autotuner instance. diff --git a/src/edu/mit/streamjit/tuner/Reconfigurer.java b/src/edu/mit/streamjit/tuner/Reconfigurer.java new file mode 100644 index 00000000..56d912ae --- /dev/null +++ b/src/edu/mit/streamjit/tuner/Reconfigurer.java @@ -0,0 +1,146 @@ +package edu.mit.streamjit.tuner; + +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.common.Configuration.IntParameter; +import edu.mit.streamjit.impl.common.TimeLogger; +import edu.mit.streamjit.impl.common.drainer.AbstractDrainer; +import edu.mit.streamjit.impl.distributed.ConfigurationManager; +import edu.mit.streamjit.impl.distributed.StreamJitApp; +import edu.mit.streamjit.impl.distributed.StreamJitAppManager; +import edu.mit.streamjit.impl.distributed.common.AppStatus; +import edu.mit.streamjit.impl.distributed.node.StreamNode; +import edu.mit.streamjit.tuner.MethodTimeLogger.FileMethodTimeLogger; +import edu.mit.streamjit.util.Pair; + +/** + * Re-factored the {@link OnlineTuner} and moved all streamjit app + * reconfiguration related methods to this new class. 
+ * + * @author sumanan + * @since 10 Mar, 2015 + */ +public class Reconfigurer { + + final AbstractDrainer drainer; + final StreamJitAppManager manager; + final StreamJitApp app; + final ConfigurationManager cfgManager; + final TimeLogger logger; + final ConfigurationPrognosticator prognosticator; + final MethodTimeLogger mLogger; + + public Reconfigurer(AbstractDrainer drainer, StreamJitAppManager manager, + StreamJitApp app, ConfigurationManager cfgManager, + TimeLogger logger) { + this.drainer = drainer; + this.manager = manager; + this.app = app; + this.cfgManager = cfgManager; + this.logger = logger; + this.prognosticator = new GraphPropertyPrognosticator(app); + this.mLogger = new FileMethodTimeLogger(app.name); + } + + /** + * TODO: Split this method into two methods, 1.reconfigure(), + * 2.getFixedOutputTime(). + * + * @param cfgJson + * @param round + * @return if ret.first == false, then no more tuning. ret.second = running + * time in milliseconds. ret.second may be a negative value if the + * reconfiguration is unsuccessful or a timeout is occurred. + * Meanings of the negative values are follows + *
              + *
            1. -1: Timeout has occurred. + *
            2. -2: Invalid configuration. + *
            3. -3: {@link ConfigurationPrognosticator} has rejected the + * configuration. + *
            4. -4: Draining failed. Another draining is in progress. + *
            5. -5: Reconfiguration has failed at {@link StreamNode} side. + * E.g., Compilation error. + *
            6. -6: Misc problems. + */ + public Pair reconfigure(Configuration config, long timeout) { + long time; + + if (manager.getStatus() == AppStatus.STOPPED) + return new Pair(false, 0l); + + mLogger.bCfgManagerNewcfg(); + boolean validCfg = cfgManager.newConfiguration(config); + mLogger.eCfgManagerNewcfg(); + if (!validCfg) + return new Pair(true, -2l); + + mLogger.bPrognosticate(); + boolean prog = prognosticator.prognosticate(config); + mLogger.ePrognosticate(); + if (!prog) + return new Pair(true, -3l); + + try { + mLogger.bIntermediateDraining(); + boolean intermediateDraining = intermediateDraining(); + mLogger.eIntermediateDraining(); + if (!intermediateDraining) + return new Pair(false, -4l); + + drainer.setBlobGraph(app.blobGraph); + int multiplier = getMultiplier(config); + mLogger.bManagerReconfigure(); + boolean reconfigure = manager.reconfigure(multiplier); + mLogger.eManagerReconfigure(); + if (reconfigure) { + // TODO: need to check the manager's status before passing the + // time. Exceptions, final drain, etc may causes app to stop + // executing. + mLogger.bGetFixedOutputTime(); + time = manager.getFixedOutputTime(timeout); + mLogger.eGetFixedOutputTime(); + logger.logRunTime(time); + } else { + time = -5l; + } + } catch (Exception ex) { + ex.printStackTrace(); + System.err + .println("Couldn't compile the stream graph with this configuration"); + time = -6l; + } + return new Pair(true, time); + } + + /** + * Performs intermediate draining. + * + * @return true iff the draining is success or the application + * is not running currently. 
+ * @throws InterruptedException + */ + private boolean intermediateDraining() throws InterruptedException { + if (manager.isRunning()) { + return drainer.drainIntermediate(); + } else + return true; + } + + private int getMultiplier(Configuration config) { + int multiplier = 50; + IntParameter mulParam = config.getParameter("multiplier", + IntParameter.class); + if (mulParam != null) + multiplier = mulParam.getValue(); + System.err.println("Reconfiguring...multiplier = " + multiplier); + return multiplier; + } + + public void terminate() { + if (manager.isRunning()) { + // drainer.startDraining(1); + drainer.drainFinal(true); + } else { + manager.stop(); + } + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/tuner/RunApp.java b/src/edu/mit/streamjit/tuner/RunApp.java index 9492a714..fb834ed7 100644 --- a/src/edu/mit/streamjit/tuner/RunApp.java +++ b/src/edu/mit/streamjit/tuner/RunApp.java @@ -21,37 +21,27 @@ */ package edu.mit.streamjit.tuner; -import static com.google.common.base.Preconditions.checkNotNull; - -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryMXBean; -import java.lang.management.MemoryUsage; import java.sql.ResultSet; import java.sql.SQLException; -import com.google.common.base.Splitter; - import edu.mit.streamjit.api.StreamCompiler; import edu.mit.streamjit.impl.common.Configuration; import edu.mit.streamjit.impl.common.Configuration.IntParameter; -import edu.mit.streamjit.impl.common.Configuration.Parameter; -import edu.mit.streamjit.impl.common.Configuration.SwitchParameter; import edu.mit.streamjit.impl.compiler2.Compiler2StreamCompiler; import edu.mit.streamjit.impl.distributed.DistributedStreamCompiler; +import edu.mit.streamjit.impl.distributed.common.Utils; import edu.mit.streamjit.test.Benchmark; import edu.mit.streamjit.test.Benchmarker; -import edu.mit.streamjit.test.Datasets; -import edu.mit.streamjit.tuner.ConfigGenerator.sqliteAdapter; /** * {@link RunApp} reads configuration, 
streamJit's app name and location * information from streamjit.db based on the passed arguments, runs the * streamJit app and update the database with the execution time. StreamJit's * opentuner Python script calls this to run the streamJit application. - * + * * @author Sumanan sumanan@mit.edu * @since Sep 10, 2013 - * + * */ public class RunApp { @@ -71,62 +61,31 @@ public static void main(String[] args) throws SQLException { System.out.println(String.format("JAVA Executing: %s Round - %d", benchmarkName, round)); - String dbPath = "streamjit.db"; - - sqliteAdapter sqlite; - try { - sqlite = new sqliteAdapter(); - } catch (ClassNotFoundException e) { - System.err - .println("Sqlite3 database not found...couldn't update the database with the configutaion."); - e.printStackTrace(); - return; - } - sqlite.connectDB(dbPath); - - ResultSet result = sqlite.executeQuery(String.format( - "SELECT * FROM apps WHERE name='%s'", benchmarkName)); - - String confgString = result.getString("configuration"); - String sjDbPath = "sj" + benchmarkName + ".db"; - sqliteAdapter sjDb; - try { - sjDb = new sqliteAdapter(); - } catch (ClassNotFoundException e1) { - // Actually this exception will not occur. If Sqlite3 did not - // exists then it would have exit at previous return point. 
- System.err - .println("Sqlite3 database not found...couldn't update the database with the configutaion."); - e1.printStackTrace(); - return; - } + SqliteAdapter sjDb; + sjDb = new SqliteAdapter(); sjDb.connectDB(sjDbPath); - - ResultSet result1 = sjDb.executeQuery(String.format( + ResultSet result = sjDb.executeQuery(String.format( "SELECT * FROM results WHERE Round=%d", round)); - String pyDict = result1.getString("SJConfig"); - - Configuration config = Configuration - .fromJson(getConfigurationString(confgString)); - - Configuration cfg2 = rebuildConfiguration(pyDict, config); + String cfgJson = result.getString("SJConfig"); + Configuration cfg = Configuration.fromJson(cfgJson); Benchmark app = Benchmarker.getBenchmarkByName(benchmarkName); StreamCompiler sc; - IntParameter p = cfg2.getParameter("noOfMachines", IntParameter.class); + IntParameter p = cfg.getParameter("noOfMachines", IntParameter.class); if (p == null) { Compiler2StreamCompiler csc = new Compiler2StreamCompiler(); - csc.configuration(cfg2); + csc.configuration(cfg); sc = csc; } else { - sc = new DistributedStreamCompiler(p.getValue(), cfg2); + sc = new DistributedStreamCompiler(p.getValue(), cfg); } double time = 0; try { - Benchmarker.Result benchmarkResult = Benchmarker.runBenchmark(app, sc).get(0); + Benchmarker.Result benchmarkResult = Benchmarker.runBenchmark(app, + sc).get(0); if (benchmarkResult.isOK()) time = benchmarkResult.runMillis(); else if (benchmarkResult.kind() == Benchmarker.Result.Kind.TIMEOUT) @@ -139,18 +98,11 @@ else if (benchmarkResult.kind() == Benchmarker.Result.Kind.EXCEPTION) { time = -2; } } catch (Exception e) { - //The Benchmarker should catch everything, but just in case... + // The Benchmarker should catch everything, but just in case... 
e.printStackTrace(); time = -2; } catch (OutOfMemoryError er) { - MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean(); - System.out.println("******OutOfMemoryError******"); - MemoryUsage heapUsage = memoryBean.getHeapMemoryUsage(); - int MEGABYTE = 1024 * 1024; - long maxMemory = heapUsage.getMax() / MEGABYTE; - long usedMemory = heapUsage.getUsed() / MEGABYTE; - System.out.println("Memory Use :" + usedMemory + "M/" + maxMemory - + "M"); + Utils.printOutOfMemory(); time = -3; } @@ -160,63 +112,4 @@ else if (benchmarkResult.kind() == Benchmarker.Result.Kind.EXCEPTION) { "UPDATE results SET Exectime=%f WHERE Round=%d", time, round); sjDb.executeUpdate(qry); } - - private static String getConfigurationString(String s) { - String s1 = s.replaceAll("__class__", "ttttt"); - String s2 = s1.replaceAll("javaClassPath", "class"); - String s3 = s2.replaceAll("ttttt", "__class__"); - return s3; - } - - /** - * Creates a new {@link Configuration} from the received python dictionary - * string. This is not a good way to do. - *

              - * TODO: Need to add a method to {@link Configuration} so that the - * configuration object can be updated from the python dict string. Now we - * are destructing the old confg object and recreating a new one every time. - * Not a appreciatable way. - * - * @param pythonDict - * Python dictionary string. Autotuner gives a dictionary of - * features with trial values. - * @param config - * Old configuration object. - * @return New configuration object with updated values from the pythonDict. - */ - private static Configuration rebuildConfiguration(String pythonDict, - Configuration config) { - // System.out.println(pythonDict); - checkNotNull(pythonDict, "Received Python dictionary is null"); - pythonDict = pythonDict.replaceAll("u'", ""); - pythonDict = pythonDict.replaceAll("':", ""); - pythonDict = pythonDict.replaceAll("\\{", ""); - pythonDict = pythonDict.replaceAll("\\}", ""); - Splitter dictSplitter = Splitter.on(", ").omitEmptyStrings() - .trimResults(); - Configuration.Builder builder = Configuration.builder(); - System.out.println("New parameter values from Opentuner..."); - for (String s : dictSplitter.split(pythonDict)) { - String[] str = s.split(" "); - if (str.length != 2) - throw new AssertionError("Wrong python dictionary..."); - Parameter p = config.getParameter(str[0]); - if (p == null) - continue; - if (p instanceof IntParameter) { - IntParameter ip = (IntParameter) p; - builder.addParameter(new IntParameter(ip.getName(), - ip.getMin(), ip.getMax(), Integer.parseInt(str[1]))); - - } else if (p instanceof SwitchParameter) { - SwitchParameter sp = (SwitchParameter) p; - Class type = sp.getGenericParameter(); - int val = Integer.parseInt(str[1]); - SwitchParameter sp1 = new SwitchParameter(sp.getName(), - type, sp.getUniverse().get(val), sp.getUniverse()); - builder.addParameter(sp1); - } - } - return builder.build(); - } } diff --git a/src/edu/mit/streamjit/tuner/RunApp2.java b/src/edu/mit/streamjit/tuner/RunApp2.java index 
e0ec3c42..07e0b0a0 100644 --- a/src/edu/mit/streamjit/tuner/RunApp2.java +++ b/src/edu/mit/streamjit/tuner/RunApp2.java @@ -21,17 +21,18 @@ */ package edu.mit.streamjit.tuner; -import edu.mit.streamjit.api.StreamCompiler; -import edu.mit.streamjit.impl.common.Configuration; -import edu.mit.streamjit.impl.compiler2.Compiler2StreamCompiler; -import edu.mit.streamjit.test.Benchmark; -import edu.mit.streamjit.test.Benchmarker; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.List; +import edu.mit.streamjit.api.StreamCompiler; +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.compiler2.Compiler2StreamCompiler; +import edu.mit.streamjit.test.Benchmark; +import edu.mit.streamjit.test.Benchmarker; + /** * Runs a benchmark using a specified configuration. * @author Jeffrey Bosboom diff --git a/src/edu/mit/streamjit/tuner/SqliteAdapter.java b/src/edu/mit/streamjit/tuner/SqliteAdapter.java new file mode 100644 index 00000000..796ec9ec --- /dev/null +++ b/src/edu/mit/streamjit/tuner/SqliteAdapter.java @@ -0,0 +1,90 @@ +package edu.mit.streamjit.tuner; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; + +public class SqliteAdapter { + + private Connection con = null; + private Statement statement; + + /** + * Checks if org.sqlite.JDBC class exists. Throws + * {@link IllegalStateException} if sqlite does not exist. 
+ */ + public SqliteAdapter() { + try { + Class.forName("org.sqlite.JDBC"); + } catch (ClassNotFoundException e) { + e.printStackTrace(); + throw new IllegalStateException("Sqlite3 database not found."); + } + } + + public void connectDB(String path) { + try { + con = DriverManager.getConnection(String.format("jdbc:sqlite:%s", + path)); + statement = con.createStatement(); + statement.setQueryTimeout(30); // set timeout to 30 sec. + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + /** + * Creates table iff it is not exists + * + * @param table + * Name of the table + * @param signature + * Column format of the table + * @throws SQLException + */ + public void createTable(String table, String signature) { + checkNotNull(con); + DatabaseMetaData dbm; + try { + dbm = con.getMetaData(); + + ResultSet tables = dbm.getTables(null, null, table, null); + if (!tables.next()) { + // "create table %s ()" + statement.executeUpdate(String.format("create table %s (%s)", + table, signature)); + } + } catch (SQLException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + + public ResultSet executeQuery(String sql) { + try { + ResultSet rs = statement.executeQuery(sql); + // con.commit(); + return rs; + } catch (SQLException e) { + e.printStackTrace(); + } + return null; + } + + public int executeUpdate(String sql) { + try { + int ret = statement.executeUpdate(sql); + // con.commit(); + return ret; + } catch (SQLException e) { + e.printStackTrace(); + } + return -1; + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/tuner/TCPTuner.java b/src/edu/mit/streamjit/tuner/TCPTuner.java index 419ae076..5352a81b 100644 --- a/src/edu/mit/streamjit/tuner/TCPTuner.java +++ b/src/edu/mit/streamjit/tuner/TCPTuner.java @@ -23,6 +23,7 @@ import java.io.BufferedReader; import java.io.BufferedWriter; +import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; @@ -32,7 +33,7 @@ import 
java.net.UnknownHostException; import java.util.Random; -import edu.mit.streamjit.impl.distributed.common.GlobalConstants; +import edu.mit.streamjit.impl.distributed.common.Options; public final class TCPTuner implements OpenTuner { @@ -69,19 +70,28 @@ public boolean isAlive() { } @Override - public void startTuner(String tunerPath) throws IOException { + public void startTuner(String tunerPath, File workingDir) + throws IOException { int min = 5000; Random rand = new Random(); Integer port = rand.nextInt(65535 - min) + min; - if (GlobalConstants.tunerMode == 0) { - this.tuner = new ProcessBuilder("xterm", "-e", "python", tunerPath, - port.toString()).start(); + if (Options.tunerStartMode == 0) { + ProcessBuilder xtermPB = new ProcessBuilder("xterm", "-hold", "-e", + "python", tunerPath, port.toString()); + ProcessBuilder gnomePB = new ProcessBuilder("gnome-terminal", "-e", + String.format("python %s %s", tunerPath, port.toString())); + gnomePB.directory(workingDir); + this.tuner = gnomePB.start(); + } else if (Options.tunerStartMode == 1) { + ProcessBuilder pb = new ProcessBuilder("python", tunerPath, + port.toString()); + pb.directory(workingDir); + this.tuner = pb.start(); } else port = 12563; this.connection = new TunerConnection(); connection.connect(port); } - @Override public void stopTuner() throws IOException { if (tuner == null) @@ -111,7 +121,7 @@ public void stopTuner() throws IOException { private final class TunerConnection { private BufferedReader reader = null; - private BufferedWriter writter = null; + private BufferedWriter writer = null; private Socket socket = null; private boolean isconnected = false; @@ -126,7 +136,7 @@ void connect(int port) throws IOException { InputStream is = socket.getInputStream(); OutputStream os = socket.getOutputStream(); this.reader = new BufferedReader(new InputStreamReader(is)); - this.writter = new BufferedWriter( + this.writer = new BufferedWriter( new OutputStreamWriter(os)); isconnected = true; break; @@ 
-150,10 +160,10 @@ void connect(int port) throws IOException { public void writeLine(String msg) throws IOException { if (isStillConnected()) { try { - writter.write(msg); + writer.write(msg); if (msg.toCharArray()[msg.length() - 1] != '\n') - writter.write('\n'); - writter.flush(); + writer.write('\n'); + writer.flush(); } catch (IOException ix) { isconnected = false; throw ix; @@ -181,8 +191,8 @@ public final void closeConnection() { try { if (reader != null) this.reader.close(); - if (writter != null) - this.writter.close(); + if (writer != null) + this.writer.close(); if (socket != null) this.socket.close(); } catch (IOException ex) { @@ -200,7 +210,8 @@ public static void main(String[] args) throws InterruptedException, OpenTuner tuner = new TCPTuner(); try { - tuner.startTuner("/lib/opentuner/streamjit/streamjit.py"); + tuner.startTuner("./lib/opentuner/streamjit/streamjit.py", new File( + System.getProperty("user.dir"))); } catch (IOException e) { e.printStackTrace(); } diff --git a/src/edu/mit/streamjit/tuner/Verifier.java b/src/edu/mit/streamjit/tuner/Verifier.java new file mode 100644 index 00000000..d3128e39 --- /dev/null +++ b/src/edu/mit/streamjit/tuner/Verifier.java @@ -0,0 +1,196 @@ +package edu.mit.streamjit.tuner; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import edu.mit.streamjit.impl.common.Configuration; +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.util.ConfigurationUtils; +import edu.mit.streamjit.util.Pair; +import edu.mit.streamjit.util.TimeLogProcessor; + +public class Verifier implements Runnable { + + private final Reconfigurer configurer; + private final String appName; + + public Verifier(Reconfigurer configurer) { + this.configurer = configurer; + this.appName = 
configurer.app.name; + } + + public void verify() { + Map cfgPrefixes = cfgPrefixes(appName); + verifyTuningTimes(cfgPrefixes); + generateGraphs(cfgPrefixes); + } + + private void generateGraphs(Map cfgPrefixes) { + if (Options.verificationCount > 50 || Options.evaluationCount > 50) + try { + TimeLogProcessor.processVerifycaionRun(appName, cfgPrefixes); + TimeLogProcessor.summarize(appName); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * This method just picks a few configurations and re-run the app to ensure + * the time we reported to the opentuner is correct. + * + * This method can be called after the completion of the tuning. + * + * @param cfgPrefixes + * map of cfgPrefixes and expected running time. + */ + void verifyTuningTimes(Map cfgPrefixes) { + try { + FileWriter writer = writer(); + for (int i = 0; i < Options.verificationCount; i++) { + for (Map.Entry en : cfgPrefixes.entrySet()) { + String prefix = en.getKey(); + Integer expectedRunningTime = en.getValue(); + String cfgName = String + .format("%s_%s.cfg", prefix, appName); + Configuration cfg = ConfigurationUtils.readConfiguration( + appName, prefix); + if (cfg == null) { + System.err.println(String.format("No %s file exists", + cfgName)); + continue; + } + cfg = ConfigurationUtils.addConfigPrefix(cfg, prefix); + writer.write("----------------------------------------\n"); + writer.write(String.format("Configuration name = %s\n", + cfgName)); + List runningTimes = evaluateConfig(cfg); + processRunningTimes(runningTimes, expectedRunningTime, + writer); + } + } + writer.write("**************FINISHED**************\n\n"); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + private void processRunningTimes(List runningTimes, + Integer expectedRunningTime, FileWriter writer) throws IOException { + writer.write(String.format("Expected running time = %dms\n", + expectedRunningTime)); + int correctEval = 0; + double total = 0; + for (int i = 0; i < 
runningTimes.size(); i++) { + long runningTime = runningTimes.get(i); + if (runningTime > 0) { + correctEval++; + total += runningTime; + } + writer.write(String.format("Evaluation %d = %dms\n", i + 1, + runningTime)); + } + double avg = total / correctEval; + writer.write(String.format("Average running time = %.3fms\n", avg)); + } + + private FileWriter writer() throws IOException { + FileWriter writer = new FileWriter(String.format("%s%sevaluation.txt", + appName, File.separator, appName), true); + writer.write("##########################################################"); + Properties prop = Options.getProperties(); + prop.store(writer, ""); + return writer; + } + + public static Map cfgPrefixes(String appName) { + Map cfgPrefixes = new HashMap<>(); + try { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%sverify.txt", appName, File.separator))); + String line; + while ((line = reader.readLine()) != null) { + if (line.contains("=")) + process1(line, cfgPrefixes); + else + process2(line, cfgPrefixes); + } + reader.close(); + } catch (IOException e) { + } + return cfgPrefixes; + } + + /** + * Processes the line that is generated by {@link TimeLogProcessor}. + */ + private static void process1(String line, Map cfgPrefixes) { + String[] arr = line.split("="); + String cfgPrefix = arr[0].trim(); + int expectedTime = 0; + if (arr.length > 1) + try { + expectedTime = Integer.parseInt(arr[1]); + } catch (NumberFormatException ex) { + System.err.println("NumberFormatException: " + arr[1]); + } + cfgPrefixes.put(cfgPrefix, expectedTime); + } + + /** + * Processes manually entered lines in the verify.txt + */ + private static void process2(String line, Map cfgPrefixes) { + String[] arr = line.split(","); + for (String s : arr) { + cfgPrefixes.put(s.trim(), 0); + } + } + + /** + * Evaluates a configuration. 
+ * + * @param cfg + * configuration that needs to be evaluated + */ + private List evaluateConfig(Configuration cfg) { + String cfgPrefix = ConfigurationUtils.getConfigPrefix(cfg); + System.out.println("Evaluating " + cfgPrefix); + int count = Options.evaluationCount; + List runningTime = new ArrayList<>(count); + Pair ret; + if (cfg != null) { + for (int i = 0; i < count; i++) { + configurer.logger.newConfiguration(cfgPrefix); + ret = configurer.reconfigure(cfg, 0); + if (ret.first) { + configurer.prognosticator.time(ret.second); + runningTime.add(ret.second); + } else { + System.err.println("Evaluation failed..."); + } + } + } else { + System.err.println("Null configuration\n"); + } + return runningTime; + } + + @Override + public void run() { + if (Options.tune == 2) { + verify(); + configurer.terminate(); + } else + System.err.println("Options.tune is not in the verify mode"); + } +} \ No newline at end of file diff --git a/src/edu/mit/streamjit/util/ConfigurationUtils.java b/src/edu/mit/streamjit/util/ConfigurationUtils.java new file mode 100644 index 00000000..89c0cd3d --- /dev/null +++ b/src/edu/mit/streamjit/util/ConfigurationUtils.java @@ -0,0 +1,160 @@ +package edu.mit.streamjit.util; + +import static com.google.common.base.Preconditions.checkNotNull; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; + +import edu.mit.streamjit.impl.common.Configuration; + +/** + * {@link ConfigurationUtils} contains common utility methods those deal with + * {@link Configuration}. + * + * @author Sumanan sumanan@mit.edu + * @since May 10, 2013 + * + */ +public class ConfigurationUtils { + + public static final String configDir = "configurations"; + + /** + * Reads configuration from ./appName/configurations/namePrefix_appName.cfg + * and returns it. + * + * @param appName + * name of the streamJit app. + * + * @param namePrefix + * prefix to add to the cfg file name. 
+ * + * @return {@link Configuration} object if valid file exists. Otherwise + * returns null. + */ + public static Configuration readConfiguration(String appName, + String namePrefix) { + checkNotNull(appName); + namePrefix = namePrefix == null ? "" : namePrefix; + String cfgFilePath = String.format("%s%s%s%s%s_%s.cfg", appName, + File.separator, configDir, File.separator, namePrefix, appName); + return readConfiguration(cfgFilePath); + } + + /** + * @param cfgFilePath + * path of the configuration file that need to be read. + * @return {@link Configuration} object if valid file exists. Otherwise + * returns null. + */ + public static Configuration readConfiguration(String cfgFilePath) { + try { + BufferedReader reader = new BufferedReader(new FileReader( + cfgFilePath)); + String json = reader.readLine(); + reader.close(); + return Configuration.fromJson(json); + } catch (IOException ex) { + System.err.println(String + .format("File reader error. No %s configuration file.", + cfgFilePath)); + } catch (Exception ex) { + System.err.println(String.format( + "File %s is not a configuration file.", cfgFilePath)); + } + return null; + } + + /** + * Saves the configuration into + * ./appName/configurations/namePrefix_appName.cfg. output _.cfg file will + * be named as namePrefix_appName.cfg. + * + * @param config + * {@link Configuration} that need to be saved. + * @param namePrefix + * prefix to add to the out put file name. + * @param appName + * name of the streamJit app. output _.cfg file will be named as + * namePrefix_appName.cfg. + */ + public static void saveConfg(Configuration config, String namePrefix, + String appName) { + String json = config.toJson(); + saveConfg(json, namePrefix, appName); + } + + /** + * Saves the configuration into + * ./appName/configurations/namePrefix_appName.cfg. output _.cfg file will + * be named as namePrefix_appName.cfg. + * + * @param configJson + * Json representation of the {@link Configuration} that need to + * be saved. 
+ * @param namePrefix + * prefix to add to the out put file name. + * @param appName + * name of the streamJit app. output _.cfg file will be named as + * namePrefix_appName.cfg. + */ + public static void saveConfg(String configJson, String namePrefix, + String appName) { + try { + + File dir = new File(String.format("%s%s%s", appName, + File.separator, configDir)); + if (!dir.exists()) + if (!dir.mkdirs()) { + System.err.println("Make directory failed"); + return; + } + + File file = new File(dir, String.format("%s_%s.cfg", namePrefix, + appName)); + FileWriter writer = new FileWriter(file, false); + writer.write(configJson); + writer.flush(); + writer.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * Adds @param prefix as an extra data to the @param config. Returned + * configuration object will contain an extra data named "configPrefix". + * + * @param config + * {@link Configuration} object in which the configuration prefix + * need to be added. + * @param prefix + * prefix that need to be added to the configuration. + * @return Same @param config with configPrefix as an extra data. + */ + public static Configuration addConfigPrefix(Configuration config, + String prefix) { + if (config == null) + return config; + Configuration.Builder builder = Configuration.builder(config); + builder.putExtraData("configPrefix", prefix); + return builder.build(); + } + + /** + * Gets configuration's prefix name from the configuration and returns. + * + * @param config + * @return prefix name of the configuration if exists. null + * otherwise. + */ + public static String getConfigPrefix(Configuration config) { + if (config == null) + return null; + String prefix = (String) config.getExtraData("configPrefix"); + return prefix == null ? 
"" : prefix; + } +} diff --git a/src/edu/mit/streamjit/util/TimeLogProcessor.java b/src/edu/mit/streamjit/util/TimeLogProcessor.java new file mode 100644 index 00000000..621c6218 --- /dev/null +++ b/src/edu/mit/streamjit/util/TimeLogProcessor.java @@ -0,0 +1,542 @@ +package edu.mit.streamjit.util; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; + +import edu.mit.streamjit.impl.distributed.common.Options; +import edu.mit.streamjit.impl.distributed.common.Utils; +import edu.mit.streamjit.tuner.Verifier; + +/** + * Processes the Distributed StreamJit's time log files and generate summary. + * + * @author Sumanan sumanan@mit.edu + * @since Dec 5, 2014 + */ +public class TimeLogProcessor { + + private static final String pRunTimeFile = "processedRunTime.txt"; + private static final String pCompTimeFile = "processedCompileTime.txt"; + private static final String pDrainTimeFile = "processedDrainTime.txt"; + private static final String pTuneRoundTimeFile = "processedTuningRoundTime.txt"; + private static final String pEvalTimeFile = "processedEvaluation.txt"; + private static final String ptotalFile = "totalStats.txt"; + + public static void main(String[] args) throws IOException { + summarize("FMRadioCore"); + summarizeHeap("FMRadioCore"); + } + + private static Map processCompileTime(String appName, + File outDir) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%scompileTime.txt", appName, File.separator))); + + File outFile = new File(outDir, pCompTimeFile); + FileWriter writer = new FileWriter(outFile, false); + Map ret = process(reader, writer, "Total", true, 3); + reader.close(); + writer.close(); 
+ return ret; + } + + private static String cfgString(String line) { + String l = line.replace('-', ' '); + return l.trim(); + } + + private static Map processRunTime(String appName, + File outDir) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%srunTime.txt", appName, File.separator))); + File outFile = new File(outDir, pRunTimeFile); + FileWriter writer = new FileWriter(outFile, false); + String line; + String cfgPrefix = "Init"; + int i = 0; + Map ret = new HashMap<>(5000); + int min = Integer.MAX_VALUE; + while ((line = reader.readLine()) != null) { + if (line.startsWith("----------------------------")) + cfgPrefix = cfgString(line); + else if (line.startsWith("Execution")) { + String[] arr = line.split(" "); + String time = arr[3].trim(); + time = time.substring(0, time.length() - 2); + int val = Integer.parseInt(time); + if (val < 1) + val = 2 * min; + min = Math.min(min, val); + ret.put(cfgPrefix, val); + String data = String.format("%-6d\t%-6s\t%-6d\t%-6d\n", ++i, + cfgPrefix, val, min); + writer.write(data); + } + } + writer.flush(); + reader.close(); + writer.close(); + return ret; + } + private static Map processDrainTime(String appName, + File outDir) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%sdrainTime.txt", appName, File.separator))); + File outFile = new File(outDir, pDrainTimeFile); + FileWriter writer = new FileWriter(outFile, false); + Map ret = process(reader, writer, "Drain time", true, + 3); + writer.flush(); + reader.close(); + writer.close(); + return ret; + } + + private static Map processTuningRoundTime(String appName, + File outDir) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%srunTime.txt", appName, File.separator))); + File outFile = new File(outDir, pTuneRoundTimeFile); + FileWriter writer = new FileWriter(outFile, false); + Map ret = process(reader, writer, "Tuning", 
false, 3); + reader.close(); + writer.close(); + return ret; + } + + private static Map process(BufferedReader reader, + FileWriter writer, String startString, boolean isms, int timepos) + throws IOException { + String line; + String cfgPrefix = "Init"; + int i = 0; + int timeUnitLength = 1; + if (isms) + timeUnitLength = 2; + Map ret = new HashMap<>(5000); + while ((line = reader.readLine()) != null) { + if (line.startsWith("----------------------------")) + cfgPrefix = cfgString(line); + else if (line.startsWith(startString)) { + String[] arr = line.split(" "); + String time = arr[timepos].trim(); + time = time.substring(0, time.length() - timeUnitLength); + int val = Integer.parseInt(time); + ret.put(cfgPrefix, val); + String data = String + .format("%d\t%s\t%d\n", ++i, cfgPrefix, val); + writer.write(data); + } + } + writer.flush(); + return ret; + } + + private static File writeHeapStat(String fileName, File outDir) + throws IOException { + List heapSize = processSNHeap(fileName, "heapSize"); + List heapMaxSize = processSNHeap(fileName, "heapMaxSize"); + List heapFreeSize = processSNHeap(fileName, "heapFreeSize"); + File f = new File(fileName); + String outFileName = String.format("%s_heapStatus.txt", f.getName()); + File outFile = new File(outDir, outFileName); + FileWriter writer = new FileWriter(outFile, false); + for (int i = 0; i < heapSize.size(); i++) { + String msg = String.format("%-6d\t%-6d\t%-6d\t%-6d\n", i + 1, + heapFreeSize.get(i), heapSize.get(i), heapMaxSize.get(i)); + writer.write(msg); + } + writer.close(); + return outFile; + } + + private static List processSNHeap(String fileName, String heapType) + throws IOException { + String slurmPrefix = "0: "; + BufferedReader reader = new BufferedReader(new FileReader(fileName)); + String line; + int i = 0; + List ret = new ArrayList(3000); + while ((line = reader.readLine()) != null) { + // Slurm adds prefix to every sysout line. 
+ if (line.startsWith(slurmPrefix)) + line = line.substring(slurmPrefix.length()); + if (line.startsWith(heapType)) { + String[] arr = line.split(" "); + String time = arr[2].trim(); + time = time.substring(0, time.length() - 2); + int val = Integer.parseInt(time); + ret.add(val); + } + } + reader.close(); + return ret; + } + + public static void summarize(String appName) throws IOException { + File summaryDir = new File(String.format("%s%ssummary", appName, + File.separator)); + Utils.createDir(summaryDir.getPath()); + Map compileTime = processCompileTime(appName, + summaryDir); + Map runTime = processRunTime(appName, summaryDir); + Map drainTime = processDrainTime(appName, summaryDir); + Map tuningRoundTime = processTuningRoundTime(appName, + summaryDir); + + File outfile = new File(summaryDir, ptotalFile); + FileWriter writer = new FileWriter(outfile, false); + FileWriter verify = new FileWriter(String.format("%s%sverify.txt", + appName, File.separator), true); + int min = Integer.MAX_VALUE; + writer.write("cfg\tTRTime\tcomp\trun\tdrain\tmin\n"); + for (int i = 1; i <= tuningRoundTime.size(); i++) { + String key = new Integer(i).toString(); + Integer time = runTime.get(key); + + if (time == null) { + System.err.println("No running time for round " + key); + continue; + } else if (time < min) { + verify.write(String.format("%s=%d\n", key, time)); + min = time; + } + + String msg = String.format("%-6d\t%-6d\t%-6d\t%-6d\t%-6d\t%-6d\n", + i, tuningRoundTime.get(key), compileTime.get(key), + runTime.get(key), drainTime.get(key), min); + writer.write(msg); + } + verify.close(); + writer.close(); + + File f = createTotalStatsPlotFile(summaryDir, appName); + plot(summaryDir, f); + f = createProcessedPlotFile(summaryDir, appName); + plot(summaryDir, f); + } + + /** + * Creates plot file for {@link #ptotalFile}. 
+ */ + private static File createTotalStatsPlotFile(File dir, String appName) + throws IOException { + String title = getTitle(appName); + boolean pdf = true; + File plotfile = new File(dir, "totalStats.plt"); + FileWriter writer = new FileWriter(plotfile, false); + if (pdf) { + writer.write("set terminal pdf enhanced color\n"); + writer.write(String.format("set output \"%s.pdf\"\n", title)); + } else { + writer.write("set terminal postscript eps enhanced color\n"); + writer.write(String.format("set output \"%s.eps\"\n", title)); + } + writer.write("set ylabel \"Time(ms)\"\n"); + writer.write("set xlabel \"Tuning Rounds\"\n"); + writer.write(String.format("set title \"%s\"\n", title)); + writer.write("set grid\n"); + writer.write("#set yrange [0:*]\n"); + writer.write(String + .format("plot \"%s\" using 1:6 with linespoints title \"Current best running time\"\n", + ptotalFile)); + writer.write(String.format( + "plot \"%s\" using 1:4 with linespoints title \"Run time\"\n", + ptotalFile)); + writer.write(String + .format("plot \"%s\" using 1:3 with linespoints title \"Compile time\"\n", + ptotalFile)); + writer.write(String + .format("plot \"%s\" using 1:5 with linespoints title \"Drain time\"\n", + ptotalFile)); + writer.write("set ylabel \"Time(s)\"\n"); + writer.write(String + .format("plot \"%s\" using 1:2 with linespoints title \"Tuning Round time\"\n", + ptotalFile)); + writer.close(); + return plotfile; + } + + /** + * Creates a plot file that uses data from all processed files ( + * {@link #pRunTimeFile}, {@link #pCompTimeFile}, {@link #pDrainTimeFile} + * and {@link #pTuneRoundTimeFile}). 
+ */ + private static File createProcessedPlotFile(File dir, String appName) + throws IOException { + String title = getTitle(appName); + boolean pdf = true; + File plotfile = new File(dir, "processed.plt"); + FileWriter writer = new FileWriter(plotfile, false); + if (pdf) { + writer.write("set terminal pdf enhanced color\n"); + writer.write(String.format("set output \"%sP.pdf\"\n", title)); + } else { + writer.write("set terminal postscript eps enhanced color\n"); + writer.write(String.format("set output \"%s.eps\"\n", title)); + } + writer.write("set ylabel \"Time(ms)\"\n"); + writer.write("set xlabel \"Tuning Rounds\"\n"); + writer.write(String.format("set title \"%s\"\n", title)); + writer.write("set grid\n"); + writer.write("#set yrange [0:*]\n"); + writer.write(String + .format("plot \"%s\" using 1:4 with linespoints title \"Current best running time\"\n", + pRunTimeFile)); + writer.write(String.format( + "plot \"%s\" using 1:3 with linespoints title \"Run time\"\n", + pRunTimeFile)); + writer.write(String + .format("plot \"%s\" using 1:3 with linespoints title \"Compile time\"\n", + pCompTimeFile)); + writer.write(String + .format("plot \"%s\" using 1:3 with linespoints title \"Drain time\"\n", + pDrainTimeFile)); + writer.write("set ylabel \"Time(s)\"\n"); + writer.write(String + .format("plot \"%s\" using 1:3 with linespoints title \"Tuning Round time\"\n", + pTuneRoundTimeFile)); + writer.close(); + return plotfile; + } + + private static File makeHeapPlotFile(File dir, String name, + String dataFile1, String dataFile2) throws IOException { + boolean pdf = true; + File plotfile = new File(dir, "heapplot.plt"); + FileWriter writer = new FileWriter(plotfile, false); + if (pdf) { + writer.write("set terminal pdf enhanced color\n"); + writer.write(String.format("set output \"%sHeap.pdf\"\n", name)); + } else { + writer.write("set terminal postscript eps enhanced color\n"); + writer.write(String.format("set output \"%sHeap.eps\"\n", name)); + } + 
writer.write("set ylabel \"Memory(MB)\"\n"); + writer.write("set xlabel \"Tuning Rounds\"\n"); + writer.write(String.format("set title \"%sHeap\"\n", name)); + writer.write("set grid\n"); + writer.write("#set yrange [0:*]\n"); + + writer.write(String + .format("plot \"%s\" using 1:2 with linespoints title \"Heap Free Size\"," + + "\"%s\" using 1:3 with linespoints title \"Heap Size\"," + + "\"%s\" using 1:4 with linespoints title \"Heap Max Size\" \n", + dataFile1, dataFile1, dataFile1)); + writer.write(String + .format("plot \"%s\" using 1:2 with linespoints title \"Heap Free Size\"," + + "\"%s\" using 1:3 with linespoints title \"Heap Size\"," + + "\"%s\" using 1:4 with linespoints title \"Heap Max Size\" \n", + dataFile2, dataFile2, dataFile2)); + + writer.close(); + return plotfile; + } + + private static void plot(File dir, File plotFile) throws IOException { + String[] s = { "/usr/bin/gnuplot", plotFile.getName() }; + try { + ProcessBuilder pb = new ProcessBuilder(s); + pb.directory(dir); + Process proc = pb.start(); + InputStream stdin = (InputStream) proc.getErrorStream(); + InputStreamReader isr = new InputStreamReader(stdin); + BufferedReader br = new BufferedReader(isr); + String line = null; + while ((line = br.readLine()) != null) + System.err.println("gnuplot:" + line); + int exitVal = proc.waitFor(); + if (exitVal != 0) + System.out.println("gnuplot Process exitValue: " + exitVal); + proc.getInputStream().close(); + proc.getOutputStream().close(); + proc.getErrorStream().close(); + } catch (Exception e) { + System.err.println("Fail: " + e); + } + } + + public static void summarizeHeap(String appName) throws IOException { + File summaryDir = new File(String.format("%s%ssummary", appName, + File.separator)); + Utils.createDir(summaryDir.getPath()); + File f1 = writeHeapStat( + String.format("%s%sslurm-662553.out", appName, File.separator), + summaryDir); + File f2 = writeHeapStat( + String.format("%s%sslurm-662554.out", appName, File.separator), + 
summaryDir); + File f = makeHeapPlotFile(summaryDir, appName, f1.getName(), + f2.getName()); + plot(summaryDir, f); + } + + public static void GraphPropertyProcessor(String appName) + throws IOException { + BufferedReader reader = new BufferedReader( + new FileReader(String.format("%s%sGraphProperty.txt", appName, + File.separator))); + int truecnt = 0; + int falsecnt = 0; + String line; + while ((line = reader.readLine()) != null) { + if (line.contains("True")) + truecnt++; + else if (line.contains("False")) + falsecnt++; + else + System.err.println("Not a tuning line..."); + } + reader.close(); + double total = truecnt + falsecnt; + System.out.println(String.format( + "Total=%f, TrueCount=%d(%f), FalseCount=%d(%f)", total, + truecnt, (truecnt / total), falsecnt, (falsecnt / total))); + } + + /** + * If you evaluate a same configuration for several times by setting the + * {@link Options#evaluationCount} value, then use this method to process + * the output evaluation.txt. + */ + public static void processEvaltxt(String appName) throws IOException { + File summaryDir = new File(String.format("%s%ssummary", appName, + File.separator)); + Utils.createDir(summaryDir.getPath()); + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%sevaluation.txt", appName, File.separator))); + File outFile = new File(summaryDir, pEvalTimeFile); + FileWriter writer = new FileWriter(outFile, false); + String line; + int i = 0; + while ((line = reader.readLine()) != null) { + if (line.startsWith("Evaluation")) { + String[] arr = line.split(" "); + String time = arr[3].trim(); + time = time.substring(0, time.length() - 2); + int val = Integer.parseInt(time); + String data = String.format("%-6d\t%-6d\n", ++i, val); + writer.write(data); + } + } + writer.flush(); + reader.close(); + writer.close(); + } + + /** + * If you run a set of configurations in a cyclic manner by setting + * {@link Options#verificationCount}, then use this method to process the + * 
runTime.txt. + */ + private static void processVerifycaionRunTime(String appName, File outDir, + String cfgName) throws IOException { + BufferedReader reader = new BufferedReader(new FileReader( + String.format("%s%srunTime.txt", appName, File.separator))); + File outFile = new File(outDir, String.format("verification%s.txt", + cfgName)); + FileWriter writer = new FileWriter(outFile, false); + String line; + String cfgPrefix = "Init"; + int i = 0; + int min = Integer.MAX_VALUE; + while ((line = reader.readLine()) != null) { + if (line.startsWith("----------------------------")) + cfgPrefix = cfgString(line); + if (cfgName.equals(cfgPrefix)) + if (line.startsWith("Execution")) { + String[] arr = line.split(" "); + String time = arr[3].trim(); + time = time.substring(0, time.length() - 2); + int val = Integer.parseInt(time); + if (val < 1) + val = 2 * min; + min = Math.min(min, val); + String data = String.format("%-6d\t%-6s\t%-6d\t%-6d\n", + ++i, cfgPrefix, val, min); + writer.write(data); + } + } + writer.flush(); + reader.close(); + writer.close(); + } + + public static void processVerifycaionRun(String appName) throws IOException { + processVerifycaionRun(appName, Verifier.cfgPrefixes(appName)); + } + + public static void processVerifycaionRun(String appName, + Map cfgPrefixes) throws IOException { + File summaryDir = new File(String.format("%s%ssummary", appName, + File.separator)); + Utils.createDir(summaryDir.getPath()); + for (String cfgPrefix : cfgPrefixes.keySet()) { + processVerifycaionRunTime(appName, summaryDir, cfgPrefix); + File f = createVerificationPlotFile(summaryDir, appName, cfgPrefix); + plot(summaryDir, f); + } + } + + private static String getTitle(String appName) { + String benchmarkName = getBenchmarkName(appName); + return benchmarkName == null ? 
appName : benchmarkName; + } + + private static String getBenchmarkName(String appName) { + Properties prop = new Properties(); + InputStream input = null; + try { + input = new FileInputStream(String.format("%s%sREADME.txt", + appName, File.separator)); + prop.load(input); + } catch (IOException ex) { + System.err.println("Failed to load README.txt"); + } + return prop.getProperty("benchmarkName"); + } + + /** + * Creates a plot file that uses data from all processed files ( + * {@link #pRunTimeFile}, {@link #pCompTimeFile}, {@link #pDrainTimeFile} + * and {@link #pTuneRoundTimeFile}). + */ + private static File createVerificationPlotFile(File dir, String appName, + String cfgPrefix) throws IOException { + String title = getTitle(appName); + title = String.format("%s-cfg%s", title, cfgPrefix); + boolean pdf = true; + String dataFile = String.format("verification%s.txt", cfgPrefix); + File plotfile = new File(dir, String.format("verification%s.plt", + cfgPrefix)); + FileWriter writer = new FileWriter(plotfile, false); + if (pdf) { + writer.write("set terminal pdf enhanced color\n"); + writer.write(String.format("set output \"%s.pdf\"\n", title)); + } else { + writer.write("set terminal postscript eps enhanced color\n"); + writer.write(String.format("set output \"%s.eps\"\n", title)); + } + writer.write("set ylabel \"Time(ms)\"\n"); + writer.write("set xlabel \"Tuning Rounds\"\n"); + writer.write(String.format("set title \"%s\"\n", title)); + writer.write("set grid\n"); + writer.write("#set yrange [0:*]\n"); + writer.write(String.format( + "plot \"%s\" using 1:3 with linespoints title \"Run time\"\n", + dataFile)); + writer.close(); + return plotfile; + } +}