diff --git a/._run_ci.sh b/._run_ci.sh deleted file mode 100644 index 0581d62c..00000000 --- a/._run_ci.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -xeu - -# A simple script to build and test under Linux CI. - -uname -a -pwd -P -cat /etc/issue || echo ok - -./build_docker_images.sh run diff --git a/.dockerignore b/.dockerignore index e0438bfe..10e7f095 100644 --- a/.dockerignore +++ b/.dockerignore @@ -241,9 +241,6 @@ ModelManifest.xml # FAKE - F# Make .fake/ -# Ignore InternalImmortals, because they typically build their own Docker containers: -InternalImmortals/ - #Test run logs /AmbrosiaTest/AmbrosiaTest/AmbrosiaLogs @@ -256,3 +253,10 @@ InternalImmortals/ .git Dockerfile build_docker_images.sh + +**/launchSettings.json +CodeGenDependencies +**/GeneratedSourceFiles/*/*/ + +# Ignore InternalImmortals, because they typically build their own Docker containers: +InternalImmortals/ diff --git a/.gitignore b/.gitignore index d21185fa..c0cb07e8 100644 --- a/.gitignore +++ b/.gitignore @@ -66,6 +66,8 @@ artifacts/ *.pidb *.svclog *.scc +*.dll +*.exe # Chutzpah Test files _Chutzpah* @@ -259,4 +261,10 @@ ModelManifest.xml /Ambrosia/NuGet.Config # Local launch settings -**/launchSettings.json \ No newline at end of file +**/launchSettings.json +CodeGenDependencies + +**/publish/* +/BuildAmbrosiaAfterNugetChange.ps1 +/AmbrosiaBak +/AmbrosiaTest/JSCodeGen/out diff --git a/.set_env.sh b/.set_env.sh new file mode 100644 index 00000000..6106ba35 --- /dev/null +++ b/.set_env.sh @@ -0,0 +1,24 @@ + +# A convenience --to be sourced (source .set_env.sh) into your shell +# when developing AMBROSIA: + +echo +echo "Setting PATH for AMBROSIA development..." + +TOADD=`pwd`/bin +mkdir -p "$TOADD" +if [ "$PATH" == "" ]; then PATH=$TOADD; +elif [[ ":$PATH:" != *":$TOADD:"* ]]; then PATH="$PATH:$TOADD"; +fi +export PATH + +if [[ ${AZURE_STORAGE_CONN_STRING:-defined} ]]; then + echo "NOTE: AZURE_STORAGE_CONN_STRING is set to:" + echo + echo " $AZURE_STORAGE_CONN_STRING" + echo + echo "Confirm that this is the one you want to develop with." +else + echo "Warning AZURE_STORAGE_CONN_STRING is not set." + echo "You'll need that for registering instances and running AMBROSIA." +fi diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..5f94c8de --- /dev/null +++ b/.travis.yml @@ -0,0 +1,37 @@ + +language: csharp +mono: none +dotnet: 2.1 +dist: xenial + +services: + - docker + +addons: + apt: + packages: + - libunwind-dev + - make + - gcc + +env: + global: + # Mount the logs from outside the container when/if running PerformanceTestInterruptible: + - PTI_MOUNT_LOGS=ExternalLogs + + matrix: + # Bring up a basic test within or between containers: + - DOCK=nodocker + - DOCK=docker PTI_MODE=OneContainer +# - DOCK=docker PTI_MODE=TwoContainers + +before_install: + - sudo apt-get install -y libunwind-dev make gcc + +script: +# Need to remove the dependence on Azure Tables / +# AZURE_STORAGE_CONN_STRING if we want to do full CI in a public +# context (or find some way to use an account without leaking its auth +# info). +# In the meantime, this will just make sure that everything builds. +- ./Scripts/run_linux_ci.sh $DOCK diff --git a/AKS-scripts/ScriptBits/runAmbrosiaService.sh b/AKS-scripts/ScriptBits/runAmbrosiaService.sh deleted file mode 100644 index 728f110a..00000000 --- a/AKS-scripts/ScriptBits/runAmbrosiaService.sh +++ /dev/null @@ -1,83 +0,0 @@ -#!/bin/bash -set -euo pipefail - -################################################################################ -# Script to launch a service instance (coordinator + app), often -# inside a container. -################################################################################ - -# Responds to ENV VARS: -# * AMBROSIA_INSTANCE_NAME (required) -# -# * AMBROSIA_IMMORTALCOORDINATOR_PORT (optional) -# - this port should be open on the container, and is used for -# coordinator-coordinator communication -# -# * AMBROSIA_SILENT_COORDINATOR (optional) -# - if set, this suppresses coordinator messages to stdout, -# but they still go to /var/log/ImmortalCoordinator.log - - -if [[ ! -v AMBROSIA_INSTANCE_NAME ]]; then - echo "ERROR: unbound environment variable: AMBROSIA_INSTANCE_NAME" - echo "runAmbrosiaService.sh expects it to be bound to the service instance name." - echo "This is the same name that was registered with 'ambrosia RegisterInstance' " - exit 1 -fi - -if [[ -v AMBROSIA_IMMORTALCOORDINATOR_PORT ]]; -then - echo "Using environment var AMBROSIA_IMMORTALCOORDINATOR_PORT=$AMBROSIA_IMMORTALCOORDINATOR_PORT" -else - AMBROSIA_IMMORTALCOORDINATOR_PORT=1500 - echo "Using default AMBROSIA_IMMORTALCOORDINATOR_PORT of $AMBROSIA_IMMORTALCOORDINATOR_PORT" -fi - -COORDLOG=/var/log/ImmortalCoordinator.log - -# Arguments: all passed through to the coordinator. -# Returns: when the Coordinator is READY (in the background). -# Returns: sets "coord_pid" to the return value. -# -# ASSUMES: ImmortalCoordinator in $PATH -# -# Side effect: uses a log file on disk in the same directory as this script. -# Side effect: runs a tail proycess in the background -function start_immortal_coordinator() { - echo "Launching coordingator with: ImmortalCoordinator" $* - echo " Redirecting output to: $COORDLOG" - # Bound the total amount of output used by the ImmortalCoordinator log: - ImmortalCoordinator $* 2>1 | rotatelogs -f -t "$COORDLOG" 10M & - coord_pid=$! - - while [ ! -e "$COORDLOG" ]; do - echo " -> Waiting for $COORDLOG to appear" - sleep 1 - done - if [[ ! -v AMBROSIA_SILENT_COORDINATOR ]]; then - tail -F $COORDLOG | while read l; do echo " [ImmortalCoord] $l"; done & - fi - while ! grep -q "Ready" "$COORDLOG" && kill -0 $coord_pid 2>- ; - do sleep 2; done - - if ! kill -0 $coord_pid 2>- ; - then echo - echo "ERROR: coordinator died while we were waiting. Final log ended with:" - tail $COORDLOG - exit 1; - fi - echo "Coordinator ready." -} - -# Step 1: -start_immortal_coordinator -i $AMBROSIA_INSTANCE_NAME -p $AMBROSIA_IMMORTALCOORDINATOR_PORT - -# Step 2: -echo "Launching app client process:" -set -x -$* -set +x - -echo "Ambrosia: client exited, killing coordinator..." -kill $coord_pid || echo ok - diff --git a/Ambrosia.nuspec b/Ambrosia.nuspec index d94c09d6..75b2ed8e 100644 --- a/Ambrosia.nuspec +++ b/Ambrosia.nuspec @@ -1,34 +1,47 @@ - + - AmbrosiaLibCS - 0.0.5 - AmbrosiaLibCS - Ambrosia + Microsoft.Ambrosia.LibCS + 1.0.21 + Microsoft.Ambrosia.LibCS + Microsoft Microsoft - https://msrfranklin.visualstudio.com/_projects + https://github.com/Microsoft/AMBROSIA + MIT false - The AmbrosiaLibCS Binary Distribution + The Microsoft.AmbrosiaLibCS Binary Distribution None yet - Copyright (C) 2018 Microsoft Corporation + © Microsoft Corporation. All rights reserved. en-US - "MS Internal Only" - + + + + + + + + + + + - - + - - - - - - - - + + + + + + + + + + + + - \ No newline at end of file + diff --git a/Ambrosia/Ambrosia.sln b/Ambrosia/Ambrosia.sln index f8e79b57..ae61c2e6 100644 --- a/Ambrosia/Ambrosia.sln +++ b/Ambrosia/Ambrosia.sln @@ -1,24 +1,21 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.27004.2006 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29920.165 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "adv-file-ops", "adv-file-ops\adv-file-ops.vcxproj", "{5852AC33-6B01-44F5-BAF3-2AAF796E8449}" -EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{0BEADEF6-C937-465D-814B-726C3E2A22BA}" - ProjectSection(SolutionItems) = preProject - nuget.config = nuget.config - EndProjectSection EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ImmortalCoordinator", "..\ImmortalCoordinator\ImmortalCoordinator.csproj", "{5C94C516-377C-4113-8C5F-DF4A016D1B3A}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Ambrosia", "Ambrosia\Ambrosia.csproj", "{F704AE0A-C37B-4D30-B9ED-0C76C62D66EC}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLib", "..\AmbrosiaLib\Ambrosia\AmbrosiaLib.csproj", "{00CD200C-75B7-4CE2-8A43-8F57DBE19FD6}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AzureBlobsLogPicker", "..\AzureBlobsLogPicker\AzureBlobsLogPicker.csproj", "{347F5EFE-683B-4E8C-A078-DE8D90BD3ADC}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GenericLogPicker", "..\GenericLogPicker\GenericLogPicker.csproj", "{B22994AB-76F3-4650-A9DD-6BEBAA7A4632}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SharedAmbrosiaTools", "..\SharedAmbrosiaTools\SharedAmbrosiaTools.csproj", "{2E0E096C-DD42-4E16-8B22-8F67B73E1BE8}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -26,10 +23,6 @@ Global Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.ActiveCfg = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.Build.0 = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.ActiveCfg = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.Build.0 = Release|x64 {5C94C516-377C-4113-8C5F-DF4A016D1B3A}.Debug|x64.ActiveCfg = Debug|x64 {5C94C516-377C-4113-8C5F-DF4A016D1B3A}.Debug|x64.Build.0 = Debug|x64 {5C94C516-377C-4113-8C5F-DF4A016D1B3A}.Release|x64.ActiveCfg = Release|x64 @@ -38,6 +31,22 @@ Global {F704AE0A-C37B-4D30-B9ED-0C76C62D66EC}.Debug|x64.Build.0 = Debug|x64 {F704AE0A-C37B-4D30-B9ED-0C76C62D66EC}.Release|x64.ActiveCfg = Release|x64 {F704AE0A-C37B-4D30-B9ED-0C76C62D66EC}.Release|x64.Build.0 = Release|x64 + {00CD200C-75B7-4CE2-8A43-8F57DBE19FD6}.Debug|x64.ActiveCfg = Debug|Any CPU + {00CD200C-75B7-4CE2-8A43-8F57DBE19FD6}.Debug|x64.Build.0 = Debug|Any CPU + {00CD200C-75B7-4CE2-8A43-8F57DBE19FD6}.Release|x64.ActiveCfg = Release|Any CPU + {00CD200C-75B7-4CE2-8A43-8F57DBE19FD6}.Release|x64.Build.0 = Release|Any CPU + {347F5EFE-683B-4E8C-A078-DE8D90BD3ADC}.Debug|x64.ActiveCfg = Debug|Any CPU + {347F5EFE-683B-4E8C-A078-DE8D90BD3ADC}.Debug|x64.Build.0 = Debug|Any CPU + {347F5EFE-683B-4E8C-A078-DE8D90BD3ADC}.Release|x64.ActiveCfg = Release|Any CPU + {347F5EFE-683B-4E8C-A078-DE8D90BD3ADC}.Release|x64.Build.0 = Release|Any CPU + {B22994AB-76F3-4650-A9DD-6BEBAA7A4632}.Debug|x64.ActiveCfg = Debug|Any CPU + {B22994AB-76F3-4650-A9DD-6BEBAA7A4632}.Debug|x64.Build.0 = Debug|Any CPU + {B22994AB-76F3-4650-A9DD-6BEBAA7A4632}.Release|x64.ActiveCfg = Release|Any CPU + {B22994AB-76F3-4650-A9DD-6BEBAA7A4632}.Release|x64.Build.0 = Release|Any CPU + {2E0E096C-DD42-4E16-8B22-8F67B73E1BE8}.Debug|x64.ActiveCfg = Debug|Any CPU + {2E0E096C-DD42-4E16-8B22-8F67B73E1BE8}.Debug|x64.Build.0 = Debug|Any CPU + {2E0E096C-DD42-4E16-8B22-8F67B73E1BE8}.Release|x64.ActiveCfg = Release|Any CPU + {2E0E096C-DD42-4E16-8B22-8F67B73E1BE8}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/Ambrosia/Ambrosia.snk b/Ambrosia/Ambrosia.snk new file mode 100644 index 00000000..8438597d Binary files /dev/null and b/Ambrosia/Ambrosia.snk differ diff --git a/Ambrosia/Ambrosia/Ambrosia.csproj b/Ambrosia/Ambrosia/Ambrosia.csproj index b2bcb720..0b1e8ade 100644 --- a/Ambrosia/Ambrosia/Ambrosia.csproj +++ b/Ambrosia/Ambrosia/Ambrosia.csproj @@ -1,13 +1,45 @@  - Exe - netcoreapp2.0;net46 true - x64 - win7-x64 + net461;netcoreapp3.1 + win7-x64 + Exe true Ambrosia + true + ../Ambrosia.snk + x64;ARM64 + + + + netcoreapp3.1;net461 + true + bin\ARM64\Debug\ + full + ARM64 + + + netcoreapp3.1;net461 + bin\ARM64\Release\ + true + pdbonly + ARM64 + + + netcoreapp3.1;net461 + true + bin\x64\Debug\ + full + x64 + + + netcoreapp3.1;net461 + bin\x64\Release\ + true + pdbonly + x64 + $(DefineConstants);NETFRAMEWORK @@ -15,42 +47,40 @@ $(DefineConstants);NETCORE + + 15.8.168 - + - 11.0.2 + 12.0.2 - 5.8.1 - - - 4.3.0 + 5.8.2 - - 9.3.2 - - - 2018.11.5.1 + + + + + + + 2020.9.24.1 - - - PreserveNewest - - - - + + + 4.5.0 + 4.5.0 - - ..\..\..\..\Users\talzacc\.nuget\packages\mono.options.core\1.0.0\lib\netstandard1.3\Mono.Options.Core.dll - + + + \ No newline at end of file diff --git a/Ambrosia/Ambrosia/App.config b/Ambrosia/Ambrosia/App.config index e14ceab1..068dbfe2 100644 --- a/Ambrosia/Ambrosia/App.config +++ b/Ambrosia/Ambrosia/App.config @@ -32,20 +32,6 @@ - - - - - - - - - - - - - - diff --git a/Ambrosia/Ambrosia/Native32.cs b/Ambrosia/Ambrosia/Native32.cs deleted file mode 100644 index a877b71a..00000000 --- a/Ambrosia/Ambrosia/Native32.cs +++ /dev/null @@ -1,338 +0,0 @@ - -namespace mtcollections.persistent -{ - using System; - using System.Runtime.InteropServices; - using System.Security; - using Microsoft.Win32.SafeHandles; - using System.Threading; - - /// - /// Interop with WINAPI for file I/O, threading, and NUMA functions. - /// - public static unsafe class Native32 - { - #region io constants and flags - - public const uint INFINITE = unchecked((uint)-1); - - public const int ERROR_IO_PENDING = 997; - public const uint ERROR_IO_INCOMPLETE = 996; - public const uint ERROR_NOACCESS = 998; - public const uint ERROR_HANDLE_EOF = 38; - - public const int ERROR_FILE_NOT_FOUND = 0x2; - public const int ERROR_PATH_NOT_FOUND = 0x3; - public const int ERROR_INVALID_DRIVE = 0x15; - - - public const uint FILE_BEGIN = 0; - public const uint FILE_CURRENT = 1; - public const uint FILE_END = 2; - - public const uint FORMAT_MESSAGE_ALLOCATE_BUFFER = 0x00000100; - public const uint FORMAT_MESSAGE_IGNORE_INSERTS = 0x00000200; - public const uint FORMAT_MESSAGE_FROM_SYSTEM = 0x00001000; - - public const uint INVALID_HANDLE_VALUE = unchecked((uint)-1); - - public const uint GENERIC_READ = 0x80000000; - public const uint GENERIC_WRITE = 0x40000000; - public const uint GENERIC_EXECUTE = 0x20000000; - public const uint GENERIC_ALL = 0x10000000; - - public const uint READ_CONTROL = 0x00020000; - public const uint FILE_READ_ATTRIBUTES = 0x0080; - public const uint FILE_READ_DATA = 0x0001; - public const uint FILE_READ_EA = 0x0008; - public const uint STANDARD_RIGHTS_READ = READ_CONTROL; - public const uint FILE_APPEND_DATA = 0x0004; - public const uint FILE_WRITE_ATTRIBUTES = 0x0100; - public const uint FILE_WRITE_DATA = 0x0002; - public const uint FILE_WRITE_EA = 0x0010; - public const uint STANDARD_RIGHTS_WRITE = READ_CONTROL; - - public const uint FILE_GENERIC_READ = - FILE_READ_ATTRIBUTES - | FILE_READ_DATA - | FILE_READ_EA - | STANDARD_RIGHTS_READ; - public const uint FILE_GENERIC_WRITE = - FILE_WRITE_ATTRIBUTES - | FILE_WRITE_DATA - | FILE_WRITE_EA - | STANDARD_RIGHTS_WRITE - | FILE_APPEND_DATA; - - public const uint FILE_SHARE_DELETE = 0x00000004; - public const uint FILE_SHARE_READ = 0x00000001; - public const uint FILE_SHARE_WRITE = 0x00000002; - - public const uint CREATE_ALWAYS = 2; - public const uint CREATE_NEW = 1; - public const uint OPEN_ALWAYS = 4; - public const uint OPEN_EXISTING = 3; - public const uint TRUNCATE_EXISTING = 5; - - public const uint FILE_FLAG_DELETE_ON_CLOSE = 0x04000000; - public const uint FILE_FLAG_NO_BUFFERING = 0x20000000; - public const uint FILE_FLAG_OPEN_NO_RECALL = 0x00100000; - public const uint FILE_FLAG_OVERLAPPED = 0x40000000; - public const uint FILE_FLAG_RANDOM_ACCESS = 0x10000000; - public const uint FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000; - public const uint FILE_FLAG_WRITE_THROUGH = 0x80000000; - public const uint FILE_ATTRIBUTE_ENCRYPTED = 0x4000; - - /// - /// Represents additional options for creating unbuffered overlapped file stream. - /// - [Flags] - public enum UnbufferedFileOptions : uint - { - None = 0, - WriteThrough = 0x80000000, - DeleteOnClose = 0x04000000, - OpenReparsePoint = 0x00200000, - Overlapped = 0x40000000, - } - - #endregion - - #region io functions - - [DllImport("Kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)] - public static extern SafeFileHandle CreateFileW( - [In] string lpFileName, - [In] UInt32 dwDesiredAccess, - [In] UInt32 dwShareMode, - [In] IntPtr lpSecurityAttributes, - [In] UInt32 dwCreationDisposition, - [In] UInt32 dwFlagsAndAttributes, - [In] IntPtr hTemplateFile); - - [DllImport("kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)] - public static extern void CloseHandle( - [In] SafeHandle handle); - - [DllImport("Kernel32.dll", SetLastError = true)] - public static extern bool ReadFile( - [In] SafeFileHandle hFile, - [Out] IntPtr lpBuffer, - [In] UInt32 nNumberOfBytesToRead, - [Out] out UInt32 lpNumberOfBytesRead, - [In] NativeOverlapped* lpOverlapped); - - [DllImport("Kernel32.dll", SetLastError = true)] - public static extern bool WriteFile( - [In] SafeFileHandle hFile, - [In] IntPtr lpBuffer, - [In] UInt32 nNumberOfBytesToWrite, - [Out] out UInt32 lpNumberOfBytesWritten, - [In] NativeOverlapped* lpOverlapped); - - [DllImport("Kernel32.dll", SetLastError = true)] - public static extern bool GetOverlappedResult( - [In] SafeFileHandle hFile, - [In] NativeOverlapped* lpOverlapped, - [Out] out UInt32 lpNumberOfBytesTransferred, - [In] bool bWait); - - [DllImport("adv-file-ops.dll", SetLastError = true)] - public static extern bool CreateAndSetFileSize(ref string filename, Int64 file_size); - - [DllImport("adv-file-ops.dll", SetLastError = true)] - public static extern bool EnableProcessPrivileges(); - - [DllImport("adv-file-ops.dll", SetLastError = true)] - public static extern bool EnableVolumePrivileges(ref string filename, SafeFileHandle hFile); - - [DllImport("adv-file-ops.dll", SetLastError = true)] - public static extern bool SetFileSize(SafeFileHandle hFile, Int64 file_size); - - public enum EMoveMethod : uint - { - Begin = 0, - Current = 1, - End = 2 - } - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern uint SetFilePointer( - [In] SafeFileHandle hFile, - [In] int lDistanceToMove, - [In, Out] ref int lpDistanceToMoveHigh, - [In] EMoveMethod dwMoveMethod); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern uint SetFilePointerEx( - [In] SafeFileHandle hFile, - [In] long lDistanceToMove, - [In, Out] IntPtr lpDistanceToMoveHigh, - [In] EMoveMethod dwMoveMethod); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern bool SetEndOfFile( - [In] SafeFileHandle hFile); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern IntPtr CreateIoCompletionPort( - [In] SafeFileHandle fileHandle, - [In] IntPtr existingCompletionPort, - [In] UInt32 completionKey, - [In] UInt32 numberOfConcurrentThreads); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern UInt32 GetLastError(); - - [DllImport("kernel32.dll", SetLastError = true)] - public static unsafe extern bool GetQueuedCompletionStatus( - [In] IntPtr completionPort, - [Out] out UInt32 ptrBytesTransferred, - [Out] out UInt32 ptrCompletionKey, - [Out] NativeOverlapped** lpOverlapped, - [In] UInt32 dwMilliseconds); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern bool PostQueuedCompletionStatus( - [In] IntPtr completionPort, - [In] UInt32 bytesTrasferred, - [In] UInt32 completionKey, - [In] IntPtr lpOverlapped); - - [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)] - public static extern bool GetDiskFreeSpace(string lpRootPathName, - out uint lpSectorsPerCluster, - out uint lpBytesPerSector, - out uint lpNumberOfFreeClusters, - out uint lpTotalNumberOfClusters); - #endregion - - #region thread and numa functions - [DllImport("kernel32.dll")] - public static extern IntPtr GetCurrentThread(); - [DllImport("kernel32")] - public static extern uint GetCurrentThreadId(); - [DllImport("kernel32.dll", SetLastError = true)] - public static extern uint GetCurrentProcessorNumber(); - [DllImport("kernel32.dll", SetLastError = true)] - public static extern uint GetActiveProcessorCount(uint count); - [DllImport("kernel32.dll", SetLastError = true)] - public static extern ushort GetActiveProcessorGroupCount(); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern int SetThreadGroupAffinity(IntPtr hThread, ref GROUP_AFFINITY GroupAffinity, ref GROUP_AFFINITY PreviousGroupAffinity); - - [DllImport("kernel32.dll", SetLastError = true)] - public static extern int GetThreadGroupAffinity(IntPtr hThread, ref GROUP_AFFINITY PreviousGroupAffinity); - - public static uint ALL_PROCESSOR_GROUPS = 0xffff; - - [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] - public struct GROUP_AFFINITY - { - public ulong Mask; - public uint Group; - public uint Reserved1; - public uint Reserved2; - public uint Reserved3; - } - - /// - /// Accepts thread id = 0, 1, 2, ... and sprays them round-robin - /// across all cores (viewed as a flat space). On NUMA machines, - /// this gives us [socket, core] ordering of affinitization. That is, - /// if there are N cores per socket, then thread indices of 0 to N-1 map - /// to the range [socket 0, core 0] to [socket 0, core N-1]. - /// - /// Index of thread (from 0 onwards) - public static void AffinitizeThreadRoundRobin(uint threadIdx) - { - uint nrOfProcessors = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); - ushort nrOfProcessorGroups = GetActiveProcessorGroupCount(); - uint nrOfProcsPerGroup = nrOfProcessors / nrOfProcessorGroups; - - GROUP_AFFINITY groupAffinityThread = default(GROUP_AFFINITY); - GROUP_AFFINITY oldAffinityThread = default(GROUP_AFFINITY); - - IntPtr thread = GetCurrentThread(); - GetThreadGroupAffinity(thread, ref groupAffinityThread); - - threadIdx = threadIdx % nrOfProcessors; - - groupAffinityThread.Mask = (ulong)1L << ((int)(threadIdx % (int)nrOfProcsPerGroup)); - groupAffinityThread.Group = (uint)(threadIdx / nrOfProcsPerGroup); - - if (SetThreadGroupAffinity(thread, ref groupAffinityThread, ref oldAffinityThread) == 0) - { - Console.WriteLine("Unable to set group affinity"); - } - } - #endregion - } - - /// - /// Methods to perform high-resolution low-overhead timing - /// - public static class HiResTimer - { - private const string lib = "kernel32.dll"; - [DllImport(lib)] - [SuppressUnmanagedCodeSecurity] - public static extern int QueryPerformanceCounter(ref Int64 count); - - [DllImport(lib)] - [SuppressUnmanagedCodeSecurity] - public static extern int QueryPerformanceFrequency(ref Int64 frequency); - - [DllImport(lib)] - [SuppressUnmanagedCodeSecurity] - private static extern void GetSystemTimePreciseAsFileTime(out long filetime); - - [DllImport(lib)] - [SuppressUnmanagedCodeSecurity] - private static extern void GetSystemTimeAsFileTime(out long filetime); - - [DllImport("readtsc.dll")] - [SuppressUnmanagedCodeSecurity] - public static extern ulong rdtsc(); - - public static long Freq; - - public static long EstimateCPUFrequency() - { - long oldCps = 0, cps = 0, startT, endT; - ulong startC, endC; - long accuracy = 500; // wait for consecutive measurements to get within 300 clock cycles - - int i = 0; - while (i < 5) - { - GetSystemTimeAsFileTime(out startT); - startC = rdtsc(); - - while (true) - { - GetSystemTimeAsFileTime(out endT); - endC = rdtsc(); - - if (endT - startT >= 10000000) - { - cps = (long)(10000000 * (endC - startC) / (double)(endT - startT)); - break; - } - } - - - if ((oldCps > (cps - accuracy)) && (oldCps < (cps + accuracy))) - { - Freq = cps; - return cps; - } - oldCps = cps; - i++; - } - Freq = cps; - return cps; - } - } -} diff --git a/Ambrosia/Ambrosia/Program.cs b/Ambrosia/Ambrosia/Program.cs index d0a67a71..5bc295ac 100644 --- a/Ambrosia/Ambrosia/Program.cs +++ b/Ambrosia/Ambrosia/Program.cs @@ -23,3482 +23,9 @@ using System.Diagnostics; using System.Reflection; using System.Xml.Serialization; -using Mono.Options; namespace Ambrosia { - internal struct LongPair - { - public LongPair(long first, - long second) - { - First = first; - Second = second; - } - internal long First { get; set; } - internal long Second { get; set; } - } - - internal static class DictionaryTools - { - internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, LogWriter writeToStream) - { - writeToStream.WriteIntFixed(dict.Count); - foreach (var entry in dict) - { - var encodedKey = Encoding.UTF8.GetBytes(entry.Key); - writeToStream.WriteInt(encodedKey.Length); - writeToStream.Write(encodedKey, 0, encodedKey.Length); - writeToStream.WriteLongFixed(entry.Value); - } - } - - internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, LogReader readFromStream) - { - var _retVal = new ConcurrentDictionary(); - var dictCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < dictCount; i++) - { - var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); - long seqNo = readFromStream.ReadLongFixed(); - _retVal.TryAdd(myString, seqNo); - } - return _retVal; - } - - internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, LogWriter writeToStream) - { - writeToStream.WriteIntFixed(dict.Count); - foreach (var entry in dict) - { - var encodedKey = Encoding.UTF8.GetBytes(entry.Key); - writeToStream.WriteInt(encodedKey.Length); - writeToStream.Write(encodedKey, 0, encodedKey.Length); - writeToStream.WriteLongFixed(entry.Value.First); - writeToStream.WriteLongFixed(entry.Value.Second); - } - } - - internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, LogReader readFromStream) - { - var _retVal = new ConcurrentDictionary(); - var dictCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < dictCount; i++) - { - var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); - var newLongPair = new LongPair(); - newLongPair.First = readFromStream.ReadLongFixed(); - newLongPair.Second = readFromStream.ReadLongFixed(); - _retVal.TryAdd(myString, newLongPair); - } - return _retVal; - } - - internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, Stream writeToStream) - { - writeToStream.WriteIntFixed(dict.Count); - foreach (var entry in dict) - { - writeToStream.Write(entry.Key.ToByteArray(), 0, 16); - var IPBytes = entry.Value.GetAddressBytes(); - writeToStream.WriteByte((byte)IPBytes.Length); - writeToStream.Write(IPBytes, 0, IPBytes.Length); - } - } - - internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, LogReader readFromStream) - { - var _retVal = new ConcurrentDictionary(); - var dictCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < dictCount; i++) - { - var myBytes = new byte[16]; - readFromStream.Read(myBytes, 0, 16); - var newGuid = new Guid(myBytes); - byte addressSize = (byte)readFromStream.ReadByte(); - if (addressSize > 16) - { - myBytes = new byte[addressSize]; - } - readFromStream.Read(myBytes, 0, addressSize); - var newAddress = new IPAddress(myBytes); - _retVal.TryAdd(newGuid, newAddress); - } - return _retVal; - } - - internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, LogWriter writeToStream) - { - writeToStream.WriteIntFixed(dict.Count); - foreach (var entry in dict) - { - var keyEncoding = Encoding.UTF8.GetBytes(entry.Key); - Console.WriteLine("input {0} seq no: {1}", entry.Key, entry.Value.LastProcessedID); - Console.WriteLine("input {0} replayable seq no: {1}", entry.Key, entry.Value.LastProcessedReplayableID); - writeToStream.WriteInt(keyEncoding.Length); - writeToStream.Write(keyEncoding, 0, keyEncoding.Length); - writeToStream.WriteLongFixed(entry.Value.LastProcessedID); - writeToStream.WriteLongFixed(entry.Value.LastProcessedReplayableID); - } - } - - internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, LogReader readFromStream) - { - var _retVal = new ConcurrentDictionary(); - var dictCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < dictCount; i++) - { - var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); - long seqNo = readFromStream.ReadLongFixed(); - var newRecord = new InputConnectionRecord(); - newRecord.LastProcessedID = seqNo; - seqNo = readFromStream.ReadLongFixed(); - newRecord.LastProcessedReplayableID = seqNo; - _retVal.TryAdd(myString, newRecord); - } - return _retVal; - } - - internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, LogWriter writeToStream) - { - writeToStream.WriteIntFixed(dict.Count); - foreach (var entry in dict) - { - var keyEncoding = Encoding.UTF8.GetBytes(entry.Key); - writeToStream.WriteInt(keyEncoding.Length); - writeToStream.Write(keyEncoding, 0, keyEncoding.Length); - writeToStream.WriteLongFixed(entry.Value.LastSeqNoFromLocalService); - var trimTo = entry.Value.TrimTo; - var replayableTrimTo = entry.Value.ReplayableTrimTo; - writeToStream.WriteLongFixed(trimTo); - writeToStream.WriteLongFixed(replayableTrimTo); - entry.Value.BufferedOutput.Serialize(writeToStream); - } - } - - internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, LogReader readFromStream, AmbrosiaRuntime thisAmbrosia) - { - var _retVal = new ConcurrentDictionary(); - var dictCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < dictCount; i++) - { - var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); - var newRecord = new OutputConnectionRecord(thisAmbrosia); - newRecord.LastSeqNoFromLocalService = readFromStream.ReadLongFixed(); - newRecord.TrimTo = readFromStream.ReadLongFixed(); - newRecord.ReplayableTrimTo = readFromStream.ReadLongFixed(); - newRecord.BufferedOutput = EventBuffer.Deserialize(readFromStream, thisAmbrosia, newRecord); - _retVal.TryAdd(myString, newRecord); - } - return _retVal; - } - } - - // Note about this class: contention becomes significant when MaxBufferPages > ~50. This could be reduced by having page level locking. - // It seems experimentally that having many pages is good for small message sizes, where most of the page ends up empty. More investigation - // is needed to autotune defaultPageSize and MaxBufferPages - internal class EventBuffer - { - const int defaultPageSize = 1024 * 1024; - int NormalMaxBufferPages = 30; - static ConcurrentQueue _pool = null; - int _curBufPages; - AmbrosiaRuntime _owningRuntime; - OutputConnectionRecord _owningOutputRecord; - - internal class BufferPage - { - public byte[] PageBytes { get; set; } - public int curLength { get; set; } - public long HighestSeqNo { get; set; } - public long UnsentReplayableMessages { get; set; } - public long LowestSeqNo { get; set; } - public long TotalReplayableMessages { get; internal set; } - - public BufferPage(byte[] pageBytes) - { - PageBytes = pageBytes; - curLength = 0; - HighestSeqNo = 0; - LowestSeqNo = 0; - UnsentReplayableMessages = 0; - TotalReplayableMessages = 0; - } - - public void CheckPageIntegrity() - { - var numberOfRPCs = HighestSeqNo - LowestSeqNo + 1; - var lengthOfCurrentRPC = 0; - int endIndexOfCurrentRPC = 0; - int cursor = 0; - - for (int i = 0; i < numberOfRPCs; i++) - { - lengthOfCurrentRPC = PageBytes.ReadBufferedInt(cursor); - cursor += StreamCommunicator.IntSize(lengthOfCurrentRPC); - endIndexOfCurrentRPC = cursor + lengthOfCurrentRPC; - if (endIndexOfCurrentRPC > curLength) - { - Console.WriteLine("RPC Exceeded length of Page!!"); - throw new Exception("RPC Exceeded length of Page!!"); - } - - var shouldBeRPCByte = PageBytes[cursor]; - if (shouldBeRPCByte != AmbrosiaRuntime.RPCByte) - { - Console.WriteLine("UNKNOWN BYTE: {0}!!", shouldBeRPCByte); - throw new Exception("Illegal leading byte in message"); - } - cursor++; - - var isReturnValue = (PageBytes[cursor++] == (byte)1); - - if (isReturnValue) // receiving a return value - { - var sequenceNumber = PageBytes.ReadBufferedLong(cursor); - cursor += StreamCommunicator.LongSize(sequenceNumber); - } - else // receiving an RPC - { - var methodId = PageBytes.ReadBufferedInt(cursor); - cursor += StreamCommunicator.IntSize(methodId); - var fireAndForget = (PageBytes[cursor++] == (byte)1); - - string senderOfRPC = null; - long sequenceNumber = 0; - - if (!fireAndForget) - { - // read return address and sequence number - var senderOfRPCLength = PageBytes.ReadBufferedInt(cursor); - var sizeOfSender = StreamCommunicator.IntSize(senderOfRPCLength); - cursor += sizeOfSender; - senderOfRPC = Encoding.UTF8.GetString(PageBytes, cursor, senderOfRPCLength); - cursor += senderOfRPCLength; - sequenceNumber = PageBytes.ReadBufferedLong(cursor); - cursor += StreamCommunicator.LongSize(sequenceNumber); - //Console.WriteLine("Received RPC call to method with id: {0} and sequence number {1}", methodId, sequenceNumber); - } - else - { - - //Console.WriteLine("Received fire-and-forget RPC call to method with id: {0}", methodId); - } - - var lengthOfSerializedArguments = endIndexOfCurrentRPC - cursor; - cursor += lengthOfSerializedArguments; - } - } - } - } - - long _trimLock; - long _appendLock; - - ElasticCircularBuffer _bufferQ; - - internal EventBuffer(AmbrosiaRuntime owningRuntime, - OutputConnectionRecord owningOutputRecord) - { - _bufferQ = new ElasticCircularBuffer(); - _appendLock = 0; - _owningRuntime = owningRuntime; - _curBufPages = 0; - _owningOutputRecord = owningOutputRecord; - _trimLock = 0; - } - - internal void Serialize(LogWriter writeToStream) - { - writeToStream.WriteIntFixed(_bufferQ.Count); - foreach (var currentBuf in _bufferQ) - { - writeToStream.WriteIntFixed(currentBuf.PageBytes.Length); - writeToStream.WriteIntFixed(currentBuf.curLength); - writeToStream.Write(currentBuf.PageBytes, 0, currentBuf.curLength); - writeToStream.WriteLongFixed(currentBuf.HighestSeqNo); - writeToStream.WriteLongFixed(currentBuf.LowestSeqNo); - writeToStream.WriteLongFixed(currentBuf.UnsentReplayableMessages); - writeToStream.WriteLongFixed(currentBuf.TotalReplayableMessages); - } - } - - internal static EventBuffer Deserialize(LogReader readFromStream, - AmbrosiaRuntime owningRuntime, - OutputConnectionRecord owningOutputRecord) - { - var _retVal = new EventBuffer(owningRuntime, owningOutputRecord); - var bufferCount = readFromStream.ReadIntFixed(); - for (int i = 0; i < bufferCount; i++) - { - var pageSize = readFromStream.ReadIntFixed(); - var pageFilled = readFromStream.ReadIntFixed(); - var myBytes = new byte[pageSize]; - readFromStream.Read(myBytes, 0, pageFilled); - var newBufferPage = new BufferPage(myBytes); - newBufferPage.curLength = pageFilled; - newBufferPage.HighestSeqNo = readFromStream.ReadLongFixed(); - newBufferPage.LowestSeqNo = readFromStream.ReadLongFixed(); - newBufferPage.UnsentReplayableMessages = readFromStream.ReadLongFixed(); - newBufferPage.TotalReplayableMessages = readFromStream.ReadLongFixed(); - _retVal._bufferQ.Enqueue(ref newBufferPage); - } - return _retVal; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void AcquireAppendLock(long lockVal = 1) - { - while (true) - { - var origVal = Interlocked.CompareExchange(ref _appendLock, lockVal, 0); - if (origVal == 0) - { - // We have the lock - break; - } - } - } - - internal long ReadAppendLock() - { - return Interlocked.Read(ref _appendLock); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void ReleaseAppendLock() - { - Interlocked.Exchange(ref _appendLock, 0); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void AcquireTrimLock(long lockVal) - { - while (true) - { - var origVal = Interlocked.CompareExchange(ref _trimLock, lockVal, 0); - if (origVal == 0) - { - // We have the lock - break; - } - } - } - - internal long ReadTrimLock() - { - return Interlocked.Read(ref _trimLock); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - internal void ReleaseTrimLock() - { - Interlocked.Exchange(ref _trimLock, 0); - } - - internal class BuffersCursor - { - public IEnumerator PageEnumerator { get; set; } - public int PagePos { get; set; } - public int RelSeqPos { get; set; } - public BuffersCursor(IEnumerator inPageEnumerator, - int inPagePos, - int inRelSeqPos) - { - RelSeqPos = inRelSeqPos; - PageEnumerator = inPageEnumerator; - PagePos = inPagePos; - } - } - - internal async Task SendAsync(Stream outputStream, - BuffersCursor placeToStart, - bool reconnecting) - { - // If the cursor is invalid because of trimming or reconnecting, create it again - if (placeToStart.PagePos == -1) - { - return await ReplayFromAsync(outputStream, _owningOutputRecord.LastSeqSentToReceiver + 1, reconnecting); - - } - var nextSeqNo = _owningOutputRecord.LastSeqSentToReceiver + 1; - var bufferEnumerator = placeToStart.PageEnumerator; - var posToStart = placeToStart.PagePos; - var relSeqPos = placeToStart.RelSeqPos; - - // We are guaranteed to have an enumerator and starting point. Must send output. - AcquireAppendLock(2); - bool needToUnlockAtEnd = true; - do - { - var curBuffer = bufferEnumerator.Current; - var pageLength = curBuffer.curLength; - var morePages = (curBuffer != _bufferQ.Last()); - int numReplayableMessagesToSend; - if (posToStart == 0) - { - // We are starting to send contents of the page. Send everything - numReplayableMessagesToSend = (int) curBuffer.TotalReplayableMessages; - } - else - { - // We are in the middle of sending this page. Respect the previously set counter - numReplayableMessagesToSend = (int)curBuffer.UnsentReplayableMessages; - } - int numRPCs = (int)(curBuffer.HighestSeqNo - curBuffer.LowestSeqNo + 1 - relSeqPos); - curBuffer.UnsentReplayableMessages = 0; - ReleaseAppendLock(); - Debug.Assert((nextSeqNo == curBuffer.LowestSeqNo + relSeqPos) && (nextSeqNo >= curBuffer.LowestSeqNo) && ((nextSeqNo + numRPCs - 1) <= curBuffer.HighestSeqNo)); - ReleaseTrimLock(); - // send the buffer - if (pageLength - posToStart > 0) - { - // We really have output to send. Send it. - //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment/Comment for testing - //Console.WriteLine("Wrote from {0} to {1}, {2}", curBuffer.LowestSeqNo, curBuffer.HighestSeqNo, morePages); - int bytesInBatchData = pageLength - posToStart; - if (numRPCs > 1) - { - if (numReplayableMessagesToSend == numRPCs) - { - // writing a batch - outputStream.WriteInt(bytesInBatchData + 1 + StreamCommunicator.IntSize(numRPCs)); - outputStream.WriteByte(AmbrosiaRuntime.RPCBatchByte); - outputStream.WriteInt(numRPCs); - await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); - await outputStream.FlushAsync(); - } - else - { - // writing a mixed batch - outputStream.WriteInt(bytesInBatchData + 1 + 2 * StreamCommunicator.IntSize(numRPCs)); - outputStream.WriteByte(AmbrosiaRuntime.CountReplayableRPCBatchByte); - outputStream.WriteInt(numRPCs); - outputStream.WriteInt(numReplayableMessagesToSend); - await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); - await outputStream.FlushAsync(); - } - } - else - { - // writing individual RPCs - await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); - await outputStream.FlushAsync(); - } - } - AcquireTrimLock(2); - _owningOutputRecord.LastSeqSentToReceiver += numRPCs; - - // Must handle cases where trim came in during the actual send and reset or pushed the iterator - if ((_owningOutputRecord.placeInOutput != null) && - ((_owningOutputRecord.placeInOutput.PageEnumerator != bufferEnumerator) || - _owningOutputRecord.placeInOutput.PagePos == -1)) - { - // Trim replaced the enumerator. Must reset - if (morePages) - { - // Not done outputting. Try again - if (_owningOutputRecord._sendsEnqueued == 0) - { - Interlocked.Increment(ref _owningOutputRecord._sendsEnqueued); - _owningOutputRecord.DataWorkQ.Enqueue(-1); - } - } - - // Done outputting. Just return the enumerator replacement - return _owningOutputRecord.placeInOutput; - } - - // bufferEnumerator is still good. Continue - Debug.Assert((nextSeqNo == curBuffer.LowestSeqNo + relSeqPos) && (nextSeqNo >= curBuffer.LowestSeqNo) && ((nextSeqNo + numRPCs - 1) <= curBuffer.HighestSeqNo)); - nextSeqNo += numRPCs; - if (morePages) - { - // More pages to output - posToStart = 0; - relSeqPos = 0; - } - else - { - // Future output may be put on this page - posToStart = pageLength; - relSeqPos += numRPCs; - needToUnlockAtEnd = false; - break; - } - AcquireAppendLock(2); - } - while (bufferEnumerator.MoveNext()); - placeToStart.PageEnumerator = bufferEnumerator; - placeToStart.PagePos = posToStart; - placeToStart.RelSeqPos = relSeqPos; - if (needToUnlockAtEnd) - { - ReleaseAppendLock(); - } - return placeToStart; - } - - internal async Task ReplayFromAsync(Stream outputStream, - long firstSeqNo, - bool reconnecting) - { - var bufferEnumerator = _bufferQ.GetEnumerator(); - // Scan through pages from head to tail looking for events to output - while (bufferEnumerator.MoveNext()) - { - var curBuffer = bufferEnumerator.Current; - Debug.Assert(curBuffer.LowestSeqNo <= firstSeqNo); - if (curBuffer.HighestSeqNo >= firstSeqNo) - { - // We need to send some or all of this buffer - int skipEvents = (int)(Math.Max(0, firstSeqNo - curBuffer.LowestSeqNo)); - - int bufferPos = 0; - if (reconnecting) - { - // We need to reset how many replayable messages have been sent. We want to minimize the use of - // this codepath because of the expensive locking, which can compete with new RPCs getting appended - AcquireAppendLock(2); - curBuffer.UnsentReplayableMessages = curBuffer.TotalReplayableMessages; - for (int i = 0; i < skipEvents; i++) - { - int eventSize = curBuffer.PageBytes.ReadBufferedInt(bufferPos); - if (curBuffer.PageBytes[bufferPos + StreamCommunicator.IntSize(eventSize) + 1] != (byte)RpcTypes.RpcType.Impulse) - { - curBuffer.UnsentReplayableMessages--; - } - bufferPos += eventSize + StreamCommunicator.IntSize(eventSize); - } - ReleaseAppendLock(); - } - else - { - // We assume the counter for unsent replayable messages is correct. NO LOCKING NEEDED - for (int i = 0; i < skipEvents; i++) - { - int eventSize = curBuffer.PageBytes.ReadBufferedInt(bufferPos); - bufferPos += eventSize + StreamCommunicator.IntSize(eventSize); - } - - } - return await SendAsync(outputStream, new BuffersCursor(bufferEnumerator, bufferPos, skipEvents), false); - } - } - // There's no output to replay - return new BuffersCursor(bufferEnumerator, -1, 0); - } - - private void addBufferPage(int writeLength, - long firstSeqNo) - { - BufferPage bufferPage; - ReleaseAppendLock(); - while (!_pool.TryDequeue(out bufferPage)) - { - if (_owningRuntime.Recovering || _owningOutputRecord.ResettingConnection || - _owningRuntime.CheckpointingService || _owningOutputRecord.ConnectingAfterRestart) - { - var newBufferPageBytes = new byte[Math.Max(defaultPageSize, writeLength)]; - bufferPage = new BufferPage(newBufferPageBytes); - _curBufPages++; - break; - } - Thread.Yield(); - } - AcquireAppendLock(); - { - // Grabbed a page from the pool - if (bufferPage.PageBytes.Length < writeLength) - { - // Page isn't big enough. Throw it away and create a bigger one - bufferPage.PageBytes = new byte[writeLength]; - } - } - bufferPage.LowestSeqNo = firstSeqNo; - bufferPage.HighestSeqNo = firstSeqNo; - bufferPage.UnsentReplayableMessages = 0; - bufferPage.TotalReplayableMessages = 0; - bufferPage.curLength = 0; - _bufferQ.Enqueue(ref bufferPage); - } - - internal void CreatePool(int numAlreadyAllocated = 0) - { - _pool = new ConcurrentQueue(); - for (int i = 0; i < (NormalMaxBufferPages - numAlreadyAllocated); i++) - { - var bufferPageBytes = new byte[defaultPageSize]; - var bufferPage = new BufferPage(bufferPageBytes); - _pool.Enqueue(bufferPage); - _curBufPages++; - } - } - - // Assumed that the caller releases the lock acquired here - internal BufferPage GetWritablePage(int writeLength, - long nextSeqNo) - { - if (_pool == null) - { - CreatePool(); - } - AcquireAppendLock(); - // Create a new buffer page if there is none, or if we are introducing a sequence number discontinuity - if (_bufferQ.IsEmpty() || nextSeqNo != (_bufferQ.PeekLast().HighestSeqNo + 1)) - { - addBufferPage(writeLength, nextSeqNo); - } - else - { - // There is something already in the buffer. Check it out. - var outPage = _bufferQ.PeekLast(); - if ((outPage.PageBytes.Length - outPage.curLength) < writeLength) - { - // Not enough space on last page. Add another - addBufferPage(writeLength, nextSeqNo); - } - } - var retVal = _bufferQ.PeekLast(); - return retVal; - } - - internal void Trim(long commitSeqNo, - ref BuffersCursor placeToStart) - { - // Keep trimming pages until we can't anymore or the Q is empty - while (!_bufferQ.IsEmpty()) - { - var currentHead = _bufferQ.PeekFirst(); - bool acquiredLock = false; - // Acquire the lock to ensure someone isn't adding another output to it. - AcquireAppendLock(3); - acquiredLock = true; - if (currentHead.HighestSeqNo <= commitSeqNo) - { - // Trimming for real - // First maintain the placeToStart cursor - if ((placeToStart != null) && ((placeToStart.PagePos >= 0) && (placeToStart.PageEnumerator.Current == currentHead))) - { - // Need to move the enumerator forward. Note that it may be on the last page if all output - // buffers can be trimmed - if (placeToStart.PageEnumerator.MoveNext()) - { - placeToStart.PagePos = 0; - } - else - { - placeToStart.PagePos = -1; - } - } - _bufferQ.Dequeue(); - if (acquiredLock) - { - ReleaseAppendLock(); - } - // Return page to pool - currentHead.curLength = 0; - currentHead.HighestSeqNo = 0; - currentHead.UnsentReplayableMessages = 0; - currentHead.TotalReplayableMessages = 0; - if (_pool == null) - { - CreatePool(_bufferQ.Count); - } - if (_owningRuntime.Recovering || _curBufPages <= NormalMaxBufferPages) - { - _pool.Enqueue(currentHead); - } - else - { - _curBufPages--; - } - } - else - { - // Nothing more to trim - if (acquiredLock) - { - ReleaseAppendLock(); - } - break; - } - } - } - - // Note that this method assumes that the caller has locked this connection record to avoid possible interference. Note that this method - // assumes no discontinuities in sequence numbers since adjusting can only happen on newly initialized service (no recovery), and since - // discontinuities can only happen as the result of recovery - internal long AdjustFirstSeqNoTo(long commitSeqNo) - { - var bufferEnumerator = _bufferQ.GetEnumerator(); - // Scan through pages from head to tail looking for events to output - while (bufferEnumerator.MoveNext()) - { - var curBuffer = bufferEnumerator.Current; - var seqNoDiff = curBuffer.HighestSeqNo - curBuffer.LowestSeqNo; - curBuffer.LowestSeqNo = commitSeqNo; - curBuffer.HighestSeqNo = commitSeqNo + seqNoDiff; - commitSeqNo += seqNoDiff + 1; - } - return commitSeqNo - 1; - } - - // Returns the highest sequence number left in the buffers after removing the non-replayable messages, or -1 if the - // buffers are empty. - internal long TrimAndUnbufferNonreplayableCalls(long trimSeqNo, - long matchingReplayableSeqNo) - { - // No locking necessary since this should only get called during recovery before replay and before a checkpooint is sent to service - // First trim - long highestTrimmedSeqNo = -1; - while (!_bufferQ.IsEmpty()) - { - var currentHead = _bufferQ.PeekFirst(); - if (currentHead.HighestSeqNo <= trimSeqNo) - { - // Must completely trim the page - _bufferQ.Dequeue(); - // Return page to pool - highestTrimmedSeqNo = currentHead.HighestSeqNo; - currentHead.curLength = 0; - currentHead.HighestSeqNo = 0; - currentHead.UnsentReplayableMessages = 0; - currentHead.TotalReplayableMessages = 0; - if (_pool == null) - { - CreatePool(_bufferQ.Count); - } - _pool.Enqueue(currentHead); - } - else - { - // May need to remove some data from the page - int readBufferPos = 0; - for (var i = currentHead.LowestSeqNo; i <= trimSeqNo; i++ ) - { - int eventSize = currentHead.PageBytes.ReadBufferedInt(readBufferPos); - if (currentHead.PageBytes[readBufferPos + StreamCommunicator.IntSize(eventSize) + 1] != (byte)RpcTypes.RpcType.Impulse) - { - currentHead.TotalReplayableMessages--; - } - readBufferPos += eventSize + StreamCommunicator.IntSize(eventSize); - } - Buffer.BlockCopy(currentHead.PageBytes, readBufferPos, currentHead.PageBytes, 0, currentHead.PageBytes.Length - readBufferPos); - currentHead.LowestSeqNo += trimSeqNo - currentHead.LowestSeqNo + 1; - break; - } - } - - var bufferEnumerator = _bufferQ.GetEnumerator(); - long nextReplayableSeqNo = matchingReplayableSeqNo + 1; - while (bufferEnumerator.MoveNext()) - { - var curBuffer = bufferEnumerator.Current; - var numMessagesOnPage = curBuffer.HighestSeqNo - curBuffer.LowestSeqNo + 1; - curBuffer.LowestSeqNo = nextReplayableSeqNo; - if (numMessagesOnPage > curBuffer.TotalReplayableMessages) - { - // There are some nonreplayable messsages to remove - int readBufferPos = 0; - var newPageBytes = new byte[curBuffer.PageBytes.Length]; - var pageWriteStream = new MemoryStream(newPageBytes); - for (int i = 0; i < numMessagesOnPage; i++) - { - int eventSize = curBuffer.PageBytes.ReadBufferedInt(readBufferPos); - if (curBuffer.PageBytes[readBufferPos + StreamCommunicator.IntSize(eventSize) + 1] != (byte)RpcTypes.RpcType.Impulse) - { - // Copy event over to new page bytes - pageWriteStream.Write(curBuffer.PageBytes, readBufferPos, eventSize + StreamCommunicator.IntSize(eventSize)); - } - readBufferPos += eventSize + StreamCommunicator.IntSize(eventSize); - } - curBuffer.curLength = (int)pageWriteStream.Position; - curBuffer.HighestSeqNo = curBuffer.LowestSeqNo + curBuffer.TotalReplayableMessages - 1; - curBuffer.PageBytes = newPageBytes; - } - nextReplayableSeqNo += curBuffer.TotalReplayableMessages; - } - return nextReplayableSeqNo - 1; - } - - internal void RebaseSeqNosInBuffer(long commitSeqNo, - long commitSeqNoReplayable) - { - var seqNoDiff = commitSeqNo - commitSeqNoReplayable; - var bufferEnumerator = _bufferQ.GetEnumerator(); - // Scan through pages from head to tail looking for events to output - while (bufferEnumerator.MoveNext()) - { - var curBuffer = bufferEnumerator.Current; - curBuffer.LowestSeqNo += seqNoDiff; - curBuffer.HighestSeqNo += seqNoDiff; - } - } - } - - [DataContract] - internal class InputConnectionRecord - { - public NetworkStream DataConnectionStream { get; set; } - public NetworkStream ControlConnectionStream { get; set; } - [DataMember] - public long LastProcessedID { get; set; } - [DataMember] - public long LastProcessedReplayableID { get; set; } - public InputConnectionRecord() - { - DataConnectionStream = null; - LastProcessedID = 0; - LastProcessedReplayableID = 0; - } - } - - internal class OutputConnectionRecord - { - // Set on reconnection. Established where to replay from or filter to - public long ReplayFrom { get; set; } - // The seq number from the last RPC call copied to the buffer. Not a property so interlocked read can be done - public long LastSeqNoFromLocalService; - // RPC output buffers - public EventBuffer BufferedOutput { get; set; } - // A cursor which specifies where the last RPC output ended - public EventBuffer.BuffersCursor placeInOutput; - // Work Q for output producing work. - public AsyncQueue DataWorkQ { get; set; } - // Work Q for sending trim messages and perform local trimming - public AsyncQueue ControlWorkQ { get; set; } - // Current sequence number which the output buffer may be trimmed to. - public long TrimTo { get; set; } - // Current replayable sequence number which the output buffer may be trimmed to. - public long ReplayableTrimTo { get; set; } - // The number of sends which are currently enqueued. Should be updated with interlocked increment and decrement - public long _sendsEnqueued; - public AmbrosiaRuntime MyAmbrosia { get; set; } - public bool WillResetConnection { get; set; } - public bool ResettingConnection { get; set; } - public bool ConnectingAfterRestart { get; set; } - // The latest trim location on the other side. An associated trim message MAY have already been sent - public long RemoteTrim { get; set; } - // The latest replayable trim location on the other side. An associated trim message MAY have already been sent - public long RemoteTrimReplayable { get; set; } - // The seq no of the last RPC sent to the receiver - public long LastSeqSentToReceiver; - - public OutputConnectionRecord(AmbrosiaRuntime inAmbrosia) - { - ReplayFrom = 0; - DataWorkQ = new AsyncQueue(); - ControlWorkQ = new AsyncQueue(); - _sendsEnqueued = 0; - TrimTo = -1; - ReplayableTrimTo = -1; - RemoteTrim = -1; - RemoteTrimReplayable = -1; - LastSeqNoFromLocalService = 0; - MyAmbrosia = inAmbrosia; - BufferedOutput = new EventBuffer(MyAmbrosia, this); - ResettingConnection = false; - ConnectingAfterRestart = false; - LastSeqSentToReceiver = 0; - WillResetConnection = inAmbrosia._createService; - ConnectingAfterRestart = inAmbrosia._restartWithRecovery; - } - } - - public class AmbrosiaRuntimeParams - { - public int serviceReceiveFromPort; - public int serviceSendToPort; - public string serviceName; - public string AmbrosiaBinariesLocation; - public string serviceLogPath; - public bool? createService; - public bool pauseAtStart; - public bool persistLogs; - public bool activeActive; - public long logTriggerSizeMB; - public string storageConnectionString; - public long currentVersion; - public long upgradeToVersion; - } - - public class AmbrosiaRuntime : VertexBase - { -#if _WINDOWS - [DllImport("Kernel32.dll", CallingConvention = CallingConvention.Winapi)] - private static extern void GetSystemTimePreciseAsFileTime(out long filetime); -#else - private static void GetSystemTimePreciseAsFileTime(out long filetime) - { - filetime = Stopwatch.GetTimestamp(); - } -#endif - - // Util - // Log metadata information record in _logMetadataTable - private class serviceInstanceEntity : TableEntity - { - public serviceInstanceEntity() - { - } - - public serviceInstanceEntity(string key, string inValue) - { - this.PartitionKey = "(Default)"; - this.RowKey = key; - this.value = inValue; - - } - - public string value { get; set; } - } - - - // Create a table with name tableName if it does not exist - private CloudTable CreateTableIfNotExists(String tableName) - { - try - { - CloudTable table = _tableClient.GetTableReference(tableName); - table.CreateIfNotExistsAsync().Wait(); - if (table == null) - { - OnError(AzureOperationError, "Error creating a table in Azure"); - } - return table; - } - catch - { - OnError(AzureOperationError, "Error creating a table in Azure"); - return null; - } - } - - - // Replace info for a key or create a new key. Raises an exception if the operation fails for any reason. - private void InsertOrReplaceServiceInfoRecord(string infoTitle, string info) - { - try - { - serviceInstanceEntity ServiceInfoEntity = new serviceInstanceEntity(infoTitle, info); - TableOperation insertOrReplaceOperation = TableOperation.InsertOrReplace(ServiceInfoEntity); - var myTask = this._serviceInstanceTable.ExecuteAsync(insertOrReplaceOperation); - myTask.Wait(); - var retrievedResult = myTask.Result; - if (retrievedResult.HttpStatusCode < 200 || retrievedResult.HttpStatusCode >= 300) - { - OnError(AzureOperationError, "Error replacing a record in an Azure table"); - } - } - catch - { - OnError(AzureOperationError, "Error replacing a record in an Azure table"); - } - } - - // Retrieve info for a given key - // If no key exists or _logMetadataTable does not exist, raise an exception - private string RetrieveServiceInfo(string key) - { - if (this._serviceInstanceTable != null) - { - TableOperation retrieveOperation = TableOperation.Retrieve("(Default)", key); - var myTask = this._serviceInstanceTable.ExecuteAsync(retrieveOperation); - myTask.Wait(); - var retrievedResult = myTask.Result; - if (retrievedResult.Result != null) - { - return ((serviceInstanceEntity)retrievedResult.Result).value; - } - else - { - OnError(AzureOperationError, "Error retrieving info from Azure"); - } - } - else - { - OnError(AzureOperationError, "Error retrieving info from Azure"); - } - // Make compiler happy - return null; - } - - // Used to hold the bytes which will go in the log. Note that two streams are passed in. The - // log stream must write to durable storage and be flushable, while the second stream initiates - // actual action taken after the message has been made durable. - private class Committer - { - byte[] _buf; - volatile byte[] _bufbak; - long _maxBufSize; - // Used in CAS. The first 31 bits are the #of writers, the next 32 bits is the buffer size, the last bit is the sealed bit - long _status; - const int SealedBits = 1; - const int TailBits = 32; - const int numWritesBits = 31; - const long Last32Mask = 0x00000000FFFFFFFF; - const long First32Mask = Last32Mask << 32; - LogWriter _logStream; - Stream _workStream; - ConcurrentDictionary _uncommittedWatermarks; - ConcurrentDictionary _uncommittedWatermarksBak; - internal ConcurrentDictionary _trimWatermarks; - ConcurrentDictionary _trimWatermarksBak; - internal const int HeaderSize = 24; // 4 Committer ID, 8 Write ID, 8 check bytes, 4 page size - Task _lastCommitTask; - bool _persistLogs; - int _committerID; - internal long _nextWriteID; - AmbrosiaRuntime _myAmbrosia; - - public Committer(Stream workStream, - bool persistLogs, - AmbrosiaRuntime myAmbrosia, - long maxBufSize = 8 * 1024 * 1024, - LogReader recoveryStream = null) - { - _myAmbrosia = myAmbrosia; - _persistLogs = persistLogs; - _uncommittedWatermarksBak = new ConcurrentDictionary(); - _trimWatermarksBak = new ConcurrentDictionary(); - if (maxBufSize <= 0) - { - // Recovering - _committerID = recoveryStream.ReadIntFixed(); - _nextWriteID = recoveryStream.ReadLongFixed(); - _maxBufSize = recoveryStream.ReadIntFixed(); - _buf = new byte[_maxBufSize]; - var bufSize = recoveryStream.ReadIntFixed(); - _status = bufSize << SealedBits; - recoveryStream.Read(_buf, 0, bufSize); - _uncommittedWatermarks = _uncommittedWatermarks.AmbrosiaDeserialize(recoveryStream); - _trimWatermarks = _trimWatermarks.AmbrosiaDeserialize(recoveryStream); - } - else - { - // starting for the first time - _status = HeaderSize << SealedBits; - _maxBufSize = maxBufSize; - _buf = new byte[maxBufSize]; - _uncommittedWatermarks = new ConcurrentDictionary(); - _trimWatermarks = new ConcurrentDictionary(); - long curTime; - GetSystemTimePreciseAsFileTime(out curTime); - _committerID = (int)((curTime << 33) >> 33); - _nextWriteID = 0; - } - _bufbak = new byte[_maxBufSize]; - var memWriter = new MemoryStream(_buf); - var memWriterBak = new MemoryStream(_bufbak); - memWriter.WriteIntFixed(_committerID); - memWriterBak.WriteIntFixed(_committerID); - _logStream = null; - _workStream = workStream; - } - - internal int CommitID { get { return _committerID; } } - - internal void Serialize(LogWriter serializeStream) - { - var localStatus = _status; - var bufLength = ((localStatus >> SealedBits) & Last32Mask); - serializeStream.WriteIntFixed(_committerID); - serializeStream.WriteLongFixed(_nextWriteID); - serializeStream.WriteIntFixed((int)_maxBufSize); - serializeStream.WriteIntFixed((int)bufLength); - serializeStream.Write(_buf, 0, (int)bufLength); - _uncommittedWatermarks.AmbrosiaSerialize(serializeStream); - _trimWatermarks.AmbrosiaSerialize(serializeStream); - } - - public byte[] Buf { get { return _buf; } } - - - private void SendInputWatermarks(ConcurrentDictionary uncommittedWatermarks, - ConcurrentDictionary outputs) - { - // trim output buffers of inputs - lock (outputs) - { - foreach (var kv in uncommittedWatermarks) - { - OutputConnectionRecord outputConnectionRecord; - if (!outputs.TryGetValue(kv.Key, out outputConnectionRecord)) - { - // Set up the output record for the first time and add it to the dictionary - outputConnectionRecord = new OutputConnectionRecord(_myAmbrosia); - outputs[kv.Key] = outputConnectionRecord; - Console.WriteLine("Adding output:{0}", kv.Key); - } - outputConnectionRecord.RemoteTrim = Math.Max(kv.Value.First, outputConnectionRecord.RemoteTrim); - outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.Second, outputConnectionRecord.RemoteTrimReplayable); - if (outputConnectionRecord.ControlWorkQ.IsEmpty) - { - outputConnectionRecord.ControlWorkQ.Enqueue(-2); - } - } - } - } - - private async Task Commit(byte[] firstBufToCommit, - int length1, - byte[] secondBufToCommit, - int length2, - ConcurrentDictionary uncommittedWatermarks, - ConcurrentDictionary trimWatermarks, - ConcurrentDictionary outputs) - { - try - { - // writes to _logstream - don't want to persist logs when perf testing so this is optional parameter - if (_persistLogs) - { - _logStream.Write(firstBufToCommit, 0, 4); - _logStream.WriteIntFixed(length1 + length2); - _logStream.Write(firstBufToCommit, 8, 16); - await _logStream.WriteAsync(firstBufToCommit, HeaderSize, length1 - HeaderSize); - await _logStream.WriteAsync(secondBufToCommit, 0, length2); - await writeFullWaterMarksAsync(uncommittedWatermarks); - await writeSimpleWaterMarksAsync(trimWatermarks); - await _logStream.FlushAsync(); - } - - SendInputWatermarks(uncommittedWatermarks, outputs); - _workStream.Write(firstBufToCommit, 0, 4); - _workStream.WriteIntFixed(length1 + length2); - _workStream.Write(firstBufToCommit, 8, 16); - await _workStream.WriteAsync(firstBufToCommit, HeaderSize, length1 - HeaderSize); - await _workStream.WriteAsync(secondBufToCommit, 0, length2); - // Return the second byte array to the FlexReader pool - FlexReadBuffer.ReturnBuffer(secondBufToCommit); - var flushtask = _workStream.FlushAsync(); - _uncommittedWatermarksBak = uncommittedWatermarks; - _uncommittedWatermarksBak.Clear(); - _trimWatermarksBak = trimWatermarks; - _trimWatermarksBak.Clear(); - } - catch (Exception e) - { - _myAmbrosia.OnError(5, e.Message); - } - _bufbak = firstBufToCommit; - await TryCommitAsync(outputs); - } - - private async Task writeFullWaterMarksAsync(ConcurrentDictionary uncommittedWatermarks) - { - _logStream.WriteInt(uncommittedWatermarks.Count); - foreach (var kv in uncommittedWatermarks) - { - var sourceBytes = Encoding.UTF8.GetBytes(kv.Key); - _logStream.WriteInt(sourceBytes.Length); - await _logStream.WriteAsync(sourceBytes, 0, sourceBytes.Length); - _logStream.WriteLongFixed(kv.Value.First); - _logStream.WriteLongFixed(kv.Value.Second); - } - } - - private async Task writeSimpleWaterMarksAsync(ConcurrentDictionary uncommittedWatermarks) - { - _logStream.WriteInt(uncommittedWatermarks.Count); - foreach (var kv in uncommittedWatermarks) - { - var sourceBytes = Encoding.UTF8.GetBytes(kv.Key); - _logStream.WriteInt(sourceBytes.Length); - await _logStream.WriteAsync(sourceBytes, 0, sourceBytes.Length); - _logStream.WriteLongFixed(kv.Value); - } - } - private async Task Commit(byte[] buf, - int length, - ConcurrentDictionary uncommittedWatermarks, - ConcurrentDictionary trimWatermarks, - ConcurrentDictionary outputs) - { - try - { - // writes to _logstream - don't want to persist logs when perf testing so this is optional parameter - if (_persistLogs) - { - await _logStream.WriteAsync(buf, 0, length); - await writeFullWaterMarksAsync(uncommittedWatermarks); - await writeSimpleWaterMarksAsync(trimWatermarks); - await _logStream.FlushAsync(); - } - SendInputWatermarks(uncommittedWatermarks, outputs); - await _workStream.WriteAsync(buf, 0, length); - var flushtask = _workStream.FlushAsync(); - _uncommittedWatermarksBak = uncommittedWatermarks; - _uncommittedWatermarksBak.Clear(); - _trimWatermarksBak = trimWatermarks; - _trimWatermarksBak.Clear(); - } - catch (Exception e) - { - _myAmbrosia.OnError(5, e.Message); - } - _bufbak = buf; - await TryCommitAsync(outputs); - } - - public async Task SleepAsync() - { - while (true) - { - // We're going to try to seal the buffer - var localStatus = Interlocked.Read(ref _status); - // Yield if the sealed bit is set - while (localStatus % 2 == 1) - { - await Task.Yield(); - localStatus = Interlocked.Read(ref _status); - } - var newLocalStatus = localStatus + 1; - var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); - - // Check if the compare and swap succeeded, otherwise try again - if (origVal == localStatus) - { - // We successfully sealed the buffer and must wait until any active commit finishes - while (_bufbak == null) - { - await Task.Yield(); - } - - // Wait for all writes to complete before sleeping - while (true) - { - localStatus = Interlocked.Read(ref _status); - var numWrites = (localStatus >> (64 - numWritesBits)); - if (numWrites == 0) - { - break; - } - await Task.Yield(); - } - return; - } - } - } - - // This method switches the log stream to the provided stream and removes the write lock on the old file - public void SwitchLogStreams(LogWriter newLogStream) - { - if (_status % 2 != 1 || _bufbak == null) - { - _myAmbrosia.OnError(5, "Committer is trying to switch log streams when awake"); - } - // Release resources and lock on the old file - if (_logStream != null) - { - _logStream.Dispose(); - } - _logStream = newLogStream; - } - - public async Task WakeupAsync() - { - var localStatus = Interlocked.Read(ref _status); - if (localStatus % 2 == 0 || _bufbak == null) - { - _myAmbrosia.OnError(5, "Tried to wakeup committer when not asleep"); - } - // We're going to try to unseal the buffer - var newLocalStatus = localStatus - 1; - var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); - // Check if the compare and swap succeeded - if (origVal != localStatus) - { - _myAmbrosia.OnError(5, "Tried to wakeup committer when not asleep 2"); - } - await TryCommitAsync(this._myAmbrosia._outputs); - } - - byte[] _checkTempBytes = new byte[8]; - byte[] _checkTempBytes2 = new byte[8]; - - internal unsafe long CheckBytesExtra(int offset, - int length, - byte[] extraBytes, - int extraLength) - { - var firstBufferCheck = CheckBytes(offset, length); - var secondBufferCheck = CheckBytes(extraBytes, 0, extraLength); - long shiftedSecondBuffer = secondBufferCheck; - var lastByteLongOffset = length % 8; - if (lastByteLongOffset != 0) - { - fixed (byte* p = _checkTempBytes) - { - *((long*)p) = secondBufferCheck; - } - // Create new buffer with circularly shifted secondBufferCheck - for (int i = 0; i < 8; i++) - { - _checkTempBytes2[i] = _checkTempBytes[(i - lastByteLongOffset + 8) % 8]; - } - fixed (byte* p = _checkTempBytes2) - { - shiftedSecondBuffer = *((long*)p); - } - } - return firstBufferCheck ^ shiftedSecondBuffer; - } - - internal unsafe long CheckBytes(int offset, - int length) - { - long checkBytes = 0; - - fixed (byte* p = _buf) - { - if (offset % 8 == 0) - { - int startLongCalc = offset / 8; - int numLongCalcs = length / 8; - int numByteCalcs = length % 8; - long* longPtr = ((long*)p) + startLongCalc; - for (int i = 0; i < numLongCalcs; i++) - { - checkBytes ^= longPtr[i]; - } - if (numByteCalcs != 0) - { - var lastBytes = (byte*)(longPtr + numLongCalcs); - for (int i = 0; i < 8; i++) - { - if (i < numByteCalcs) - { - _checkTempBytes[i] = lastBytes[i]; - } - else - { - _checkTempBytes[i] = 0; - } - } - fixed (byte* p2 = _checkTempBytes) - { - checkBytes ^= *((long*)p2); - } - } - } - else - { - _myAmbrosia.OnError(0, "checkbytes case not implemented"); - } - } - return checkBytes; - } - - - internal unsafe long CheckBytes(byte[] bufToCalc, - int offset, - int length) - { - long checkBytes = 0; - - fixed (byte* p = bufToCalc) - { - if (offset % 8 == 0) - { - int startLongCalc = offset / 8; - int numLongCalcs = length / 8; - int numByteCalcs = length % 8; - long* longPtr = ((long*)p) + startLongCalc; - for (int i = 0; i < numLongCalcs; i++) - { - checkBytes ^= longPtr[i]; - } - if (numByteCalcs != 0) - { - var lastBytes = (byte*)(longPtr + numLongCalcs); - for (int i = 0; i < 8; i++) - { - if (i < numByteCalcs) - { - _checkTempBytes[i] = lastBytes[i]; - } - else - { - _checkTempBytes[i] = 0; - } - } - fixed (byte* p2 = _checkTempBytes) - { - checkBytes ^= *((long*)p2); - } - } - } - else - { - _myAmbrosia.OnError(0, "checkbytes case not implemented 2"); - } - } - return checkBytes; - } - - - public async Task AddRow(FlexReadBuffer copyFromFlexBuffer, - string outputToUpdate, - long newSeqNo, - long newReplayableSeqNo, - ConcurrentDictionary outputs) - { - var copyFromBuffer = copyFromFlexBuffer.Buffer; - var length = copyFromFlexBuffer.Length; - while (true) - { - bool sealing = false; - long localStatus; - localStatus = Interlocked.Read(ref _status); - - // Yield if the sealed bit is set - while (localStatus % 2 == 1) - { - await Task.Yield(); - localStatus = Interlocked.Read(ref _status); - } - var oldBufLength = ((localStatus >> SealedBits) & Last32Mask); - var newLength = oldBufLength + length; - - // Assemble the new status - long newLocalStatus; - if ((newLength > _maxBufSize) || (_bufbak != null)) - { - // We're going to try to seal the buffer - newLocalStatus = localStatus + 1; - sealing = true; - } - else - { - // We're going to try to add to the end of the existing buffer - var newWrites = (localStatus >> (64 - numWritesBits)) + 1; - newLocalStatus = ((newWrites) << (64 - numWritesBits)) | (newLength << SealedBits); - } - var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); - - // Check if the compare and swap succeeded, otherwise try again - if (origVal == localStatus) - { - if (sealing) - { - // This call successfully sealed the buffer. Remember we still have an extra - // message to take care of - - // We have just filled the backup buffer and must wait until any other commit finishes - int counter = 0; - while (_bufbak == null) - { - counter++; - if (counter == 100000) - { - counter = 0; - await Task.Yield(); - } - } - - // There is no other write going on. Take the backup buffer - var newUncommittedWatermarks = _uncommittedWatermarksBak; - var newWriteBuf = _bufbak; - _bufbak = null; - _uncommittedWatermarksBak = null; - - // Wait for other writes to complete before committing - while (true) - { - localStatus = Interlocked.Read(ref _status); - var numWrites = (localStatus >> (64 - numWritesBits)); - if (numWrites == 0) - { - break; - } - await Task.Yield(); - } - - // Filling header with enough info to detect incomplete writes and also writing the page length - var writeStream = new MemoryStream(_buf, 4, 20); - int lengthOnPage; - if (newLength <= _maxBufSize) - { - lengthOnPage = (int)newLength; - } - else - { - lengthOnPage = (int)oldBufLength; - } - writeStream.WriteIntFixed(lengthOnPage); - if (newLength <= _maxBufSize) - { - // Copy the contents into the log record buffer - Buffer.BlockCopy(copyFromBuffer, 0, _buf, (int)oldBufLength, length); - } - long checkBytes; - if (length <= (_maxBufSize - HeaderSize)) - { - // new message will end up in a commit buffer. Use normal CheckBytes - checkBytes = CheckBytes(HeaderSize, lengthOnPage - HeaderSize); - } - else - { - // new message is too big to land in a commit buffer and will be tacked on the end. - checkBytes = CheckBytesExtra(HeaderSize, lengthOnPage - HeaderSize, copyFromBuffer, length); - } - writeStream.WriteLongFixed(checkBytes); - writeStream.WriteLongFixed(_nextWriteID); - _nextWriteID++; - - // Do the actual commit - // Grab the current state of trim levels since the last write - // Note that the trim thread may want to modify the table, requiring a lock - ConcurrentDictionary oldTrimWatermarks; - lock (_trimWatermarks) - { - oldTrimWatermarks = _trimWatermarks; - _trimWatermarks = _trimWatermarksBak; - _trimWatermarksBak = null; - } - if (newLength <= _maxBufSize) - { - // add row to current buffer and commit - _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); - _lastCommitTask = Commit(_buf, (int)newLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); - newLocalStatus = HeaderSize << SealedBits; - } - else if (length > (_maxBufSize - HeaderSize)) - { - // Steal the byte array in the flex buffer to return it after writing - copyFromFlexBuffer.StealBuffer(); - // write new event as part of commit - _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); - var commitTask = Commit(_buf, (int)oldBufLength, copyFromBuffer, length, _uncommittedWatermarks, oldTrimWatermarks, outputs); - newLocalStatus = HeaderSize << SealedBits; - } - else - { - // commit and add new event to new buffer - newUncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); - _lastCommitTask = Commit(_buf, (int)oldBufLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); - Buffer.BlockCopy(copyFromBuffer, 0, newWriteBuf, (int)HeaderSize, length); - newLocalStatus = (HeaderSize + length) << SealedBits; - } - _buf = newWriteBuf; - _uncommittedWatermarks = newUncommittedWatermarks; - _status = newLocalStatus; - return (long)_logStream.FileSize; - } - // Add the message to the existing buffer - Buffer.BlockCopy(copyFromBuffer, 0, _buf, (int)oldBufLength, length); - _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); - // Reduce write count - while (true) - { - localStatus = Interlocked.Read(ref _status); - var newWrites = (localStatus >> (64 - numWritesBits)) - 1; - newLocalStatus = (localStatus & ((Last32Mask << 1) + 1)) | - (newWrites << (64 - numWritesBits)); - origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); - if (origVal == localStatus) - { - if (localStatus % 2 == 0 && _bufbak != null) - { - await TryCommitAsync(outputs); - } - return (long)_logStream.FileSize; - } - } - } - } - } - - public async Task TryCommitAsync(ConcurrentDictionary outputs) - { - long localStatus; - localStatus = Interlocked.Read(ref _status); - - var bufLength = ((localStatus >> SealedBits) & Last32Mask); - // give up and try later if the sealed bit is set or there is nothing to write - if (localStatus % 2 == 1 || bufLength == HeaderSize || _bufbak == null) - { - return; - } - - // Assemble the new status - long newLocalStatus; - newLocalStatus = localStatus + 1; - var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); - - // Check if the compare and swap succeeded, otherwise skip flush - if (origVal == localStatus) - { - // This call successfully sealed the buffer. - - // We have just filled the backup buffer and must wait until any other commit finishes - int counter = 0; - while (_bufbak == null) - { - counter++; - if (counter == 100000) - { - counter = 0; - await Task.Yield(); - } - } - - // There is no other write going on. Take the backup buffer - var newUncommittedWatermarks = _uncommittedWatermarksBak; - var newWriteBuf = _bufbak; - _bufbak = null; - _uncommittedWatermarksBak = null; - - // Wait for other writes to complete before committing - while (true) - { - localStatus = Interlocked.Read(ref _status); - var numWrites = (localStatus >> (64 - numWritesBits)); - if (numWrites == 0) - { - break; - } - await Task.Yield(); - } - - // Filling header with enough info to detect incomplete writes and also writing the page length - var writeStream = new MemoryStream(_buf, 4, 20); - writeStream.WriteIntFixed((int)bufLength); - long checkBytes = CheckBytes(HeaderSize, (int)bufLength - HeaderSize); - writeStream.WriteLongFixed(checkBytes); - writeStream.WriteLongFixed(_nextWriteID); - _nextWriteID++; - - // Grab the current state of trim levels since the last write - // Note that the trim thread may want to modify the table, requiring a lock - ConcurrentDictionary oldTrimWatermarks; - lock (_trimWatermarks) - { - oldTrimWatermarks = _trimWatermarks; - _trimWatermarks = _trimWatermarksBak; - _trimWatermarksBak = null; - } - _lastCommitTask = Commit(_buf, (int)bufLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); - newLocalStatus = HeaderSize << SealedBits; - _buf = newWriteBuf; - _uncommittedWatermarks = newUncommittedWatermarks; - _status = newLocalStatus; - } - } - - internal void ClearNextWrite() - { - _uncommittedWatermarksBak.Clear(); - _trimWatermarksBak.Clear(); - _status = HeaderSize << SealedBits; - } - - internal void SendUpgradeRequest() - { - _workStream.WriteIntFixed(_committerID); - var numMessageBytes = StreamCommunicator.IntSize(1) + 1; - var messageBuf = new byte[numMessageBytes]; - var memStream = new MemoryStream(messageBuf); - memStream.WriteInt(1); - memStream.WriteByte(upgradeServiceByte); - memStream.Dispose(); - _workStream.WriteIntFixed((int)(HeaderSize + numMessageBytes)); - long checkBytes = CheckBytes(messageBuf, 0, (int)numMessageBytes); - _workStream.WriteLongFixed(checkBytes); - _workStream.WriteLongFixed(-1); - _workStream.Write(messageBuf, 0, numMessageBytes); - _workStream.Flush(); - } - - internal void QuiesceServiceWithSendCheckpointRequest(bool upgrading = false, bool becomingPrimary = false) - { - _workStream.WriteIntFixed(_committerID); - var numMessageBytes = StreamCommunicator.IntSize(1) + 1; - var messageBuf = new byte[numMessageBytes]; - var memStream = new MemoryStream(messageBuf); - memStream.WriteInt(1); - if (upgrading) - { - memStream.WriteByte(upgradeTakeCheckpointByte); - } - else if (becomingPrimary) - { - memStream.WriteByte(takeBecomingPrimaryCheckpointByte); - } - else - { - memStream.WriteByte(takeCheckpointByte); - } - memStream.Dispose(); - _workStream.WriteIntFixed((int)(HeaderSize + numMessageBytes)); - long checkBytes = CheckBytes(messageBuf, 0, (int)numMessageBytes); - _workStream.WriteLongFixed(checkBytes); - _workStream.WriteLongFixed(-1); - _workStream.Write(messageBuf, 0, numMessageBytes); - _workStream.Flush(); - } - - internal void SendCheckpointToRecoverFrom(byte[] buf, int length, LogReader checkpointStream) - { - _workStream.WriteIntFixed(_committerID); - _workStream.WriteIntFixed((int)(HeaderSize + length)); - _workStream.WriteLongFixed(0); - _workStream.WriteLongFixed(-2); - _workStream.Write(buf, 0, length); - var sizeBytes = StreamCommunicator.ReadBufferedInt(buf, 0); - var checkpointSize = StreamCommunicator.ReadBufferedLong(buf, StreamCommunicator.IntSize(sizeBytes) + 1); - checkpointStream.ReadBig(_workStream, checkpointSize); - _workStream.Flush(); - } - - internal async Task AddInitialRowAsync(FlexReadBuffer serviceInitializationMessage) - { - var numMessageBytes = serviceInitializationMessage.Length; - if (numMessageBytes > _buf.Length - HeaderSize) - { - _myAmbrosia.OnError(0, "Initial row is too many bytes"); - } - Buffer.BlockCopy(serviceInitializationMessage.Buffer, 0, _buf, (int)HeaderSize, numMessageBytes); - _status = (HeaderSize + numMessageBytes) << SealedBits; - await SleepAsync(); - } - } - - public class AmbrosiaOutput : IAsyncVertexOutputEndpoint - { - AmbrosiaRuntime myRuntime; - string _typeOfEndpoint; // Data or control endpoint - - public AmbrosiaOutput(AmbrosiaRuntime inRuntime, - string typeOfEndpoint) : base() - { - myRuntime = inRuntime; - _typeOfEndpoint = typeOfEndpoint; - } - - public void Dispose() - { - } - - public async Task ToInputAsync(IVertexInputEndpoint p, CancellationToken token) - { - await Task.Yield(); - throw new NotImplementedException(); - } - - public async Task ToStreamAsync(Stream stream, string otherProcess, string otherEndpoint, CancellationToken token) - { - if (_typeOfEndpoint == "data") - { - await myRuntime.ToDataStreamAsync(stream, otherProcess, token); - } - else - { - await myRuntime.ToControlStreamAsync(stream, otherProcess, token); - } - } - } - - public class AmbrosiaInput : IAsyncVertexInputEndpoint - { - AmbrosiaRuntime myRuntime; - string _typeOfEndpoint; // Data or control endpoint - - public AmbrosiaInput(AmbrosiaRuntime inRuntime, - string typeOfEndpoint) : base() - { - myRuntime = inRuntime; - _typeOfEndpoint = typeOfEndpoint; - } - - public void Dispose() - { - } - - public async Task FromOutputAsync(IVertexOutputEndpoint p, CancellationToken token) - { - await Task.Yield(); - throw new NotImplementedException(); - } - - public async Task FromStreamAsync(Stream stream, string otherProcess, string otherEndpoint, CancellationToken token) - { - if (_typeOfEndpoint == "data") - { - await myRuntime.FromDataStreamAsync(stream, otherProcess, token); - } - else - { - await myRuntime.FromControlStreamAsync(stream, otherProcess, token); - } - } - } - - ConcurrentDictionary _inputs; - ConcurrentDictionary _outputs; - internal int _localServiceReceiveFromPort; // specifiable on the command line - internal int _localServiceSendToPort; // specifiable on the command line - internal string _serviceName; // specifiable on the command line - internal string _serviceLogPath; - internal string _logFileNameBase; - public const string AmbrosiaDataInputsName = "Ambrosiadatain"; - public const string AmbrosiaControlInputsName = "Ambrosiacontrolin"; - public const string AmbrosiaDataOutputsName = "Ambrosiadataout"; - public const string AmbrosiaControlOutputsName = "Ambrosiacontrolout"; - bool _persistLogs; - bool _sharded; - internal bool _createService; - long _shardID; - bool _runningRepro; - long _currentVersion; - long _upgradeToVersion; - bool _upgrading; - internal bool _restartWithRecovery; - internal bool CheckpointingService { get; set; } - - // Constants for leading byte communicated between services; - public const byte RPCByte = 0; - public const byte attachToByte = 1; - public const byte takeCheckpointByte = 2; - public const byte CommitByte = 3; - public const byte replayFromByte = 4; - public const byte RPCBatchByte = 5; - public const byte PingByte = 6; - public const byte PingReturnByte = 7; - public const byte checkpointByte = 8; - public const byte InitalMessageByte = 9; - public const byte upgradeTakeCheckpointByte = 10; - public const byte takeBecomingPrimaryCheckpointByte = 11; - public const byte upgradeServiceByte = 12; - public const byte CountReplayableRPCBatchByte = 13; - public const byte trimToByte = 14; - - CRAClientLibrary _coral; - - // Connection to local service - NetworkStream _localServiceReceiveFromStream; - NetworkStream _localServiceSendToStream; - - // Precommit buffers used for writing things to append blobs - Committer _committer; - - // Azure storage clients - string _storageConnectionString; - CloudStorageAccount _storageAccount; - CloudTableClient _tableClient; - - // Azure table for service instance metadata information - CloudTable _serviceInstanceTable; - long _lastCommittedCheckpoint; - - // Azure blob for writing commit log and checkpoint - LogWriter _checkpointWriter; - - // true when this service is in an active/active configuration. False if set to single node - bool _activeActive; - - enum AARole { Primary, Secondary, Checkpointer }; - AARole _myRole; - // Log size at which we start a new log file. This triggers a checkpoint, <= 0 if manual only checkpointing is done - long _newLogTriggerSize; - // The numeric suffix of the log file currently being read or written to - long _lastLogFile; - // A locking variable (with compare and swap) used to eliminate redundant log moves - int _movingToNextLog = 0; - - - const int UnexpectedError = 0; - const int VersionMismatch = 1; - const int MissingCheckpoint = 2; - const int MissingLog = 3; - const int AzureOperationError = 4; - const int LogWriteError = 5; - - internal void OnError(int ErrNo, string ErrorMessage) - { - Console.WriteLine("FATAL ERROR " + ErrNo.ToString() + ": " + ErrorMessage); - Console.Out.Flush(); - Console.Out.Flush(); - _coral.KillLocalWorker(""); - } - - /// - /// Need a manually created backing field so it can be marked volatile. - /// - private volatile FlexReadBuffer backingFieldForLastReceivedCheckpoint; - - internal FlexReadBuffer LastReceivedCheckpoint - { - get { return backingFieldForLastReceivedCheckpoint; } - set - { - backingFieldForLastReceivedCheckpoint = value; - } - } - - internal long _lastReceivedCheckpointSize; - - bool _recovering; - internal bool Recovering - { - get { return _recovering; } - set { _recovering = value; } - } - - /// - /// Need a manually created backing field so it can be marked volatile. - /// - private volatile FlexReadBuffer backingFieldForServiceInitializationMessage; - - internal FlexReadBuffer ServiceInitializationMessage - { - get { return backingFieldForServiceInitializationMessage; } - set - { - backingFieldForServiceInitializationMessage = value; - } - } - - // Hack for enabling fast IP6 loopback in Windows on .NET - const int SIO_LOOPBACK_FAST_PATH = (-1744830448); - - void SetupLocalServiceStreams() - { - // Note that the local service must setup the listener and sender in reverse order or there will be a deadlock - // First establish receiver - Use fast IP6 loopback - Byte[] optionBytes = BitConverter.GetBytes(1); -#if _WINDOWS - Socket mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); - mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); - var ipAddress = IPAddress.IPv6Loopback; -#else - Socket mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); - var ipAddress = IPAddress.Loopback; -#endif - - var myReceiveEP = new IPEndPoint(ipAddress, _localServiceReceiveFromPort); - mySocket.Bind(myReceiveEP); - mySocket.Listen(1); - var socket = mySocket.Accept(); - _localServiceReceiveFromStream = new NetworkStream(socket); - -#if _WINDOWS - // Now establish sender - Also use fast IP6 loopback - mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); - mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); -#else - mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); -#endif - while (true) - { - try - { -#if _WINDOWS - mySocket.Connect(IPAddress.IPv6Loopback, _localServiceSendToPort); -#else - mySocket.Connect(IPAddress.Loopback, _localServiceSendToPort); -#endif - break; - } - catch { } - } - TcpClient tcpSendToClient = new TcpClient(); - tcpSendToClient.Client = mySocket; - _localServiceSendToStream = tcpSendToClient.GetStream(); - } - - private void SetupAzureConnections() - { - try - { - _storageAccount = CloudStorageAccount.Parse(_storageConnectionString); - _tableClient = _storageAccount.CreateCloudTableClient(); - _serviceInstanceTable = _tableClient.GetTableReference(_serviceName); - if ((_storageAccount == null) || (_tableClient == null) || (_serviceInstanceTable == null)) - { - OnError(AzureOperationError, "Error setting up initial connection to Azure"); - } - } - catch - { - OnError(AzureOperationError, "Error setting up initial connection to Azure"); - } - } - - private const uint FILE_FLAG_NO_BUFFERING = 0x20000000; - - private void PrepareToRecoverOrStart() - { - IPAddress localIPAddress = Dns.GetHostEntry("localhost").AddressList[0]; - LogWriter.CreateDirectoryIfNotExists(_serviceLogPath + _serviceName + "_" + _currentVersion); - _logFileNameBase = Path.Combine(_serviceLogPath + _serviceName + "_" + _currentVersion, "server"); - SetupLocalServiceStreams(); - if (!_runningRepro) - { - SetupAzureConnections(); - } - ServiceInitializationMessage = null; - Thread localListenerThread = new Thread(() => LocalListener()); - localListenerThread.Start(); - } - - private async Task RecoverOrStartAsync(long checkpointToLoad = -1, - bool testUpgrade = false) - { - CheckpointingService = false; - Recovering = false; - PrepareToRecoverOrStart(); - if (!_runningRepro) - { - RuntimeChecksOnProcessStart(); - } - // Determine if we are recovering - if (!_createService) - { - Recovering = true; - _restartWithRecovery = true; - if (!_runningRepro) - { - // We are recovering - find the last committed checkpoint - _lastCommittedCheckpoint = long.Parse(RetrieveServiceInfo("LastCommittedCheckpoint")); - } - else - { - // We are running a repro - _lastCommittedCheckpoint = checkpointToLoad; - } - // Start from the log file associated with the last committed checkpoint - _lastLogFile = _lastCommittedCheckpoint; - if (_activeActive) - { - if (!_runningRepro) - { - // Determines the role as either secondary or checkpointer. If its a checkpointer, _commitBlobWriter holds the write lock on the last checkpoint - DetermineRole(); - } - else - { - // We are running a repro. Act as a secondary - _myRole = AARole.Secondary; - } - } - - using (LogReader checkpointStream = new LogReader(_logFileNameBase + "chkpt" + _lastCommittedCheckpoint.ToString())) - { - // recover the checkpoint - Note that everything except the replay data must have been written successfully or we - // won't think we have a valid checkpoint here. Since we can only be the secondary or checkpointer, the committer doesn't write to the replay log - // Recover committer - _committer = new Committer(_localServiceSendToStream, _persistLogs, this, -1, checkpointStream); - // Recover input connections - _inputs = _inputs.AmbrosiaDeserialize(checkpointStream); - // Recover output connections - _outputs = _outputs.AmbrosiaDeserialize(checkpointStream, this); - UnbufferNonreplayableCalls(); - // Restore new service from checkpoint - var serviceCheckpoint = new FlexReadBuffer(); - FlexReadBuffer.Deserialize(checkpointStream, serviceCheckpoint); - _committer.SendCheckpointToRecoverFrom(serviceCheckpoint.Buffer, serviceCheckpoint.Length, checkpointStream); - } - - using (LogReader replayStream = new LogReader(_logFileNameBase + "log" + _lastLogFile.ToString())) - { - if (_myRole == AARole.Secondary && !_runningRepro) - { - // If this is a secondary, set up the detector to detect when this instance becomes the primary - var t = DetectBecomingPrimaryAsync(); - } - if (testUpgrade) - { - // We are actually testing an upgrade. Must upgrade the service before replay - _committer.SendUpgradeRequest(); - } - await ReplayAsync(replayStream); - } - var readVersion = long.Parse(RetrieveServiceInfo("CurrentVersion")); - if (_currentVersion != readVersion) - { - - OnError(VersionMismatch, "Version changed during recovery: Expected " + _currentVersion + " was: " + readVersion.ToString()); - } - if (_upgrading) - { - MoveServiceToUpgradeDirectory(); - } - // Now becoming the primary. Moving to next log file since the current one may have junk at the end. - bool wasUpgrading = _upgrading; - await MoveServiceToNextLogFileAsync(false, true); - if (wasUpgrading) - { - // Successfully wrote out our new first checkpoint in the upgraded version, can now officially take the version upgrade - InsertOrReplaceServiceInfoRecord("CurrentVersion", _upgradeToVersion.ToString()); - } - Recovering = false; - } - else - { - // We are starting for the first time. This is the primary - _restartWithRecovery = false; - _lastCommittedCheckpoint = 0; - _lastLogFile = 0; - _inputs = new ConcurrentDictionary(); - _outputs = new ConcurrentDictionary(); - _serviceInstanceTable.CreateIfNotExistsAsync().Wait(); - - _myRole = AARole.Primary; - - _checkpointWriter = null; - _committer = new Committer(_localServiceSendToStream, _persistLogs, this); - Connect(_serviceName, AmbrosiaDataOutputsName, _serviceName, AmbrosiaDataInputsName); - Connect(_serviceName, AmbrosiaControlOutputsName, _serviceName, AmbrosiaControlInputsName); - await MoveServiceToNextLogFileAsync(true, true); - InsertOrReplaceServiceInfoRecord("CurrentVersion", _currentVersion.ToString()); - // Shake loose initialization message - await _committer.TryCommitAsync(_outputs); - } - } - - private void UnbufferNonreplayableCalls() - { - foreach (var outputRecord in _outputs) - { - var newLastSeqNo = outputRecord.Value.BufferedOutput.TrimAndUnbufferNonreplayableCalls(outputRecord.Value.TrimTo, outputRecord.Value.ReplayableTrimTo); - if (newLastSeqNo != -1) - { - outputRecord.Value.LastSeqNoFromLocalService = newLastSeqNo; - } - } - } - - internal void MoveServiceToUpgradeDirectory() - { - LogWriter.CreateDirectoryIfNotExists(_serviceLogPath + _serviceName + "_" + _upgradeToVersion); - _logFileNameBase = Path.Combine(_serviceLogPath + _serviceName + "_" + _upgradeToVersion, "server"); - } - - public CRAErrorCode Connect(string fromProcessName, string fromEndpoint, string toProcessName, string toEndpoint) - { - foreach (var conn in _coral.GetConnectionsFromVertex(fromProcessName)) - { - if (conn.FromEndpoint.Equals(fromEndpoint) && conn.ToVertex.Equals(toProcessName) && conn.ToEndpoint.Equals(toEndpoint)) - return CRAErrorCode.Success; - } - return _coral.Connect(fromProcessName, fromEndpoint, toProcessName, toEndpoint); - } - - private LogWriter CreateNextLogFile() - { - if (LogWriter.FileExists(_logFileNameBase + "log" + (_lastLogFile + 1).ToString())) - { - File.Delete(_logFileNameBase + "log" + (_lastLogFile + 1).ToString()); - } - LogWriter retVal = null; - try - { - retVal = new LogWriter(_logFileNameBase + "log" + (_lastLogFile + 1).ToString(), 1024 * 1024, 6); - } - catch (Exception e) - { - OnError(0, "Error opening next log file:" + e.ToString()); - } - return retVal; - } - - // Closes out the old log file and starts a new one. Takes checkpoints if this instance should - private async Task MoveServiceToNextLogFileAsync(bool firstStart = false, bool becomingPrimary = false) - { - // Move to the next log file. By doing this before checkpointing, we may end up skipping a checkpoint file (failure during recovery). - // This is ok since we recover from the first committed checkpoint and will just skip empty log files during replay - await _committer.SleepAsync(); - var nextLogHandle = CreateNextLogFile(); - _lastLogFile++; - if (_sharded) - { - InsertOrReplaceServiceInfoRecord("LastLogFile" + _shardID.ToString(), _lastLogFile.ToString()); - } - else - { - InsertOrReplaceServiceInfoRecord("LastLogFile", _lastLogFile.ToString()); - } - _committer.SwitchLogStreams(nextLogHandle); - if (firstStart || !_activeActive) - { - // take the checkpoint associated with the beginning of the new log and let go of the log file lock - _committer.QuiesceServiceWithSendCheckpointRequest(_upgrading, becomingPrimary); - _upgrading = false; - if (firstStart) - { - while (ServiceInitializationMessage == null) { await Task.Yield(); }; - await _committer.AddInitialRowAsync(ServiceInitializationMessage); - } - await CheckpointAsync(); - _checkpointWriter.Dispose(); - _checkpointWriter = null; - } - await _committer.WakeupAsync(); - } - - //============================================================================================================== - // Insance compete over write permission for LOG file & CheckPoint file - private void DetermineRole() - { - try - { - // Compete for Checkpoint Write Permission - _checkpointWriter = new LogWriter(_logFileNameBase + "chkpt" + (_lastCommittedCheckpoint).ToString(), 1024 * 1024, 6, true); - _myRole = AARole.Checkpointer; // I'm a checkpointing secondary - var oldCheckpoint = _lastCommittedCheckpoint; - _lastCommittedCheckpoint = long.Parse(RetrieveServiceInfo("LastCommittedCheckpoint")); - if (oldCheckpoint != _lastCommittedCheckpoint) - { - _checkpointWriter.Dispose(); - throw new Exception("We got a handle on an old checkpoint. The checkpointer was alive when this instance started"); - } - } - catch - { - _checkpointWriter = null; - _myRole = AARole.Secondary; // I'm a secondary - } - } - - public async Task DetectBecomingPrimaryAsync() - { - // keep trying to take the write permission on LOG file - // LOG write permission acquired only in case primary failed (is down) - while (true) - { - try - { - var oldLastLogFile = _lastLogFile; - // Compete for log write permission - non destructive open for write - open for append - var lastLogFileStream = new LogWriter(_logFileNameBase + "log" + (oldLastLogFile).ToString(), 1024 * 1024, 6, true); - if (long.Parse(RetrieveServiceInfo("LastLogFile")) != oldLastLogFile) - { - // We got an old log. Try again - lastLogFileStream.Dispose(); - throw new Exception(); - } - // We got the lock! Set things up so we let go of the lock at the right moment - await _committer.SleepAsync(); - _committer.SwitchLogStreams(lastLogFileStream); - await _committer.WakeupAsync(); - _myRole = AARole.Primary; // this will stop and break the loop in the function replayInput_Sec() - Console.WriteLine("\n\nNOW I'm Primary\n\n"); - return; - } - catch - { - await Task.Delay(1000); - } - } - } - - private async Task ReplayAsync(LogReader replayStream) - { - var tempBuf = new byte[100]; - var tempBuf2 = new byte[100]; - var headerBuf = new byte[Committer.HeaderSize]; - var headerBufStream = new MemoryStream(headerBuf); - var committedInputDict = new Dictionary(); - var trimDict = new Dictionary(); - var detectedEOF = false; - var detectedEOL = false; - var clearedCommitterWrite = false; - // Keep replaying commits until we run out of replay data - while (true) - { - long logRecordPos = replayStream.Position; - int commitSize; - try - { - // First get commit ID and check for integrity - replayStream.ReadAllRequiredBytes(headerBuf, 0, Committer.HeaderSize); - headerBufStream.Position = 0; - var commitID = headerBufStream.ReadIntFixed(); - if (commitID != _committer.CommitID) - { - throw new Exception("Committer didn't match. Must be incomplete record"); - } - // Get commit page length - commitSize = headerBufStream.ReadIntFixed(); - var checkBytes = headerBufStream.ReadLongFixed(); - var writeSeqID = headerBufStream.ReadLongFixed(); - if (writeSeqID != _committer._nextWriteID) - { - throw new Exception("Out of order page. Must be incomplete record"); - } - // Remove header - commitSize -= Committer.HeaderSize; - if (commitSize > tempBuf.Length) - { - tempBuf = new byte[commitSize]; - } - replayStream.Read(tempBuf, 0, commitSize); - // Perform integrity check - long checkBytesCalc = _committer.CheckBytes(tempBuf, 0, commitSize); - if (checkBytesCalc != checkBytes) - { - throw new Exception("Integrity check failed for page. Must be incomplete record"); - } - - // Read changes in input consumption progress to reflect in _inputs - var watermarksToRead = replayStream.ReadInt(); - committedInputDict.Clear(); - for (int i = 0; i < watermarksToRead; i++) - { - var inputNameSize = replayStream.ReadInt(); - if (inputNameSize > tempBuf2.Length) - { - tempBuf2 = new byte[inputNameSize]; - } - replayStream.Read(tempBuf2, 0, inputNameSize); - var inputName = Encoding.UTF8.GetString(tempBuf2, 0, inputNameSize); - var newLongPair = new LongPair(); - newLongPair.First = replayStream.ReadLongFixed(); - newLongPair.Second = replayStream.ReadLongFixed(); - committedInputDict[inputName] = newLongPair; - } - // Read changes in trim to perform and reflect in _outputs - watermarksToRead = replayStream.ReadInt(); - trimDict.Clear(); - for (int i = 0; i < watermarksToRead; i++) - { - var inputNameSize = replayStream.ReadInt(); - if (inputNameSize > tempBuf2.Length) - { - tempBuf2 = new byte[inputNameSize]; - } - replayStream.Read(tempBuf2, 0, inputNameSize); - var inputName = Encoding.UTF8.GetString(tempBuf2, 0, inputNameSize); - long seqNo = replayStream.ReadLongFixed(); - trimDict[inputName] = seqNo; - } - } - catch - { - // Couldn't recover replay segment. Could be for a number of reasons. - if (!_activeActive || detectedEOL) - { - // Leave replay and continue recovery. - break; - } - if (detectedEOF) - { - // Move to the next log file for reading only. We may need to take a checkpoint - _lastLogFile++; - replayStream.Dispose(); - if (!LogWriter.FileExists(_logFileNameBase + "log" + _lastLogFile.ToString())) - { - OnError(MissingLog, "Missing log in replay " + _lastLogFile.ToString()); - } - replayStream = new LogReader(_logFileNameBase + "log" + _lastLogFile.ToString()); - if (_myRole == AARole.Checkpointer) - { - // take the checkpoint associated with the beginning of the new log - await _committer.SleepAsync(); - _committer.QuiesceServiceWithSendCheckpointRequest(); - await CheckpointAsync(); - await _committer.WakeupAsync(); - } - detectedEOF = false; - continue; - } - var myRoleBeforeEOLChecking = _myRole; - replayStream.Position = logRecordPos; - var newLastLogFile = _lastLogFile; - if (_runningRepro) - { - if (LogWriter.FileExists(_logFileNameBase + "log" + (_lastLogFile + 1).ToString())) - { - // If there is a next file, then move to it - newLastLogFile = _lastLogFile + 1; - } - } - else - { - newLastLogFile = long.Parse(RetrieveServiceInfo("LastLogFile")); - } - if (newLastLogFile > _lastLogFile) // a new log file has been written - { - // Someone started a new log. Try to read the last record again and then move to next file - detectedEOF = true; - continue; - } - if (myRoleBeforeEOLChecking == AARole.Primary) - { - // Became the primary and the current file is the end of the log. Make sure we read the whole file. - detectedEOL = true; - continue; - } - // The remaining case is that we hit the end of log, but someone is still writing to this file. Wait and try to read again - await Task.Delay(1000); - continue; - } - // Successfully read an entire replay segment. Go ahead and process for recovery - foreach (var kv in committedInputDict) - { - InputConnectionRecord inputConnectionRecord; - if (!_inputs.TryGetValue(kv.Key, out inputConnectionRecord)) - { - // Create input record and add it to the dictionary - inputConnectionRecord = new InputConnectionRecord(); - _inputs[kv.Key] = inputConnectionRecord; - } - inputConnectionRecord.LastProcessedID = kv.Value.First; - inputConnectionRecord.LastProcessedReplayableID = kv.Value.Second; - OutputConnectionRecord outputConnectionRecord; - // this lock prevents conflict with output arriving from the local service during replay - lock (_outputs) - { - if (!_outputs.TryGetValue(kv.Key, out outputConnectionRecord)) - { - outputConnectionRecord = new OutputConnectionRecord(this); - _outputs[kv.Key] = outputConnectionRecord; - } - } - // this lock prevents conflict with output arriving from the local service during replay and ensures maximal cleaning - lock (outputConnectionRecord) - { - outputConnectionRecord.RemoteTrim = Math.Max(kv.Value.First, outputConnectionRecord.RemoteTrim); - outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.Second, outputConnectionRecord.RemoteTrimReplayable); - if (outputConnectionRecord.ControlWorkQ.IsEmpty) - { - outputConnectionRecord.ControlWorkQ.Enqueue(-2); - } - } - } - // Do the actual work on the local service - _localServiceSendToStream.Write(headerBuf, 0, Committer.HeaderSize); - _localServiceSendToStream.Write(tempBuf, 0, commitSize); - // Trim the outputs. Should clean as aggressively as during normal operation - foreach (var kv in trimDict) - { - OutputConnectionRecord outputConnectionRecord; - // this lock prevents conflict with output arriving from the local service during replay - lock (_outputs) - { - if (!_outputs.TryGetValue(kv.Key, out outputConnectionRecord)) - { - outputConnectionRecord = new OutputConnectionRecord(this); - _outputs[kv.Key] = outputConnectionRecord; - } - } - // this lock prevents conflict with output arriving from the local service during replay and ensures maximal cleaning - lock (outputConnectionRecord) - { - outputConnectionRecord.TrimTo = kv.Value; - outputConnectionRecord.ReplayableTrimTo = kv.Value; - outputConnectionRecord.BufferedOutput.Trim(kv.Value, ref outputConnectionRecord.placeInOutput); - } - } - // If this is the first replay segment, it invalidates the contents of the committer, which must be cleared. - if (!clearedCommitterWrite) - { - _committer.ClearNextWrite(); - clearedCommitterWrite = true; - } - // bump up the write ID in the committer in preparation for reading or writing the next page - _committer._nextWriteID++; - } - } - - // Thread for listening to the local service - private void LocalListener() - { - try - { - var localServiceBuffer = new FlexReadBuffer(); - var batchServiceBuffer = new FlexReadBuffer(); - var bufferSize = 128 * 1024; - byte[] bytes = new byte[bufferSize]; - byte[] bytesBak = new byte[bufferSize]; - while (_outputs == null) { Thread.Yield(); } - while (true) - { - // Do an async message read. Note that the async aspect of this is slow. - FlexReadBuffer.Deserialize(_localServiceReceiveFromStream, localServiceBuffer); - ProcessSyncLocalMessage(ref localServiceBuffer, batchServiceBuffer); -/* Disabling because of BUGBUG. Eats checkpoint bytes in some circumstances before checkpointer can deal with it. - // Process more messages from the local service if available before going async again, doing this here because - // not all language shims will be good citizens here, and we may need to process small messages to avoid inefficiencies - // in LAR. - int curPosInBuffer = 0; - int readBytes = 0; - while (readBytes != 0 || _localServiceReceiveFromStream.DataAvailable) - { - // Read data into buffer to avoid lock contention of reading directly from the stream - while ((_localServiceReceiveFromStream.DataAvailable && readBytes < bufferSize) || !bytes.EnoughBytesForReadBufferedInt(0, readBytes)) - { - readBytes += _localServiceReceiveFromStream.Read(bytes, readBytes, bufferSize - readBytes); - } - // Continue loop as long as we can meaningfully read a message length - var memStream = new MemoryStream(bytes, 0, readBytes); - while (bytes.EnoughBytesForReadBufferedInt(curPosInBuffer, readBytes - curPosInBuffer)) - { - // Read the length of the next message - var messageSize = memStream.ReadInt(); - var messageSizeSize = StreamCommunicator.IntSize(messageSize); - memStream.Position -= messageSizeSize; - if (curPosInBuffer + messageSizeSize + messageSize > readBytes) - { - // didn't read the full message into the buffer. It must be torn - if (messageSize + messageSizeSize > bufferSize) - { - // Buffer isn't big enough to hold the whole torn event even if empty. Increase the buffer size so the message can fit. - bufferSize = messageSize + messageSizeSize; - var newBytes = new byte[bufferSize]; - Buffer.BlockCopy(bytes, curPosInBuffer, newBytes, 0, readBytes - curPosInBuffer); - bytes = newBytes; - bytesBak = new byte[bufferSize]; - readBytes -= curPosInBuffer; - curPosInBuffer = 0; - } - break; - } - else - { - // Count this message since it is fully in the buffer - FlexReadBuffer.Deserialize(memStream, localServiceBuffer); - ProcessSyncLocalMessage(ref localServiceBuffer, batchServiceBuffer); - curPosInBuffer += messageSizeSize + messageSize; - } - } - memStream.Dispose(); - // Shift torn message to the beginning unless it is the first one - if (curPosInBuffer > 0) - { - Buffer.BlockCopy(bytes, curPosInBuffer, bytesBak, 0, readBytes - curPosInBuffer); - var tempBytes = bytes; - bytes = bytesBak; - bytesBak = tempBytes; - readBytes -= curPosInBuffer; - curPosInBuffer = 0; - } - } */ - } - } - catch (Exception e) - { - OnError(AzureOperationError, "Error in local listener data stream:" + e.ToString()); - return; - } - } - - private void MoveServiceToNextLogFileSimple() - { - MoveServiceToNextLogFileAsync().Wait(); - } - - private void ProcessSyncLocalMessage(ref FlexReadBuffer localServiceBuffer, FlexReadBuffer batchServiceBuffer) - { - var sizeBytes = localServiceBuffer.LengthLength; - Task createCheckpointTask = null; - // Process the Async message - switch (localServiceBuffer.Buffer[sizeBytes]) - { - case takeCheckpointByte: - // Handle take checkpoint messages - This is here for testing - createCheckpointTask = new Task(new Action(MoveServiceToNextLogFileSimple)); - createCheckpointTask.Start(); - localServiceBuffer.ResetBuffer(); - break; - - case checkpointByte: - _lastReceivedCheckpointSize = StreamCommunicator.ReadBufferedLong(localServiceBuffer.Buffer, sizeBytes + 1); - Console.WriteLine("Reading a checkpoint {0} bytes", _lastReceivedCheckpointSize); - LastReceivedCheckpoint = localServiceBuffer; - // Block this thread until checkpointing is complete - while (LastReceivedCheckpoint != null) { Thread.Yield();}; - break; - - case attachToByte: - // Get dest string - var destination = Encoding.UTF8.GetString(localServiceBuffer.Buffer, sizeBytes + 1, localServiceBuffer.Length - sizeBytes - 1); - localServiceBuffer.ResetBuffer(); - - if (!_runningRepro) - { - Console.WriteLine("Attaching to {0}", destination); - var connectionResult1 = Connect(_serviceName, AmbrosiaDataOutputsName, destination, AmbrosiaDataInputsName); - var connectionResult2 = Connect(_serviceName, AmbrosiaControlOutputsName, destination, AmbrosiaControlInputsName); - var connectionResult3 = Connect(destination, AmbrosiaDataOutputsName, _serviceName, AmbrosiaDataInputsName); - var connectionResult4 = Connect(destination, AmbrosiaControlOutputsName, _serviceName, AmbrosiaControlInputsName); - if ((connectionResult1 != CRAErrorCode.Success) || (connectionResult2 != CRAErrorCode.Success) || - (connectionResult3 != CRAErrorCode.Success) || (connectionResult4 != CRAErrorCode.Success)) - { - Console.WriteLine("Error attaching {0} to {1}", _serviceName, destination); - } - } - break; - - case RPCBatchByte: - var restOfBatchOffset = sizeBytes + 1; - var memStream = new MemoryStream(localServiceBuffer.Buffer, restOfBatchOffset, localServiceBuffer.Length - restOfBatchOffset); - var numRPCs = memStream.ReadInt(); - for (int i = 0; i < numRPCs; i++) - { - FlexReadBuffer.Deserialize(memStream, batchServiceBuffer); - ProcessRPC(batchServiceBuffer); - } - memStream.Dispose(); - localServiceBuffer.ResetBuffer(); - break; - - case InitalMessageByte: - // Process the Async RPC request - if (ServiceInitializationMessage != null) - { - OnError(0, "Getting second initialization message"); - } - ServiceInitializationMessage = localServiceBuffer; - localServiceBuffer = new FlexReadBuffer(); - break; - - case RPCByte: - ProcessRPC(localServiceBuffer); - // Now process any pending RPC requests from the local service before going async again - break; - - case PingByte: - // Write time into correct place in message - int destBytesSize = localServiceBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); - memStream = new MemoryStream(localServiceBuffer.Buffer, localServiceBuffer.Length - 5 * sizeof(long), sizeof(long)); - long time; - GetSystemTimePreciseAsFileTime(out time); - memStream.WriteLongFixed(time); - // Treat as RPC - ProcessRPC(localServiceBuffer); - memStream.Dispose(); - break; - - case PingReturnByte: - // Write time into correct place in message - destBytesSize = localServiceBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); - memStream = new MemoryStream(localServiceBuffer.Buffer, localServiceBuffer.Length - 2 * sizeof(long), sizeof(long)); - GetSystemTimePreciseAsFileTime(out time); - memStream.WriteLongFixed(time); - // Treat as RPC - ProcessRPC(localServiceBuffer); - memStream.Dispose(); - break; - - default: - // This one really should terminate the process; no recovery allowed. - OnError(0, "Illegal leading byte in local message"); - break; - } - } - - int _lastShuffleDestSize = -1; // must be negative because self-messages are encoded with a destination size of 0 - byte[] _lastShuffleDest = new byte[20]; - OutputConnectionRecord _shuffleOutputRecord = null; - - bool EqualBytes(byte[] data1, int data1offset, byte[] data2, int elemsCompared) - { - for (int i = 0; i < elemsCompared; i++) - { - if (data1[i + data1offset] != data2[i]) - { - return false; - } - } - return true; - } - - private void ProcessRPC(FlexReadBuffer RpcBuffer) - { - var sizeBytes = RpcBuffer.LengthLength; - int destBytesSize = RpcBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); - var destOffset = sizeBytes + 1 + StreamCommunicator.IntSize(destBytesSize); - // Check to see if the _lastShuffleDest is the same as the one to process. Caching here avoids significant overhead. - if (_lastShuffleDest == null || (_lastShuffleDestSize != destBytesSize) || !EqualBytes(RpcBuffer.Buffer, destOffset, _lastShuffleDest, destBytesSize)) - { - // Find the appropriate connection record - string destination; - if (_lastShuffleDest.Length < destBytesSize) - { - _lastShuffleDest = new byte[destBytesSize]; - } - Buffer.BlockCopy(RpcBuffer.Buffer, destOffset, _lastShuffleDest, 0, destBytesSize); - _lastShuffleDestSize = destBytesSize; - destination = Encoding.UTF8.GetString(RpcBuffer.Buffer, destOffset, destBytesSize); - // locking to avoid conflict with stream reconnection immediately after replay and trim during replay - lock (_outputs) - { - // During replay, the output connection won't exist if this is the first message ever and no trim record has been processed yet. - if (!_outputs.TryGetValue(destination, out _shuffleOutputRecord)) - { - _shuffleOutputRecord = new OutputConnectionRecord(this); - _outputs[destination] = _shuffleOutputRecord; - } - } - } - - int restOfRPCOffset = destOffset + destBytesSize; - int restOfRPCMessageSize = RpcBuffer.Length - restOfRPCOffset; - var totalSize = StreamCommunicator.IntSize(1 + restOfRPCMessageSize) + - 1 + restOfRPCMessageSize; - - // lock to avoid conflict and ensure maximum memory cleaning during replay. No possible conflict during primary operation - lock (_shuffleOutputRecord) - { - // Buffer the output if it is at or beyond the replay or trim point (during recovery). If we are recovering, this may not be the case. - if ((_shuffleOutputRecord.LastSeqNoFromLocalService + 1 >= _shuffleOutputRecord.ReplayFrom) && - (_shuffleOutputRecord.LastSeqNoFromLocalService + 1 >= _shuffleOutputRecord.TrimTo)) - { - var writablePage = _shuffleOutputRecord.BufferedOutput.GetWritablePage(totalSize, _shuffleOutputRecord.LastSeqNoFromLocalService + 1); - writablePage.HighestSeqNo = _shuffleOutputRecord.LastSeqNoFromLocalService + 1; - if (RpcBuffer.Buffer[restOfRPCOffset] != (byte) RpcTypes.RpcType.Impulse) - { - writablePage.UnsentReplayableMessages++; - writablePage.TotalReplayableMessages++; - } - - // Write the bytes into the page - writablePage.curLength += writablePage.PageBytes.WriteInt(writablePage.curLength, 1 + restOfRPCMessageSize); - writablePage.PageBytes[writablePage.curLength] = RpcBuffer.Buffer[sizeBytes]; - writablePage.curLength++; - Buffer.BlockCopy(RpcBuffer.Buffer, restOfRPCOffset, writablePage.PageBytes, writablePage.curLength, restOfRPCMessageSize); - writablePage.curLength += restOfRPCMessageSize; - - // Done making modifications to the output buffer and grabbed important state. Can execute the rest concurrently. Release the lock - _shuffleOutputRecord.BufferedOutput.ReleaseAppendLock(); - RpcBuffer.ResetBuffer(); - - // Make sure there is a send enqueued in the work Q. - if (_shuffleOutputRecord._sendsEnqueued == 0) - { - _shuffleOutputRecord.DataWorkQ.Enqueue(-1); - Interlocked.Increment(ref _shuffleOutputRecord._sendsEnqueued); - } - } - else - { - RpcBuffer.ResetBuffer(); - } - _shuffleOutputRecord.LastSeqNoFromLocalService++; - } - } - - private async Task ToDataStreamAsync(Stream writeToStream, - string destString, - CancellationToken ct) - - { - OutputConnectionRecord outputConnectionRecord; - if (destString.Equals(_serviceName)) - { - destString = ""; - } - lock (_outputs) - { - if (!_outputs.TryGetValue(destString, out outputConnectionRecord)) - { - // Set up the output record for the first time and add it to the dictionary - outputConnectionRecord = new OutputConnectionRecord(this); - _outputs[destString] = outputConnectionRecord; - Console.WriteLine("Adding output:{0}", destString); - } - else - { - Console.WriteLine("restoring output:{0}", destString); - } - } - try - { - // Reset the output cursor if it exists - outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); - outputConnectionRecord.placeInOutput = new EventBuffer.BuffersCursor(null, -1, 0); - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - // Process replay message - var inputFlexBuffer = new FlexReadBuffer(); - await FlexReadBuffer.DeserializeAsync(writeToStream, inputFlexBuffer, ct); - var sizeBytes = inputFlexBuffer.LengthLength; - // Get the seqNo of the replay/filter point - var commitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); - var commitSeqNoReplayable = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); - inputFlexBuffer.ResetBuffer(); - if (outputConnectionRecord.ConnectingAfterRestart) - { - // We've been through recovery (at least partially), and have scrubbed all ephemeral calls. Must now rebase - // seq nos using the markers which were sent by the listener. Must first take locks to ensure no interference - lock (outputConnectionRecord) - { - // Don't think I actually need this lock, but can't hurt and shouldn't affect perf. - outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); - outputConnectionRecord.BufferedOutput.RebaseSeqNosInBuffer(commitSeqNo, commitSeqNoReplayable); - outputConnectionRecord.ConnectingAfterRestart = false; - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - } - } - - // If recovering, make sure event replay will be filtered out - outputConnectionRecord.ReplayFrom = commitSeqNo; - - if (outputConnectionRecord.WillResetConnection) - { - // Register our immediate intent to set the connection. This unblocks output writers - outputConnectionRecord.ResettingConnection = true; - // This lock avoids interference with buffering RPCs - lock (outputConnectionRecord) - { - // If first reconnect/connect after reset, simply adjust the seq no for the first sent message to the received commit seq no - outputConnectionRecord.ResettingConnection = false; - outputConnectionRecord.LastSeqNoFromLocalService = outputConnectionRecord.BufferedOutput.AdjustFirstSeqNoTo(commitSeqNo); - outputConnectionRecord.WillResetConnection = false; - } - } - outputConnectionRecord.LastSeqSentToReceiver = commitSeqNo - 1; - - // Enqueue a replay send - if (outputConnectionRecord._sendsEnqueued == 0) - { - - Interlocked.Increment(ref outputConnectionRecord._sendsEnqueued); - outputConnectionRecord.DataWorkQ.Enqueue(-1); - } - - // Make sure enough recovery output has been produced before we allow output to start being sent, which means that the next - // message has to be the first for replay. - while (Interlocked.Read(ref outputConnectionRecord.LastSeqNoFromLocalService) < - Interlocked.Read(ref outputConnectionRecord.LastSeqSentToReceiver)) { await Task.Yield(); }; - bool reconnecting = true; - while (true) - { - var nextEntry = await outputConnectionRecord.DataWorkQ.DequeueAsync(ct); - if (nextEntry == -1) - { - // This is a send output - Interlocked.Decrement(ref outputConnectionRecord._sendsEnqueued); - - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Code to manually trim for performance testing - // int placeToTrimTo = outputConnectionRecord.LastSeqNoFromLocalService; - // Console.WriteLine("send to {0}", outputConnectionRecord.LastSeqNoFromLocalService); - outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); - var placeAtCall = outputConnectionRecord.LastSeqSentToReceiver; - outputConnectionRecord.placeInOutput = - await outputConnectionRecord.BufferedOutput.SendAsync(writeToStream, outputConnectionRecord.placeInOutput, reconnecting); - reconnecting = false; - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Code to manually trim for performance testing - // outputConnectionRecord.TrimTo = placeToTrimTo; - } - } - } - catch (Exception e) - { - // Cleanup held locks if necessary - await Task.Yield(); - var lockVal = outputConnectionRecord.BufferedOutput.ReadTrimLock(); - if (lockVal == 1 || lockVal == 2) - { - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - } - var bufferLockVal = outputConnectionRecord.BufferedOutput.ReadAppendLock(); - if (bufferLockVal == 2) - { - outputConnectionRecord.BufferedOutput.ReleaseAppendLock(); - } - throw e; - } - } - - private async Task ToControlStreamAsync(Stream writeToStream, - string destString, - CancellationToken ct) - - { - OutputConnectionRecord outputConnectionRecord; - if (destString.Equals(_serviceName)) - { - destString = ""; - } - lock (_outputs) - { - if (!_outputs.TryGetValue(destString, out outputConnectionRecord)) - { - // Set up the output record for the first time and add it to the dictionary - outputConnectionRecord = new OutputConnectionRecord(this); - _outputs[destString] = outputConnectionRecord; - Console.WriteLine("Adding output:{0}", destString); - } - else - { - Console.WriteLine("restoring output:{0}", destString); - } - } - // Process remote trim message - var inputFlexBuffer = new FlexReadBuffer(); - await FlexReadBuffer.DeserializeAsync(writeToStream, inputFlexBuffer, ct); - var sizeBytes = inputFlexBuffer.LengthLength; - // Get the seqNo of the replay/filter point - var lastRemoteTrim = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); - - // This code dequeues output producing tasks and runs them - long currentTrim = -1; - int maxSizeOfWatermark = sizeof(int) + 4 + 2 * sizeof(long); - var watermarkArr = new byte[maxSizeOfWatermark]; - var watermarkStream = new MemoryStream(watermarkArr); - try - { - while (true) - { - // Always try to trim output buffers if possible to free up resources - if (outputConnectionRecord.TrimTo > currentTrim) - { - currentTrim = outputConnectionRecord.TrimTo; - outputConnectionRecord.BufferedOutput.AcquireTrimLock(3); - outputConnectionRecord.BufferedOutput.Trim(currentTrim, ref outputConnectionRecord.placeInOutput); - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - } - var nextEntry = await outputConnectionRecord.ControlWorkQ.DequeueAsync(ct); - if (lastRemoteTrim < outputConnectionRecord.RemoteTrim) - { - // This is a send watermark - lastRemoteTrim = outputConnectionRecord.RemoteTrim; - var lastRemoteTrimReplayable = outputConnectionRecord.RemoteTrimReplayable; - watermarkStream.Position = 0; - var watermarkLength = 1 + StreamCommunicator.LongSize(lastRemoteTrim) + StreamCommunicator.LongSize(lastRemoteTrimReplayable); - watermarkStream.WriteInt(watermarkLength); - watermarkStream.WriteByte(AmbrosiaRuntime.CommitByte); - watermarkStream.WriteLong(lastRemoteTrim); - watermarkStream.WriteLong(lastRemoteTrimReplayable); - await writeToStream.WriteAsync(watermarkArr, 0, watermarkLength + StreamCommunicator.IntSize(watermarkLength)); - var flushTask = writeToStream.FlushAsync(); - } - } - } - catch (Exception e) - { - // Cleanup held locks if necessary - await Task.Yield(); - var lockVal = outputConnectionRecord.BufferedOutput.ReadTrimLock(); - if (lockVal == 3) - { - outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); - } - var bufferLockVal = outputConnectionRecord.BufferedOutput.ReadAppendLock(); - if (bufferLockVal == 3) - { - outputConnectionRecord.BufferedOutput.ReleaseAppendLock(); - } - throw e; - } - } - - private async Task SendReplayMessageAsync(Stream sendToStream, - long lastProcessedID, - long lastProcessedReplayableID, - CancellationToken ct) - { - // Send FilterTo message to the destination command stream - // Write message size - sendToStream.WriteInt(1 + StreamCommunicator.LongSize(lastProcessedID) + StreamCommunicator.LongSize(lastProcessedReplayableID)); - // Write message type - sendToStream.WriteByte(replayFromByte); - // Write the output filter seqNo for the other side - sendToStream.WriteLong(lastProcessedID); - sendToStream.WriteLong(lastProcessedReplayableID); - await sendToStream.FlushAsync(ct); - } - - - private async Task SendTrimStateMessageAsync(Stream sendToStream, - long trimTo, - CancellationToken ct) - { - // Send FilterTo message to the destination command stream - // Write message size - sendToStream.WriteInt(1 + StreamCommunicator.LongSize(trimTo)); - // Write message type - sendToStream.WriteByte(trimToByte); - // Write the output filter seqNo for the other side - sendToStream.WriteLong(trimTo); - await sendToStream.FlushAsync(ct); - } - - private async Task FromDataStreamAsync(Stream readFromStream, - string sourceString, - CancellationToken ct) - { - InputConnectionRecord inputConnectionRecord; - if (sourceString.Equals(_serviceName)) - { - sourceString = ""; - } - if (!_inputs.TryGetValue(sourceString, out inputConnectionRecord)) - { - // Create input record and add it to the dictionary - inputConnectionRecord = new InputConnectionRecord(); - _inputs[sourceString] = inputConnectionRecord; - Console.WriteLine("Adding input:{0}", sourceString); - } - else - { - Console.WriteLine("restoring input:{0}", sourceString); - } - inputConnectionRecord.DataConnectionStream = (NetworkStream)readFromStream; - await SendReplayMessageAsync(readFromStream, inputConnectionRecord.LastProcessedID + 1, inputConnectionRecord.LastProcessedReplayableID + 1, ct); - // Create new input task for monitoring new input - Task inputTask; - inputTask = InputDataListenerAsync(inputConnectionRecord, sourceString, ct); - await inputTask; - } - - private async Task FromControlStreamAsync(Stream readFromStream, - string sourceString, - CancellationToken ct) - { - InputConnectionRecord inputConnectionRecord; - if (sourceString.Equals(_serviceName)) - { - sourceString = ""; - } - if (!_inputs.TryGetValue(sourceString, out inputConnectionRecord)) - { - // Create input record and add it to the dictionary - inputConnectionRecord = new InputConnectionRecord(); - _inputs[sourceString] = inputConnectionRecord; - Console.WriteLine("Adding input:{0}", sourceString); - } - else - { - Console.WriteLine("restoring input:{0}", sourceString); - } - inputConnectionRecord.ControlConnectionStream = (NetworkStream)readFromStream; - OutputConnectionRecord outputConnectionRecord; - long outputTrim = -1; - lock (_outputs) - { - if (_outputs.TryGetValue(sourceString, out outputConnectionRecord)) - { - outputTrim = outputConnectionRecord.TrimTo; - } - } - await SendTrimStateMessageAsync(readFromStream, outputTrim, ct); - // Create new input task for monitoring new input - Task inputTask; - inputTask = InputControlListenerAsync(inputConnectionRecord, sourceString, ct); - await inputTask; - } - - - private async Task InputDataListenerAsync(InputConnectionRecord inputRecord, - string inputName, - CancellationToken ct) - { - var inputFlexBuffer = new FlexReadBuffer(); - var bufferSize = 128 * 1024; - byte[] bytes = new byte[bufferSize]; - byte[] bytesBak = new byte[bufferSize]; - while (true) - { - await FlexReadBuffer.DeserializeAsync(inputRecord.DataConnectionStream, inputFlexBuffer, ct); - await ProcessInputMessage(inputRecord, inputName, inputFlexBuffer); - } - } - - private async Task InputControlListenerAsync(InputConnectionRecord inputRecord, - string inputName, - CancellationToken ct) - { - var inputFlexBuffer = new FlexReadBuffer(); - var myBytes = new byte[20]; - var bufferSize = 128 * 1024; - byte[] bytes = new byte[bufferSize]; - byte[] bytesBak = new byte[bufferSize]; - while (true) - { - await FlexReadBuffer.DeserializeAsync(inputRecord.ControlConnectionStream, inputFlexBuffer, ct); - var sizeBytes = inputFlexBuffer.LengthLength; - switch (inputFlexBuffer.Buffer[sizeBytes]) - { - case CommitByte: - long commitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); - long replayableCommitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1 + StreamCommunicator.LongSize(commitSeqNo)); - inputFlexBuffer.ResetBuffer(); - - // Find the appropriate connection record - var outputConnectionRecord = _outputs[inputName]; - // Check to make sure this is progress, otherwise, can ignore - if (commitSeqNo > outputConnectionRecord.TrimTo && !outputConnectionRecord.WillResetConnection && !outputConnectionRecord.ConnectingAfterRestart) - { - outputConnectionRecord.TrimTo = Math.Max(outputConnectionRecord.TrimTo, commitSeqNo); - outputConnectionRecord.ReplayableTrimTo = Math.Max(outputConnectionRecord.TrimTo, replayableCommitSeqNo); - if (outputConnectionRecord.ControlWorkQ.IsEmpty) - { - outputConnectionRecord.ControlWorkQ.Enqueue(-2); - } - lock (_committer._trimWatermarks) - { - _committer._trimWatermarks[inputName] = replayableCommitSeqNo; - } - } - break; - default: - // Bubble the exception up to CRA - throw new Exception("Illegal leading byte in input control message"); - break; - } - } - } - - private async Task ProcessInputMessage(InputConnectionRecord inputRecord, - string inputName, - FlexReadBuffer inputFlexBuffer) - { - var sizeBytes = inputFlexBuffer.LengthLength; - switch (inputFlexBuffer.Buffer[sizeBytes]) - { - case RPCByte: - if (inputFlexBuffer.Buffer[sizeBytes + 1] != (byte) RpcTypes.RpcType.Impulse) - { - inputRecord.LastProcessedReplayableID++; - } - inputRecord.LastProcessedID++; - var newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID, inputRecord.LastProcessedReplayableID, _outputs); - inputFlexBuffer.ResetBuffer(); - //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment for testing - //Console.WriteLine("Received {0}", inputRecord.LastProcessedID); - if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) - { - // Make sure only one input thread is moving to the next log file. Won't break the system if we don't do this, but could result in - // empty log files - if (Interlocked.CompareExchange(ref _movingToNextLog, 1, 0) == 0) - { - await MoveServiceToNextLogFileAsync(); - _movingToNextLog = 0; - } - } - break; - - case CountReplayableRPCBatchByte: - var restOfBatchOffset = inputFlexBuffer.LengthLength + 1; - var memStream = new MemoryStream(inputFlexBuffer.Buffer, restOfBatchOffset, inputFlexBuffer.Length - restOfBatchOffset); - var numRPCs = memStream.ReadInt(); - var numReplayableRPCs = memStream.ReadInt(); - inputRecord.LastProcessedID += numRPCs; - inputRecord.LastProcessedReplayableID += numReplayableRPCs; - newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID, inputRecord.LastProcessedReplayableID, _outputs); - inputFlexBuffer.ResetBuffer(); - memStream.Dispose(); - //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment for testing - //Console.WriteLine("Received {0}", inputRecord.LastProcessedID); - if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) - { - // Move to next log if checkpoints aren't manual, and we've hit the trigger size - await MoveServiceToNextLogFileAsync(); - } - break; - - case RPCBatchByte: - restOfBatchOffset = inputFlexBuffer.LengthLength + 1; - memStream = new MemoryStream(inputFlexBuffer.Buffer, restOfBatchOffset, inputFlexBuffer.Length - restOfBatchOffset); - numRPCs = memStream.ReadInt(); - inputRecord.LastProcessedID += numRPCs; - inputRecord.LastProcessedReplayableID += numRPCs; - newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID, inputRecord.LastProcessedReplayableID, _outputs); - inputFlexBuffer.ResetBuffer(); - memStream.Dispose(); - //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment for testing - //Console.WriteLine("Received {0}", inputRecord.LastProcessedID); - if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) - { - // Make sure only one input thread is moving to the next log file. Won't break the system if we don't do this, but could result in - // empty log files - if (Interlocked.CompareExchange(ref _movingToNextLog, 1, 0) == 0) - { - await MoveServiceToNextLogFileAsync(); - _movingToNextLog = 0; - } - } - break; - - case PingByte: - // Write time into correct place in message - memStream = new MemoryStream(inputFlexBuffer.Buffer, inputFlexBuffer.Length - 4 * sizeof(long), sizeof(long)); - long time; - GetSystemTimePreciseAsFileTime(out time); - memStream.WriteLongFixed(time); - // Treat as RPC - inputRecord.LastProcessedID++; - await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID, inputRecord.LastProcessedReplayableID, _outputs); - inputFlexBuffer.ResetBuffer(); - memStream.Dispose(); - break; - - case PingReturnByte: - // Write time into correct place in message - memStream = new MemoryStream(inputFlexBuffer.Buffer, inputFlexBuffer.Length - 1 * sizeof(long), sizeof(long)); - GetSystemTimePreciseAsFileTime(out time); - memStream.WriteLongFixed(time); - // Treat as RPC - inputRecord.LastProcessedID++; - await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID, inputRecord.LastProcessedReplayableID, _outputs); - inputFlexBuffer.ResetBuffer(); - memStream.Dispose(); - break; - - default: - // Bubble the exception up to CRA - throw new Exception("Illegal leading byte in input data message"); - } - } - - private LogWriter OpenNextCheckpointFile() - { - if (LogWriter.FileExists(_logFileNameBase + "chkpt" + (_lastCommittedCheckpoint + 1).ToString())) - { - File.Delete(_logFileNameBase + (_lastCommittedCheckpoint + 1).ToString()); - } - LogWriter retVal = null; - try - { - retVal = new LogWriter(_logFileNameBase + "chkpt" + (_lastCommittedCheckpoint + 1).ToString(), 1024 * 1024, 6); - } - catch (Exception e) - { - OnError(0, "Error opening next checkpoint file" + e.ToString()); - } - return retVal; - } - - private void CleanupOldCheckpoint() - { - var fileNameToDelete = _logFileNameBase + (_lastCommittedCheckpoint - 1).ToString(); - if (LogWriter.FileExists(fileNameToDelete)) - { - File.Delete(fileNameToDelete); - } - } - - // This method takes a checkpoint and bumps the counter. It DOES NOT quiesce anything - public async Task CheckpointAsync() - { - var oldCheckpointWriter = _checkpointWriter; - // Take lock on new checkpoint file - _checkpointWriter = OpenNextCheckpointFile(); - // Make sure the service is quiesced before continuing - CheckpointingService = true; - while (LastReceivedCheckpoint == null) { await Task.Yield(); } - // Now that the service has sent us its checkpoint, we need to quiesce the output connections, which may be sending - foreach (var outputRecord in _outputs) - { - outputRecord.Value.BufferedOutput.AcquireAppendLock(); - } - - CheckpointingService = false; - // Serialize committer - _committer.Serialize(_checkpointWriter); - // Serialize input connections - _inputs.AmbrosiaSerialize(_checkpointWriter); - // Serialize output connections - _outputs.AmbrosiaSerialize(_checkpointWriter); - foreach (var outputRecord in _outputs) - { - outputRecord.Value.BufferedOutput.ReleaseAppendLock(); - } - - // Serialize the service note that the local listener task is blocked after reading the checkpoint until the end of this method - _checkpointWriter.Write(LastReceivedCheckpoint.Buffer, 0, LastReceivedCheckpoint.Length); - _checkpointWriter.Write(_localServiceReceiveFromStream, _lastReceivedCheckpointSize); - _checkpointWriter.Flush(); - _lastCommittedCheckpoint++; - if (_sharded) - { - InsertOrReplaceServiceInfoRecord("LastCommittedCheckpoint" + _shardID.ToString(), _lastCommittedCheckpoint.ToString()); - } - else - { - InsertOrReplaceServiceInfoRecord("LastCommittedCheckpoint", _lastCommittedCheckpoint.ToString()); - } - - // Trim output buffers of inputs, since the inputs are now part of the checkpoint and can't be lost. Must do this after the checkpoint has been - // successfully written - foreach (var kv in _inputs) - { - OutputConnectionRecord outputConnectionRecord; - if (!_outputs.TryGetValue(kv.Key, out outputConnectionRecord)) - { - outputConnectionRecord = new OutputConnectionRecord(this); - _outputs[kv.Key] = outputConnectionRecord; - } - outputConnectionRecord.RemoteTrim = Math.Max (kv.Value.LastProcessedID, outputConnectionRecord.RemoteTrim); - outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.LastProcessedReplayableID, outputConnectionRecord.RemoteTrimReplayable); - if (outputConnectionRecord.ControlWorkQ.IsEmpty) - { - outputConnectionRecord.ControlWorkQ.Enqueue(-2); - } - } - - if (oldCheckpointWriter != null) - { - // Release lock on previous checkpoint file - oldCheckpointWriter.Dispose(); - } - - // Unblock the local input processing task - LastReceivedCheckpoint.ThrowAwayBuffer(); - LastReceivedCheckpoint = null; - } - - public AmbrosiaRuntime() : base() - { - } - - public override void Initialize(object param) - { - // Workaround because of parameter type limitation in CRA - AmbrosiaRuntimeParams p = new AmbrosiaRuntimeParams(); - XmlSerializer xmlSerializer = new XmlSerializer(p.GetType()); - using (StringReader textReader = new StringReader((string)param)) - { - p = (AmbrosiaRuntimeParams)xmlSerializer.Deserialize(textReader); - } - - Initialize( - p.serviceReceiveFromPort, - p.serviceSendToPort, - p.serviceName, - p.serviceLogPath, - p.createService, - p.pauseAtStart, - p.persistLogs, - p.activeActive, - p.logTriggerSizeMB, - p.storageConnectionString, - p.currentVersion, - p.upgradeToVersion - ); - } - - internal void RuntimeChecksOnProcessStart() - { - if (!_createService) - { - long readVersion = -1; - try - { - readVersion = long.Parse(RetrieveServiceInfo("CurrentVersion")); - } - catch - { - OnError(VersionMismatch, "Version mismatch on process start: Expected " + _currentVersion + " was: " + RetrieveServiceInfo("CurrentVersion")); - } - if (_currentVersion != readVersion) - { - OnError(VersionMismatch, "Version mismatch on process start: Expected " + _currentVersion + " was: " + readVersion.ToString()); - } - if (!_runningRepro) - { - if (long.Parse(RetrieveServiceInfo("LastCommittedCheckpoint")) < 1) - { - OnError(MissingCheckpoint, "No checkpoint in metadata"); - - } - } - if (!LogWriter.DirectoryExists(_serviceLogPath + _serviceName + "_" + _currentVersion)) - { - OnError(MissingCheckpoint, "No checkpoint/logs directory"); - } - var lastCommittedCheckpoint = long.Parse(RetrieveServiceInfo("LastCommittedCheckpoint")); - if (!LogWriter.FileExists(Path.Combine(_serviceLogPath + _serviceName + "_" + _currentVersion, - "server" + "chkpt" + lastCommittedCheckpoint))) - { - OnError(MissingCheckpoint, "Missing checkpoint " + lastCommittedCheckpoint.ToString()); - } - if (!LogWriter.FileExists(Path.Combine(_serviceLogPath + _serviceName + "_" + _currentVersion, - "server" + "log" + lastCommittedCheckpoint))) - { - OnError(MissingLog, "Missing log " + lastCommittedCheckpoint.ToString()); - } - } - } - - public void Initialize(int serviceReceiveFromPort, - int serviceSendToPort, - string serviceName, - string serviceLogPath, - bool? createService, - bool pauseAtStart, - bool persistLogs, - bool activeActive, - long logTriggerSizeMB, - string storageConnectionString, - long currentVersion, - long upgradeToVersion - ) - { - _runningRepro = false; - _currentVersion = currentVersion; - _upgradeToVersion = upgradeToVersion; - _upgrading = (_currentVersion < _upgradeToVersion); - if (pauseAtStart == true) - { - Console.WriteLine("Hit Enter to continue:"); - Console.ReadLine(); - } - else - { - Console.WriteLine("Ready ..."); - } - - _persistLogs = persistLogs; - _activeActive = activeActive; - _newLogTriggerSize = logTriggerSizeMB * 1000000; - _serviceLogPath = serviceLogPath; - _localServiceReceiveFromPort = serviceReceiveFromPort; - _localServiceSendToPort = serviceSendToPort; - _serviceName = serviceName; - _storageConnectionString = storageConnectionString; - _sharded = false; - _coral = ClientLibrary; - - Console.WriteLine("Logs directory: {0}", _serviceLogPath); - - if (createService == null) - { - if (LogWriter.DirectoryExists(_serviceLogPath + _serviceName + "_" + _currentVersion)) - { - createService = false; - } - else - { - createService = true; - } - } - AddAsyncInputEndpoint(AmbrosiaDataInputsName, new AmbrosiaInput(this, "data")); - AddAsyncInputEndpoint(AmbrosiaControlInputsName, new AmbrosiaInput(this, "control")); - AddAsyncOutputEndpoint(AmbrosiaDataOutputsName, new AmbrosiaOutput(this, "data")); - AddAsyncOutputEndpoint(AmbrosiaControlOutputsName, new AmbrosiaOutput(this, "control")); - _createService = createService.Value; - RecoverOrStartAsync().Wait(); - } - - internal void InitializeRepro(string serviceName, - string serviceLogPath, - long checkpointToLoad, - int version, - bool testUpgrade, - int serviceReceiveFromPort, - int serviceSendToPort) - { - _localServiceReceiveFromPort = serviceReceiveFromPort; - _localServiceSendToPort = serviceSendToPort; - _currentVersion = version; - _runningRepro = true; - _persistLogs = false; - _activeActive = true; - _serviceLogPath = serviceLogPath; - _serviceName = serviceName; - _sharded = false; - _createService = false; - RecoverOrStartAsync(checkpointToLoad, testUpgrade).Wait(); - } - } - class Program { private static LocalAmbrosiaRuntimeModes _runtimeMode; @@ -3508,7 +35,7 @@ class Program private static int _serviceSendToPort = -1; private static string _serviceLogPath = Path.Combine(Path.GetPathRoot(Path.GetFullPath(".")), "AmbrosiaLogs") + Path.DirectorySeparatorChar; private static string _binariesLocation = "AmbrosiaBinaries"; - private static long _checkpointToLoad = 0; + private static long _checkpointToLoad = 1; private static bool _isTestingUpgrade = false; private static AmbrosiaRecoveryModes _recoveryMode = AmbrosiaRecoveryModes.A; private static bool _isActiveActive = false; @@ -3520,8 +47,11 @@ class Program static void Main(string[] args) { + GenericLogsInterface.SetToGenericLogs(); ParseAndValidateOptions(args); + Trace.Listeners.Add(new TextWriterTraceListener(Console.Out)); + switch (_runtimeMode) { case LocalAmbrosiaRuntimeModes.DebugInstance: @@ -3531,13 +61,19 @@ static void Main(string[] args) return; case LocalAmbrosiaRuntimeModes.AddReplica: case LocalAmbrosiaRuntimeModes.RegisterInstance: - var client = new CRAClientLibrary(Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING")); + if (_runtimeMode == LocalAmbrosiaRuntimeModes.AddReplica) + { + _isActiveActive = true; + } + + var dataProvider = new CRA.DataProvider.Azure.AzureDataProvider(Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING")); + var client = new CRAClientLibrary(dataProvider); client.DisableArtifactUploading(); var replicaName = $"{_instanceName}{_replicaNumber}"; AmbrosiaRuntimeParams param = new AmbrosiaRuntimeParams(); param.createService = _recoveryMode == AmbrosiaRecoveryModes.A - ? (bool?) null + ? (bool?)null : (_recoveryMode != AmbrosiaRecoveryModes.N); param.pauseAtStart = _isPauseAtStart; param.persistLogs = _isPersistLogs; @@ -3554,7 +90,7 @@ static void Main(string[] args) try { - if (client.DefineVertex(param.AmbrosiaBinariesLocation, () => new AmbrosiaRuntime()) != CRAErrorCode.Success) + if (client.DefineVertexAsync(param.AmbrosiaBinariesLocation, () => new AmbrosiaRuntime()).GetAwaiter().GetResult() != CRAErrorCode.Success) { throw new Exception(); } @@ -3568,14 +104,14 @@ static void Main(string[] args) serializedParams = textWriter.ToString(); } - if (client.InstantiateVertex(replicaName, param.serviceName, param.AmbrosiaBinariesLocation, serializedParams) != CRAErrorCode.Success) + if (client.InstantiateVertexAsync(replicaName, param.serviceName, param.AmbrosiaBinariesLocation, serializedParams).GetAwaiter().GetResult() != CRAErrorCode.Success) { throw new Exception(); } - client.AddEndpoint(param.serviceName, AmbrosiaRuntime.AmbrosiaDataInputsName, true, true); - client.AddEndpoint(param.serviceName, AmbrosiaRuntime.AmbrosiaDataOutputsName, false, true); - client.AddEndpoint(param.serviceName, AmbrosiaRuntime.AmbrosiaControlInputsName, true, true); - client.AddEndpoint(param.serviceName, AmbrosiaRuntime.AmbrosiaControlOutputsName, false, true); + client.AddEndpointAsync(param.serviceName, AmbrosiaRuntime.AmbrosiaDataInputsName, true, true).Wait(); + client.AddEndpointAsync(param.serviceName, AmbrosiaRuntime.AmbrosiaDataOutputsName, false, true).Wait(); + client.AddEndpointAsync(param.serviceName, AmbrosiaRuntime.AmbrosiaControlInputsName, true, true).Wait(); + client.AddEndpointAsync(param.serviceName, AmbrosiaRuntime.AmbrosiaControlOutputsName, false, true).Wait(); } catch (Exception e) { @@ -3593,7 +129,7 @@ private static void ParseAndValidateOptions(string[] args) var options = ParseOptions(args, out var shouldShowHelp); ValidateOptions(options, shouldShowHelp); } - + private static OptionSet ParseOptions(string[] args, out bool shouldShowHelp) { var showHelp = false; @@ -3631,7 +167,7 @@ private static OptionSet ParseOptions(string[] args, out bool shouldShowHelp) }.AddMany(registerInstanceOptionSet); var debugInstanceOptionSet = basicOptions.AddMany(new OptionSet { - + { "c|checkpoint=", "The checkpoint # to load.", c => _checkpointToLoad = long.Parse(c) }, { "cv|currentVersion=", "The version # to debug.", cv => _currentVersion = int.Parse(cv) }, { "tu|testingUpgrade", "Is testing upgrade.", u => _isTestingUpgrade = true }, @@ -3794,4 +330,4 @@ public static string GetDescription(this Enum value) return (attribute as DescriptionAttribute)?.Description; // ?? string.Empty maybe added } } -} +} \ No newline at end of file diff --git a/Ambrosia/Ambrosia/ReturnValueTypes.cs b/Ambrosia/Ambrosia/ReturnValueTypes.cs deleted file mode 100644 index 3a34103f..00000000 --- a/Ambrosia/Ambrosia/ReturnValueTypes.cs +++ /dev/null @@ -1,14 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Text; - -namespace LocalAmbrosiaRuntime -{ - public enum ReturnValueTypes - { - None = 0, - ReturnValue = 1, - EmptyReturnValue = 2, - Exception = 3, - } -} diff --git a/Ambrosia/Ambrosia/RpcTypes.cs b/Ambrosia/Ambrosia/RpcTypes.cs deleted file mode 100644 index bd5491fa..00000000 --- a/Ambrosia/Ambrosia/RpcTypes.cs +++ /dev/null @@ -1,17 +0,0 @@ -namespace Ambrosia -{ - public static class RpcTypes - { - public enum RpcType : byte - { - ReturnValue = 0, - FireAndForget = 1, - Impulse = 2, - } - - public static bool IsFireAndForget(this RpcType rpcType) - { - return rpcType == RpcType.FireAndForget || rpcType == RpcType.Impulse; - } - } -} \ No newline at end of file diff --git a/Ambrosia/adv-file-ops/adv-file-ops.cpp b/Ambrosia/adv-file-ops/adv-file-ops.cpp deleted file mode 100644 index 1e915f9a..00000000 --- a/Ambrosia/adv-file-ops/adv-file-ops.cpp +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -#include -#include -#include -#include -#include - -std::string FormatWin32AndHRESULT(DWORD win32_result) { - std::stringstream ss; - ss << "Win32(" << win32_result << ") HRESULT(" - << std::showbase << std::uppercase << std::setfill('0') << std::hex - << HRESULT_FROM_WIN32(win32_result) << ")"; - return ss.str(); -} - -extern "C" -__declspec(dllexport) bool EnableProcessPrivileges() { - HANDLE token; - - TOKEN_PRIVILEGES token_privileges; - token_privileges.PrivilegeCount = 1; - token_privileges.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; - - if (!LookupPrivilegeValue(0, SE_MANAGE_VOLUME_NAME, - &token_privileges.Privileges[0].Luid)) return false; - if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token)) return false; - if (!AdjustTokenPrivileges(token, 0, (PTOKEN_PRIVILEGES)&token_privileges, 0, 0, 0)) return false; - if (GetLastError() != ERROR_SUCCESS) return false; - - ::CloseHandle(token); - - return true; -} - -extern "C" -__declspec(dllexport) bool EnableVolumePrivileges(std::string& filename, HANDLE file_handle) -{ - std::string volume_string = "\\\\.\\" + filename.substr(0, 2); - HANDLE volume_handle = ::CreateFile(volume_string.c_str(), 0, 0, nullptr, OPEN_EXISTING, - FILE_ATTRIBUTE_NORMAL, nullptr); - if (INVALID_HANDLE_VALUE == volume_handle) { - // std::cerr << "Error retrieving volume handle: " << FormatWin32AndHRESULT(::GetLastError()); - return false; - } - - MARK_HANDLE_INFO mhi; - mhi.UsnSourceInfo = USN_SOURCE_DATA_MANAGEMENT; - mhi.VolumeHandle = volume_handle; - mhi.HandleInfo = MARK_HANDLE_PROTECT_CLUSTERS; - - DWORD bytes_returned = 0; - BOOL result = DeviceIoControl(file_handle, FSCTL_MARK_HANDLE, &mhi, sizeof(MARK_HANDLE_INFO), nullptr, - 0, &bytes_returned, nullptr); - - if (!result) { - // std::cerr << "Error in DeviceIoControl: " << FormatWin32AndHRESULT(::GetLastError()); - return false; - } - - ::CloseHandle(volume_handle); - return true; -} - - -extern "C" -__declspec(dllexport) bool SetFileSize(HANDLE file_handle, int64_t file_size) -{ - LARGE_INTEGER li; - li.QuadPart = file_size; - - BOOL result = ::SetFilePointerEx(file_handle, li, NULL, FILE_BEGIN); - if (!result) { - std::cerr << "SetFilePointer failed with error: " << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - // Set a fixed file length - result = ::SetEndOfFile(file_handle); - if (!result) { - std::cerr << "SetEndOfFile failed with error: " << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - result = ::SetFileValidData(file_handle, file_size); - if (!result) { - std::cerr << "SetFileValidData failed with error: " << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - return true; -} - -extern "C" -__declspec(dllexport) bool CreateAndSetFileSize(std::string& filename, int64_t file_size) -{ - BOOL result = ::EnableProcessPrivileges(); - if (!result) { - std::cerr << "EnableProcessPrivileges failed with error: " - << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - DWORD desired_access = GENERIC_READ | GENERIC_WRITE; - DWORD const flags = FILE_FLAG_RANDOM_ACCESS | FILE_FLAG_NO_BUFFERING; - DWORD create_disposition = CREATE_ALWAYS; - DWORD shared_mode = FILE_SHARE_READ; - - // Create our test file - HANDLE file_handle = ::CreateFile(filename.c_str(), desired_access, shared_mode, NULL, - create_disposition, flags, NULL); - if (INVALID_HANDLE_VALUE == file_handle) { - std::cerr << "write file (" << filename << ") not created. Error: " << - FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - result = ::EnableVolumePrivileges(filename, file_handle); - if (!result) { - std::cerr << "EnableVolumePrivileges failed with error: " - << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - result = ::SetFileSize(file_handle, file_size); - if (!result) { - std::cerr << "SetFileSize failed with error: " << FormatWin32AndHRESULT(::GetLastError()) << std::endl; - return false; - } - - ::CloseHandle(file_handle); - - return true; -} diff --git a/Ambrosia/adv-file-ops/adv-file-ops.vcxproj b/Ambrosia/adv-file-ops/adv-file-ops.vcxproj deleted file mode 100644 index d04dbdc8..00000000 --- a/Ambrosia/adv-file-ops/adv-file-ops.vcxproj +++ /dev/null @@ -1,82 +0,0 @@ - - - - - Debug - x64 - - - Release - x64 - - - - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - directdrivereadwrite - 10.0.17134.0 - adv-file-ops - - - - DynamicLibrary - true - v141 - MultiByte - false - - - DynamicLibrary - false - v141 - true - MultiByte - false - - - - - - - - - - - - - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - $(ProjectDir)$(Platform)\$(Configuration)\ - - - - Level3 - Disabled - true - MultiThreadedDebug - - - - - Level3 - MaxSpeed - true - true - true - MultiThreaded - Guard - - - true - true - - - - - - - - - \ No newline at end of file diff --git a/AmbrosiaLib/Ambrosia/AmbrosiaLib.csproj b/AmbrosiaLib/Ambrosia/AmbrosiaLib.csproj new file mode 100644 index 00000000..58689b52 --- /dev/null +++ b/AmbrosiaLib/Ambrosia/AmbrosiaLib.csproj @@ -0,0 +1,42 @@ + + + + netstandard2.0 + true + true + true + ../../Ambrosia/Ambrosia.snk + AnyCPU;x64 + + + + $(DefineConstants);NETSTANDARD + + + + + + 15.8.168 + + + 12.0.2 + + + 5.8.2 + + + + + + + + + + 2020.9.24.1 + + + + + + + diff --git a/AmbrosiaLib/Ambrosia/App.config b/AmbrosiaLib/Ambrosia/App.config new file mode 100644 index 00000000..068dbfe2 --- /dev/null +++ b/AmbrosiaLib/Ambrosia/App.config @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/AmbrosiaLib/Ambrosia/Program.cs b/AmbrosiaLib/Ambrosia/Program.cs new file mode 100644 index 00000000..24ac10d9 --- /dev/null +++ b/AmbrosiaLib/Ambrosia/Program.cs @@ -0,0 +1,4166 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using System.Configuration; +using System.Net.Sockets; +using System.Net; +using System.Threading; +using System.IO; +using Microsoft.WindowsAzure.Storage; +using Microsoft.WindowsAzure.Storage.Table; +using Microsoft.VisualStudio.Threading; +using System.Collections.Concurrent; +using System.Runtime.Serialization; +using System.Runtime.CompilerServices; +using CRA.ClientLibrary; +using System.Diagnostics; +using System.Xml.Serialization; +using System.IO.Pipes; +using Microsoft.CodeAnalysis.CSharp.Syntax; + +namespace Ambrosia +{ + internal struct LongPair + { + public LongPair(long first, + long second) + { + First = first; + Second = second; + } + internal long First { get; set; } + internal long Second { get; set; } + } + + internal static class DictionaryTools + { + internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, ILogWriter writeToStream) + { + writeToStream.WriteIntFixed(dict.Count); + foreach (var entry in dict) + { + var encodedKey = Encoding.UTF8.GetBytes(entry.Key); + writeToStream.WriteInt(encodedKey.Length); + writeToStream.Write(encodedKey, 0, encodedKey.Length); + writeToStream.WriteLongFixed(entry.Value); + } + } + + internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, ILogReader readFromStream) + { + var _retVal = new ConcurrentDictionary(); + var dictCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < dictCount; i++) + { + var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); + long seqNo = readFromStream.ReadLongFixed(); + _retVal.TryAdd(myString, seqNo); + } + return _retVal; + } + + internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, ILogWriter writeToStream) + { + writeToStream.WriteIntFixed(dict.Count); + foreach (var entry in dict) + { + var encodedKey = Encoding.UTF8.GetBytes(entry.Key); + writeToStream.WriteInt(encodedKey.Length); + writeToStream.Write(encodedKey, 0, encodedKey.Length); + writeToStream.WriteLongFixed(entry.Value.First); + writeToStream.WriteLongFixed(entry.Value.Second); + } + } + + internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, ILogReader readFromStream) + { + var _retVal = new ConcurrentDictionary(); + var dictCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < dictCount; i++) + { + var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); + var newLongPair = new LongPair(); + newLongPair.First = readFromStream.ReadLongFixed(); + newLongPair.Second = readFromStream.ReadLongFixed(); + _retVal.TryAdd(myString, newLongPair); + } + return _retVal; + } + + internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, Stream writeToStream) + { + writeToStream.WriteIntFixed(dict.Count); + foreach (var entry in dict) + { + writeToStream.Write(entry.Key.ToByteArray(), 0, 16); + var IPBytes = entry.Value.GetAddressBytes(); + writeToStream.WriteByte((byte)IPBytes.Length); + writeToStream.Write(IPBytes, 0, IPBytes.Length); + } + } + + internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, ILogReader readFromStream) + { + var _retVal = new ConcurrentDictionary(); + var dictCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < dictCount; i++) + { + var myBytes = new byte[16]; + readFromStream.ReadAllRequiredBytes(myBytes, 0, 16); + var newGuid = new Guid(myBytes); + byte addressSize = (byte)readFromStream.ReadByte(); + if (addressSize > 16) + { + myBytes = new byte[addressSize]; + } + readFromStream.ReadAllRequiredBytes(myBytes, 0, addressSize); + var newAddress = new IPAddress(myBytes); + _retVal.TryAdd(newGuid, newAddress); + } + return _retVal; + } + + internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, ILogWriter writeToStream) + { + writeToStream.WriteIntFixed(dict.Count); + foreach (var entry in dict) + { + var keyEncoding = Encoding.UTF8.GetBytes(entry.Key); + Trace.TraceInformation("input {0} seq no: {1}", entry.Key, entry.Value.LastProcessedID); + Trace.TraceInformation("input {0} replayable seq no: {1}", entry.Key, entry.Value.LastProcessedReplayableID); + writeToStream.WriteInt(keyEncoding.Length); + writeToStream.Write(keyEncoding, 0, keyEncoding.Length); + writeToStream.WriteLongFixed(entry.Value.LastProcessedID); + writeToStream.WriteLongFixed(entry.Value.LastProcessedReplayableID); + } + } + + internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, ILogReader readFromStream) + { + var _retVal = new ConcurrentDictionary(); + var dictCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < dictCount; i++) + { + var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); + long seqNo = readFromStream.ReadLongFixed(); + var newRecord = new InputConnectionRecord(); + newRecord.LastProcessedID = seqNo; + seqNo = readFromStream.ReadLongFixed(); + newRecord.LastProcessedReplayableID = seqNo; + _retVal.TryAdd(myString, newRecord); + } + return _retVal; + } + + internal static void AmbrosiaSerialize(this ConcurrentDictionary dict, ILogWriter writeToStream) + { + writeToStream.WriteIntFixed(dict.Count); + foreach (var entry in dict) + { + var keyEncoding = Encoding.UTF8.GetBytes(entry.Key); + writeToStream.WriteInt(keyEncoding.Length); + writeToStream.Write(keyEncoding, 0, keyEncoding.Length); + writeToStream.WriteLongFixed(entry.Value.LastSeqNoFromLocalService); + // Lock to ensure atomic update of both variables due to race in InputControlListenerAsync + long trimTo; + long replayableTrimTo; + lock (entry.Value._trimLock) + { + trimTo = entry.Value.TrimTo; + replayableTrimTo = entry.Value.ReplayableTrimTo; + } + writeToStream.WriteLongFixed(trimTo); + writeToStream.WriteLongFixed(replayableTrimTo); + entry.Value.BufferedOutput.Serialize(writeToStream); + } + } + + internal static ConcurrentDictionary AmbrosiaDeserialize(this ConcurrentDictionary dict, ILogReader readFromStream, AmbrosiaRuntime thisAmbrosia) + { + var _retVal = new ConcurrentDictionary(); + var dictCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < dictCount; i++) + { + var myString = Encoding.UTF8.GetString(readFromStream.ReadByteArray()); + var newRecord = new OutputConnectionRecord(thisAmbrosia); + newRecord.LastSeqNoFromLocalService = readFromStream.ReadLongFixed(); + newRecord.TrimTo = readFromStream.ReadLongFixed(); + newRecord.ReplayableTrimTo = readFromStream.ReadLongFixed(); + newRecord.BufferedOutput = EventBuffer.Deserialize(readFromStream, thisAmbrosia, newRecord); + _retVal.TryAdd(myString, newRecord); + } + return _retVal; + } + } + + // Note about this class: contention becomes significant when MaxBufferPages > ~50. This could be reduced by having page level locking. + // It seems experimentally that having many pages is good for small message sizes, where most of the page ends up empty. More investigation + // is needed to autotune defaultPageSize and MaxBufferPages + internal class EventBuffer + { + const int defaultPageSize = 1024 * 1024; + int NormalMaxBufferPages = 30; + static ConcurrentQueue _pool = null; + int _curBufPages; + AmbrosiaRuntime _owningRuntime; + OutputConnectionRecord _owningOutputRecord; + + internal class BufferPage + { + public byte[] PageBytes { get; set; } + public int curLength { get; set; } + public long HighestSeqNo { get; set; } + public long UnsentReplayableMessages { get; set; } + public long LowestSeqNo { get; set; } + public long TotalReplayableMessages { get; internal set; } + + public BufferPage(byte[] pageBytes) + { + PageBytes = pageBytes; + curLength = 0; + HighestSeqNo = 0; + LowestSeqNo = 0; + UnsentReplayableMessages = 0; + TotalReplayableMessages = 0; + } + + public void CheckPageIntegrity() + { + var numberOfRPCs = HighestSeqNo - LowestSeqNo + 1; + var lengthOfCurrentRPC = 0; + int endIndexOfCurrentRPC = 0; + int cursor = 0; + + for (int i = 0; i < numberOfRPCs; i++) + { + lengthOfCurrentRPC = PageBytes.ReadBufferedInt(cursor); + cursor += StreamCommunicator.IntSize(lengthOfCurrentRPC); + endIndexOfCurrentRPC = cursor + lengthOfCurrentRPC; + if (endIndexOfCurrentRPC > curLength) + { + Trace.TraceError("RPC Exceeded length of Page!!"); + throw new Exception("RPC Exceeded length of Page!!"); + } + + var shouldBeRPCByte = PageBytes[cursor]; + if (shouldBeRPCByte != AmbrosiaRuntime.RPCByte) + { + Trace.TraceError("UNKNOWN BYTE: {0}!!", shouldBeRPCByte); + throw new Exception("Illegal leading byte in message"); + } + cursor++; + + var isReturnValue = (PageBytes[cursor++] == (byte)1); + + if (isReturnValue) // receiving a return value + { + var sequenceNumber = PageBytes.ReadBufferedLong(cursor); + cursor += StreamCommunicator.LongSize(sequenceNumber); + } + else // receiving an RPC + { + var methodId = PageBytes.ReadBufferedInt(cursor); + cursor += StreamCommunicator.IntSize(methodId); + var fireAndForget = (PageBytes[cursor] == (byte)1) || (PageBytes[cursor] == (byte)2); + cursor++; + + string senderOfRPC = null; + long sequenceNumber = 0; + + if (!fireAndForget) + { + // read return address and sequence number + var senderOfRPCLength = PageBytes.ReadBufferedInt(cursor); + var sizeOfSender = StreamCommunicator.IntSize(senderOfRPCLength); + cursor += sizeOfSender; + senderOfRPC = Encoding.UTF8.GetString(PageBytes, cursor, senderOfRPCLength); + cursor += senderOfRPCLength; + sequenceNumber = PageBytes.ReadBufferedLong(cursor); + cursor += StreamCommunicator.LongSize(sequenceNumber); + //StartupParamOverrides.OutputStream.WriteLine("Received RPC call to method with id: {0} and sequence number {1}", methodId, sequenceNumber); + } + else + { + + //StartupParamOverrides.OutputStream.WriteLine("Received fire-and-forget RPC call to method with id: {0}", methodId); + } + + var lengthOfSerializedArguments = endIndexOfCurrentRPC - cursor; + cursor += lengthOfSerializedArguments; + } + } + } + + internal void CheckSendBytes(int posToStart, + int numRPCs, + int bytes) + { + int cursor = posToStart; + for (int i = 0; i < numRPCs; i++) + { + var lengthOfCurrentRPC = PageBytes.ReadBufferedInt(cursor); + cursor += StreamCommunicator.IntSize(lengthOfCurrentRPC); + var endIndexOfCurrentRPC = cursor + lengthOfCurrentRPC; + if (endIndexOfCurrentRPC > curLength) + { + Trace.TraceError("RPC Exceeded length of Page!!"); + throw new Exception("RPC Exceeded length of Page!!"); + } + + var shouldBeRPCByte = PageBytes[cursor]; + if (shouldBeRPCByte != AmbrosiaRuntime.RPCByte) + { + Trace.TraceError("UNKNOWN BYTE: {0}!!", shouldBeRPCByte); + throw new Exception("Illegal leading byte in message"); + } + cursor++; + + var isReturnValue = (PageBytes[cursor++] == (byte)1); + + if (isReturnValue) // receiving a return value + { + var sequenceNumber = PageBytes.ReadBufferedLong(cursor); + cursor += StreamCommunicator.LongSize(sequenceNumber); + } + else // receiving an RPC + { + var methodId = PageBytes.ReadBufferedInt(cursor); + cursor += StreamCommunicator.IntSize(methodId); + var fireAndForget = (PageBytes[cursor] == (byte)1) || (PageBytes[cursor] == (byte)2); + cursor++; + string senderOfRPC = null; + long sequenceNumber = 0; + + if (!fireAndForget) + { + // read return address and sequence number + var senderOfRPCLength = PageBytes.ReadBufferedInt(cursor); + var sizeOfSender = StreamCommunicator.IntSize(senderOfRPCLength); + cursor += sizeOfSender; + senderOfRPC = Encoding.UTF8.GetString(PageBytes, cursor, senderOfRPCLength); + cursor += senderOfRPCLength; + sequenceNumber = PageBytes.ReadBufferedLong(cursor); + cursor += StreamCommunicator.LongSize(sequenceNumber); + //StartupParamOverrides.OutputStream.WriteLine("Received RPC call to method with id: {0} and sequence number {1}", methodId, sequenceNumber); + } + else + { + + //StartupParamOverrides.OutputStream.WriteLine("Received fire-and-forget RPC call to method with id: {0}", methodId); + } + + var lengthOfSerializedArguments = endIndexOfCurrentRPC - cursor; + cursor += lengthOfSerializedArguments; + } + } + } + } + + long _trimLock; + long _appendLock; + + ElasticCircularBuffer _bufferQ; + + internal EventBuffer(AmbrosiaRuntime owningRuntime, + OutputConnectionRecord owningOutputRecord) + { + _bufferQ = new ElasticCircularBuffer(); + _appendLock = 0; + _owningRuntime = owningRuntime; + _curBufPages = 0; + _owningOutputRecord = owningOutputRecord; + _trimLock = 0; + } + + internal void Serialize(ILogWriter writeToStream) + { + writeToStream.WriteIntFixed(_bufferQ.Count); + foreach (var currentBuf in _bufferQ) + { + writeToStream.WriteIntFixed(currentBuf.PageBytes.Length); + writeToStream.WriteIntFixed(currentBuf.curLength); + writeToStream.Write(currentBuf.PageBytes, 0, currentBuf.curLength); + writeToStream.WriteLongFixed(currentBuf.HighestSeqNo); + writeToStream.WriteLongFixed(currentBuf.LowestSeqNo); + writeToStream.WriteLongFixed(currentBuf.UnsentReplayableMessages); + writeToStream.WriteLongFixed(currentBuf.TotalReplayableMessages); + } + } + + internal static EventBuffer Deserialize(ILogReader readFromStream, + AmbrosiaRuntime owningRuntime, + OutputConnectionRecord owningOutputRecord) + { + var _retVal = new EventBuffer(owningRuntime, owningOutputRecord); + var bufferCount = readFromStream.ReadIntFixed(); + for (int i = 0; i < bufferCount; i++) + { + var pageSize = readFromStream.ReadIntFixed(); + var pageFilled = readFromStream.ReadIntFixed(); + var myBytes = new byte[pageSize]; + readFromStream.ReadAllRequiredBytes(myBytes, 0, pageFilled); + var newBufferPage = new BufferPage(myBytes); + newBufferPage.curLength = pageFilled; + newBufferPage.HighestSeqNo = readFromStream.ReadLongFixed(); + newBufferPage.LowestSeqNo = readFromStream.ReadLongFixed(); + newBufferPage.UnsentReplayableMessages = readFromStream.ReadLongFixed(); + newBufferPage.TotalReplayableMessages = readFromStream.ReadLongFixed(); + _retVal._bufferQ.Enqueue(ref newBufferPage); + } + return _retVal; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void AcquireAppendLock(long lockVal = 1) + { + while (true) + { + var origVal = Interlocked.CompareExchange(ref _appendLock, lockVal, 0); + if (origVal == 0) + { + // We have the lock + break; + } + } + } + + internal long ReadAppendLock() + { + return Interlocked.Read(ref _appendLock); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void ReleaseAppendLock() + { + Interlocked.Exchange(ref _appendLock, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void AcquireTrimLock(long lockVal) + { + while (true) + { + var origVal = Interlocked.CompareExchange(ref _trimLock, lockVal, 0); + if (origVal == 0) + { + // We have the lock + break; + } + } + } + + internal long ReadTrimLock() + { + return Interlocked.Read(ref _trimLock); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + internal void ReleaseTrimLock() + { + Interlocked.Exchange(ref _trimLock, 0); + } + + internal class BuffersCursor + { + public IEnumerator PageEnumerator { get; set; } + public int PagePos { get; set; } + public int RelSeqPos { get; set; } + public BuffersCursor(IEnumerator inPageEnumerator, + int inPagePos, + int inRelSeqPos) + { + RelSeqPos = inRelSeqPos; + PageEnumerator = inPageEnumerator; + PagePos = inPagePos; + } + } + + internal async Task SendAsync(Stream outputStream, + BuffersCursor placeToStart) + { + // If the cursor is invalid because of trimming or reconnecting, create it again + if (placeToStart.PagePos == -1) + { + return await ReplayFromAsync(outputStream, _owningOutputRecord.LastSeqSentToReceiver + 1); + + } + var nextSeqNo = _owningOutputRecord.LastSeqSentToReceiver + 1; + var bufferEnumerator = placeToStart.PageEnumerator; + var posToStart = placeToStart.PagePos; + var relSeqPos = placeToStart.RelSeqPos; + + // We are guaranteed to have an enumerator and starting point. Must send output. + AcquireAppendLock(2); + bool needToUnlockAtEnd = true; + do + { + var curBuffer = bufferEnumerator.Current; + var pageLength = curBuffer.curLength; + var morePages = (curBuffer != _bufferQ.Last()); + int numReplayableMessagesToSend; + if (posToStart == 0) + { + // We are starting to send contents of the page. Send everything + numReplayableMessagesToSend = (int)curBuffer.TotalReplayableMessages; + } + else + { + // We are in the middle of sending this page. Respect the previously set counter + numReplayableMessagesToSend = (int)curBuffer.UnsentReplayableMessages; + } + int numRPCs = (int)(curBuffer.HighestSeqNo - curBuffer.LowestSeqNo + 1 - relSeqPos); + curBuffer.UnsentReplayableMessages = 0; + ReleaseAppendLock(); + Debug.Assert((nextSeqNo == curBuffer.LowestSeqNo + relSeqPos) && (nextSeqNo >= curBuffer.LowestSeqNo) && ((nextSeqNo + numRPCs - 1) <= curBuffer.HighestSeqNo)); + ReleaseTrimLock(); + // send the buffer + if (pageLength - posToStart > 0) + { + // We really have output to send. Send it. + //!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Uncomment/Comment for testing + //StartupParamOverrides.OutputStream.WriteLine("Wrote from {0} to {1}, {2}", curBuffer.LowestSeqNo, curBuffer.HighestSeqNo, morePages); + int bytesInBatchData = pageLength - posToStart; + if (numRPCs > 1) + { + if (numReplayableMessagesToSend == numRPCs) + { + // writing a batch + outputStream.WriteInt(bytesInBatchData + 1 + StreamCommunicator.IntSize(numRPCs)); + outputStream.WriteByte(AmbrosiaRuntime.RPCBatchByte); + outputStream.WriteInt(numRPCs); +#if DEBUG + try + { + curBuffer.CheckSendBytes(posToStart, numRPCs, pageLength - posToStart); + } + catch (Exception e) + { + Trace.TraceError("Error sending partial page, checking page integrity: {0}", e.Message); + curBuffer.CheckPageIntegrity(); + throw e; + } +#endif + await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); + await outputStream.FlushAsync(); + } + else + { + // writing a mixed batch + outputStream.WriteInt(bytesInBatchData + 1 + StreamCommunicator.IntSize(numRPCs) + StreamCommunicator.IntSize(numReplayableMessagesToSend)); + outputStream.WriteByte(AmbrosiaRuntime.CountReplayableRPCBatchByte); + outputStream.WriteInt(numRPCs); + outputStream.WriteInt(numReplayableMessagesToSend); +#if DEBUG + try + { + curBuffer.CheckSendBytes(posToStart, numRPCs, pageLength - posToStart); + } + catch (Exception e) + { + Trace.TraceError("Error sending partial page, checking page integrity: {0}", e.Message); +// StartupParamOverrides.OutputStream.WriteLine("Error sending partial page, checking page integrity: {0}", e.Message); + curBuffer.CheckPageIntegrity(); + throw e; + } +#endif + await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); + await outputStream.FlushAsync(); + } + } + else + { + // writing individual RPCs + await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); + await outputStream.FlushAsync(); + } + } + AcquireTrimLock(2); + _owningOutputRecord.LastSeqSentToReceiver += numRPCs; + + Debug.Assert((_owningOutputRecord.placeInOutput != null) && (_owningOutputRecord.placeInOutput.PageEnumerator != null)); // Used to check these, but they should always be true now that there are no recursive SendAsync calls. + + var trimResetIterator = _owningOutputRecord.placeInOutput.PagePos == -1; + + var trimPushedIterator = !trimResetIterator && (bufferEnumerator.Current != curBuffer); + + // Must handle cases where trim came in during the actual send and reset the iterator + if (trimResetIterator) + { + Debug.Assert(!morePages); + // Done outputting. Just return the enumerator replacement + return _owningOutputRecord.placeInOutput; + } + else + { + Debug.Assert((bufferEnumerator.Current != curBuffer) || ((nextSeqNo == curBuffer.LowestSeqNo + relSeqPos) && (nextSeqNo >= curBuffer.LowestSeqNo) && ((nextSeqNo + numRPCs - 1) <= curBuffer.HighestSeqNo))); + nextSeqNo += numRPCs; + + if (trimPushedIterator) + { + Debug.Assert(placeToStart.PagePos == 0 && placeToStart.RelSeqPos == 0); + + if (morePages) + { + AcquireAppendLock(2); + } + else + { + needToUnlockAtEnd = false; + break; + } + } + else // trim didn't alter the iterator at all + { + if (morePages) + { + placeToStart.PagePos = 0; + placeToStart.RelSeqPos = 0; + AcquireAppendLock(2); + var moveNextResult = bufferEnumerator.MoveNext(); + Debug.Assert(moveNextResult); + } + else + { + placeToStart.PagePos = pageLength; + placeToStart.RelSeqPos = relSeqPos + numRPCs; + needToUnlockAtEnd = false; + break; + } + } + } + + nextSeqNo = _owningOutputRecord.LastSeqSentToReceiver + 1; + bufferEnumerator = placeToStart.PageEnumerator; + posToStart = placeToStart.PagePos; + relSeqPos = placeToStart.RelSeqPos; + } + while (true); + Debug.Assert(placeToStart.PageEnumerator == bufferEnumerator); // Used to set this rather than compare, but they should never be different. May be different due to reconnection!!!!!!!!!!!!!!! If they are different due to reconnection or something, don't know why we'd want to make them the same + if (needToUnlockAtEnd) + { + Debug.Assert(false); // Is this ever actually hit? If not, we should eventually get rid of needToUnlockAtEnd and this whole if. + ReleaseAppendLock(); + } + return placeToStart; + } + + internal async Task ReplayFromAsync(Stream outputStream, + long firstSeqNo) + { + var bufferEnumerator = _bufferQ.GetEnumerator(); + // Scan through pages from head to tail looking for events to output + while (bufferEnumerator.MoveNext()) + { + var curBuffer = bufferEnumerator.Current; + Debug.Assert(curBuffer.LowestSeqNo <= firstSeqNo); + if (curBuffer.HighestSeqNo >= firstSeqNo) + { + // We need to send some or all of this buffer + int skipEvents = (int)(Math.Max(0, firstSeqNo - curBuffer.LowestSeqNo)); + + int bufferPos = 0; + if (true) // BUGBUG We are temporarily disabling this optimization which avoids unnecessary locking as reconnecting is not a sufficient criteria: We found a case where input is arriving during reconnection where counting was getting disabled incorrectly. Further investigation is required. + // if (reconnecting) // BUGBUG We are temporarily disabling this optimization which avoids unnecessary locking as reconnecting is not a sufficient criteria: We found a case where input is arriving during reconnection where counting was getting disabled incorrectly. Further investigation is required. + { + // We need to reset how many replayable messages have been sent. We want to minimize the use of + // this codepath because of the expensive locking, which can compete with new RPCs getting appended + AcquireAppendLock(2); + curBuffer.UnsentReplayableMessages = curBuffer.TotalReplayableMessages; + for (int i = 0; i < skipEvents; i++) + { + int eventSize = curBuffer.PageBytes.ReadBufferedInt(bufferPos); + var methodID = curBuffer.PageBytes.ReadBufferedInt(bufferPos + StreamCommunicator.IntSize(eventSize) + 2); + if (curBuffer.PageBytes[bufferPos + StreamCommunicator.IntSize(eventSize) + 2 + StreamCommunicator.IntSize(methodID)] != (byte)RpcTypes.RpcType.Impulse) + { + curBuffer.UnsentReplayableMessages--; + } + bufferPos += eventSize + StreamCommunicator.IntSize(eventSize); + } + ReleaseAppendLock(); + } + else + { + // We assume the counter for unsent replayable messages is correct. NO LOCKING NEEDED + for (int i = 0; i < skipEvents; i++) + { + int eventSize = curBuffer.PageBytes.ReadBufferedInt(bufferPos); + bufferPos += eventSize + StreamCommunicator.IntSize(eventSize); + } + } + // Make sure there is a send enqueued in the work Q. + long sendEnqueued = Interlocked.Read(ref _owningOutputRecord._sendsEnqueued); + if (sendEnqueued == 0) + { + Interlocked.Increment(ref _owningOutputRecord._sendsEnqueued); + _owningOutputRecord.DataWorkQ.Enqueue(-1); + } + return new BuffersCursor(bufferEnumerator, bufferPos, skipEvents); + } + } + // There's no output to replay + return new BuffersCursor(bufferEnumerator, -1, 0); + } + + private void addBufferPage(int writeLength, + long firstSeqNo) + { + BufferPage bufferPage; + ReleaseAppendLock(); + while (!_pool.TryDequeue(out bufferPage)) + { + if (_owningRuntime.Recovering || _owningOutputRecord.ResettingConnection || + _owningRuntime.CheckpointingService || _owningOutputRecord.ConnectingAfterRestart) + { + var newBufferPageBytes = new byte[Math.Max(defaultPageSize, writeLength)]; + bufferPage = new BufferPage(newBufferPageBytes); + _curBufPages++; + break; + } + Thread.Yield(); + } + AcquireAppendLock(); + { + // Grabbed a page from the pool + if (bufferPage.PageBytes.Length < writeLength) + { + // Page isn't big enough. Throw it away and create a bigger one + bufferPage.PageBytes = new byte[writeLength]; + } + } + bufferPage.LowestSeqNo = firstSeqNo; + bufferPage.HighestSeqNo = firstSeqNo; + bufferPage.UnsentReplayableMessages = 0; + bufferPage.TotalReplayableMessages = 0; + bufferPage.curLength = 0; + _bufferQ.Enqueue(ref bufferPage); + } + + internal void CreatePool(int numAlreadyAllocated = 0) + { + _pool = new ConcurrentQueue(); + for (int i = 0; i < (NormalMaxBufferPages - numAlreadyAllocated); i++) + { + var bufferPageBytes = new byte[defaultPageSize]; + var bufferPage = new BufferPage(bufferPageBytes); + _pool.Enqueue(bufferPage); + _curBufPages++; + } + } + + // Assumed that the caller releases the lock acquired here + internal BufferPage GetWritablePage(int writeLength, + long nextSeqNo) + { + if (_pool == null) + { + CreatePool(); + } + AcquireAppendLock(); + // Create a new buffer page if there is none, or if we are introducing a sequence number discontinuity + if (_bufferQ.IsEmpty() || nextSeqNo != (_bufferQ.PeekLast().HighestSeqNo + 1)) + { + addBufferPage(writeLength, nextSeqNo); + } + else + { + // There is something already in the buffer. Check it out. + var outPage = _bufferQ.PeekLast(); + if ((outPage.PageBytes.Length - outPage.curLength) < writeLength) + { + // Not enough space on last page. Add another + addBufferPage(writeLength, nextSeqNo); + } + } + var retVal = _bufferQ.PeekLast(); + return retVal; + } + + internal void Trim(long commitSeqNo, + ref BuffersCursor placeToStart) + { + // Keep trimming pages until we can't anymore or the Q is empty + while (!_bufferQ.IsEmpty()) + { + var currentHead = _bufferQ.PeekFirst(); + bool acquiredLock = false; + // Acquire the lock to ensure someone isn't adding another output to it. + AcquireAppendLock(3); + acquiredLock = true; + if (currentHead.HighestSeqNo <= commitSeqNo) + { + // Trimming for real + // First maintain the placeToStart cursor + if ((placeToStart != null) && ((placeToStart.PagePos >= 0) && (placeToStart.PageEnumerator.Current == currentHead))) + { + // Need to move the enumerator forward. Note that it may be on the last page if all output + // buffers can be trimmed + if (placeToStart.PageEnumerator.MoveNext()) + { + placeToStart.PagePos = 0; + placeToStart.RelSeqPos = 0; + } + else + { + placeToStart.PagePos = -1; + } + } + _bufferQ.Dequeue(); + if (acquiredLock) + { + ReleaseAppendLock(); + } + // Return page to pool + currentHead.curLength = 0; + currentHead.HighestSeqNo = 0; + currentHead.UnsentReplayableMessages = 0; + currentHead.TotalReplayableMessages = 0; + if (_pool == null) + { + CreatePool(_bufferQ.Count); + } + if (_owningRuntime.Recovering || _curBufPages <= NormalMaxBufferPages) + { + _pool.Enqueue(currentHead); + } + else + { + _curBufPages--; + } + } + else + { + // Nothing more to trim + if (acquiredLock) + { + ReleaseAppendLock(); + } + break; + } + } + } + + // Note that this method assumes that the caller has locked this connection record to avoid possible interference. Note that this method + // assumes no discontinuities in sequence numbers since adjusting can only happen on newly initialized service (no recovery), and since + // discontinuities can only happen as the result of recovery + internal long AdjustFirstSeqNoTo(long commitSeqNo) + { + var bufferEnumerator = _bufferQ.GetEnumerator(); + // Scan through pages from head to tail looking for events to output + while (bufferEnumerator.MoveNext()) + { + var curBuffer = bufferEnumerator.Current; + var seqNoDiff = curBuffer.HighestSeqNo - curBuffer.LowestSeqNo; + curBuffer.LowestSeqNo = commitSeqNo; + curBuffer.HighestSeqNo = commitSeqNo + seqNoDiff; + commitSeqNo += seqNoDiff + 1; + } + return commitSeqNo - 1; + } + + // Returns the highest sequence number left in the buffers after removing the non-replayable messages, or -1 if the + // buffers are empty. + internal long TrimAndUnbufferNonreplayableCalls(long trimSeqNo, + long matchingReplayableSeqNo) + { + if (trimSeqNo < 1) + { + return matchingReplayableSeqNo; + } + // No locking necessary since this should only get called during recovery before replay and before a checkpooint is sent to service + // First trim + long highestTrimmedSeqNo = -1; + while (!_bufferQ.IsEmpty()) + { + var currentHead = _bufferQ.PeekFirst(); + if (currentHead.HighestSeqNo <= trimSeqNo) + { + // Must completely trim the page + _bufferQ.Dequeue(); + // Return page to pool + highestTrimmedSeqNo = currentHead.HighestSeqNo; + currentHead.curLength = 0; + currentHead.HighestSeqNo = 0; + currentHead.UnsentReplayableMessages = 0; + currentHead.TotalReplayableMessages = 0; + if (_pool == null) + { + CreatePool(_bufferQ.Count); + } + _pool.Enqueue(currentHead); + } + else + { + // May need to remove some data from the page + int readBufferPos = 0; + for (var i = currentHead.LowestSeqNo; i <= trimSeqNo; i++) + { + int eventSize = currentHead.PageBytes.ReadBufferedInt(readBufferPos); + var methodID = currentHead.PageBytes.ReadBufferedInt(readBufferPos + StreamCommunicator.IntSize(eventSize) + 2); + if (currentHead.PageBytes[readBufferPos + StreamCommunicator.IntSize(eventSize) + 2 + StreamCommunicator.IntSize(methodID)] != (byte)RpcTypes.RpcType.Impulse) + { + currentHead.TotalReplayableMessages--; + } + readBufferPos += eventSize + StreamCommunicator.IntSize(eventSize); + } + Buffer.BlockCopy(currentHead.PageBytes, readBufferPos, currentHead.PageBytes, 0, currentHead.PageBytes.Length - readBufferPos); + currentHead.LowestSeqNo += trimSeqNo - currentHead.LowestSeqNo + 1; + currentHead.curLength -= readBufferPos; + break; + } + } + + var bufferEnumerator = _bufferQ.GetEnumerator(); + long nextReplayableSeqNo = matchingReplayableSeqNo + 1; + while (bufferEnumerator.MoveNext()) + { + var curBuffer = bufferEnumerator.Current; + var numMessagesOnPage = curBuffer.HighestSeqNo - curBuffer.LowestSeqNo + 1; + curBuffer.LowestSeqNo = nextReplayableSeqNo; + if (numMessagesOnPage > curBuffer.TotalReplayableMessages) + { + // There are some nonreplayable messsages to remove + int readBufferPos = 0; + var newPageBytes = new byte[curBuffer.PageBytes.Length]; + var pageWriteStream = new MemoryStream(newPageBytes); + for (int i = 0; i < numMessagesOnPage; i++) + { + int eventSize = curBuffer.PageBytes.ReadBufferedInt(readBufferPos); + var methodID = curBuffer.PageBytes.ReadBufferedInt(readBufferPos + StreamCommunicator.IntSize(eventSize) + 2); + if (curBuffer.PageBytes[readBufferPos + StreamCommunicator.IntSize(eventSize) + 2 + StreamCommunicator.IntSize(methodID)] != (byte)RpcTypes.RpcType.Impulse) + { + // Copy event over to new page bytes + pageWriteStream.Write(curBuffer.PageBytes, readBufferPos, eventSize + StreamCommunicator.IntSize(eventSize)); + } + readBufferPos += eventSize + StreamCommunicator.IntSize(eventSize); + } + curBuffer.curLength = (int)pageWriteStream.Position; + curBuffer.HighestSeqNo = curBuffer.LowestSeqNo + curBuffer.TotalReplayableMessages - 1; + curBuffer.PageBytes = newPageBytes; + } + nextReplayableSeqNo += curBuffer.TotalReplayableMessages; + } + return nextReplayableSeqNo - 1; + } + + internal void RebaseSeqNosInBuffer(long commitSeqNo, + long commitSeqNoReplayable) + { + var seqNoDiff = commitSeqNo - commitSeqNoReplayable; + var bufferEnumerator = _bufferQ.GetEnumerator(); + // Scan through pages from head to tail looking for events to output + while (bufferEnumerator.MoveNext()) + { + var curBuffer = bufferEnumerator.Current; + curBuffer.LowestSeqNo += seqNoDiff; + curBuffer.HighestSeqNo += seqNoDiff; + } + } + } + + [DataContract] + internal class InputConnectionRecord + { + public NetworkStream DataConnectionStream { get; set; } + public NetworkStream ControlConnectionStream { get; set; } + [DataMember] + public long LastProcessedID { get; set; } + [DataMember] + public long LastProcessedReplayableID { get; set; } + public InputConnectionRecord() + { + DataConnectionStream = null; + LastProcessedID = 0; + LastProcessedReplayableID = 0; + } + } + + internal class OutputConnectionRecord + { + // Set on reconnection. Established where to replay from or filter to + public long ReplayFrom { get; set; } + // The seq number from the last RPC call copied to the buffer. Not a property so interlocked read can be done + public long LastSeqNoFromLocalService; + // RPC output buffers + public EventBuffer BufferedOutput { get; set; } + // A cursor which specifies where the last RPC output ended + public EventBuffer.BuffersCursor placeInOutput; + // Work Q for output producing work. + public AsyncQueue DataWorkQ { get; set; } + // Work Q for sending trim messages and perform local trimming + public AsyncQueue ControlWorkQ { get; set; } + // Current sequence number which the output buffer may be trimmed to. + public long TrimTo { get; set; } + // Current replayable sequence number which the output buffer may be trimmed to. + public long ReplayableTrimTo { get; set; } + // The number of sends which are currently enqueued. Should be updated with interlocked increment and decrement + public long _sendsEnqueued; + public AmbrosiaRuntime MyAmbrosia { get; set; } + public bool WillResetConnection { get; set; } + public bool ConnectingAfterRestart { get; set; } + // The latest trim location on the other side. An associated trim message MAY have already been sent + public long RemoteTrim { get; set; } + // The latest replayable trim location on the other side. An associated trim message MAY have already been sent + public long RemoteTrimReplayable { get; set; } + // The seq no of the last RPC sent to the receiver + public long LastSeqSentToReceiver; + internal volatile bool ResettingConnection; + internal object _trimLock = new object(); + internal object _remoteTrimLock = new object(); + + public OutputConnectionRecord(AmbrosiaRuntime inAmbrosia) + { + ReplayFrom = 0; + DataWorkQ = new AsyncQueue(); + ControlWorkQ = new AsyncQueue(); + _sendsEnqueued = 0; + TrimTo = -1; + ReplayableTrimTo = -1; + RemoteTrim = -1; + RemoteTrimReplayable = -1; + LastSeqNoFromLocalService = 0; + MyAmbrosia = inAmbrosia; + BufferedOutput = new EventBuffer(MyAmbrosia, this); + ResettingConnection = false; + ConnectingAfterRestart = false; + LastSeqSentToReceiver = 0; + WillResetConnection = inAmbrosia._createService; + ConnectingAfterRestart = inAmbrosia._restartWithRecovery; + } + } + + public class AmbrosiaRuntimeParams + { + public int serviceReceiveFromPort; + public int serviceSendToPort; + public string serviceName; + public string AmbrosiaBinariesLocation; + public string serviceLogPath; + public bool? createService; + public bool pauseAtStart; + public bool persistLogs; + public bool activeActive; + public long logTriggerSizeMB; + public string storageConnectionString; + public long currentVersion; + public long upgradeToVersion; + } + + public static class AmbrosiaRuntimeParms + { + public static bool _looseAttach = false; + } + + public class AmbrosiaRuntime : VertexBase + { +#if _WINDOWS + [DllImport("Kernel32.dll", CallingConvention = CallingConvention.Winapi)] + private static extern void GetSystemTimePreciseAsFileTime(out long filetime); +#else + private static void GetSystemTimePreciseAsFileTime(out long filetime) + { + filetime = Stopwatch.GetTimestamp(); + } +#endif + + // Util + // Log metadata information record in _logMetadataTable + private class serviceInstanceEntity : TableEntity + { + public serviceInstanceEntity() + { + } + + public serviceInstanceEntity(string key, string inValue) + { + this.PartitionKey = "(Default)"; + this.RowKey = key; + this.value = inValue; + + } + + public string value { get; set; } + } + + + // Create a table with name tableName if it does not exist + private CloudTable CreateTableIfNotExists(String tableName) + { + try + { + CloudTable table = _tableClient.GetTableReference(tableName); + table.CreateIfNotExistsAsync().Wait(); + if (table == null) + { + OnError(AzureOperationError, "Error creating a table in Azure"); + } + return table; + } + catch + { + OnError(AzureOperationError, "Error creating a table in Azure"); + return null; + } + } + + + // Replace info for a key or create a new key. Raises an exception if the operation fails for any reason. + private void InsertOrReplaceServiceInfoRecord(string infoTitle, string info) + { + try + { + serviceInstanceEntity ServiceInfoEntity = new serviceInstanceEntity(infoTitle, info); + TableOperation insertOrReplaceOperation = TableOperation.InsertOrReplace(ServiceInfoEntity); + var myTask = this._serviceInstanceTable.ExecuteAsync(insertOrReplaceOperation); + myTask.Wait(); + var retrievedResult = myTask.Result; + if (retrievedResult.HttpStatusCode < 200 || retrievedResult.HttpStatusCode >= 300) + { + OnError(AzureOperationError, "Error replacing a record in an Azure table"); + } + } + catch + { + OnError(AzureOperationError, "Error replacing a record in an Azure table"); + } + } + + // Retrieve info for a given key + // If no key exists or _logMetadataTable does not exist, raise an exception + private string RetrieveServiceInfo(string key) + { + if (this._serviceInstanceTable != null) + { + TableOperation retrieveOperation = TableOperation.Retrieve("(Default)", key); + var myTask = this._serviceInstanceTable.ExecuteAsync(retrieveOperation); + myTask.Wait(); + var retrievedResult = myTask.Result; + if (retrievedResult.Result != null) + { + return ((serviceInstanceEntity)retrievedResult.Result).value; + } + else + { + string taskExceptionString = myTask.Exception == null ? "" : " Task exception: " + myTask.Exception; + OnError(AzureOperationError, "Error retrieving info from Azure." + taskExceptionString); + } + } + else + { + OnError(AzureOperationError, "Error retrieving info from Azure. The reference to the server instance table was not initialized."); + } + // Make compiler happy + return null; + } + + // Used to hold the bytes which will go in the log. Note that two streams are passed in. The + // log stream must write to durable storage and be flushable, while the second stream initiates + // actual action taken after the message has been made durable. + internal class Committer + { + byte[] _buf; + volatile byte[] _bufbak; + long _maxBufSize; + // Used in CAS. The first 31 bits are the #of writers, the next 32 bits is the buffer size, the last bit is the sealed bit + long _status; + const int SealedBits = 1; + const int TailBits = 32; + const int numWritesBits = 31; + const long Last32Mask = 0x00000000FFFFFFFF; + const long First32Mask = Last32Mask << 32; + ILogWriter _logStream; + Stream _workStream; + ConcurrentDictionary _uncommittedWatermarks; + ConcurrentDictionary _uncommittedWatermarksBak; + internal ConcurrentDictionary _trimWatermarks; + ConcurrentDictionary _trimWatermarksBak; + internal const int HeaderSize = 24; // 4 Committer ID, 8 Write ID, 8 check bytes, 4 page size + Task _lastCommitTask; + bool _persistLogs; + int _committerID; + internal long _nextWriteID; + AmbrosiaRuntime _myAmbrosia; + + public Committer(Stream workStream, + bool persistLogs, + AmbrosiaRuntime myAmbrosia, + long maxBufSize = 8 * 1024 * 1024, + ILogReader recoveryStream = null) + { + _myAmbrosia = myAmbrosia; + _persistLogs = persistLogs; + _uncommittedWatermarksBak = new ConcurrentDictionary(); + _trimWatermarksBak = new ConcurrentDictionary(); + if (maxBufSize <= 0) + { + // Recovering + _committerID = recoveryStream.ReadIntFixed(); + _nextWriteID = recoveryStream.ReadLongFixed(); + _maxBufSize = recoveryStream.ReadIntFixed(); + _buf = new byte[_maxBufSize]; + var bufSize = recoveryStream.ReadIntFixed(); + _status = bufSize << SealedBits; + recoveryStream.ReadAllRequiredBytes(_buf, 0, bufSize); + _uncommittedWatermarks = _uncommittedWatermarks.AmbrosiaDeserialize(recoveryStream); + _trimWatermarks = _trimWatermarks.AmbrosiaDeserialize(recoveryStream); + } + else + { + // starting for the first time + _status = HeaderSize << SealedBits; + _maxBufSize = maxBufSize; + _buf = new byte[maxBufSize]; + _uncommittedWatermarks = new ConcurrentDictionary(); + _trimWatermarks = new ConcurrentDictionary(); + long curTime; + GetSystemTimePreciseAsFileTime(out curTime); + _committerID = (int)((curTime << 33) >> 33); + _nextWriteID = 0; + } + _bufbak = new byte[_maxBufSize]; + var memWriter = new MemoryStream(_buf); + var memWriterBak = new MemoryStream(_bufbak); + memWriter.WriteIntFixed(_committerID); + memWriterBak.WriteIntFixed(_committerID); + _logStream = null; + _workStream = workStream; + } + + internal int CommitID { get { return _committerID; } } + + internal void Serialize(ILogWriter serializeStream) + { + var localStatus = _status; + var bufLength = ((localStatus >> SealedBits) & Last32Mask); + serializeStream.WriteIntFixed(_committerID); + serializeStream.WriteLongFixed(_nextWriteID); + serializeStream.WriteIntFixed((int)_maxBufSize); + serializeStream.WriteIntFixed((int)bufLength); + serializeStream.Write(_buf, 0, (int)bufLength); + _uncommittedWatermarks.AmbrosiaSerialize(serializeStream); + _trimWatermarks.AmbrosiaSerialize(serializeStream); + } + + public byte[] Buf { get { return _buf; } } + + + private void SendInputWatermarks(ConcurrentDictionary uncommittedWatermarks, + ConcurrentDictionary outputs) + { + // trim output buffers of inputs + lock (outputs) + { + foreach (var kv in uncommittedWatermarks) + { + OutputConnectionRecord outputConnectionRecord; + if (!outputs.TryGetValue(kv.Key, out outputConnectionRecord)) + { + // Set up the output record for the first time and add it to the dictionary + outputConnectionRecord = new OutputConnectionRecord(_myAmbrosia); + outputs[kv.Key] = outputConnectionRecord; + Trace.TraceInformation("Adding output:{0}", kv.Key); + } + // Must lock to atomically update due to race with ToControlStreamAsync + lock (outputConnectionRecord._remoteTrimLock) + { + outputConnectionRecord.RemoteTrim = Math.Max(kv.Value.First, outputConnectionRecord.RemoteTrim); + outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.Second, outputConnectionRecord.RemoteTrimReplayable); + } + if (outputConnectionRecord.ControlWorkQ.IsEmpty) + { + outputConnectionRecord.ControlWorkQ.Enqueue(-2); + } + } + } + } + + private async Task Commit(byte[] firstBufToCommit, + int length1, + byte[] secondBufToCommit, + int length2, + ConcurrentDictionary uncommittedWatermarks, + ConcurrentDictionary trimWatermarks, + ConcurrentDictionary outputs) + { + try + { + // writes to _logstream - don't want to persist logs when perf testing so this is optional parameter + if (_persistLogs) + { + _logStream.Write(firstBufToCommit, 0, 4); + _logStream.WriteIntFixed(length1 + length2); + _logStream.Write(firstBufToCommit, 8, 16); + await _logStream.WriteAsync(firstBufToCommit, HeaderSize, length1 - HeaderSize); + await _logStream.WriteAsync(secondBufToCommit, 0, length2); + await writeFullWaterMarksAsync(uncommittedWatermarks); + await writeSimpleWaterMarksAsync(trimWatermarks); + await _logStream.FlushAsync(); + } + + SendInputWatermarks(uncommittedWatermarks, outputs); + _workStream.Write(firstBufToCommit, 0, 4); + _workStream.WriteIntFixed(length1 + length2); + _workStream.Write(firstBufToCommit, 8, 16); + await _workStream.WriteAsync(firstBufToCommit, HeaderSize, length1 - HeaderSize); + await _workStream.WriteAsync(secondBufToCommit, 0, length2); + // Return the second byte array to the FlexReader pool + FlexReadBuffer.ReturnBuffer(secondBufToCommit); + var flushtask = _workStream.FlushAsync(); + _uncommittedWatermarksBak = uncommittedWatermarks; + _uncommittedWatermarksBak.Clear(); + _trimWatermarksBak = trimWatermarks; + _trimWatermarksBak.Clear(); + } + catch (Exception e) + { + _myAmbrosia.OnError(5, e.Message); + } + _bufbak = firstBufToCommit; + await TryCommitAsync(outputs); + } + + private async Task writeFullWaterMarksAsync(ConcurrentDictionary uncommittedWatermarks) + { + _logStream.WriteInt(uncommittedWatermarks.Count); + foreach (var kv in uncommittedWatermarks) + { + var sourceBytes = Encoding.UTF8.GetBytes(kv.Key); + _logStream.WriteInt(sourceBytes.Length); + await _logStream.WriteAsync(sourceBytes, 0, sourceBytes.Length); + _logStream.WriteLongFixed(kv.Value.First); + _logStream.WriteLongFixed(kv.Value.Second); + } + } + + private async Task writeSimpleWaterMarksAsync(ConcurrentDictionary uncommittedWatermarks) + { + _logStream.WriteInt(uncommittedWatermarks.Count); + foreach (var kv in uncommittedWatermarks) + { + var sourceBytes = Encoding.UTF8.GetBytes(kv.Key); + _logStream.WriteInt(sourceBytes.Length); + await _logStream.WriteAsync(sourceBytes, 0, sourceBytes.Length); + _logStream.WriteLongFixed(kv.Value); + } + } + private async Task Commit(byte[] buf, + int length, + ConcurrentDictionary uncommittedWatermarks, + ConcurrentDictionary trimWatermarks, + ConcurrentDictionary outputs) + { + try + { + // writes to _logstream - don't want to persist logs when perf testing so this is optional parameter + if (_persistLogs) + { + await _logStream.WriteAsync(buf, 0, length); + await writeFullWaterMarksAsync(uncommittedWatermarks); + await writeSimpleWaterMarksAsync(trimWatermarks); + await _logStream.FlushAsync(); + } + SendInputWatermarks(uncommittedWatermarks, outputs); + await _workStream.WriteAsync(buf, 0, length); + var flushtask = _workStream.FlushAsync(); + _uncommittedWatermarksBak = uncommittedWatermarks; + _uncommittedWatermarksBak.Clear(); + _trimWatermarksBak = trimWatermarks; + _trimWatermarksBak.Clear(); + } + catch (Exception e) + { + _myAmbrosia.OnError(5, e.Message); + } + _bufbak = buf; + await TryCommitAsync(outputs); + } + + public async Task SleepAsync() + { + while (true) + { + // We're going to try to seal the buffer + var localStatus = Interlocked.Read(ref _status); + // Yield if the sealed bit is set + while (localStatus % 2 == 1) + { + await Task.Yield(); + localStatus = Interlocked.Read(ref _status); + } + var newLocalStatus = localStatus + 1; + var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); + + // Check if the compare and swap succeeded, otherwise try again + if (origVal == localStatus) + { + // We successfully sealed the buffer and must wait until any active commit finishes + while (_bufbak == null) + { + await Task.Yield(); + } + + // Wait for all writes to complete before sleeping + while (true) + { + localStatus = Interlocked.Read(ref _status); + var numWrites = (localStatus >> (64 - numWritesBits)); + if (numWrites == 0) + { + break; + } + await Task.Yield(); + } + return; + } + } + } + + // This method switches the log stream to the provided stream and removes the write lock on the old file + public void SwitchLogStreams(ILogWriter newLogStream) + { + if (_status % 2 != 1 || _bufbak == null) + { + _myAmbrosia.OnError(5, "Committer is trying to switch log streams when awake"); + } + // Release resources and lock on the old file + _logStream?.Dispose(); + _logStream = newLogStream; + } + + public async Task WakeupAsync() + { + var localStatus = Interlocked.Read(ref _status); + if (localStatus % 2 == 0 || _bufbak == null) + { + _myAmbrosia.OnError(5, "Tried to wakeup committer when not asleep"); + } + // We're going to try to unseal the buffer + var newLocalStatus = localStatus - 1; + var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); + // Check if the compare and swap succeeded + if (origVal != localStatus) + { + _myAmbrosia.OnError(5, "Tried to wakeup committer when not asleep 2"); + } + } + + byte[] _checkTempBytes = new byte[8]; + byte[] _checkTempBytes2 = new byte[8]; + + internal unsafe long CheckBytesExtra(int offset, + int length, + byte[] extraBytes, + int extraLength) + { + var firstBufferCheck = CheckBytes(offset, length); + var secondBufferCheck = CheckBytes(extraBytes, 0, extraLength); + long shiftedSecondBuffer = secondBufferCheck; + var lastByteLongOffset = length % 8; + if (lastByteLongOffset != 0) + { + fixed (byte* p = _checkTempBytes) + { + *((long*)p) = secondBufferCheck; + } + // Create new buffer with circularly shifted secondBufferCheck + for (int i = 0; i < 8; i++) + { + _checkTempBytes2[i] = _checkTempBytes[(i - lastByteLongOffset + 8) % 8]; + } + fixed (byte* p = _checkTempBytes2) + { + shiftedSecondBuffer = *((long*)p); + } + } + return firstBufferCheck ^ shiftedSecondBuffer; + } + + internal unsafe long CheckBytes(int offset, + int length) + { + long checkBytes = 0; + + fixed (byte* p = _buf) + { + if (offset % 8 == 0) + { + int startLongCalc = offset / 8; + int numLongCalcs = length / 8; + int numByteCalcs = length % 8; + long* longPtr = ((long*)p) + startLongCalc; + for (int i = 0; i < numLongCalcs; i++) + { + checkBytes ^= longPtr[i]; + } + if (numByteCalcs != 0) + { + var lastBytes = (byte*)(longPtr + numLongCalcs); + for (int i = 0; i < 8; i++) + { + if (i < numByteCalcs) + { + _checkTempBytes[i] = lastBytes[i]; + } + else + { + _checkTempBytes[i] = 0; + } + } + fixed (byte* p2 = _checkTempBytes) + { + checkBytes ^= *((long*)p2); + } + } + } + else + { + _myAmbrosia.OnError(0, "checkbytes case not implemented"); + } + } + return checkBytes; + } + + + internal unsafe long CheckBytes(byte[] bufToCalc, + int offset, + int length) + { + long checkBytes = 0; + + fixed (byte* p = bufToCalc) + { + if (offset % 8 == 0) + { + int startLongCalc = offset / 8; + int numLongCalcs = length / 8; + int numByteCalcs = length % 8; + long* longPtr = ((long*)p) + startLongCalc; + for (int i = 0; i < numLongCalcs; i++) + { + checkBytes ^= longPtr[i]; + } + if (numByteCalcs != 0) + { + var lastBytes = (byte*)(longPtr + numLongCalcs); + for (int i = 0; i < 8; i++) + { + if (i < numByteCalcs) + { + _checkTempBytes[i] = lastBytes[i]; + } + else + { + _checkTempBytes[i] = 0; + } + } + fixed (byte* p2 = _checkTempBytes) + { + checkBytes ^= *((long*)p2); + } + } + } + else + { + _myAmbrosia.OnError(0, "checkbytes case not implemented 2"); + } + } + return checkBytes; + } + + + public async Task AddRow(FlexReadBuffer copyFromFlexBuffer, + string outputToUpdate, + long newSeqNo, + long newReplayableSeqNo, + ConcurrentDictionary outputs, + InputConnectionRecord associatedInputConnectionRecord) + { + var copyFromBuffer = copyFromFlexBuffer.Buffer; + var length = copyFromFlexBuffer.Length; + while (true) + { + bool sealing = false; + long localStatus; + localStatus = Interlocked.Read(ref _status); + + // Yield if the sealed bit is set + while (localStatus % 2 == 1) + { + await Task.Yield(); + localStatus = Interlocked.Read(ref _status); + } + var oldBufLength = ((localStatus >> SealedBits) & Last32Mask); + var newLength = oldBufLength + length; + + // Assemble the new status + long newLocalStatus; + if ((newLength > _maxBufSize) || (_bufbak != null)) + { + // We're going to try to seal the buffer + newLocalStatus = localStatus + 1; + sealing = true; + } + else + { + // We're going to try to add to the end of the existing buffer + var newWrites = (localStatus >> (64 - numWritesBits)) + 1; + newLocalStatus = ((newWrites) << (64 - numWritesBits)) | (newLength << SealedBits); + } + var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); + + // Check if the compare and swap succeeded, otherwise try again + if (origVal == localStatus) + { + // We are now preventing recovery until addrow finishes and all resulting commits have completed. We can safely update + // LastProcessedID and LastProcessedReplayableID + associatedInputConnectionRecord.LastProcessedID = newSeqNo; + associatedInputConnectionRecord.LastProcessedReplayableID = newReplayableSeqNo; + if (sealing) + { + // This call successfully sealed the buffer. Remember we still have an extra + // message to take care of + + // We have just filled the backup buffer and must wait until any other commit finishes + int counter = 0; + while (_bufbak == null) + { + counter++; + if (counter == 100000) + { + counter = 0; + await Task.Yield(); + } + } + + // There is no other write going on. Take the backup buffer + var newUncommittedWatermarks = _uncommittedWatermarksBak; + var newWriteBuf = _bufbak; + _bufbak = null; + _uncommittedWatermarksBak = null; + + // Wait for other writes to complete before committing + while (true) + { + localStatus = Interlocked.Read(ref _status); + var numWrites = (localStatus >> (64 - numWritesBits)); + if (numWrites == 0) + { + break; + } + await Task.Yield(); + } + + // Filling header with enough info to detect incomplete writes and also writing the page length + var writeStream = new MemoryStream(_buf, 4, 20); + int lengthOnPage; + if (newLength <= _maxBufSize) + { + lengthOnPage = (int)newLength; + } + else + { + lengthOnPage = (int)oldBufLength; + } + writeStream.WriteIntFixed(lengthOnPage); + if (newLength <= _maxBufSize) + { + // Copy the contents into the log record buffer + Buffer.BlockCopy(copyFromBuffer, 0, _buf, (int)oldBufLength, length); + } + long checkBytes; + if (length <= (_maxBufSize - HeaderSize)) + { + // new message will end up in a commit buffer. Use normal CheckBytes + checkBytes = CheckBytes(HeaderSize, lengthOnPage - HeaderSize); + } + else + { + // new message is too big to land in a commit buffer and will be tacked on the end. + checkBytes = CheckBytesExtra(HeaderSize, lengthOnPage - HeaderSize, copyFromBuffer, length); + } + writeStream.WriteLongFixed(checkBytes); + writeStream.WriteLongFixed(_nextWriteID); + _nextWriteID++; + + // Do the actual commit + // Grab the current state of trim levels since the last write + // Note that the trim thread may want to modify the table, requiring a lock + ConcurrentDictionary oldTrimWatermarks; + lock (_trimWatermarks) + { + oldTrimWatermarks = _trimWatermarks; + _trimWatermarks = _trimWatermarksBak; + _trimWatermarksBak = null; + } + if (newLength <= _maxBufSize) + { + // add row to current buffer and commit + _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); + _lastCommitTask = Commit(_buf, (int)newLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); + newLocalStatus = HeaderSize << SealedBits; + } + else if (length > (_maxBufSize - HeaderSize)) + { + // Steal the byte array in the flex buffer to return it after writing + copyFromFlexBuffer.StealBuffer(); + // write new event as part of commit + _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); + var commitTask = Commit(_buf, (int)oldBufLength, copyFromBuffer, length, _uncommittedWatermarks, oldTrimWatermarks, outputs); + newLocalStatus = HeaderSize << SealedBits; + } + else + { + // commit and add new event to new buffer + newUncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); + _lastCommitTask = Commit(_buf, (int)oldBufLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); + Buffer.BlockCopy(copyFromBuffer, 0, newWriteBuf, (int)HeaderSize, length); + newLocalStatus = (HeaderSize + length) << SealedBits; + } + _buf = newWriteBuf; + _uncommittedWatermarks = newUncommittedWatermarks; + _status = newLocalStatus; + return (long)_logStream.FileSize; + } + // Add the message to the existing buffer + Buffer.BlockCopy(copyFromBuffer, 0, _buf, (int)oldBufLength, length); + _uncommittedWatermarks[outputToUpdate] = new LongPair(newSeqNo, newReplayableSeqNo); + // Reduce write count + while (true) + { + localStatus = Interlocked.Read(ref _status); + var newWrites = (localStatus >> (64 - numWritesBits)) - 1; + newLocalStatus = (localStatus & ((Last32Mask << 1) + 1)) | + (newWrites << (64 - numWritesBits)); + origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); + if (origVal == localStatus) + { + if (localStatus % 2 == 0 && _bufbak != null) + { + await TryCommitAsync(outputs); + } + return (long)_logStream.FileSize; + } + } + } + } + } + + public async Task TryCommitAsync(ConcurrentDictionary outputs) + { + long localStatus; + localStatus = Interlocked.Read(ref _status); + + var bufLength = ((localStatus >> SealedBits) & Last32Mask); + // give up and try later if the sealed bit is set or there is nothing to write + if (localStatus % 2 == 1 || bufLength == HeaderSize || _bufbak == null) + { + return; + } + + // Assemble the new status + long newLocalStatus; + newLocalStatus = localStatus + 1; + var origVal = Interlocked.CompareExchange(ref _status, newLocalStatus, localStatus); + + // Check if the compare and swap succeeded, otherwise skip flush + if (origVal == localStatus) + { + // This call successfully sealed the buffer. + + // We have just filled the backup buffer and must wait until any other commit finishes + int counter = 0; + while (_bufbak == null) + { + counter++; + if (counter == 100000) + { + counter = 0; + await Task.Yield(); + } + } + + // There is no other write going on. Take the backup buffer + var newUncommittedWatermarks = _uncommittedWatermarksBak; + var newWriteBuf = _bufbak; + _bufbak = null; + _uncommittedWatermarksBak = null; + + // Wait for other writes to complete before committing + while (true) + { + localStatus = Interlocked.Read(ref _status); + var numWrites = (localStatus >> (64 - numWritesBits)); + if (numWrites == 0) + { + break; + } + await Task.Yield(); + } + + // Filling header with enough info to detect incomplete writes and also writing the page length + var writeStream = new MemoryStream(_buf, 4, 20); + writeStream.WriteIntFixed((int)bufLength); + long checkBytes = CheckBytes(HeaderSize, (int)bufLength - HeaderSize); + writeStream.WriteLongFixed(checkBytes); + writeStream.WriteLongFixed(_nextWriteID); + _nextWriteID++; + + // Grab the current state of trim levels since the last write + // Note that the trim thread may want to modify the table, requiring a lock + ConcurrentDictionary oldTrimWatermarks; + lock (_trimWatermarks) + { + oldTrimWatermarks = _trimWatermarks; + _trimWatermarks = _trimWatermarksBak; + _trimWatermarksBak = null; + } + _lastCommitTask = Commit(_buf, (int)bufLength, _uncommittedWatermarks, oldTrimWatermarks, outputs); + newLocalStatus = HeaderSize << SealedBits; + _buf = newWriteBuf; + _uncommittedWatermarks = newUncommittedWatermarks; + _status = newLocalStatus; + } + } + + internal void ClearNextWrite() + { + _uncommittedWatermarksBak.Clear(); + _trimWatermarksBak.Clear(); + _status = HeaderSize << SealedBits; + } + + internal void SendUpgradeRequest() + { + _workStream.WriteIntFixed(_committerID); + var numMessageBytes = StreamCommunicator.IntSize(1) + 1; + var messageBuf = new byte[numMessageBytes]; + var memStream = new MemoryStream(messageBuf); + memStream.WriteInt(1); + memStream.WriteByte(upgradeServiceByte); + memStream.Dispose(); + _workStream.WriteIntFixed((int)(HeaderSize + numMessageBytes)); + long checkBytes = CheckBytes(messageBuf, 0, (int)numMessageBytes); + _workStream.WriteLongFixed(checkBytes); + _workStream.WriteLongFixed(-1); + _workStream.Write(messageBuf, 0, numMessageBytes); + _workStream.Flush(); + } + + internal void QuiesceServiceWithSendCheckpointRequest(bool upgrading = false, bool becomingPrimary = false) + { + _workStream.WriteIntFixed(_committerID); + var numMessageBytes = StreamCommunicator.IntSize(1) + 1; + var messageBuf = new byte[numMessageBytes]; + var memStream = new MemoryStream(messageBuf); + memStream.WriteInt(1); +#if DEBUG + // We are about to request a checkpoint from the language binding. Get ready to error check the incoming checkpoint + _myAmbrosia.ExpectingCheckpoint = true; +#endif + if (upgrading) + { + memStream.WriteByte(upgradeTakeCheckpointByte); + } + else if (becomingPrimary) + { + memStream.WriteByte(takeBecomingPrimaryCheckpointByte); + } + else + { + memStream.WriteByte(takeCheckpointByte); + } + memStream.Dispose(); + _workStream.WriteIntFixed((int)(HeaderSize + numMessageBytes)); + long checkBytes = CheckBytes(messageBuf, 0, (int)numMessageBytes); + _workStream.WriteLongFixed(checkBytes); + _workStream.WriteLongFixed(-1); + _workStream.Write(messageBuf, 0, numMessageBytes); + _workStream.Flush(); + } + + internal void SendBecomePrimaryRequest() + { + _workStream.WriteIntFixed(_committerID); + var numMessageBytes = StreamCommunicator.IntSize(1) + 1; + var messageBuf = new byte[numMessageBytes]; + var memStream = new MemoryStream(messageBuf); + memStream.WriteInt(1); + memStream.WriteByte(becomingPrimaryByte); + memStream.Dispose(); + _workStream.WriteIntFixed((int)(HeaderSize + numMessageBytes)); + long checkBytes = CheckBytes(messageBuf, 0, (int)numMessageBytes); + _workStream.WriteLongFixed(checkBytes); + _workStream.WriteLongFixed(-1); + _workStream.Write(messageBuf, 0, numMessageBytes); + _workStream.Flush(); + } + + + internal void SendCheckpointToRecoverFrom(byte[] buf, int length, ILogReader checkpointStream) + { + _workStream.WriteIntFixed(_committerID); + _workStream.WriteIntFixed((int)(HeaderSize + length)); + _workStream.WriteLongFixed(0); + _workStream.WriteLongFixed(-2); + _workStream.Write(buf, 0, length); + var sizeBytes = StreamCommunicator.ReadBufferedInt(buf, 0); + var checkpointSize = StreamCommunicator.ReadBufferedLong(buf, StreamCommunicator.IntSize(sizeBytes) + 1); + checkpointStream.ReadBig(_workStream, checkpointSize); + _workStream.Flush(); + } + + internal async Task AddInitialRowAsync(FlexReadBuffer serviceInitializationMessage) + { + var numMessageBytes = serviceInitializationMessage.Length; + if (numMessageBytes > _buf.Length - HeaderSize) + { + _myAmbrosia.OnError(0, "Initial row is too many bytes"); + } + Buffer.BlockCopy(serviceInitializationMessage.Buffer, 0, _buf, (int)HeaderSize, numMessageBytes); + _status = (HeaderSize + numMessageBytes) << SealedBits; + await SleepAsync(); + } + } + + /** + * This contains information associated with a given machine + **/ + internal class MachineState + { + public MachineState(long shardID) + { + ShardID = shardID; + } + public ILogWriter CheckpointWriter { get; set; } + public Committer Committer { get; set; } + public ConcurrentDictionary Inputs { get; set; } + public long LastCommittedCheckpoint { get; set; } + public long LastLogFile { get; set; } + public AARole MyRole { get; set; } + public ConcurrentDictionary Outputs { get; set; } + public long ShardID { get; set; } + } + + internal void LoadAmbrosiaState(MachineState state) + { + state.CheckpointWriter = _checkpointWriter; + state.Committer = _committer; + state.Inputs = _inputs; + state.LastCommittedCheckpoint = _lastCommittedCheckpoint; + state.LastLogFile = _lastLogFile; + state.MyRole = _myRole; + state.Outputs = _outputs; + } + + internal void UpdateAmbrosiaState(MachineState state) + { + _checkpointWriter = state.CheckpointWriter; + _committer = state.Committer; + _inputs = state.Inputs; + _lastCommittedCheckpoint = state.LastCommittedCheckpoint; + _lastLogFile = state.LastLogFile; + _myRole = state.MyRole; + _outputs = state.Outputs; + } + + public class AmbrosiaOutput : IAsyncVertexOutputEndpoint + { + AmbrosiaRuntime myRuntime; + string _typeOfEndpoint; // Data or control endpoint + + public AmbrosiaOutput(AmbrosiaRuntime inRuntime, + string typeOfEndpoint) : base() + { + myRuntime = inRuntime; + _typeOfEndpoint = typeOfEndpoint; + } + + public void Dispose() + { + } + + public async Task ToInputAsync(IVertexInputEndpoint p, CancellationToken token) + { + await Task.Yield(); + throw new NotImplementedException(); + } + + public async Task ToStreamAsync(Stream stream, string otherProcess, string otherEndpoint, CancellationToken token) + { + if (_typeOfEndpoint == "data") + { + await myRuntime.ToDataStreamAsync(stream, otherProcess, token); + } + else + { + await myRuntime.ToControlStreamAsync(stream, otherProcess, token); + } + } + } + + public class AmbrosiaInput : IAsyncVertexInputEndpoint + { + AmbrosiaRuntime myRuntime; + string _typeOfEndpoint; // Data or control endpoint + + public AmbrosiaInput(AmbrosiaRuntime inRuntime, + string typeOfEndpoint) : base() + { + myRuntime = inRuntime; + _typeOfEndpoint = typeOfEndpoint; + } + + public void Dispose() + { + } + + public async Task FromOutputAsync(IVertexOutputEndpoint p, CancellationToken token) + { + await Task.Yield(); + throw new NotImplementedException(); + } + + public async Task FromStreamAsync(Stream stream, string otherProcess, string otherEndpoint, CancellationToken token) + { + if (_typeOfEndpoint == "data") + { + await myRuntime.FromDataStreamAsync(stream, otherProcess, token); + } + else + { + await myRuntime.FromControlStreamAsync(stream, otherProcess, token); + } + } + } + + ConcurrentDictionary _inputs; + ConcurrentDictionary _outputs; + internal int _localServiceReceiveFromPort; // specifiable on the command line + internal int _localServiceSendToPort; // specifiable on the command line + internal string _serviceName; // specifiable on the command line + internal string _serviceLogPath; + internal string _logFileNameBase; + public const string AmbrosiaDataInputsName = "Ambrosiadatain"; + public const string AmbrosiaControlInputsName = "Ambrosiacontrolin"; + public const string AmbrosiaDataOutputsName = "Ambrosiadataout"; + public const string AmbrosiaControlOutputsName = "Ambrosiacontrolout"; + bool _persistLogs; + bool _sharded; + internal bool _createService; + long _shardID; + bool _runningRepro; + long _currentVersion; + long _upgradeToVersion; + bool _upgrading; + internal bool _restartWithRecovery; + internal bool CheckpointingService { get; set; } + internal bool ExpectingCheckpoint { get; set; } + + // Constants for leading byte communicated between services; + public const byte RPCByte = AmbrosiaRuntimeLBConstants.RPCByte; + public const byte attachToByte = AmbrosiaRuntimeLBConstants.attachToByte; + public const byte takeCheckpointByte = AmbrosiaRuntimeLBConstants.takeCheckpointByte; + public const byte CommitByte = AmbrosiaRuntimeLBConstants.CommitByte; + public const byte replayFromByte = AmbrosiaRuntimeLBConstants.replayFromByte; + public const byte RPCBatchByte = AmbrosiaRuntimeLBConstants.RPCBatchByte; + public const byte PingByte = AmbrosiaRuntimeLBConstants.PingByte; + public const byte PingReturnByte = AmbrosiaRuntimeLBConstants.PingReturnByte; + public const byte checkpointByte = AmbrosiaRuntimeLBConstants.checkpointByte; + public const byte InitalMessageByte = AmbrosiaRuntimeLBConstants.InitalMessageByte; + public const byte upgradeTakeCheckpointByte = AmbrosiaRuntimeLBConstants.upgradeTakeCheckpointByte; + public const byte takeBecomingPrimaryCheckpointByte = AmbrosiaRuntimeLBConstants.takeBecomingPrimaryCheckpointByte; + public const byte upgradeServiceByte = AmbrosiaRuntimeLBConstants.upgradeServiceByte; + public const byte CountReplayableRPCBatchByte = AmbrosiaRuntimeLBConstants.CountReplayableRPCBatchByte; + public const byte trimToByte = AmbrosiaRuntimeLBConstants.trimToByte; + public const byte becomingPrimaryByte = AmbrosiaRuntimeLBConstants.becomingPrimaryByte; + + CRAClientLibrary _coral; + + // Connection to local service + Stream _localServiceReceiveFromStream; + Stream _localServiceSendToStream; + + // Precommit buffers used for writing things to append blobs + Committer _committer; + + // Azure storage clients + string _storageConnectionString; + CloudStorageAccount _storageAccount; + CloudTableClient _tableClient; + + // Azure table for service instance metadata information + CloudTable _serviceInstanceTable; + long _lastCommittedCheckpoint; + + // Azure blob for writing commit log and checkpoint + ILogWriter _checkpointWriter; + ILogWriterStatic _logWriterStatics; + + // true when this service is in an active/active configuration. False if set to single node + bool _activeActive; + + internal enum AARole { Primary, Secondary, Checkpointer }; + AARole _myRole; + // Log size at which we start a new log file. This triggers a checkpoint, <= 0 if manual only checkpointing is done + long _newLogTriggerSize; + // The numeric suffix of the log file currently being read or written to + long _lastLogFile; + // A locking variable (with compare and swap) used to eliminate redundant log moves + int _movingToNextLog = 0; + // A handle to a file used for an upgrading secondary to bring down the primary and prevent primary promotion amongst secondaries. + // As long as the write lock is held, no promotion can happen + ILogWriter _killFileHandle = null; + + + + const int UnexpectedError = 0; + const int VersionMismatch = 1; + const int MissingCheckpoint = 2; + const int MissingLog = 3; + const int AzureOperationError = 4; + const int LogWriteError = 5; + + internal void OnError(int ErrNo, string ErrorMessage) + { + Trace.TraceError("FATAL ERROR " + ErrNo.ToString() + ": " + ErrorMessage); + _coral.KillLocalWorker(""); + } + + /// + /// Need a manually created backing field so it can be marked volatile. + /// + private volatile FlexReadBuffer backingFieldForLastReceivedCheckpoint; + + internal FlexReadBuffer LastReceivedCheckpoint + { + get { return backingFieldForLastReceivedCheckpoint; } + set + { + backingFieldForLastReceivedCheckpoint = value; + } + } + + internal long _lastReceivedCheckpointSize; + + bool _recovering; + internal bool Recovering + { + get { return _recovering; } + set { _recovering = value; } + } + + /// + /// Need a manually created backing field so it can be marked volatile. + /// + private volatile FlexReadBuffer backingFieldForServiceInitializationMessage; + + internal FlexReadBuffer ServiceInitializationMessage + { + get { return backingFieldForServiceInitializationMessage; } + set + { + backingFieldForServiceInitializationMessage = value; + } + } + + // Hack for enabling fast IP6 loopback in Windows on .NET + const int SIO_LOOPBACK_FAST_PATH = (-1744830448); + + // This is a hack to keep threads from deadlocking when running integrated IC. Has no affect for separate IC. + volatile public static bool _listening = false; + + void SetupLocalServiceStreams() + { + // Check to see if this is a tightly bound IC + if ((_localServiceReceiveFromPort == 0) && (_localServiceSendToPort == 0)) + { + //Use anonymous pipes for communication rather than TCP + var pipeServer = new AnonymousPipeServerStream(PipeDirection.In, HandleInheritability.Inheritable); + _listening = true; + StartupParamOverrides.ICReceivePipeName = pipeServer.GetClientHandleAsString(); + _localServiceReceiveFromStream = pipeServer; + pipeServer = new AnonymousPipeServerStream(PipeDirection.Out, HandleInheritability.Inheritable); + StartupParamOverrides.ICSendPipeName = pipeServer.GetClientHandleAsString(); + _localServiceSendToStream = pipeServer; + return; + } + + // We the IC and LB are using TCP to communicate + // Note that the local service must setup the listener and sender in reverse order or there will be a deadlock + // First establish receiver - Use fast IP6 loopback + Byte[] optionBytes = BitConverter.GetBytes(1); +#if _WINDOWS + Socket mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); + mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); + var ipAddress = IPAddress.IPv6Loopback; +#else + Socket mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); + var ipAddress = IPAddress.Loopback; +#endif + + var myReceiveEP = new IPEndPoint(ipAddress, _localServiceReceiveFromPort); + mySocket.Bind(myReceiveEP); + mySocket.Listen(1); + var socket = mySocket.Accept(); + _localServiceReceiveFromStream = new NetworkStream(socket); + + + // Note that the local service must setup the listener and sender in reverse order or there will be a deadlock + // First establish receiver - Use fast IP6 loopback +#if _WINDOWS + mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); + mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); +#else + mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); +#endif + var mySendEP = new IPEndPoint(ipAddress, _localServiceSendToPort); + mySocket.Bind(mySendEP); + mySocket.Listen(1); + socket = mySocket.Accept(); + _localServiceSendToStream = new NetworkStream(socket); + } + + private void SetupAzureConnections() + { + try + { + _storageAccount = CloudStorageAccount.Parse(_storageConnectionString); + _tableClient = _storageAccount.CreateCloudTableClient(); + _serviceInstanceTable = _tableClient.GetTableReference(_serviceName); + if ((_storageAccount == null) || (_tableClient == null) || (_serviceInstanceTable == null)) + { + OnError(AzureOperationError, "Error setting up initial connection to Azure"); + } + } + catch + { + OnError(AzureOperationError, "Error setting up initial connection to Azure"); + } + } + + private const uint FILE_FLAG_NO_BUFFERING = 0x20000000; + + private void PrepareToRecoverOrStart() + { + IPAddress localIPAddress = Dns.GetHostEntry("localhost").AddressList[0]; + _logWriterStatics.CreateDirectoryIfNotExists(LogDirectory(_currentVersion)); + _logFileNameBase = LogFileNameBase(_currentVersion); + SetupLocalServiceStreams(); + if (!_runningRepro) + { + SetupAzureConnections(); + } + ServiceInitializationMessage = null; + Thread localListenerThread = new Thread(() => LocalListener()) { IsBackground = true }; + localListenerThread.Start(); + } + + private async Task CheckForMigrationOrUpgradeAsync() + { + while (true) + { + for (int i = 0; i < 3; i++) + { + await Task.Delay(1500); + try + { + LockKillFile(); + // If we reach here, we have the lock and definitely don't need to commit suicide + ReleaseAndTryCleanupKillFile(); + break; + } + catch (Exception) + { + // Maybe we are tying to upgrade, but maybe someone else is checking. Try 3 times before committing suicide + if (i == 2) + { + // Failed 3 times. Commit suicide + OnError(0, "Migrating or upgrading. Must commit suicide since I'm the primary"); + } + } + } + } + } + + private async Task RecoverOrStartAsync(long checkpointToLoad = -1, + bool testUpgrade = false) + { + CheckpointingService = false; + Recovering = false; + PrepareToRecoverOrStart(); + if (!_runningRepro) + { + RuntimeChecksOnProcessStart(); + } + // Determine if we are recovering + if (!_createService) + { + Recovering = true; + _restartWithRecovery = true; + MachineState state = new MachineState(_shardID); + await RecoverAsync(state, checkpointToLoad, testUpgrade); + UpdateAmbrosiaState(state); + await PrepareToBecomePrimaryAsync(); + // Start task to periodically check if someone's trying to upgrade + (new Task(() => CheckForMigrationOrUpgradeAsync())).Start(); + Recovering = false; + } + else + { + await StartAsync(); + // Start task to periodically check if someone's trying to upgrade + (new Task(() => CheckForMigrationOrUpgradeAsync())).Start(); + } + } + + private async Task RecoverAsync(MachineState state, long checkpointToLoad = -1, bool testUpgrade = false) + { + if (!_runningRepro) + { + // We are recovering - find the last committed checkpoint + state.LastCommittedCheckpoint = long.Parse(RetrieveServiceInfo(InfoTitle("LastCommittedCheckpoint", state.ShardID))); + } + else + { + // We are running a repro + state.LastCommittedCheckpoint = checkpointToLoad; + } + // Start from the log file associated with the last committed checkpoint + state.LastLogFile = state.LastCommittedCheckpoint; + if (_activeActive) + { + if (!_runningRepro) + { + // Determines the role as either secondary or checkpointer. If its a checkpointer, _commitBlobWriter holds the write lock on the last checkpoint + DetermineRole(state); + } + else + { + // We are running a repro. Act as a secondary + state.MyRole = AARole.Secondary; + } + } + + using (ILogReader checkpointStream = LogReaderStaticPicker.curStatic.Generate(CheckpointName(state.LastCommittedCheckpoint, state.ShardID))) + { + // recover the checkpoint - Note that everything except the replay data must have been written successfully or we + // won't think we have a valid checkpoint here. Since we can only be the secondary or checkpointer, the committer doesn't write to the replay log + // Recover committer + state.Committer = new Committer(_localServiceSendToStream, _persistLogs, this, -1, checkpointStream); + // Recover input connections + state.Inputs = state.Inputs.AmbrosiaDeserialize(checkpointStream); + // Recover output connections + state.Outputs = state.Outputs.AmbrosiaDeserialize(checkpointStream, this); + UnbufferNonreplayableCalls(state.Outputs); + // Restore new service from checkpoint + var serviceCheckpoint = new FlexReadBuffer(); + FlexReadBuffer.Deserialize(checkpointStream, serviceCheckpoint); + state.Committer.SendCheckpointToRecoverFrom(serviceCheckpoint.Buffer, serviceCheckpoint.Length, checkpointStream); + } + + using (ILogReader replayStream = LogReaderStaticPicker.curStatic.Generate(LogFileName(state.LastLogFile, state.ShardID))) + { + if (state.MyRole == AARole.Secondary && !_runningRepro) + { + // If this is a secondary, set up the detector to detect when this instance becomes the primary + var t = DetectBecomingPrimaryAsync(state); + } + if (testUpgrade) + { + // We are actually testing an upgrade. Must upgrade the service before replay + state.Committer.SendUpgradeRequest(); + } + // We need _outputs to be set before ProcessRPC is invoked + UpdateAmbrosiaState(state); + await ReplayAsync(replayStream, state); + } + } + + private async Task PrepareToBecomePrimaryAsync() + { + var readVersion = long.Parse(RetrieveServiceInfo(InfoTitle("CurrentVersion"))); + if (_currentVersion != readVersion) + { + + OnError(VersionMismatch, "Version changed during recovery: Expected " + _currentVersion + " was: " + readVersion.ToString()); + } + if (_upgrading) + { + MoveServiceToUpgradeDirectory(); + } + // Now becoming the primary. Moving to next log file since the current one may have junk at the end. + bool wasUpgrading = _upgrading; + var oldFileHandle = await MoveServiceToNextLogFileAsync(false, true); + if (wasUpgrading) + { + // Successfully wrote out our new first checkpoint in the upgraded version, can now officially take the version upgrade + InsertOrReplaceServiceInfoRecord(InfoTitle("CurrentVersion"), _upgradeToVersion.ToString()); + // We have now completed the upgrade and may release the old file lock. + oldFileHandle.Dispose(); + // Moving to the next file means the first log file is empty, but it immediately causes failures of all old secondaries. + await MoveServiceToNextLogFileAsync(); + } + } + + private async Task StartAsync() + { + // We are starting for the first time. This is the primary + _restartWithRecovery = false; + _lastCommittedCheckpoint = 0; + _lastLogFile = 0; + _inputs = new ConcurrentDictionary(); + _outputs = new ConcurrentDictionary(); + _serviceInstanceTable.CreateIfNotExistsAsync().Wait(); + + _myRole = AARole.Primary; + + _checkpointWriter = null; + _committer = new Committer(_localServiceSendToStream, _persistLogs, this); + await ConnectAsync(ServiceName(), AmbrosiaDataOutputsName, ServiceName(), AmbrosiaDataInputsName); + await ConnectAsync(ServiceName(), AmbrosiaControlOutputsName, ServiceName(), AmbrosiaControlInputsName); + await MoveServiceToNextLogFileAsync(true, true); + InsertOrReplaceServiceInfoRecord(InfoTitle("CurrentVersion"), _currentVersion.ToString()); + } + + private void UnbufferNonreplayableCalls(ConcurrentDictionary outputs) + { + foreach (var outputRecord in outputs) + { + var newLastSeqNo = outputRecord.Value.BufferedOutput.TrimAndUnbufferNonreplayableCalls(outputRecord.Value.TrimTo, outputRecord.Value.ReplayableTrimTo); + if (newLastSeqNo != -1) + { + outputRecord.Value.LastSeqNoFromLocalService = newLastSeqNo; + } + } + } + + internal void MoveServiceToUpgradeDirectory() + { + _logWriterStatics.CreateDirectoryIfNotExists(RootDirectory(_upgradeToVersion)); + _logFileNameBase = LogFileNameBase(_upgradeToVersion); + } + + public async Task ConnectAsync(string fromProcessName, string fromEndpoint, string toProcessName, string toEndpoint) + { + foreach (var conn in await _coral.GetConnectionsFromVertexAsync(fromProcessName)) + { + if (conn.FromEndpoint.Equals(fromEndpoint) && conn.ToVertex.Equals(toProcessName) && conn.ToEndpoint.Equals(toEndpoint)) + return CRAErrorCode.Success; + } + return await _coral.ConnectAsync(fromProcessName, fromEndpoint, toProcessName, toEndpoint); + } + + private string ServiceName(long shardID = -1) + { + if (_sharded) + { + if (shardID == -1) + { + shardID = _shardID; + } + return _serviceName + "-" + shardID.ToString(); + } + return _serviceName; + } + + private string RootDirectory(long version = -1) + { + if (version == -1) + { + version = _currentVersion; + } + + return _serviceLogPath + _serviceName + "_" + version; + } + + private string LogDirectory(long version = -1, long shardID = -1) + { + string shard = ""; + if (_sharded) + { + if (shardID == -1) + { + shardID = _shardID; + } + shard = shardID.ToString(); + } + + return Path.Combine(RootDirectory(version), shard); + } + + private string LogFileNameBase(long version = -1, long shardID = -1) + { + if (version == -1) + { + return _logFileNameBase; + } + return Path.Combine(LogDirectory(version, shardID), "server"); + } + + private string CheckpointName(long checkpoint, long shardID = -1, long version = -1) + { + return LogFileNameBase(version, shardID) + "chkpt" + checkpoint.ToString(); + } + + private string LogFileName(long logFile, long shardID = -1, long version = -1) + { + return LogFileNameBase(version, shardID) + "log" + logFile.ToString(); + } + + private ILogWriter CreateNextOldVerLogFile() + { + if (_logWriterStatics.FileExists(LogFileName(_lastLogFile + 1, _shardID, _currentVersion))) + { + _logWriterStatics.DeleteFile(LogFileName(_lastLogFile + 1, _shardID, _currentVersion)); + } + ILogWriter retVal = null; + try + { + retVal = _logWriterStatics.Generate(LogFileName(_lastLogFile + 1, _shardID, _currentVersion), 1024 * 1024, 6); + } + catch (Exception e) + { + OnError(0, "Error opening next log file:" + e.ToString()); + } + return retVal; + } + + // Used to create a kill file meant to being down primaries and prevent promotion. Promotion prevention + // lasts until the returned file handle is released. + private void LockKillFile() + { + _killFileHandle = _logWriterStatics.Generate(_logFileNameBase + "killFile", 1024 * 1024, 6, true); + } + + private void ReleaseAndTryCleanupKillFile() + { + _killFileHandle.Dispose(); + _killFileHandle = null; + try + { + // Try to delete the file. Someone may beat us to it. + _logWriterStatics.DeleteFile(_logFileNameBase + "killFile"); + } + catch (Exception e) + { + Trace.TraceInformation(e.ToString()); + } + } + + private ILogWriter CreateNextLogFile() + { + if (_logWriterStatics.FileExists(LogFileName(_lastLogFile + 1))) + { + _logWriterStatics.DeleteFile(LogFileName(_lastLogFile + 1)); + } + ILogWriter retVal = null; + try + { + retVal = _logWriterStatics.Generate(LogFileName(_lastLogFile + 1), 1024 * 1024, 6); + } + catch (Exception e) + { + OnError(0, "Error opening next log file:" + e.ToString()); + } + return retVal; + } + + private string InfoTitle(string prefix, long shardID = -1) + { + var file = prefix; + if (_sharded) + { + if (shardID == -1) + { + shardID = _shardID; + } + file += shardID.ToString(); + } + return file; + } + + // Closes out the old log file and starts a new one. Takes checkpoints if this instance should + private async Task MoveServiceToNextLogFileAsync(bool firstStart = false, bool becomingPrimary = false) + { + // Move to the next log file. By doing this before checkpointing, we may end up skipping a checkpoint file (failure during recovery). + // This is ok since we recover from the first committed checkpoint and will just skip empty log files during replay. + // This also protects us from a failed upgrade, which is why the file is created in both directories on upgrade, and why the lock on upgrade is held until successful upgrade or failure. + await _committer.SleepAsync(); + var nextLogHandle = CreateNextLogFile(); + ILogWriter oldVerLogHandle = null; + if (_upgrading) + { + oldVerLogHandle = CreateNextOldVerLogFile(); + } + _lastLogFile++; + InsertOrReplaceServiceInfoRecord(InfoTitle("LastLogFile"), _lastLogFile.ToString()); + _committer.SwitchLogStreams(nextLogHandle); + if (!firstStart && _activeActive && !_upgrading && becomingPrimary) + { + // In this case, we want the local service to become primary without taking a checkpoint + _committer.SendBecomePrimaryRequest(); + } + else if (firstStart || !_activeActive || _upgrading) + { + // take the checkpoint associated with the beginning of the new log and let go of the log file lock + _committer.QuiesceServiceWithSendCheckpointRequest(_upgrading, becomingPrimary); + _upgrading = false; + if (firstStart) + { + while (ServiceInitializationMessage == null) { await Task.Yield(); }; + await _committer.AddInitialRowAsync(ServiceInitializationMessage); + } + await CheckpointAsync(); + _checkpointWriter.Dispose(); + _checkpointWriter = null; + } + await _committer.WakeupAsync(); + // This is a safe place to try to commit, because if this is called during recovery, + // it's after replace and moving to the next log file. Note that this will also have the effect + // of shaking loose the initialization message, ensuring liveliness. + await _committer.TryCommitAsync(_outputs); + return oldVerLogHandle; + } + + //============================================================================================================== + // Insance compete over write permission for LOG file & CheckPoint file + private void DetermineRole(MachineState state) + { + if (_upgrading) + { + state.MyRole = AARole.Secondary; + return; + } + try + { + // Try to grab the checkpoint lock twice to break lingering locks on Azure blobs + bool gotLock = false; + for (int i = 0; i < 2; i++) + { + try + { + if (i == 1) + { + // Second attempt, wait 5 seconds to see if the lock can be grabbed + Thread.Sleep(4000); + } + state.CheckpointWriter = _logWriterStatics.Generate(CheckpointName(state.LastCommittedCheckpoint), 1024 * 1024, 6, true); + } + catch { continue; } + // Success! + gotLock = true; + break; + } + if (!gotLock) + { + throw new Exception("Couldn't get checkpoint lock"); + } + state.MyRole = AARole.Checkpointer; // I'm a checkpointing secondary + Trace.TraceInformation("I'm a checkpointer"); + var oldCheckpoint = state.LastCommittedCheckpoint; + state.LastCommittedCheckpoint = long.Parse(RetrieveServiceInfo(InfoTitle("LastCommittedCheckpoint", state.ShardID))); + if (oldCheckpoint != state.LastCommittedCheckpoint) + { + state.CheckpointWriter.Dispose(); + throw new Exception("We got a handle on an old checkpoint. The checkpointer was alive when this instance started"); + } + } + catch + { + state.CheckpointWriter = null; + state.MyRole = AARole.Secondary; // I'm a secondary + Trace.TraceInformation("I'm a secondary"); + } + } + + internal async Task DetectBecomingPrimaryAsync(MachineState state) + { + // keep trying to take the write permission on LOG file + // LOG write permission acquired only in case primary failed (is down) + while (true) + { + ILogWriter lastLogFileStream = null; + try + { + if (_upgrading && _activeActive && (_killFileHandle == null)) + { + await Task.Delay(1500); + continue; + } + var oldLastLogFile = state.LastLogFile; + Debug.Assert(lastLogFileStream == null); + // Compete for log write permission - non destructive open for write - open for append + lastLogFileStream = _logWriterStatics.Generate(LogFileName(oldLastLogFile, state.ShardID), 1024 * 1024, 6, true); + if (long.Parse(RetrieveServiceInfo(InfoTitle("LastLogFile", state.ShardID))) != oldLastLogFile) + { + // We got an old log. Try again + lastLogFileStream.Dispose(); + lastLogFileStream = null; + throw new Exception(); + } + // We got the lock! Set things up so we let go of the lock at the right moment + // But first check if we got the lock because the version changed, in which case, we should commit suicide + var readVersion = long.Parse(RetrieveServiceInfo(InfoTitle("CurrentVersion", state.ShardID))); + if (_currentVersion != readVersion) + { + + OnError(VersionMismatch, "Version changed during recovery: Expected " + _currentVersion + " was: " + readVersion.ToString()); + } + + // Before allowing the node to become primary in active/active, if we are not an upgrader, see if we are prevented by a kill file. + if (_activeActive && !_upgrading) + { + LockKillFile(); + // If we reach here, we have the lock and can promote, otherwise an exception was thrown and we can't promote + ReleaseAndTryCleanupKillFile(); + } + + // Now we can really promote! + await state.Committer.SleepAsync(); + state.Committer.SwitchLogStreams(lastLogFileStream); + await state.Committer.WakeupAsync(); + state.MyRole = AARole.Primary; // this will stop and break the loop in the function replayInput_Sec() + Trace.TraceInformation("\n\nNOW I'm Primary\n\n"); + // if we are an upgrader : Time to release the kill file lock and cleanup. Note that since we have the log lock + // everyone is prevented from promotion until we succeed or fail. + if (_upgrading && _activeActive) + { + Debug.Assert(_killFileHandle != null); + ReleaseAndTryCleanupKillFile(); + } + return; + } + catch + { + if (lastLogFileStream != null) + { + lastLogFileStream.Dispose(); + lastLogFileStream = null; + } + // Check if the version changed, in which case, we should commit suicide + var readVersion = long.Parse(RetrieveServiceInfo(InfoTitle("CurrentVersion"))); + if (_currentVersion != readVersion) + { + + OnError(VersionMismatch, "Version changed during recovery: Expected " + _currentVersion + " was: " + readVersion.ToString()); + } + await Task.Delay(1500); + } + } + } + + private async Task ReplayAsync(ILogReader replayStream, MachineState state) + { + var tempBuf = new byte[100]; + var tempBuf2 = new byte[100]; + var headerBuf = new byte[Committer.HeaderSize]; + var headerBufStream = new MemoryStream(headerBuf); + var committedInputDict = new Dictionary(); + var trimDict = new Dictionary(); + var detectedEOF = false; + var detectedEOL = false; + var clearedCommitterWrite = false; + var haveWriterLockForNonActiveActive = false; + ILogWriter lastLogFileStreamWriter = null; + // Keep replaying commits until we run out of replay data + while (true) + { + long logRecordPos = replayStream.Position; + int commitSize; + try + { + // First get commit ID and check for integrity + replayStream.ReadAllRequiredBytes(headerBuf, 0, Committer.HeaderSize); + headerBufStream.Position = 0; + var commitID = headerBufStream.ReadIntFixed(); + if (commitID != state.Committer.CommitID) + { + throw new Exception("Committer didn't match. Must be incomplete record"); + } + // Get commit page length + commitSize = headerBufStream.ReadIntFixed(); + var checkBytes = headerBufStream.ReadLongFixed(); + var writeSeqID = headerBufStream.ReadLongFixed(); + if (writeSeqID != state.Committer._nextWriteID) + { + throw new Exception("Out of order page. Must be incomplete record"); + } + // Remove header + commitSize -= Committer.HeaderSize; + if (commitSize > tempBuf.Length) + { + tempBuf = new byte[commitSize]; + } + replayStream.ReadAllRequiredBytes(tempBuf, 0, commitSize); + // Perform integrity check + long checkBytesCalc = state.Committer.CheckBytes(tempBuf, 0, commitSize); + if (checkBytesCalc != checkBytes) + { + throw new Exception("Integrity check failed for page. Must be incomplete record"); + } + + // Read changes in input consumption progress to reflect in _inputs + var watermarksToRead = replayStream.ReadInt(); + committedInputDict.Clear(); + for (int i = 0; i < watermarksToRead; i++) + { + var inputNameSize = replayStream.ReadInt(); + if (inputNameSize > tempBuf2.Length) + { + tempBuf2 = new byte[inputNameSize]; + } + replayStream.ReadAllRequiredBytes(tempBuf2, 0, inputNameSize); + var inputName = Encoding.UTF8.GetString(tempBuf2, 0, inputNameSize); + var newLongPair = new LongPair(); + newLongPair.First = replayStream.ReadLongFixed(); + newLongPair.Second = replayStream.ReadLongFixed(); + committedInputDict[inputName] = newLongPair; + } + // Read changes in trim to perform and reflect in _outputs + watermarksToRead = replayStream.ReadInt(); + trimDict.Clear(); + for (int i = 0; i < watermarksToRead; i++) + { + var inputNameSize = replayStream.ReadInt(); + if (inputNameSize > tempBuf2.Length) + { + tempBuf2 = new byte[inputNameSize]; + } + replayStream.ReadAllRequiredBytes(tempBuf2, 0, inputNameSize); + var inputName = Encoding.UTF8.GetString(tempBuf2, 0, inputNameSize); + long seqNo = replayStream.ReadLongFixed(); + trimDict[inputName] = seqNo; + } + } + catch + { + // Non-Active/Active case for couldn't recover replay segment. Could be for a number of reasons. + + // Do we already have the write lock on the latest log? + if (!_activeActive) + { + // Since it's not the active/active case, take over (migrations scenario using the kill file, or just recover) + // But first, make sure we have fully consumed the log (except a bit at the end) + var actualLastLogFileNum = long.Parse(RetrieveServiceInfo(InfoTitle("LastLogFile", state.ShardID))); + if (!_logWriterStatics.FileExists(LogFileName(actualLastLogFileNum, state.ShardID))) + { + OnError(MissingLog, "Missing log in replay or update happened" + state.LastLogFile.ToString()); + } + if (actualLastLogFileNum > state.LastLogFile) // there are more log files to read. Move on. + { + state.LastLogFile++; + replayStream.Dispose(); + replayStream = LogReaderStaticPicker.curStatic.Generate(LogFileName(state.LastLogFile, state.ShardID)); + continue; + } + + if (!haveWriterLockForNonActiveActive) + { + // We're as close to the end of the log as we can get. We need to grab and hold the lock on the kill file. + while (true) + { + Thread.Sleep(200); + try + { + LockKillFile(); + // We have the lock! + break; + } + catch (Exception) + { + // Keep trying until successful + } + } + + // keep trying to take the write permission on LOG file until the old execution instance dies and lets go + while (true) + { + try + { + actualLastLogFileNum = long.Parse(RetrieveServiceInfo(InfoTitle("LastLogFile", state.ShardID))); + if (!_logWriterStatics.FileExists(LogFileName(actualLastLogFileNum, state.ShardID))) + { + OnError(MissingLog, "Missing log in replay or update happened" + state.LastLogFile.ToString()); + } + Debug.Assert(lastLogFileStreamWriter == null); + // See if we've successfully killed the old instance execution + lastLogFileStreamWriter = _logWriterStatics.Generate(LogFileName(actualLastLogFileNum, state.ShardID), 1024 * 1024, 6, true); + if (long.Parse(RetrieveServiceInfo(InfoTitle("LastLogFile", state.ShardID))) != actualLastLogFileNum) + { + // We got an old log. Try again + throw new Exception(); + } + // The old instance execution died. We need to finish recovery, then exit! + break; + } + catch + { + if (lastLogFileStreamWriter != null) + { + lastLogFileStreamWriter.Dispose(); + lastLogFileStreamWriter = null; + } + await Task.Delay(200); + } + } + // We've locked the log. There may be more log to consume. Continue until we hit the true end. + haveWriterLockForNonActiveActive = true; + replayStream.Position = logRecordPos; + continue; + } + else + { + // We've consumed the whole log and have all the necessary locks. + await state.Committer.SleepAsync(); + state.Committer.SwitchLogStreams(lastLogFileStreamWriter); + await state.Committer.WakeupAsync(); + Debug.Assert(_killFileHandle != null); + ReleaseAndTryCleanupKillFile(); + break; + } + } + + // Active/Active case for couldn't recover replay segment. Could be for a number of reasons. + if (detectedEOL) + { + break; + } + if (detectedEOF) + { + // Move to the next log file for reading only. We may need to take a checkpoint + state.LastLogFile++; + replayStream.Dispose(); + if (!_logWriterStatics.FileExists(LogFileName(state.LastLogFile, state.ShardID))) + { + OnError(MissingLog, "Missing log in replay " + state.LastLogFile.ToString()); + } + replayStream = LogReaderStaticPicker.curStatic.Generate(LogFileName(state.LastLogFile, state.ShardID)); + if (state.MyRole == AARole.Checkpointer) + { + // take the checkpoint associated with the beginning of the new log + // It's currently too disruptive to the code to pass in MachineState to + // CheckpointAsync, so we update the corresponding variables instead. + // This should be fine since the checkpointer should not replay from + // multiple logs in parallel. + UpdateAmbrosiaState(state); + _committer.SleepAsync(); + _committer.QuiesceServiceWithSendCheckpointRequest(); + await CheckpointAsync(); + await _committer.WakeupAsync(); + LoadAmbrosiaState(state); + } + detectedEOF = false; + continue; + } + var myRoleBeforeEOLChecking = state.MyRole; + replayStream.Position = logRecordPos; + var newLastLogFile = state.LastLogFile; + if (_runningRepro) + { + if (_logWriterStatics.FileExists(LogFileName(state.LastLogFile + 1, state.ShardID))) + { + // If there is a next file, then move to it + newLastLogFile = state.LastLogFile + 1; + } + } + else + { + newLastLogFile = long.Parse(RetrieveServiceInfo(InfoTitle("LastLogFile", state.ShardID))); + } + if (newLastLogFile > state.LastLogFile) // a new log file has been written + { + // Someone started a new log. Try to read the last record again and then move to next file + detectedEOF = true; + continue; + } + if (myRoleBeforeEOLChecking == AARole.Primary) + { + // Became the primary and the current file is the end of the log. Make sure we read the whole file. + detectedEOL = true; + continue; + } + // The remaining case is that we hit the end of log, but someone is still writing to this file. Wait and try to read again, or kill the primary if we are trying to upgrade in an active/active scenario + if (_upgrading && _activeActive && _killFileHandle == null) + { + // We need to write and hold the lock on the kill file. Recovery will continue until the primary dies and we have + // fully processed the log. + while (true) + { + try + { + LockKillFile(); + break; + } + catch (Exception) + { + // Someone may be checking promotability. Keep trying until successful + } + } + } + await Task.Delay(1000); + continue; + } + // Successfully read an entire replay segment. Go ahead and process for recovery + foreach (var kv in committedInputDict) + { + InputConnectionRecord inputConnectionRecord; + if (!state.Inputs.TryGetValue(kv.Key, out inputConnectionRecord)) + { + // Create input record and add it to the dictionary + inputConnectionRecord = new InputConnectionRecord(); + state.Inputs[kv.Key] = inputConnectionRecord; + } + inputConnectionRecord.LastProcessedID = kv.Value.First; + inputConnectionRecord.LastProcessedReplayableID = kv.Value.Second; + OutputConnectionRecord outputConnectionRecord; + // this lock prevents conflict with output arriving from the local service during replay + lock (state.Outputs) + { + if (!state.Outputs.TryGetValue(kv.Key, out outputConnectionRecord)) + { + outputConnectionRecord = new OutputConnectionRecord(this); + state.Outputs[kv.Key] = outputConnectionRecord; + } + } + // this lock prevents conflict with output arriving from the local service during replay and ensures maximal cleaning + lock (outputConnectionRecord) + { + outputConnectionRecord.RemoteTrim = Math.Max(kv.Value.First, outputConnectionRecord.RemoteTrim); + outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.Second, outputConnectionRecord.RemoteTrimReplayable); + if (outputConnectionRecord.ControlWorkQ.IsEmpty) + { + outputConnectionRecord.ControlWorkQ.Enqueue(-2); + } + } + } + // Do the actual work on the local service + _localServiceSendToStream.Write(headerBuf, 0, Committer.HeaderSize); + _localServiceSendToStream.Write(tempBuf, 0, commitSize); + // Trim the outputs. Should clean as aggressively as during normal operation + foreach (var kv in trimDict) + { + OutputConnectionRecord outputConnectionRecord; + // this lock prevents conflict with output arriving from the local service during replay + lock (state.Outputs) + { + if (!state.Outputs.TryGetValue(kv.Key, out outputConnectionRecord)) + { + outputConnectionRecord = new OutputConnectionRecord(this); + state.Outputs[kv.Key] = outputConnectionRecord; + } + } + // this lock prevents conflict with output arriving from the local service during replay and ensures maximal cleaning + lock (outputConnectionRecord) + { + outputConnectionRecord.TrimTo = kv.Value; + outputConnectionRecord.ReplayableTrimTo = kv.Value; + outputConnectionRecord.BufferedOutput.Trim(kv.Value, ref outputConnectionRecord.placeInOutput); + } + } + // If this is the first replay segment, it invalidates the contents of the committer, which must be cleared. + if (!clearedCommitterWrite) + { + state.Committer.ClearNextWrite(); + clearedCommitterWrite = true; + } + // bump up the write ID in the committer in preparation for reading or writing the next page + state.Committer._nextWriteID++; + } + } + + // Thread for listening to the local service + private void LocalListener() + { + try + { + var localServiceBuffer = new FlexReadBuffer(); + var batchServiceBuffer = new FlexReadBuffer(); + var bufferSize = 128 * 1024; + byte[] bytes = new byte[bufferSize]; + byte[] bytesBak = new byte[bufferSize]; + while (_outputs == null) { Thread.Yield(); } + while (true) + { + // Do an async message read. Note that the async aspect of this is slow. + FlexReadBuffer.Deserialize(_localServiceReceiveFromStream, localServiceBuffer); + ProcessSyncLocalMessage(ref localServiceBuffer, batchServiceBuffer); + /* Disabling because of BUGBUG. Eats checkpoint bytes in some circumstances before checkpointer can deal with it. + // Process more messages from the local service if available before going async again, doing this here because + // not all language shims will be good citizens here, and we may need to process small messages to avoid inefficiencies + // in LAR. + int curPosInBuffer = 0; + int readBytes = 0; + while (readBytes != 0 || _localServiceReceiveFromStream.DataAvailable) + { + // Read data into buffer to avoid lock contention of reading directly from the stream + while ((_localServiceReceiveFromStream.DataAvailable && readBytes < bufferSize) || !bytes.EnoughBytesForReadBufferedInt(0, readBytes)) + { + readBytes += _localServiceReceiveFromStream.Read(bytes, readBytes, bufferSize - readBytes); + } + // Continue loop as long as we can meaningfully read a message length + var memStream = new MemoryStream(bytes, 0, readBytes); + while (bytes.EnoughBytesForReadBufferedInt(curPosInBuffer, readBytes - curPosInBuffer)) + { + // Read the length of the next message + var messageSize = memStream.ReadInt(); + var messageSizeSize = StreamCommunicator.IntSize(messageSize); + memStream.Position -= messageSizeSize; + if (curPosInBuffer + messageSizeSize + messageSize > readBytes) + { + // didn't read the full message into the buffer. It must be torn + if (messageSize + messageSizeSize > bufferSize) + { + // Buffer isn't big enough to hold the whole torn event even if empty. Increase the buffer size so the message can fit. + bufferSize = messageSize + messageSizeSize; + var newBytes = new byte[bufferSize]; + Buffer.BlockCopy(bytes, curPosInBuffer, newBytes, 0, readBytes - curPosInBuffer); + bytes = newBytes; + bytesBak = new byte[bufferSize]; + readBytes -= curPosInBuffer; + curPosInBuffer = 0; + } + break; + } + else + { + // Count this message since it is fully in the buffer + FlexReadBuffer.Deserialize(memStream, localServiceBuffer); + ProcessSyncLocalMessage(ref localServiceBuffer, batchServiceBuffer); + curPosInBuffer += messageSizeSize + messageSize; + } + } + memStream.Dispose(); + // Shift torn message to the beginning unless it is the first one + if (curPosInBuffer > 0) + { + Buffer.BlockCopy(bytes, curPosInBuffer, bytesBak, 0, readBytes - curPosInBuffer); + var tempBytes = bytes; + bytes = bytesBak; + bytesBak = tempBytes; + readBytes -= curPosInBuffer; + curPosInBuffer = 0; + } + } */ + } + } + catch (Exception e) + { + OnError(AzureOperationError, "Error in local listener data stream:" + e.ToString()); + return; + } + } + + private void MoveServiceToNextLogFileSimple() + { + MoveServiceToNextLogFileAsync().Wait(); + } + + void AttachTo(string destination) + { + while (true) + { + Trace.TraceInformation("Attempting to attach to {0}", destination); + var connectionResult1 = ConnectAsync(ServiceName(), AmbrosiaDataOutputsName, destination, AmbrosiaDataInputsName).GetAwaiter().GetResult(); + var connectionResult2 = ConnectAsync(ServiceName(), AmbrosiaControlOutputsName, destination, AmbrosiaControlInputsName).GetAwaiter().GetResult(); + var connectionResult3 = ConnectAsync(destination, AmbrosiaDataOutputsName, ServiceName(), AmbrosiaDataInputsName).GetAwaiter().GetResult(); + var connectionResult4 = ConnectAsync(destination, AmbrosiaControlOutputsName, ServiceName(), AmbrosiaControlInputsName).GetAwaiter().GetResult(); + if ((connectionResult1 == CRAErrorCode.Success) && (connectionResult2 == CRAErrorCode.Success) && + (connectionResult3 == CRAErrorCode.Success) && (connectionResult4 == CRAErrorCode.Success)) + { + Trace.TraceInformation("Attached to {0}", destination); + return; + } + Thread.Sleep(1000); + } + } + + private void ProcessSyncLocalMessage(ref FlexReadBuffer localServiceBuffer, FlexReadBuffer batchServiceBuffer) + { + var sizeBytes = localServiceBuffer.LengthLength; + Task createCheckpointTask = null; + // Process the Async message +#if DEBUG + ValidateMessageValidity(localServiceBuffer.Buffer[sizeBytes]); +#endif + switch (localServiceBuffer.Buffer[sizeBytes]) + { + case takeCheckpointByte: + // Handle take checkpoint messages - This is here for testing + createCheckpointTask = new Task(new Action(MoveServiceToNextLogFileSimple)); + createCheckpointTask.Start(); + localServiceBuffer.ResetBuffer(); + break; + + case checkpointByte: + _lastReceivedCheckpointSize = StreamCommunicator.ReadBufferedLong(localServiceBuffer.Buffer, sizeBytes + 1); + Trace.TraceInformation("Reading a checkpoint {0} bytes", _lastReceivedCheckpointSize); + LastReceivedCheckpoint = localServiceBuffer; + // Block this thread until checkpointing is complete + while (LastReceivedCheckpoint != null) { Thread.Yield(); }; + break; + + case attachToByte: + // Get dest string + var destination = Encoding.UTF8.GetString(localServiceBuffer.Buffer, sizeBytes + 1, localServiceBuffer.Length - sizeBytes - 1); + localServiceBuffer.ResetBuffer(); + + if (!_runningRepro) + { + if (AmbrosiaRuntimeParms._looseAttach) + { + Thread attachThread = new Thread(() => AttachTo(destination)) { IsBackground = true }; + attachThread.Start(); + } + else + { + Trace.TraceInformation("Attaching to {0}", destination); + var connectionResult1 = ConnectAsync(ServiceName(), AmbrosiaDataOutputsName, destination, AmbrosiaDataInputsName).GetAwaiter().GetResult(); + var connectionResult2 = ConnectAsync(ServiceName(), AmbrosiaControlOutputsName, destination, AmbrosiaControlInputsName).GetAwaiter().GetResult(); + var connectionResult3 = ConnectAsync(destination, AmbrosiaDataOutputsName, ServiceName(), AmbrosiaDataInputsName).GetAwaiter().GetResult(); + var connectionResult4 = ConnectAsync(destination, AmbrosiaControlOutputsName, ServiceName(), AmbrosiaControlInputsName).GetAwaiter().GetResult(); + if ((connectionResult1 != CRAErrorCode.Success) || (connectionResult2 != CRAErrorCode.Success) || + (connectionResult3 != CRAErrorCode.Success) || (connectionResult4 != CRAErrorCode.Success)) + { + Trace.TraceError("Error attaching " + ServiceName() + " to " + destination); + // BUGBUG in tests. Should exit here. Fix tests then delete above line and replace with this OnError(0, "Error attaching " + _serviceName + " to " + destination); + } + } + } + break; + + case RPCBatchByte: + var restOfBatchOffset = sizeBytes + 1; + var memStream = new MemoryStream(localServiceBuffer.Buffer, restOfBatchOffset, localServiceBuffer.Length - restOfBatchOffset); + var numRPCs = memStream.ReadInt(); + for (int i = 0; i < numRPCs; i++) + { + FlexReadBuffer.Deserialize(memStream, batchServiceBuffer); + ProcessRPC(batchServiceBuffer); + } + memStream.Dispose(); + localServiceBuffer.ResetBuffer(); + break; + + case InitalMessageByte: + // Process the Async RPC request + ServiceInitializationMessage = localServiceBuffer; + localServiceBuffer = new FlexReadBuffer(); + break; + + case RPCByte: + ProcessRPC(localServiceBuffer); + // Now process any pending RPC requests from the local service before going async again + break; + + case PingByte: + // Write time into correct place in message + int destBytesSize = localServiceBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); + memStream = new MemoryStream(localServiceBuffer.Buffer, localServiceBuffer.Length - 5 * sizeof(long), sizeof(long)); + long time; + GetSystemTimePreciseAsFileTime(out time); + memStream.WriteLongFixed(time); + // Treat as RPC + ProcessRPC(localServiceBuffer); + memStream.Dispose(); + break; + + case PingReturnByte: + // Write time into correct place in message + destBytesSize = localServiceBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); + memStream = new MemoryStream(localServiceBuffer.Buffer, localServiceBuffer.Length - 2 * sizeof(long), sizeof(long)); + GetSystemTimePreciseAsFileTime(out time); + memStream.WriteLongFixed(time); + // Treat as RPC + ProcessRPC(localServiceBuffer); + memStream.Dispose(); + break; + + default: + // This one really should terminate the process; no recovery allowed. + OnError(0, "Illegal leading byte in local message"); + break; + } + } + + private void ValidateMessageValidity(byte messageType) + { + if ((_createService) && (ServiceInitializationMessage == null) && (messageType != InitalMessageByte)) + { + OnError(0, "Missing initial message from the application"); + } + if (((_createService) && (ServiceInitializationMessage != null) && (messageType == InitalMessageByte)) || + (!_createService && (messageType == InitalMessageByte))) + { + OnError(0, "Extra initialization message"); + } + if (messageType == checkpointByte) + { + if (ExpectingCheckpoint) + { + ExpectingCheckpoint = false; + } + else + { + OnError(0, "Received unexpected checkpoint"); + } + } + } + + int _lastShuffleDestSize = -1; // must be negative because self-messages are encoded with a destination size of 0 + byte[] _lastShuffleDest = new byte[20]; + OutputConnectionRecord _shuffleOutputRecord = null; + + bool EqualBytes(byte[] data1, int data1offset, byte[] data2, int elemsCompared) + { + for (int i = 0; i < elemsCompared; i++) + { + if (data1[i + data1offset] != data2[i]) + { + return false; + } + } + return true; + } + + private void ProcessRPC(FlexReadBuffer RpcBuffer) + { + var sizeBytes = RpcBuffer.LengthLength; + int destBytesSize = RpcBuffer.Buffer.ReadBufferedInt(sizeBytes + 1); + var destOffset = sizeBytes + 1 + StreamCommunicator.IntSize(destBytesSize); + // Check to see if the _lastShuffleDest is the same as the one to process. Caching here avoids significant overhead. + if (_lastShuffleDest == null || (_lastShuffleDestSize != destBytesSize) || !EqualBytes(RpcBuffer.Buffer, destOffset, _lastShuffleDest, destBytesSize)) + { + // Find the appropriate connection record + string destination; + if (_lastShuffleDest.Length < destBytesSize) + { + _lastShuffleDest = new byte[destBytesSize]; + } + Buffer.BlockCopy(RpcBuffer.Buffer, destOffset, _lastShuffleDest, 0, destBytesSize); + _lastShuffleDestSize = destBytesSize; + destination = Encoding.UTF8.GetString(RpcBuffer.Buffer, destOffset, destBytesSize); + // locking to avoid conflict with stream reconnection immediately after replay and trim during replay + lock (_outputs) + { + // During replay, the output connection won't exist if this is the first message ever and no trim record has been processed yet. + if (!_outputs.TryGetValue(destination, out _shuffleOutputRecord)) + { + _shuffleOutputRecord = new OutputConnectionRecord(this); + _outputs[destination] = _shuffleOutputRecord; + } + } + } + + int restOfRPCOffset = destOffset + destBytesSize; + int restOfRPCMessageSize = RpcBuffer.Length - restOfRPCOffset; + var totalSize = StreamCommunicator.IntSize(1 + restOfRPCMessageSize) + + 1 + restOfRPCMessageSize; + + // lock to avoid conflict and ensure maximum memory cleaning during replay. No possible conflict during primary operation + lock (_shuffleOutputRecord) + { + // Buffer the output if it is at or beyond the replay or trim point (during recovery). + if ((_shuffleOutputRecord.LastSeqNoFromLocalService + 1 >= _shuffleOutputRecord.ReplayFrom) && + (_shuffleOutputRecord.LastSeqNoFromLocalService + 1 >= _shuffleOutputRecord.ReplayableTrimTo)) + { + var writablePage = _shuffleOutputRecord.BufferedOutput.GetWritablePage(totalSize, _shuffleOutputRecord.LastSeqNoFromLocalService + 1); + writablePage.HighestSeqNo = _shuffleOutputRecord.LastSeqNoFromLocalService + 1; + + var methodID = RpcBuffer.Buffer.ReadBufferedInt(restOfRPCOffset + 1); + if (RpcBuffer.Buffer[restOfRPCOffset + 1 + StreamCommunicator.IntSize(methodID)] != (byte)RpcTypes.RpcType.Impulse) + { + writablePage.UnsentReplayableMessages++; + writablePage.TotalReplayableMessages++; + } + + // Write the bytes into the page + writablePage.curLength += writablePage.PageBytes.WriteInt(writablePage.curLength, 1 + restOfRPCMessageSize); + writablePage.PageBytes[writablePage.curLength] = RpcBuffer.Buffer[sizeBytes]; + writablePage.curLength++; + Buffer.BlockCopy(RpcBuffer.Buffer, restOfRPCOffset, writablePage.PageBytes, writablePage.curLength, restOfRPCMessageSize); + writablePage.curLength += restOfRPCMessageSize; + + // Done making modifications to the output buffer and grabbed important state. Can execute the rest concurrently. Release the lock + _shuffleOutputRecord.BufferedOutput.ReleaseAppendLock(); + RpcBuffer.ResetBuffer(); + + // Make sure there is a send enqueued in the work Q. + long sendEnqueued = Interlocked.Read(ref _shuffleOutputRecord._sendsEnqueued); + if (sendEnqueued == 0) + { + Interlocked.Increment(ref _shuffleOutputRecord._sendsEnqueued); + _shuffleOutputRecord.DataWorkQ.Enqueue(-1); + } + } + else + { + RpcBuffer.ResetBuffer(); + } + _shuffleOutputRecord.LastSeqNoFromLocalService++; + } + } + + private async Task ToDataStreamAsync(Stream writeToStream, + string destString, + CancellationToken ct) + + { + OutputConnectionRecord outputConnectionRecord; + if (destString.Equals(ServiceName())) + { + destString = ""; + } + lock (_outputs) + { + if (!_outputs.TryGetValue(destString, out outputConnectionRecord)) + { + // Set up the output record for the first time and add it to the dictionary + outputConnectionRecord = new OutputConnectionRecord(this); + _outputs[destString] = outputConnectionRecord; + Trace.TraceInformation("Adding output:{0}", destString); + } + else + { + Trace.TraceInformation("restoring output:{0}", destString); + } + } + try + { + // Reset the output cursor if it exists + outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); + outputConnectionRecord.placeInOutput = new EventBuffer.BuffersCursor(null, -1, 0); + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + // Process replay message + var inputFlexBuffer = new FlexReadBuffer(); + await FlexReadBuffer.DeserializeAsync(writeToStream, inputFlexBuffer, ct); + var sizeBytes = inputFlexBuffer.LengthLength; + // Get the seqNo of the replay/filter point + var commitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); + var commitSeqNoReplayable = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1 + StreamCommunicator.LongSize(commitSeqNo)); + inputFlexBuffer.ResetBuffer(); + if (outputConnectionRecord.ConnectingAfterRestart) + { + // We've been through recovery (at least partially), and have scrubbed all ephemeral calls. Must now rebase + // seq nos using the markers which were sent by the listener. Must first take locks to ensure no interference + lock (outputConnectionRecord) + { + // Don't think I actually need this lock, but can't hurt and shouldn't affect perf. + outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); + outputConnectionRecord.BufferedOutput.RebaseSeqNosInBuffer(commitSeqNo, commitSeqNoReplayable); + outputConnectionRecord.LastSeqNoFromLocalService += commitSeqNo - commitSeqNoReplayable; + outputConnectionRecord.ConnectingAfterRestart = false; + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + } + } + + // If recovering, make sure event replay will be filtered out + outputConnectionRecord.ReplayFrom = commitSeqNo; + + if (outputConnectionRecord.WillResetConnection) + { + // Register our immediate intent to set the connection. This unblocks output writers + outputConnectionRecord.ResettingConnection = true; + // This lock avoids interference with buffering RPCs + lock (outputConnectionRecord) + { + // If first reconnect/connect after reset, simply adjust the seq no for the first sent message to the received commit seq no + outputConnectionRecord.ResettingConnection = false; + outputConnectionRecord.LastSeqNoFromLocalService = outputConnectionRecord.BufferedOutput.AdjustFirstSeqNoTo(commitSeqNo); + outputConnectionRecord.WillResetConnection = false; + } + } + outputConnectionRecord.LastSeqSentToReceiver = commitSeqNo - 1; + + // Enqueue a replay send + long sendEnqueued = Interlocked.Read(ref outputConnectionRecord._sendsEnqueued); + if (sendEnqueued == 0) + { + Interlocked.Increment(ref outputConnectionRecord._sendsEnqueued); + outputConnectionRecord.DataWorkQ.Enqueue(-1); + } + + // Make sure enough recovery output has been produced before we allow output to start being sent, which means that the next + // message has to be the first for replay. + while (Interlocked.Read(ref outputConnectionRecord.LastSeqNoFromLocalService) < + Interlocked.Read(ref outputConnectionRecord.LastSeqSentToReceiver)) { await Task.Yield(); }; + while (true) + { + var nextEntry = await outputConnectionRecord.DataWorkQ.DequeueAsync(ct); + if (nextEntry == -1) + { + // This is a send output + Debug.Assert(outputConnectionRecord._sendsEnqueued > 0); + Interlocked.Decrement(ref outputConnectionRecord._sendsEnqueued); + + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Code to manually trim for performance testing + // int placeToTrimTo = outputConnectionRecord.LastSeqNoFromLocalService; + // StartupParamOverrides.OutputStream.WriteLine("send to {0}", outputConnectionRecord.LastSeqNoFromLocalService); + outputConnectionRecord.BufferedOutput.AcquireTrimLock(2); + var placeAtCall = outputConnectionRecord.LastSeqSentToReceiver; + outputConnectionRecord.placeInOutput = + await outputConnectionRecord.BufferedOutput.SendAsync(writeToStream, outputConnectionRecord.placeInOutput); + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!Code to manually trim for performance testing + // outputConnectionRecord.TrimTo = placeToTrimTo; + } + } + } + catch (Exception e) + { + // Cleanup held locks if necessary + await Task.Yield(); + var lockVal = outputConnectionRecord.BufferedOutput.ReadTrimLock(); + if (lockVal == 1 || lockVal == 2) + { + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + } + var bufferLockVal = outputConnectionRecord.BufferedOutput.ReadAppendLock(); + if (bufferLockVal == 2) + { + outputConnectionRecord.BufferedOutput.ReleaseAppendLock(); + } + throw e; + } + } + + private async Task ToControlStreamAsync(Stream writeToStream, + string destString, + CancellationToken ct) + + { + OutputConnectionRecord outputConnectionRecord; + if (destString.Equals(ServiceName())) + { + destString = ""; + } + lock (_outputs) + { + if (!_outputs.TryGetValue(destString, out outputConnectionRecord)) + { + // Set up the output record for the first time and add it to the dictionary + outputConnectionRecord = new OutputConnectionRecord(this); + _outputs[destString] = outputConnectionRecord; + Trace.TraceInformation("Adding output:{0}", destString); + } + else + { + Trace.TraceInformation("restoring output:{0}", destString); + } + } + // Process remote trim message + var inputFlexBuffer = new FlexReadBuffer(); + await FlexReadBuffer.DeserializeAsync(writeToStream, inputFlexBuffer, ct); + var sizeBytes = inputFlexBuffer.LengthLength; + // Get the seqNo of the replay/filter point + var lastRemoteTrim = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); + long lastRemoteTrimReplayable; + + // This code dequeues output producing tasks and runs them + long currentTrim = -1; + int maxSizeOfWatermark = sizeof(int) + 4 + 2 * sizeof(long); + var watermarkArr = new byte[maxSizeOfWatermark]; + var watermarkStream = new MemoryStream(watermarkArr); + try + { + while (true) + { + // Always try to trim output buffers if possible to free up resources + if (outputConnectionRecord.TrimTo > currentTrim) + { + currentTrim = outputConnectionRecord.TrimTo; + outputConnectionRecord.BufferedOutput.AcquireTrimLock(3); + outputConnectionRecord.BufferedOutput.Trim(currentTrim, ref outputConnectionRecord.placeInOutput); + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + } + var nextEntry = await outputConnectionRecord.ControlWorkQ.DequeueAsync(ct); + if (lastRemoteTrim < outputConnectionRecord.RemoteTrim) + { + // This is a send watermark + // Must lock to atomically read due to races with CheckpointAsync and SendInputWatermarks + lock (outputConnectionRecord._remoteTrimLock) + { + + lastRemoteTrim = outputConnectionRecord.RemoteTrim; + lastRemoteTrimReplayable = outputConnectionRecord.RemoteTrimReplayable; + } + watermarkStream.Position = 0; + var watermarkLength = 1 + StreamCommunicator.LongSize(lastRemoteTrim) + StreamCommunicator.LongSize(lastRemoteTrimReplayable); + watermarkStream.WriteInt(watermarkLength); + watermarkStream.WriteByte(AmbrosiaRuntime.CommitByte); + watermarkStream.WriteLong(lastRemoteTrim); + watermarkStream.WriteLong(lastRemoteTrimReplayable); + await writeToStream.WriteAsync(watermarkArr, 0, watermarkLength + StreamCommunicator.IntSize(watermarkLength)); + var flushTask = writeToStream.FlushAsync(); + } + } + } + catch (Exception e) + { + // Cleanup held locks if necessary + await Task.Yield(); + var lockVal = outputConnectionRecord.BufferedOutput.ReadTrimLock(); + if (lockVal == 3) + { + outputConnectionRecord.BufferedOutput.ReleaseTrimLock(); + } + var bufferLockVal = outputConnectionRecord.BufferedOutput.ReadAppendLock(); + if (bufferLockVal == 3) + { + outputConnectionRecord.BufferedOutput.ReleaseAppendLock(); + } + throw e; + } + } + + private async Task SendReplayMessageAsync(Stream sendToStream, + long lastProcessedID, + long lastProcessedReplayableID, + CancellationToken ct) + { + // Send FilterTo message to the destination command stream + // Write message size + sendToStream.WriteInt(1 + StreamCommunicator.LongSize(lastProcessedID) + StreamCommunicator.LongSize(lastProcessedReplayableID)); + // Write message type + sendToStream.WriteByte(replayFromByte); + // Write the output filter seqNo for the other side + sendToStream.WriteLong(lastProcessedID); + sendToStream.WriteLong(lastProcessedReplayableID); + await sendToStream.FlushAsync(ct); + } + + + private async Task SendTrimStateMessageAsync(Stream sendToStream, + long trimTo, + CancellationToken ct) + { + // Send FilterTo message to the destination command stream + // Write message size + sendToStream.WriteInt(1 + StreamCommunicator.LongSize(trimTo)); + // Write message type + sendToStream.WriteByte(trimToByte); + // Write the output filter seqNo for the other side + sendToStream.WriteLong(trimTo); + await sendToStream.FlushAsync(ct); + } + + private async Task FromDataStreamAsync(Stream readFromStream, + string sourceString, + CancellationToken ct) + { + InputConnectionRecord inputConnectionRecord; + if (sourceString.Equals(ServiceName())) + { + sourceString = ""; + } + if (!_inputs.TryGetValue(sourceString, out inputConnectionRecord)) + { + // Create input record and add it to the dictionary + inputConnectionRecord = new InputConnectionRecord(); + _inputs[sourceString] = inputConnectionRecord; + Trace.TraceInformation("Adding input:{0}", sourceString); + } + else + { + Trace.TraceInformation("restoring input:{0}", sourceString); + } + inputConnectionRecord.DataConnectionStream = (NetworkStream)readFromStream; + await SendReplayMessageAsync(readFromStream, inputConnectionRecord.LastProcessedID + 1, inputConnectionRecord.LastProcessedReplayableID + 1, ct); + // Create new input task for monitoring new input + Task inputTask; + inputTask = InputDataListenerAsync(inputConnectionRecord, sourceString, ct); + await inputTask; + } + + private async Task FromControlStreamAsync(Stream readFromStream, + string sourceString, + CancellationToken ct) + { + InputConnectionRecord inputConnectionRecord; + if (sourceString.Equals(ServiceName())) + { + sourceString = ""; + } + if (!_inputs.TryGetValue(sourceString, out inputConnectionRecord)) + { + // Create input record and add it to the dictionary + inputConnectionRecord = new InputConnectionRecord(); + _inputs[sourceString] = inputConnectionRecord; + Trace.TraceInformation("Adding input:{0}", sourceString); + } + else + { + Trace.TraceInformation("restoring input:{0}", sourceString); + } + inputConnectionRecord.ControlConnectionStream = (NetworkStream)readFromStream; + OutputConnectionRecord outputConnectionRecord; + long outputTrim = -1; + lock (_outputs) + { + if (_outputs.TryGetValue(sourceString, out outputConnectionRecord)) + { + outputTrim = outputConnectionRecord.TrimTo; + } + } + await SendTrimStateMessageAsync(readFromStream, outputTrim, ct); + // Create new input task for monitoring new input + Task inputTask; + inputTask = InputControlListenerAsync(inputConnectionRecord, sourceString, ct); + await inputTask; + } + + + private async Task InputDataListenerAsync(InputConnectionRecord inputRecord, + string inputName, + CancellationToken ct) + { + var inputFlexBuffer = new FlexReadBuffer(); + var bufferSize = 128 * 1024; + byte[] bytes = new byte[bufferSize]; + byte[] bytesBak = new byte[bufferSize]; + while (true) + { + await FlexReadBuffer.DeserializeAsync(inputRecord.DataConnectionStream, inputFlexBuffer, ct); + await ProcessInputMessageAsync(inputRecord, inputName, inputFlexBuffer); + } + } + + private async Task InputControlListenerAsync(InputConnectionRecord inputRecord, + string inputName, + CancellationToken ct) + { + var inputFlexBuffer = new FlexReadBuffer(); + var myBytes = new byte[20]; + var bufferSize = 128 * 1024; + byte[] bytes = new byte[bufferSize]; + byte[] bytesBak = new byte[bufferSize]; + while (true) + { + await FlexReadBuffer.DeserializeAsync(inputRecord.ControlConnectionStream, inputFlexBuffer, ct); + var sizeBytes = inputFlexBuffer.LengthLength; + switch (inputFlexBuffer.Buffer[sizeBytes]) + { + case CommitByte: + long commitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1); + long replayableCommitSeqNo = StreamCommunicator.ReadBufferedLong(inputFlexBuffer.Buffer, sizeBytes + 1 + StreamCommunicator.LongSize(commitSeqNo)); + inputFlexBuffer.ResetBuffer(); + + // Find the appropriate connection record + var outputConnectionRecord = _outputs[inputName]; + // Check to make sure this is progress, otherwise, can ignore + if (commitSeqNo > outputConnectionRecord.TrimTo && !outputConnectionRecord.WillResetConnection && !outputConnectionRecord.ConnectingAfterRestart) + { + // Lock to ensure atomic update of both variables due to race in AmbrosiaSerialize + lock (outputConnectionRecord._trimLock) + { + outputConnectionRecord.TrimTo = Math.Max(outputConnectionRecord.TrimTo, commitSeqNo); + outputConnectionRecord.ReplayableTrimTo = Math.Max(outputConnectionRecord.ReplayableTrimTo, replayableCommitSeqNo); + } + if (outputConnectionRecord.ControlWorkQ.IsEmpty) + { + outputConnectionRecord.ControlWorkQ.Enqueue(-2); + } + lock (_committer._trimWatermarks) + { + _committer._trimWatermarks[inputName] = replayableCommitSeqNo; + } + } + break; + default: + // Bubble the exception up to CRA + throw new Exception("Illegal leading byte in input control message"); + break; + } + } + } + + private async Task ProcessInputMessageAsync(InputConnectionRecord inputRecord, + string inputName, + FlexReadBuffer inputFlexBuffer) + { + var sizeBytes = inputFlexBuffer.LengthLength; + switch (inputFlexBuffer.Buffer[sizeBytes]) + { + case RPCByte: + var methodID = inputFlexBuffer.Buffer.ReadBufferedInt(sizeBytes + 2); + long newFileSize; + if (inputFlexBuffer.Buffer[sizeBytes + 2 + StreamCommunicator.IntSize(methodID)] != (byte)RpcTypes.RpcType.Impulse) + { + newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + 1, inputRecord.LastProcessedReplayableID + 1, _outputs, inputRecord); + } + else + { + newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + 1, inputRecord.LastProcessedReplayableID, _outputs, inputRecord); + } + inputFlexBuffer.ResetBuffer(); + if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) + { + // Make sure only one input thread is moving to the next log file. Won't break the system if we don't do this, but could result in + // empty log files + if (Interlocked.CompareExchange(ref _movingToNextLog, 1, 0) == 0) + { + await MoveServiceToNextLogFileAsync(); + _movingToNextLog = 0; + } + } + break; + + case CountReplayableRPCBatchByte: + var restOfBatchOffset = inputFlexBuffer.LengthLength + 1; + var memStream = new MemoryStream(inputFlexBuffer.Buffer, restOfBatchOffset, inputFlexBuffer.Length - restOfBatchOffset); + var numRPCs = memStream.ReadInt(); + var numReplayableRPCs = memStream.ReadInt(); + newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + numRPCs, inputRecord.LastProcessedReplayableID + numReplayableRPCs, _outputs, inputRecord); + inputFlexBuffer.ResetBuffer(); + memStream.Dispose(); + if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) + { + // Make sure only one input thread is moving to the next log file. Won't break the system if we don't do this, but could result in + // empty log files + if (Interlocked.CompareExchange(ref _movingToNextLog, 1, 0) == 0) + { + await MoveServiceToNextLogFileAsync(); + _movingToNextLog = 0; + } + } + break; + + case RPCBatchByte: + restOfBatchOffset = inputFlexBuffer.LengthLength + 1; + memStream = new MemoryStream(inputFlexBuffer.Buffer, restOfBatchOffset, inputFlexBuffer.Length - restOfBatchOffset); + numRPCs = memStream.ReadInt(); + newFileSize = await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + numRPCs, inputRecord.LastProcessedReplayableID + numRPCs, _outputs, inputRecord); + inputFlexBuffer.ResetBuffer(); + memStream.Dispose(); + if (_newLogTriggerSize > 0 && newFileSize >= _newLogTriggerSize) + { + // Make sure only one input thread is moving to the next log file. Won't break the system if we don't do this, but could result in + // empty log files + if (Interlocked.CompareExchange(ref _movingToNextLog, 1, 0) == 0) + { + await MoveServiceToNextLogFileAsync(); + _movingToNextLog = 0; + } + } + break; + + case PingByte: + // Write time into correct place in message + memStream = new MemoryStream(inputFlexBuffer.Buffer, inputFlexBuffer.Length - 4 * sizeof(long), sizeof(long)); + long time; + GetSystemTimePreciseAsFileTime(out time); + memStream.WriteLongFixed(time); + // Treat as RPC + await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + 1, inputRecord.LastProcessedReplayableID + 1, _outputs, inputRecord); + inputFlexBuffer.ResetBuffer(); + memStream.Dispose(); + break; + + case PingReturnByte: + // Write time into correct place in message + memStream = new MemoryStream(inputFlexBuffer.Buffer, inputFlexBuffer.Length - 1 * sizeof(long), sizeof(long)); + GetSystemTimePreciseAsFileTime(out time); + memStream.WriteLongFixed(time); + // Treat as RPC + await _committer.AddRow(inputFlexBuffer, inputName, inputRecord.LastProcessedID + 1, inputRecord.LastProcessedReplayableID + 1, _outputs, inputRecord); + inputFlexBuffer.ResetBuffer(); + memStream.Dispose(); + break; + + default: + // Bubble the exception up to CRA + throw new Exception("Illegal leading byte in input data message"); + } + } + + private ILogWriter OpenNextCheckpointFile() + { + if (_logWriterStatics.FileExists(CheckpointName(_lastCommittedCheckpoint + 1))) + { + _logWriterStatics.DeleteFile(CheckpointName(_lastCommittedCheckpoint + 1)); + } + ILogWriter retVal = null; + try + { + retVal = _logWriterStatics.Generate(CheckpointName(_lastCommittedCheckpoint + 1), 1024 * 1024, 6); + } + catch (Exception e) + { + OnError(0, "Error opening next checkpoint file" + e.ToString()); + } + return retVal; + } + + private void CleanupOldCheckpoint() + { + var fileNameToDelete = CheckpointName(_lastCommittedCheckpoint - 1); + if (_logWriterStatics.FileExists(fileNameToDelete)) + { + _logWriterStatics.DeleteFile(fileNameToDelete); + } + } + + // This method takes a checkpoint and bumps the counter. It DOES NOT quiesce anything + public async Task CheckpointAsync() + { + var oldCheckpointWriter = _checkpointWriter; + // Take lock on new checkpoint file + _checkpointWriter = OpenNextCheckpointFile(); + // Make sure the service is quiesced before continuing + CheckpointingService = true; + while (LastReceivedCheckpoint == null) { await Task.Yield(); } + // Now that the service has sent us its checkpoint, we need to quiesce the output connections, which may be sending + foreach (var outputRecord in _outputs) + { + outputRecord.Value.BufferedOutput.AcquireAppendLock(); + } + + CheckpointingService = false; + // Serialize committer + _committer.Serialize(_checkpointWriter); + // Serialize input connections + _inputs.AmbrosiaSerialize(_checkpointWriter); + // Serialize output connections + _outputs.AmbrosiaSerialize(_checkpointWriter); + foreach (var outputRecord in _outputs) + { + outputRecord.Value.BufferedOutput.ReleaseAppendLock(); + } + + // Serialize the service note that the local listener task is blocked after reading the checkpoint until the end of this method + _checkpointWriter.Write(LastReceivedCheckpoint.Buffer, 0, LastReceivedCheckpoint.Length); + _checkpointWriter.Write(_localServiceReceiveFromStream, _lastReceivedCheckpointSize); + _checkpointWriter.Flush(); + _lastCommittedCheckpoint++; + InsertOrReplaceServiceInfoRecord(InfoTitle("LastCommittedCheckpoint"), _lastCommittedCheckpoint.ToString()); + + // Trim output buffers of inputs, since the inputs are now part of the checkpoint and can't be lost. Must do this after the checkpoint has been + // successfully written + foreach (var kv in _inputs) + { + OutputConnectionRecord outputConnectionRecord; + if (!_outputs.TryGetValue(kv.Key, out outputConnectionRecord)) + { + outputConnectionRecord = new OutputConnectionRecord(this); + _outputs[kv.Key] = outputConnectionRecord; + } + // Must lock to atomically update due to race with ToControlStreamAsync + lock (outputConnectionRecord._remoteTrimLock) + { + outputConnectionRecord.RemoteTrim = Math.Max(kv.Value.LastProcessedID, outputConnectionRecord.RemoteTrim); + outputConnectionRecord.RemoteTrimReplayable = Math.Max(kv.Value.LastProcessedReplayableID, outputConnectionRecord.RemoteTrimReplayable); + } + if (outputConnectionRecord.ControlWorkQ.IsEmpty) + { + outputConnectionRecord.ControlWorkQ.Enqueue(-2); + } + } + + if (oldCheckpointWriter != null) + { + // Release lock on previous checkpoint file + oldCheckpointWriter.Dispose(); + } + + // Unblock the local input processing task + LastReceivedCheckpoint.ThrowAwayBuffer(); + LastReceivedCheckpoint = null; + } + + public AmbrosiaRuntime() : base() + { + } + + private void InitializeLogWriterStatics() + { + _logWriterStatics = LogWriterStaticPicker.curStatic; + } + + public override async Task InitializeAsync(object param) + { + InitializeLogWriterStatics(); + + // Workaround because of parameter type limitation in CRA + AmbrosiaRuntimeParams p = new AmbrosiaRuntimeParams(); + XmlSerializer xmlSerializer = new XmlSerializer(p.GetType()); + using (StringReader textReader = new StringReader((string)param)) + { + p = (AmbrosiaRuntimeParams)xmlSerializer.Deserialize(textReader); + } + + bool sharded = false; + + Initialize( + p.serviceReceiveFromPort, + p.serviceSendToPort, + p.serviceName, + p.serviceLogPath, + p.createService, + p.pauseAtStart, + p.persistLogs, + p.activeActive, + p.logTriggerSizeMB, + p.storageConnectionString, + p.currentVersion, + p.upgradeToVersion, + sharded + ); + return; + } + + internal void RuntimeChecksOnProcessStart() + { + if (!_createService) + { + long readVersion = -1; + try + { + readVersion = long.Parse(RetrieveServiceInfo(InfoTitle("CurrentVersion"))); + } + catch + { + OnError(VersionMismatch, "Version mismatch on process start: Expected " + _currentVersion + " was: " + RetrieveServiceInfo(InfoTitle("CurrentVersion"))); + } + if (_currentVersion != readVersion) + { + OnError(VersionMismatch, "Version mismatch on process start: Expected " + _currentVersion + " was: " + readVersion.ToString()); + } + if (!_runningRepro) + { + if (long.Parse(RetrieveServiceInfo(InfoTitle("LastCommittedCheckpoint"))) < 1) + { + OnError(MissingCheckpoint, "No checkpoint in metadata"); + + } + } + if (!_logWriterStatics.DirectoryExists(LogDirectory(_currentVersion))) + { + OnError(MissingCheckpoint, "No checkpoint/logs directory"); + } + var lastCommittedCheckpoint = long.Parse(RetrieveServiceInfo(InfoTitle("LastCommittedCheckpoint"))); + if (!_logWriterStatics.FileExists(CheckpointName(lastCommittedCheckpoint))) + { + OnError(MissingCheckpoint, "Missing checkpoint " + lastCommittedCheckpoint.ToString()); + } + if (!_logWriterStatics.FileExists(LogFileName(lastCommittedCheckpoint))) + { + OnError(MissingLog, "Missing log " + lastCommittedCheckpoint.ToString()); + } + } + } + + public void Initialize(int serviceReceiveFromPort, + int serviceSendToPort, + string serviceName, + string serviceLogPath, + bool? createService, + bool pauseAtStart, + bool persistLogs, + bool activeActive, + long logTriggerSizeMB, + string storageConnectionString, + long currentVersion, + long upgradeToVersion, + bool sharded + ) + { + if (LogReaderStaticPicker.curStatic == null || LogWriterStaticPicker.curStatic == null) + { + OnError(UnexpectedError, "Must specify log storage type"); + } + _runningRepro = false; + _currentVersion = currentVersion; + _upgradeToVersion = upgradeToVersion; + _upgrading = (_currentVersion < _upgradeToVersion); + if (pauseAtStart == true) + { + Console.WriteLine("Hit Enter to continue:"); + Console.ReadLine(); + } + else + { + Trace.TraceInformation("Ready ..."); + } + _persistLogs = persistLogs; + _activeActive = activeActive; + if (StartupParamOverrides.LogTriggerSizeMB != -1) + { + _newLogTriggerSize = StartupParamOverrides.LogTriggerSizeMB * 1048576; + } + else + { + _newLogTriggerSize = logTriggerSizeMB * 1048576; + } + if (StartupParamOverrides.ICLogLocation == null) + { + _serviceLogPath = serviceLogPath; + } + else + { + _serviceLogPath = StartupParamOverrides.ICLogLocation; + } + if (StartupParamOverrides.receivePort == -1) + { + _localServiceReceiveFromPort = serviceReceiveFromPort; + } + else + { + _localServiceReceiveFromPort = StartupParamOverrides.receivePort; + } + if (StartupParamOverrides.sendPort == -1) + { + _localServiceSendToPort = serviceSendToPort; + } + else + { + _localServiceSendToPort = StartupParamOverrides.sendPort; + } + _serviceName = serviceName; + _storageConnectionString = storageConnectionString; + _sharded = sharded; + _coral = ClientLibrary; + + Trace.TraceInformation("Logs directory: {0}", _serviceLogPath); + + if (createService == null) + { + if (_logWriterStatics.DirectoryExists(RootDirectory())) + { + createService = false; + } + else + { + createService = true; + } + } + AddAsyncInputEndpoint(AmbrosiaDataInputsName, new AmbrosiaInput(this, "data")); + AddAsyncInputEndpoint(AmbrosiaControlInputsName, new AmbrosiaInput(this, "control")); + AddAsyncOutputEndpoint(AmbrosiaDataOutputsName, new AmbrosiaOutput(this, "data")); + AddAsyncOutputEndpoint(AmbrosiaControlOutputsName, new AmbrosiaOutput(this, "control")); + _createService = createService.Value; + RecoverOrStartAsync().Wait(); + } + + public void InitializeRepro(string serviceName, + string serviceLogPath, + long checkpointToLoad, + int version, + bool testUpgrade, + int serviceReceiveFromPort = 0, + int serviceSendToPort = 0) + { + _localServiceReceiveFromPort = serviceReceiveFromPort; + _localServiceSendToPort = serviceSendToPort; + _currentVersion = version; + _runningRepro = true; + _persistLogs = false; + _activeActive = true; + _serviceLogPath = serviceLogPath; + _serviceName = serviceName; + _sharded = false; + _createService = false; + InitializeLogWriterStatics(); + RecoverOrStartAsync(checkpointToLoad, testUpgrade).Wait(); + } + } +} \ No newline at end of file diff --git a/InternalImmortals/PerformanceTest/Server/Properties/AssemblyInfo.cs b/AmbrosiaLib/Ambrosia/Properties/AssemblyInfo.cs similarity index 88% rename from InternalImmortals/PerformanceTest/Server/Properties/AssemblyInfo.cs rename to AmbrosiaLib/Ambrosia/Properties/AssemblyInfo.cs index 8d9b0722..58364f4d 100644 --- a/InternalImmortals/PerformanceTest/Server/Properties/AssemblyInfo.cs +++ b/AmbrosiaLib/Ambrosia/Properties/AssemblyInfo.cs @@ -5,11 +5,11 @@ // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. -//[assembly: AssemblyTitle("Server")] +//[assembly: AssemblyTitle("LocalAmbrosiaRuntime")] [assembly: AssemblyDescription("")] //[assembly: AssemblyConfiguration("")] //[assembly: AssemblyCompany("")] -//[assembly: AssemblyProduct("Server")] +//[assembly: AssemblyProduct("LocalAmbrosiaRuntime")] [assembly: AssemblyCopyright("Copyright © 2017")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] @@ -20,7 +20,7 @@ [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("8946dffa-c800-4207-9166-6ec0e7e7150a")] +[assembly: Guid("edcf146a-65fe-43dd-913d-283a96dbac47")] // Version information for an assembly consists of the following four values: // diff --git a/AmbrosiaTest/AmbrosiaTest.sln b/AmbrosiaTest/AmbrosiaTest.sln index 6833c53c..1073cf0e 100644 --- a/AmbrosiaTest/AmbrosiaTest.sln +++ b/AmbrosiaTest/AmbrosiaTest.sln @@ -1,10 +1,12 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.27130.2026 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.30621.155 MinimumVisualStudioVersion = 10.0.40219.1 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AmbrosiaTest", "AmbrosiaTest\AmbrosiaTest.csproj", "{F9AA4F89-945C-4118-99CF-FDC7AA142601}" EndProject +Project("{9092AA53-FB77-4645-B42D-1CCCA6BD08BD}") = "JSCodeGen", "JSCodeGen\JSCodeGen.njsproj", "{61917A12-2BE6-4465-BB76-B467295B972D}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -21,6 +23,14 @@ Global {F9AA4F89-945C-4118-99CF-FDC7AA142601}.Release|Any CPU.Build.0 = Release|Any CPU {F9AA4F89-945C-4118-99CF-FDC7AA142601}.Release|x64.ActiveCfg = Release|x64 {F9AA4F89-945C-4118-99CF-FDC7AA142601}.Release|x64.Build.0 = Release|x64 + {61917A12-2BE6-4465-BB76-B467295B972D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Debug|x64.ActiveCfg = Debug|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Debug|x64.Build.0 = Debug|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Release|Any CPU.Build.0 = Release|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Release|x64.ActiveCfg = Release|Any CPU + {61917A12-2BE6-4465-BB76-B467295B972D}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/AmbrosiaTest/AmbrosiaTest/AMB_UnitTest.cs b/AmbrosiaTest/AmbrosiaTest/AMB_UnitTest.cs index d952fbaf..a62be321 100644 --- a/AmbrosiaTest/AmbrosiaTest/AMB_UnitTest.cs +++ b/AmbrosiaTest/AmbrosiaTest/AMB_UnitTest.cs @@ -79,9 +79,10 @@ public void UnitTest_BasicEndtoEnd_Test() string serverName = testName + "server"; string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; string byteSize = "1073741824"; - + Utilities MyUtils = new Utilities(); + //AMB1 - Job string logOutputFileName_AMB1 = testName + "_AMB1.log"; AMB_Settings AMB1 = new AMB_Settings @@ -126,7 +127,7 @@ public void UnitTest_BasicEndtoEnd_Test() //Client Job Call string logOutputFileName_ClientJob = testName + "_ClientJob.log"; - int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob); + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeSecondProc); // Give it a few seconds to start Thread.Sleep(2000); @@ -145,9 +146,13 @@ public void UnitTest_BasicEndtoEnd_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -249,9 +254,13 @@ public void UnitTest_BasicRestartEndtoEnd_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2_Restarted); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -416,7 +425,7 @@ public void UnitTest_BasicActiveActive_KillPrimary_Test() pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 5, false, testName, true); // Also verify ImmCoord has the string to show it is primary - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true,false); // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(serverProcessID2); @@ -439,6 +448,175 @@ public void UnitTest_BasicActiveActive_KillPrimary_Test() MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); } + //** Basic end to end test for the InProc TCP feature with minimal rounds and message size of 1GB ... could make it smaller and it would be faster. + [TestMethod] + public void UnitTest_BasicInProcTCPEndtoEnd_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "unittestinproctcp"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob,MyUtils.deployModeInProcManual,"1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0, MyUtils.deployModeInProcManual,"2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + + // Verify Client - NetCore CLR bug causes extra info in the output for this so do not check for core run + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Basic end to end test for the InProc TCP feature with minimal rounds and message size of 1GB ... could make it smaller and it would be faster. + [TestMethod] + public void UnitTest_BasicInProcPipeEndtoEnd_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "unittestinprocpipe"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0, MyUtils.deployModeInProc, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + [TestCleanup()] public void Cleanup() diff --git a/AmbrosiaTest/AmbrosiaTest/ActiveActive_Test.cs b/AmbrosiaTest/AmbrosiaTest/ActiveActive_Test.cs index 960203cb..ede5adc2 100644 --- a/AmbrosiaTest/AmbrosiaTest/ActiveActive_Test.cs +++ b/AmbrosiaTest/AmbrosiaTest/ActiveActive_Test.cs @@ -166,13 +166,13 @@ public void AMB_ActiveActive_KillPrimary_Test() int serverProcessID_Restarted1 = MyUtils.StartPerfServer("1001", "1000", clientJobName, serverName, logOutputFileName_Server1_Restarted, 1, false); //Delay until finished ... looking at the most recent primary (server3) but also verify others hit done too - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3, byteSize, 30, false, testName, true); // Total Bytes received needs to be accurate + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3, byteSize, 90, false, testName, true); // Total Bytes received needs to be accurate pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 15, false, testName, true); // Also verify ImmCoord has the string to show it is primary - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true, false); // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(serverProcessID2); @@ -212,7 +212,7 @@ public void AMB_ActiveActive_KillCheckPointer_Test() string clientJobName = testName + "clientjob"; string serverName = testName + "server"; string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; - string byteSize = "13958643712"; + string byteSize = "5368709120"; Utilities MyUtils = new Utilities(); @@ -304,7 +304,7 @@ public void AMB_ActiveActive_KillCheckPointer_Test() //start Client Job first ... to mix it up a bit (other tests has client start after server) string logOutputFileName_ClientJob = testName + "_ClientJob.log"; - int clientJobProcessID = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob); + int clientJobProcessID = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "65536", "5", logOutputFileName_ClientJob); //Server Call - primary string logOutputFileName_Server1 = testName + "_Server1.log"; @@ -380,7 +380,7 @@ public void AMB_ActiveActive_KillSecondary_Test() string clientJobName = testName + "clientjob"; string serverName = testName + "server"; string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; - string byteSize = "13958643712"; + string byteSize = "6442450944"; Utilities MyUtils = new Utilities(); @@ -472,7 +472,7 @@ public void AMB_ActiveActive_KillSecondary_Test() //start Client Job first ... to mix it up a bit (other tests has client start after server) string logOutputFileName_ClientJob = testName + "_ClientJob.log"; - int clientJobProcessID = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob); + int clientJobProcessID = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "65536", "6", logOutputFileName_ClientJob); //Server Call - primary string logOutputFileName_Server1 = testName + "_Server1.log"; @@ -924,10 +924,10 @@ public void AMB_ActiveActive_Kill_Client_And_Server_Test() int clientJobProcessID_Restarted1 = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob1_Restarted); //Delay until finished ... looking at the primary (server1) but also verify others hit done too - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 30, false, testName, true); // Total Bytes received needs to be accurate + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 40, false, testName, true); // Total Bytes received needs to be accurate pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3, byteSize, 15, false, testName, true); - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1_Restarted, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1_Restarted, byteSize, 20, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob2, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob3, byteSize, 15, false, testName, true); @@ -954,8 +954,8 @@ public void AMB_ActiveActive_Kill_Client_And_Server_Test() MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server3); // Also verify ImmCoord has the string to show it is primary for both server and client - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true); - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord6, newPrimary, 5, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord6, newPrimary, 5, false, testName, true,false); // Verify integrity of Ambrosia logs by replaying MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); @@ -1185,7 +1185,7 @@ public void AMB_ActiveActive_Kill_All_Test() int clientJobProcessID_Restarted3 = MyUtils.StartPerfClientJob("6001", "6000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob3_Restarted); //Delay until finished ... looking at the primary (server1) but also verify others hit done too - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 45, false, testName, true); // Total Bytes received needs to be accurate + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 75, false, testName, true); // Total Bytes received needs to be accurate pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2_Restarted, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3_Restarted, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1_Restarted, byteSize, 15, false, testName, true); @@ -1211,21 +1211,23 @@ public void AMB_ActiveActive_Kill_All_Test() // really reliable. As long as they get through whole thing, that is what counts. // Verify ImmCoord has the string to show it is primary for both server and client - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord2_Restarted, newPrimary, 5, false, testName, true); - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord5_Restarted, newPrimary, 5, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord2_Restarted, newPrimary, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord5_Restarted, newPrimary, 5, false, testName, true,false); // Verify integrity of Ambrosia logs by replaying MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); } //**************************** - // The basic test of Active Active where kill primary server + // The test where add node to the active active before killing primary // 1 client - // 3 servers - primary, checkpointing secondary and active secondary (can become primary) + // 3 servers - primary, checkpointing secondary and active secondary + // + // Then add a 4th server which is an active secondary to the active secondary + // Kill Primary which makes active secondary the primary and 4th the secondary + // Kill the new primary (which was originally the secondary) + // Now Server4 becomes the primary // - // killing first server (primary) will then have active secondary become primary - // restarting first server will make it the active secondary - // //**************************** [TestMethod] public void AMB_ActiveActive_AddNodeBeforeKillPrimary_Test() @@ -1370,7 +1372,7 @@ public void AMB_ActiveActive_AddNodeBeforeKillPrimary_Test() int serverProcessID4 = MyUtils.StartPerfServer("4001", "4000", clientJobName, serverName, logOutputFileName_Server4, 1, false); // Give it 10 seconds to do something before killing it - Thread.Sleep(15000); + Thread.Sleep(10000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. //Kill Primary Server (server1) at this point as well as ImmCoord1 @@ -1378,7 +1380,7 @@ public void AMB_ActiveActive_AddNodeBeforeKillPrimary_Test() MyUtils.KillProcess(ImmCoordProcessID1); // at this point, server3 (active secondary) becomes primary and server4 becomes active secondary - Thread.Sleep(15000); + Thread.Sleep(10000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. //Kill new Primary Server (server3) at this point as well as ImmCoord3 @@ -1390,7 +1392,7 @@ public void AMB_ActiveActive_AddNodeBeforeKillPrimary_Test() // but when server3 (new primary) died, server4 became new primary Thread.Sleep(2000); - // Do nothing with Server1 and server3 let them stay dead + // Do nothing with Server1 and server3 as they were killed as part of the process //Delay until finished ... looking at the most recent primary (server4) but also verify others hit done too bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server4, byteSize, 30, false, testName, true); // Total Bytes received needs to be accurate @@ -1400,8 +1402,8 @@ public void AMB_ActiveActive_AddNodeBeforeKillPrimary_Test() // Also verify ImmCoord has the string to show server3 was primary then server4 became primary //*** Note - can't verify which one will be primary because both Server3 and Server4 are secondary //** They both are trying to take over primary if it dies. No way of knowing which one is. - //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 1, false, testName, true); - //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord4, newPrimary, 1, false, testName, true); + //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 1, false, testName, true,false); + //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord4, newPrimary, 1, false, testName, true,false); // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(serverProcessID2); diff --git a/AmbrosiaTest/AmbrosiaTest/AmbrosiaTest.csproj b/AmbrosiaTest/AmbrosiaTest/AmbrosiaTest.csproj index a9d5867f..77339ba3 100644 --- a/AmbrosiaTest/AmbrosiaTest/AmbrosiaTest.csproj +++ b/AmbrosiaTest/AmbrosiaTest/AmbrosiaTest.csproj @@ -76,15 +76,21 @@ + + + + + + Designer - + Always @@ -106,19 +112,19 @@ - 15.9.0 + 16.6.1 - 15.9.0 + 16.6.1 - 15.9.0 + 16.6.1 - 1.4.0 + 2.1.2 - 1.4.0 + 2.1.2 diff --git a/AmbrosiaTest/AmbrosiaTest/AsyncTests.cs b/AmbrosiaTest/AmbrosiaTest/AsyncTests.cs index 8e580c14..997a0d54 100644 --- a/AmbrosiaTest/AmbrosiaTest/AsyncTests.cs +++ b/AmbrosiaTest/AmbrosiaTest/AsyncTests.cs @@ -25,8 +25,13 @@ public void Initialize() } //************* Init Code ***************** + + +/* **** All Async feature removed and being reworked at some point ... tests probably invalid but only comment out + + + //** Basic end to end test starts job and server and runs a bunch of bytes through - //** Only a few rounds and part of [TestMethod] public void AMB_Async_Basic_Test() { @@ -35,15 +40,11 @@ public void AMB_Async_Basic_Test() string clientJobName = testName + "clientjob"; string serverName = testName + "server"; string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; - string byteSize = "3221225472"; + string byteSize = "2147483648"; Utilities MyUtils = new Utilities(); - //#*#*# Remove ... - MyUtils.AsyncTestCleanup(); - //#*#*# - - //AMB1 - Job + //AMB1 - Job string logOutputFileName_AMB1 = testName + "_AMB1.log"; AMB_Settings AMB1 = new AMB_Settings { @@ -87,15 +88,15 @@ public void AMB_Async_Basic_Test() //Client Job Call string logOutputFileName_ClientJob = testName + "_ClientJob.log"; - int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, logOutputFileName_ClientJob); + int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2",logOutputFileName_ClientJob); //Server Call string logOutputFileName_Server = testName + "_Server.log"; int serverProcessID = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server); //Delay until client is done - also check Server just to make sure - // bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed - // pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 45, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 10, false, testName, true); // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(clientJobProcessID); @@ -104,25 +105,760 @@ public void AMB_Async_Basic_Test() MyUtils.KillProcess(ImmCoordProcessID2); //Verify AMB -// MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - // MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); // Verify Client - // MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version,"",true); + } + + //** The replay / recovery of this basic test uses the latest log file instead of the first + [TestMethod] + public void AMB_Async_ReplayLatest_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "asyncreplaylatest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "2147483648"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 45, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 10, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + MyUtils.KillProcess(ImmCoordProcessID2); + + // No need to verify cmp files as the test is basically same as basic test + + // Verify integrity of Ambrosia logs by replaying from the Latest one + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true,false, AMB1.AMB_Version, "", true); + } + + //** Test starts job and server then kills the job and restarts it and runs to completion + //** NOTE - this actually kills job once, restarts it, kills again and then restarts it again + [TestMethod] + public void AMB_Async_KillJob_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "asynckilljobtest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "2147483648"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point as well as ImmCoord1 + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + + //Restart ImmCoord1 + string logOutputFileName_ImmCoord1_Restarted = testName + "_ImmCoord1_Restarted.log"; + int ImmCoordProcessID1_Restarted = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1_Restarted); + + // Restart Job Process + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2", logOutputFileName_ClientJob_Restarted); + + // Give it 5 seconds to do something before killing it again + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point as well as ImmCoord1 + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + + //Restart ImmCoord1 Again + string logOutputFileName_ImmCoord1_Restarted_Again = testName + "_ImmCoord1_Restarted_Again.log"; + int ImmCoordProcessID1_Restarted_Again = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1_Restarted_Again); + + // Restart Job Process Again + string logOutputFileName_ClientJob_Restarted_Again = testName + "_ClientJob_Restarted_Again.log"; + int clientJobProcessID_Restarted_Again = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2", logOutputFileName_ClientJob_Restarted_Again); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted_Again, byteSize, 45, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted_Again); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted_Again); + MyUtils.KillProcess(ImmCoordProcessID2); + + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted_Again); // Verify Server - // MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Give it a few seconds to make sure everything is started fine + Thread.Sleep(3000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "", true); + } + + + [TestMethod] + public void AMB_Async_KillServer_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "asynckillservertest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "2147483648"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", // NOTE: if put this to "Y" then when kill it, it will become a checkpointer which never becomes primary + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "2", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server); + + // Give it 10 seconds to do something before killing it + Thread.Sleep(10000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill Server at this point as well as ImmCoord2 + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID2); + + //Restart ImmCoord2 + string logOutputFileName_ImmCoord2_Restarted = testName + "_ImmCoord2_Restarted.log"; + int ImmCoordProcessID2_Restarted = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2_Restarted); + + // Restart Server Process + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server_Restarted); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 35, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_Restarted); + MyUtils.KillProcess(ImmCoordProcessID1); + MyUtils.KillProcess(ImmCoordProcessID2_Restarted); + + // Verify Server (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "", true); + } + + //**************************** + // The basic test of Active Active where kill ASYNC primary server + // 1 client + // 3 servers - primary, checkpointing secondary and active secondary (can become primary) + // + // killing first server (primary) will then have active secondary become primary + // restarting first server will make it the active secondary + // + //**************************** + [TestMethod] + public void AMB_Async_ActiveActive_BasicTest() + { + string testName = "asyncactiveactivebasic"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "2147483648"; + string newPrimary = "NOW I'm Primary"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - primary + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + + //AMB2 - check pointer + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ReplicaNumber = "1", + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.AddReplica); + + //AMB3 - active secondary + string logOutputFileName_AMB3 = testName + "_AMB3.log"; + AMB_Settings AMB3 = new AMB_Settings + { + AMB_ReplicaNumber = "2", + AMB_ServiceName = serverName, + AMB_PortAppReceives = "3000", + AMB_PortAMBSends = "3001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB3, logOutputFileName_AMB3, AMB_ModeConsts.AddReplica); + + //AMB4 - Job + string logOutputFileName_AMB4 = testName + "_AMB4.log"; + AMB_Settings AMB4 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "4000", + AMB_PortAMBSends = "4001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB4, logOutputFileName_AMB4, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(serverName, 1500, logOutputFileName_ImmCoord1, true, 0); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, true, 1); + + //ImmCoord3 + string logOutputFileName_ImmCoord3 = testName + "_ImmCoord3.log"; + int ImmCoordProcessID3 = MyUtils.StartImmCoord(serverName, 3500, logOutputFileName_ImmCoord3, true, 2); + + //ImmCoord4 + string logOutputFileName_ImmCoord4 = testName + "_ImmCoord4.log"; + int ImmCoordProcessID4 = MyUtils.StartImmCoord(clientJobName, 4500, logOutputFileName_ImmCoord4); + + //Server Call - primary + string logOutputFileName_Server1 = testName + "_Server1.log"; + int serverProcessID1 = MyUtils.StartAsyncPerfServer("1001", "1000", serverName, logOutputFileName_Server1); + Thread.Sleep(1000); // give a second to make it a primary + + //Server Call - checkpointer + string logOutputFileName_Server2 = testName + "_Server2.log"; + int serverProcessID2 = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server2); + Thread.Sleep(1000); // give a second + + //Server Call - active secondary + string logOutputFileName_Server3 = testName + "_Server3.log"; + int serverProcessID3 = MyUtils.StartAsyncPerfServer("3001", "3000", serverName, logOutputFileName_Server3); + + //start Client Job + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartAsyncPerfClientJob("4001", "4000", clientJobName, serverName, "2", logOutputFileName_ClientJob); + + // Give it 10 seconds to do something before killing it + Thread.Sleep(10000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill Primary Server (server1) at this point as well as ImmCoord1 + MyUtils.KillProcess(serverProcessID1); + MyUtils.KillProcess(ImmCoordProcessID1); + + // at this point, server3 (active secondary) becomes primary + Thread.Sleep(1000); + + //Restart server1 (ImmCoord1 and server) ... this will become active secondary now + string logOutputFileName_ImmCoord1_Restarted = testName + "_ImmCoord1_Restarted.log"; + int ImmCoordProcessID1_Restarted = MyUtils.StartImmCoord(serverName, 1500, logOutputFileName_ImmCoord1_Restarted, true, 0); + string logOutputFileName_Server1_Restarted = testName + "_Server1_Restarted.log"; + int serverProcessID_Restarted1 = MyUtils.StartAsyncPerfServer("1001", "1000", serverName, logOutputFileName_Server1_Restarted); + + //Delay until finished ... looking at the most recent primary (server3) but also verify others hit done too + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3, byteSize, 55, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 15, false, testName, true); + + // Also verify ImmCoord has the string to show it is primary + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, newPrimary, 5, false, testName, true,false); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(serverProcessID2); + MyUtils.KillProcess(serverProcessID_Restarted1); + MyUtils.KillProcess(serverProcessID3); // primary + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(ImmCoordProcessID2); + MyUtils.KillProcess(ImmCoordProcessID3); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + MyUtils.KillProcess(ImmCoordProcessID4); + + // Verify cmp files for client and 3 servers + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server1_Restarted); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server2); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server3); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "", true); + } + + + + //**************************** + // Most complex test of Active Active for client and server - Async version of it + // 3 clients - primary, checkpointing secondary and active secondary + // 3 servers - primary, checkpointing secondary and active secondary + // + // Kill all aspects of the system and restart + // + //**************************** + [TestMethod] + public void AMB_Async_ActiveActive_KillAllTest() + { + string testName = "asyncactiveactivekillall"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + string newPrimary = "NOW I'm Primary"; + + // If failures in queue, set a flag to not run tests or clean up - helps debug tests that failed by keeping in proper state + Utilities MyUtils = new Utilities(); + + //AMB1 - primary server + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 - check pointer server + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_ReplicaNumber = "1", + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.AddReplica); + + //AMB3 - active secondary server + string logOutputFileName_AMB3 = testName + "_AMB3.log"; + AMB_Settings AMB3 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_ReplicaNumber = "2", + AMB_PortAppReceives = "3000", + AMB_PortAMBSends = "3001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB3, logOutputFileName_AMB3, AMB_ModeConsts.AddReplica); + + //AMB4 - Job primary + string logOutputFileName_AMB4 = testName + "_AMB4.log"; + AMB_Settings AMB4 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "4000", + AMB_PortAMBSends = "4001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB4, logOutputFileName_AMB4, AMB_ModeConsts.RegisterInstance); + + //AMB5 - Job checkpoint + string logOutputFileName_AMB5 = testName + "_AMB5.log"; + AMB_Settings AMB5 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_ReplicaNumber = "1", + AMB_PortAppReceives = "5000", + AMB_PortAMBSends = "5001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB5, logOutputFileName_AMB5, AMB_ModeConsts.AddReplica); + + //AMB6 - Job secondary + string logOutputFileName_AMB6 = testName + "_AMB6.log"; + AMB_Settings AMB6 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_ReplicaNumber = "2", + AMB_PortAppReceives = "6000", + AMB_PortAMBSends = "6001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB6, logOutputFileName_AMB6, AMB_ModeConsts.AddReplica); + + //Server 1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(serverName, 1500, logOutputFileName_ImmCoord1, true, 0); + Thread.Sleep(1000); + string logOutputFileName_Server1 = testName + "_Server1.log"; + int serverProcessID1 = MyUtils.StartAsyncPerfServer("1001", "1000", serverName, logOutputFileName_Server1); + + //Server 2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, true, 1); + Thread.Sleep(1000); // give a second + string logOutputFileName_Server2 = testName + "_Server2.log"; + int serverProcessID2 = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server2); + + //Server 3 + string logOutputFileName_ImmCoord3 = testName + "_ImmCoord3.log"; + int ImmCoordProcessID3 = MyUtils.StartImmCoord(serverName, 3500, logOutputFileName_ImmCoord3, true, 2); + string logOutputFileName_Server3 = testName + "_Server3.log"; + int serverProcessID3 = MyUtils.StartAsyncPerfServer("3001", "3000", serverName, logOutputFileName_Server3); + + //Client 1 + string logOutputFileName_ImmCoord4 = testName + "_ImmCoord4.log"; + int ImmCoordProcessID4 = MyUtils.StartImmCoord(clientJobName, 4500, logOutputFileName_ImmCoord4, true, 0); + Thread.Sleep(1000); // give a second + string logOutputFileName_ClientJob1 = testName + "_ClientJob1.log"; + int clientJobProcessID1 = MyUtils.StartAsyncPerfClientJob("4001", "4000", clientJobName, serverName, "1", logOutputFileName_ClientJob1); + + //Client 2 + string logOutputFileName_ImmCoord5 = testName + "_ImmCoord5.log"; + int ImmCoordProcessID5 = MyUtils.StartImmCoord(clientJobName, 5500, logOutputFileName_ImmCoord5, true, 1); + Thread.Sleep(1000); // give a second + string logOutputFileName_ClientJob2 = testName + "_ClientJob2.log"; + int clientJobProcessID2 = MyUtils.StartAsyncPerfClientJob("5001", "5000", clientJobName, serverName, "1", logOutputFileName_ClientJob2); + + //Client 3 + string logOutputFileName_ImmCoord6 = testName + "_ImmCoord6.log"; + int ImmCoordProcessID6 = MyUtils.StartImmCoord(clientJobName, 6500, logOutputFileName_ImmCoord6, true, 2); + Thread.Sleep(1000); // give a second + string logOutputFileName_ClientJob3 = testName + "_ClientJob3.log"; + int clientJobProcessID3 = MyUtils.StartAsyncPerfClientJob("6001", "6000", clientJobName, serverName, "1", logOutputFileName_ClientJob3); + + // Give it 10 seconds to do something before killing it + Thread.Sleep(10000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill all aspects - kill primary of each last + MyUtils.KillProcess(serverProcessID2); + MyUtils.KillProcess(ImmCoordProcessID2); + + MyUtils.KillProcess(serverProcessID3); + MyUtils.KillProcess(ImmCoordProcessID3); + + MyUtils.KillProcess(serverProcessID1); + MyUtils.KillProcess(ImmCoordProcessID1); + + MyUtils.KillProcess(clientJobProcessID2); + MyUtils.KillProcess(ImmCoordProcessID5); + + MyUtils.KillProcess(clientJobProcessID3); + MyUtils.KillProcess(ImmCoordProcessID6); + + MyUtils.KillProcess(clientJobProcessID1); + MyUtils.KillProcess(ImmCoordProcessID4); + + // at this point, the system is dead - restart + Thread.Sleep(5000); + + //Restart servers + string logOutputFileName_ImmCoord1_Restarted = testName + "_ImmCoord1_Restarted.log"; + int ImmCoordProcessID1_Restarted = MyUtils.StartImmCoord(serverName, 1500, logOutputFileName_ImmCoord1_Restarted, true, 0); + string logOutputFileName_Server1_Restarted = testName + "_Server1_Restarted.log"; + int serverProcessID_Restarted1 = MyUtils.StartAsyncPerfServer("1001", "1000", serverName, logOutputFileName_Server1_Restarted); + string logOutputFileName_ImmCoord2_Restarted = testName + "_ImmCoord2_Restarted.log"; + int ImmCoordProcessID2_Restarted = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2_Restarted, true, 1); + string logOutputFileName_Server2_Restarted = testName + "_Server2_Restarted.log"; + int serverProcessID_Restarted2 = MyUtils.StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server2_Restarted); + string logOutputFileName_ImmCoord3_Restarted = testName + "_ImmCoord3_Restarted.log"; + int ImmCoordProcessID3_Restarted = MyUtils.StartImmCoord(serverName, 3500, logOutputFileName_ImmCoord3_Restarted, true, 2); + string logOutputFileName_Server3_Restarted = testName + "_Server3_Restarted.log"; + int serverProcessID_Restarted3 = MyUtils.StartAsyncPerfServer("3001", "3000", serverName, logOutputFileName_Server3_Restarted); + + //Restart clients + string logOutputFileName_ImmCoord4_Restarted = testName + "_ImmCoord4_Restarted.log"; + int ImmCoordProcessID4_Restarted = MyUtils.StartImmCoord(clientJobName, 4500, logOutputFileName_ImmCoord4_Restarted, true, 0); + string logOutputFileName_ClientJob1_Restarted = testName + "_ClientJob1_Restarted.log"; + int clientJobProcessID_Restarted1 = MyUtils.StartAsyncPerfClientJob("4001", "4000", clientJobName, serverName, "1", logOutputFileName_ClientJob1_Restarted); + + string logOutputFileName_ImmCoord5_Restarted = testName + "_ImmCoord5_Restarted.log"; + int ImmCoordProcessID5_Restarted = MyUtils.StartImmCoord(clientJobName, 5500, logOutputFileName_ImmCoord5_Restarted, true, 1); + string logOutputFileName_ClientJob2_Restarted = testName + "_ClientJob2_Restarted.log"; + int clientJobProcessID_Restarted2 = MyUtils.StartAsyncPerfClientJob("5001", "5000", clientJobName, serverName, "1", logOutputFileName_ClientJob2_Restarted); + string logOutputFileName_ImmCoord6_Restarted = testName + "_ImmCoord6_Restarted.log"; + int ImmCoordProcessID6_Restarted = MyUtils.StartImmCoord(clientJobName, 6500, logOutputFileName_ImmCoord6_Restarted, true, 2); + string logOutputFileName_ClientJob3_Restarted = testName + "_ClientJob3_Restarted.log"; + int clientJobProcessID_Restarted3 = MyUtils.StartAsyncPerfClientJob("6001", "6000", clientJobName, serverName, "1", logOutputFileName_ClientJob3_Restarted); + + //Delay until finished ... looking at the primary (server1) but also verify others hit done too + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_Restarted, byteSize, 45, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2_Restarted, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server3_Restarted, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1_Restarted, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob2_Restarted, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob3_Restarted, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(serverProcessID_Restarted1); + MyUtils.KillProcess(serverProcessID_Restarted2); + MyUtils.KillProcess(serverProcessID_Restarted3); + MyUtils.KillProcess(clientJobProcessID_Restarted1); + MyUtils.KillProcess(clientJobProcessID_Restarted2); + MyUtils.KillProcess(clientJobProcessID_Restarted3); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + MyUtils.KillProcess(ImmCoordProcessID2_Restarted); + MyUtils.KillProcess(ImmCoordProcessID3_Restarted); + MyUtils.KillProcess(ImmCoordProcessID4_Restarted); + MyUtils.KillProcess(ImmCoordProcessID5_Restarted); + MyUtils.KillProcess(ImmCoordProcessID6_Restarted); + + // Verify cmp files for client and 3 servers + // the timing is a bit off when have so many processes so cmp files not + // really reliable. As long as they get through whole thing, that is what counts. + + // Verify ImmCoord has the string to show it is primary for both server and client + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord2_Restarted, newPrimary, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord5_Restarted, newPrimary, 5, false, testName, true,false); // Verify integrity of Ambrosia logs by replaying - // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "", true); } +*/ [TestCleanup()] public void Cleanup() { // Kill all ImmortalCoordinators, Job and Server exes - Utilities MyUtils = new Utilities(); - MyUtils.AsyncTestCleanup(); + // Utilities MyUtils = new Utilities(); +// MyUtils.AsyncTestCleanup(); } diff --git a/AmbrosiaTest/AmbrosiaTest/BasicEXECalls_Test.cs b/AmbrosiaTest/AmbrosiaTest/BasicEXECalls_Test.cs index 217902c6..53c5f49e 100644 --- a/AmbrosiaTest/AmbrosiaTest/BasicEXECalls_Test.cs +++ b/AmbrosiaTest/AmbrosiaTest/BasicEXECalls_Test.cs @@ -20,7 +20,153 @@ public void Initialize() } //************* Init Code ***************** - //**** Add tests to check EXE error handling?? + //**** Show Ambrosia Help + [TestMethod] + public void Help_ShowHelp_Ambrosia_Test() + { + // Don't need to check for framework as proper file is in AmbrosiaTest ... bin directory + string testName = "showhelpambrosia"; + string fileName = "Ambrosia"; + GenericVerifyHelp(testName, fileName, ""); + } + + //**** Show Immortal Coord Help + [TestMethod] + public void Help_ShowHelp_ImmCoord_Test() + { + // Don't need to check for framework as proper file is in AmbrosiaTest ... bin directory + string testName = "showhelpimmcoord"; + string fileName = "ImmortalCoordinator"; + GenericVerifyHelp(testName, fileName, ""); + } + + //**** Show PTI Job Help + [TestMethod] + public void Help_ShowHelp_PTIJob_Test() + { + + Utilities MyUtils = new Utilities(); + + // add proper framework + string current_framework; + if (MyUtils.NetFrameworkTestRun) + current_framework = MyUtils.NetFramework; + else + current_framework = MyUtils.NetCoreFramework; + + string testName = "showhelpptijob"; + string fileName = "job"; + string workingDir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + current_framework; + GenericVerifyHelp(testName, fileName, workingDir); + } + + //**** Show PTI Server Help + [TestMethod] + public void Help_ShowHelp_PTIServer_Test() + { + + Utilities MyUtils = new Utilities(); + + // add proper framework + string current_framework; + if (MyUtils.NetFrameworkTestRun) + current_framework = MyUtils.NetFramework; + else + current_framework = MyUtils.NetCoreFramework; + + string testName = "showhelpptiserver"; + string fileName = "server"; + string workingDir = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"] + current_framework; + GenericVerifyHelp(testName, fileName, workingDir); + } + + //**** Show PT Job Help + /* + [TestMethod] + public void Help_ShowHelp_PTJob_Test() + { + Utilities MyUtils = new Utilities(); + // add proper framework + string current_framework; + if (MyUtils.NetFrameworkTestRun) + current_framework = MyUtils.NetFramework; + else + current_framework = MyUtils.NetCoreFramework; + + string testName = "showhelpptjob"; + string fileName = "job"; + string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestJobExeWorkingDirectory"] + current_framework; + GenericVerifyHelp(testName, fileName, workingDir); + } + + //**** Show PT Server Help + [TestMethod] + public void Help_ShowHelp_PTServer_Test() + { + Utilities MyUtils = new Utilities(); + + // add proper framework + string current_framework; + if (MyUtils.NetFrameworkTestRun) + current_framework = MyUtils.NetFramework; + else + current_framework = MyUtils.NetCoreFramework; + + string testName = "showhelpptserver"; + string fileName = "server"; + string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestServerExeWorkingDirectory"] + current_framework; + GenericVerifyHelp(testName, fileName, workingDir); + } + */ + + + //************* Helper Method ***************** + // basic helper method to call and exe with no params so shows help - verify getting proper help screen + //********************************************* + public void GenericVerifyHelp(string testName, string fileName, string workingDir) + { + Utilities MyUtils = new Utilities(); + string TestLogDir = ConfigurationManager.AppSettings["TestLogOutputDirectory"]; + string logOutputFileName = testName + ".log"; + + // Get and log the proper help based on if netframework netcore + string fileNameExe = fileName + ".exe"; + if (MyUtils.NetFrameworkTestRun == false) + { + fileNameExe = "dotnet " + fileName + ".dll"; + logOutputFileName = testName + "_Core.log"; // help message different than netframework so have separate cmp file + } + string LogOutputDirFileName = TestLogDir + "\\" + logOutputFileName; + + // Use ProcessStartInfo class + ProcessStartInfo startInfo = new ProcessStartInfo() + { + UseShellExecute = false, + RedirectStandardOutput = true, + WindowStyle = ProcessWindowStyle.Normal, + CreateNoWindow = false, + WorkingDirectory = workingDir, + FileName = "cmd.exe", + Arguments = "/C " + fileNameExe + " > " + LogOutputDirFileName + " 2>&1" + }; + + // Log the info to debug + string logInfo = " " + workingDir + "\\" + fileNameExe; + MyUtils.LogDebugInfo(logInfo); + + // Start cmd.exe process that launches proper exe + Process process = Process.Start(startInfo); + + // Give it a second to completely start \ finish + Thread.Sleep(1000); + + // Kill the process id for the cmd that launched the window so it isn't lingering + MyUtils.KillProcess(process.Id); + + // Verify Help message + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName); + + } } } diff --git a/AmbrosiaTest/AmbrosiaTest/BuildJSTestApp.ps1 b/AmbrosiaTest/AmbrosiaTest/BuildJSTestApp.ps1 new file mode 100644 index 00000000..39fef190 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/BuildJSTestApp.ps1 @@ -0,0 +1,29 @@ +########################################### +# +# Script to build the Javascript Test Apps +# +# TO DO: Currently, only one JS Test App, but if get more could make this generic enough +# Parameter: +# PathToAppToBuild - path on where the TestApp is located +# +# Example: BuildJSTestApp.ps1 D:\\Ambrosia\\AmbrosiaJS\\TestApp +# +########################################### + + + +$PathToAppToBuild=$args[0] + +# Verify parameter is passed +if ([string]::IsNullOrEmpty($PathToAppToBuild)) { + Write-Host "ERROR! Missing parameter value. " + Write-Host " Please specify the path to TestApp" + Write-Host + exit +} + +Write-host "------------- Building TestApp at: $PathToAppToBuild -------------" +Write-host +Set-Location $PathToAppToBuild +npx tsc -p tsconfig.json +Write-host "------------- DONE! Building! -------------" diff --git a/AmbrosiaTest/AmbrosiaTest/CleanUpAzure.ps1 b/AmbrosiaTest/AmbrosiaTest/CleanUpAzure.ps1 index c9ff6a78..9ab5fb0b 100644 --- a/AmbrosiaTest/AmbrosiaTest/CleanUpAzure.ps1 +++ b/AmbrosiaTest/AmbrosiaTest/CleanUpAzure.ps1 @@ -2,14 +2,19 @@ # # Script to clean up the Azure tables. # +# NOTE: This script requires PowerShell 7. Make sure that is the version that is in the path. +# NOTE: powershell.exe is < ver 6. pwsh.exe is ver 6+ +# # Parameters: # ObjectName - name of the objects in Azure you want to delete - can use "*" as wild card ... so "process" will NOT delete "process1" but "process*" will. # -# Note - might need Microsoft Azure Powershell add in - http://go.microsoft.com/fwlink/p/?linkid=320376&clcid=0x409 +# NOTE - might need Microsoft Azure Powershell add in - http://go.microsoft.com/fwlink/p/?linkid=320376&clcid=0x409 # - also need to do this at powershell prompt: -# - Install-Module -Name AzureRM -AllowClobber -# - Install-Module AzureRmStorageTable +# - Install-Module Az -AllowClobber +# - Install-Module AzTable -AllowClobber +# - Enable-AzureRmAlias -Scope CurrentUser # - Get-Module -ListAvailable AzureRM -->> This should show 5.6 (just needs to be above 4.4) +# - NOTE - might need to run Set-ExecutionPolicy Unrestricted # - This script requires environment variable # - AZURE_STORAGE_CONN_STRING - Connection string used to connect to the Azure subscription # @@ -39,7 +44,6 @@ if ([string]::IsNullOrEmpty($env:AZURE_STORAGE_CONN_STRING)) { exit } - Write-host "------------- Clean Up Azure tables and file share -------------" Write-host Write-host "--- Connection Info ---" @@ -57,33 +61,43 @@ Write-host "----------------" Write-host # Get a storage context -$ctx = New-AzureStorageContext -StorageAccountName $storageAccountName -StorageAccountKey $storageKey +$ctx = New-AzStorageContext -StorageAccountName $storageAccountName -StorageAccountKey $storageKey +$container = "ambrosialogs" + +# Clean up the data in the CRA (Immortal Coordinator) tables +Write-host "------------- Delete items in Azure table: craendpointtable filtered on $ObjectName -------------" +$tableName = "craendpointtable" +$storageTable = Get-AzStorageTable -Name $tableName -Context $ctx +Get-AzTableRow -table $storageTable.CloudTable | Where-Object -Property “PartitionKey” -CLike $ObjectName | Remove-AzTableRow -table $storageTable.CloudTable +Write-host -# Delete the table created by the Ambrosia -Write-host "------------- Delete Ambrosia created tables filtered on $ObjectName -------------" -Get-AzureStorageTable $ObjectName* -Context $ctx | Remove-AzureStorageTable -Context $ctx -Force -# Clean up the data in the CRA (Immortal Coordintor) tables Write-host "------------- Delete items in Azure table: craconnectiontable filtered on $ObjectName -------------" $tableName = "craconnectiontable" -$storageTable = Get-AzureStorageTable -Name $tableName -Context $ctx -Get-AzureStorageTableRowAll -table $storageTable | where PartitionKey -Like $ObjectName | Remove-AzureStorageTableRow -table $storageTable +$storageTable = Get-AzStorageTable -Name $tableName -Context $ctx +Get-AzTableRow -table $storageTable.CloudTable | Where-Object -Property “PartitionKey” -CLike $ObjectName | Remove-AzTableRow -table $storageTable.CloudTable Write-host -Write-host "------------- Delete items in Azure table: craendpointtable filtered on $ObjectName -------------" -$tableName = "craendpointtable" -$storageTable = Get-AzureStorageTable -Name $tableName -Context $ctx -Get-AzureStorageTableRowAll -table $storageTable | where PartitionKey -Like $ObjectName | Remove-AzureStorageTableRow -table $storageTable -Write-host Write-host "------------- Delete items in Azure table: cravertextable filtered on $ObjectName -------------" $tableName = "cravertextable" -$storageTable = Get-AzureStorageTable -Name $tableName -Context $ctx -Get-AzureStorageTableRowAll -table $storageTable | where PartitionKey -Like $ObjectName | Remove-AzureStorageTableRow -table $storageTable -Get-AzureStorageTableRowAll -table $storageTable | where RowKey -Like $ObjectName | Remove-AzureStorageTableRow -table $storageTable - +$storageTable = Get-AzStorageTable -Name $tableName -Context $ctx +Get-AzTableRow -table $storageTable.CloudTable | Where-Object -Property “PartitionKey” -CLike $ObjectName | Remove-AzTableRow -table $storageTable.CloudTable Write-host +# Delete the tables created by the Ambrosia +Write-host "------------- Delete Ambrosia created tables filtered on $ObjectName -------------" +Get-AzStorageTable $ObjectName* -Context $ctx | Remove-AzStorageTable -Context $ctx -Force + +Write-host "------------- Delete Azure Blobs in Azure table: ambrosialogs filtered on $ObjectName -------------" +$blobs = Get-AzStorageBlob -Container $container -Context $ctx | Where-Object Name -Like $ObjectName* + +#Remove lease on each Blob +$blobs | ForEach-Object{$_.ICloudBlob.BreakLease()} + +#Delete blobs in a specified container. +$blobs| Remove-AzStorageBlob + #Write-host "------------- Clean Up Azure File Share -------------" #Write-host ## TO DO: Not sure what we do here for File Share ... need the proper name and if we even use it any more. diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..163f60cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,167 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +export namespace Test +{ + /** + * Testing 1) a mix of ',' and ';' member separators, 2) A complex-type array */ + export class MixedTest + { + p1: string[]; + p2: string[][]; + p3: { p4: number, p5: string }[]; + + constructor(p1: string[], p2: string[][], p3: { p4: number, p5: string }[]) + { + this.p1 = p1; + this.p2 = p2; + this.p3 = p3; + } + } + + /** + * Example of a complex type. + */ + export class Name + { + first: string; + last: string; + + constructor(first: string, last: string) + { + this.first = first; + this.last = last; + } + } + + /** + * Example of a type that references another type. + */ + export type Names = Name[]; + + /** + * Example of a nested complex type. + */ + export class Nested + { + abc: { a: Uint8Array, b: { c: Names } }; + + constructor(abc: { a: Uint8Array, b: { c: Names } }) + { + this.abc = abc; + } + } + + /** + * Example of an enum. + */ + export enum Letters { A = 0, B = 3, C = 4, D = 9 } + + /** + * *Note: The result (Names) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * Example of a [post] method that uses custom types. + */ + export function makeName_Post(callContextData: any, firstName?: string, lastName?: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "makeName", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("firstName?", firstName), + IC.arg("lastName?", lastName)); + return (callID); + } + + /** + * *Note: The result (Names) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * Example of a [post] method that uses custom types. + */ + export function makeName_PostByImpulse(callContextData: any, firstName?: string, lastName?: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "makeName", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("firstName?", firstName), + IC.arg("lastName?", lastName)); + } + + /** + * Example of a [non-post] method + */ + export function DoIt_Fork(p1: Name[][]): void + { + IC.callFork(DESTINATION_INSTANCE_NAME, 123, { p1: p1 }); + } + + /** + * Example of a [non-post] method + */ + export function DoIt_Impulse(p1: Name[][]): void + { + IC.callImpulse(DESTINATION_INSTANCE_NAME, 123, { p1: p1 }); + } + + /** + * Example of a [non-post] method + */ + export function DoIt_EnqueueFork(p1: Name[][]): void + { + IC.queueFork(DESTINATION_INSTANCE_NAME, 123, { p1: p1 }); + } + + /** + * Example of a [non-post] method + */ + export function DoIt_EnqueueImpulse(p1: Name[][]): void + { + IC.queueImpulse(DESTINATION_INSTANCE_NAME, 123, { p1: p1 }); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "makeName": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "makeName": + const makeName_Result: Test.Names = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..0d8f5ff6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/ASTTest_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,245 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/ASTTest"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/ASTTest.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "makeName": + { + let firstName: string = IC.getPostMethodArg(rpc, "firstName?"); + let lastName: string = IC.getPostMethodArg(rpc, "lastName?"); + IC.postResult(rpc, PTM.Test.makeName(firstName, lastName)); + } + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + case 123: + { + const p1: PTM.Test.Name[][] = rpc.jsonParams["p1"]; + PTM.Test.DoIt(p1); + } + break; + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("MixedTest", "{ p1: string[], p2: string[][], p3: { p4: number, p5: string }[] }"); + Meta.publishType("Name", "{ first: string, last: string }"); + Meta.publishType("Names", "Name[]"); + Meta.publishType("Nested", "{ abc: { a: Uint8Array, b: { c: Names } } }"); + Meta.publishType("Letters", "number"); + Meta.publishPostMethod("makeName", 1, ["firstName?: string", "lastName?: string"], "Names"); + Meta.publishMethod(123, "DoIt", ["p1: Name[][]"]); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/ASTTest.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/ASTTest.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/ASTTest.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..54aee454 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,139 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** + * Parameter type for the 'ComputePI' method. + */ +export class Digit3 +{ + count: number; + + constructor(count: number) + { + this.count = count; + } +} + +export namespace Test +{ + /** + * Parameter type for the 'Today' method. + */ + export enum DayOfWeek { Sunday = 0, Monday = 1, Tuesday = 2, Wednesday = 3, Thursday = 4, Friday = 5, Saturday = 6 } + + /** + * Parameter type for the 'ComputePI' method. + */ + export class Digits + { + count: number; + + constructor(count: number) + { + this.count = count; + } + } + + /** + * Parameter type for the 'ComputePI' method. + */ + export class Digit2 + { + count: number; + + constructor(count: number) + { + this.count = count; + } + } + + /** + * Parameter type for the 'ComputePI' method. + */ + export class Digit3 + { + count: number; + + constructor(count: number) + { + this.count = count; + } + } + + /** + * Some new test. + */ + export async function NewTestAsync(person: { age: number }): Promise<{ age: number }> + { + let postResult: { age: number } = await IC.postAsync(DESTINATION_INSTANCE_NAME, "NewTest", 1, null, POST_TIMEOUT_IN_MS, IC.arg("person", person)); + return (postResult); + } + + /** + * Some new test. + */ + export function NewTest(resultHandler: IC.PostResultHandler<{ age: number }>, person: { age: number }): void + { + IC.post(DESTINATION_INSTANCE_NAME, "NewTest", 1, resultHandler, POST_TIMEOUT_IN_MS, IC.arg("person", person)); + } + + export function DoIt_Fork(dow: DayOfWeek): void + { + IC.callFork(DESTINATION_INSTANCE_NAME, 1, { dow: dow }); + } + + export function DoIt_Impulse(dow: DayOfWeek): void + { + IC.callImpulse(DESTINATION_INSTANCE_NAME, 1, { dow: dow }); + } + + export function DoIt_EnqueueFork(dow: DayOfWeek): void + { + IC.queueFork(DESTINATION_INSTANCE_NAME, 1, { dow: dow }); + } + + export function DoIt_EnqueueImpulse(dow: DayOfWeek): void + { + IC.queueImpulse(DESTINATION_INSTANCE_NAME, 1, { dow: dow }); + } + + export namespace TestInner + { + /** + * Parameter type for the 'ComputePI' method. + */ + export class Digit3 + { + count: number; + + constructor(count: number) + { + this.count = count; + } + } + + /** + * Returns pi computed to the specified number of digits. + */ + export async function ComputePIAsync(digits?: Digits): Promise + { + let postResult: number = await IC.postAsync(DESTINATION_INSTANCE_NAME, "ComputePI", 1, null, POST_TIMEOUT_IN_MS, IC.arg("digits?", digits)); + return (postResult); + } + + /** + * Returns pi computed to the specified number of digits. + */ + export function ComputePI(resultHandler: IC.PostResultHandler, digits?: Digits): void + { + IC.post(DESTINATION_INSTANCE_NAME, "ComputePI", 1, resultHandler, POST_TIMEOUT_IN_MS, IC.arg("digits?", digits)); + } + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..b132818f --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/PI_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,207 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/PI"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this class and _appState variable to your input file (./JS_CodeGen_TestFiles/PI.ts) in an exported namespace/module +class AppState extends Ambrosia.AmbrosiaAppState +{ + // TODO: Define your application state here + + constructor() + { + super(); + // TODO: Initialize your application state here + } +} + +export let _appState: AppState = new AppState(); + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(Utils.jsonStringify(_appState), onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(jsonAppState: string, error?: Error): void + { + if (!error) + { + _appState = Utils.jsonParse(jsonAppState); + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (mainly RPCs, but also the InitialMessage and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // Fast (non-async) handler for high-volume messages + if (!dispatcher(message)) + { + // Slower async handler, but simpler/cleaner to code because we can use 'await' + // Note: messageDispatcher() is NOT awaited by the calling code, so we don't await dispatcherAsync(). Consequently, any await's in + // dispatcherAsync() will start independent Promise chains, and these chains are explicitly responsible for managing any + // order-of-execution synchronization issues (eg. if the handling of message n is dependent on the handling of message n - 1). + dispatcherAsync(message); + } +} + +/** Synchronous message dispatcher. */ +function dispatcher(message: Messages.DispatchedMessage): boolean +{ + let handled: boolean = false; + + try + { + if (message.type === Messages.DispatchedMessageType.RPC) + { + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + // TODO: Add case-statements for your high-volume methods here + } + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } + + return (handled); +} + +/** Asynchronous message dispatcher. */ +async function dispatcherAsync(message: Messages.DispatchedMessage) +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "NewTest": + let person: { age: number } = IC.getPostMethodArg(rpc, "person"); + IC.postResult<{ age: number }>(rpc, PTM.Test.NewTest(person)); + break; + + case "ComputePI": + let digits: PTM.Test.Digits = IC.getPostMethodArg(rpc, "digits?"); + IC.postResult(rpc, await PTM.Test.TestInner.ComputePI(digits)); + break; + + default: + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + case 1: + let dow: PTM.Test.DayOfWeek = rpc.jsonParams["dow"]; + PTM.Test.DoIt(dow); + break; + + default: + Utils.log(`(No method is associated with methodID ${rpc.methodID})`, loggingPrefix) + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("DayOfWeek", "number"); + Meta.publishType("Digits", "{ count: number }"); + Meta.publishType("Digit2", "{ count: number }"); + Meta.publishType("Digit3", "{ count: number }"); + Meta.publishPostMethod("NewTest", 1, ["person: { age: number }"], "{ age: number }"); + Meta.publishPostMethod("ComputePI", 1, ["digits?: Digits"], "number"); + Meta.publishMethod(1, "DoIt", ["dow: DayOfWeek"]); + // TODO: Add an exported function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeStateAndCode: + // TODO: Add an exported [non-async] function 'onUpgradeStateAndCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/PI.ts in order to reference the 'Messages' namespace. + // Also, your handler should call IC.upgrade() [to upgrade code] and _appState.upgrade() [to upgrade state]. + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + await PTM.Test.TestInner.onFirstStart(); + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/PI.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..6613e241 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,221 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** +Test File to test all the the ways that the ambrosia tag can be set and still work + */ +export namespace Test +{ + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* */ + export function OneLineNoComment_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "OneLineNoComment", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* */ + export function OneLineNoComment_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "OneLineNoComment", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * Multi Line with Comment before Tag + * but still before tag + */ + export function MultiLineCommentBeforeTag_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "MultiLineCommentBeforeTag", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * Multi Line with Comment before Tag + * but still before tag + */ + export function MultiLineCommentBeforeTag_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "MultiLineCommentBeforeTag", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* */ + export function MultiSeparateLinesCommentBeforeTag_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "MultiSeparateLinesCommentBeforeTag", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* */ + export function MultiSeparateLinesCommentBeforeTag_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "MultiSeparateLinesCommentBeforeTag", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ************ Have a space after the tag before function declaration + */ + export function EmptyLineBetweenTagAndFctn_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "EmptyLineBetweenTagAndFctn", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ************ Have a space after the tag before function declaration + */ + export function EmptyLineBetweenTagAndFctn_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "EmptyLineBetweenTagAndFctn", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * **** Spacing around the tag + */ + export function SpacingAroundTag_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "SpacingAroundTag", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * **** Spacing around the tag + */ + export function SpacingAroundTag_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "SpacingAroundTag", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * JS Doc + */ + export function JSDOcTag_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "JSDOcTag", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * JS Doc + */ + export function JSDOcTag_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "JSDOcTag", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * The ambrosia tag must be on the implementation of an overloaded function + */ + export function fnOverload_Post(callContextData: any, name?: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "fnOverload", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name?", name)); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * The ambrosia tag must be on the implementation of an overloaded function + */ + export function fnOverload_PostByImpulse(callContextData: any, name?: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "fnOverload", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name?", name)); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "OneLineNoComment": + case "MultiLineCommentBeforeTag": + case "MultiSeparateLinesCommentBeforeTag": + case "EmptyLineBetweenTagAndFctn": + case "SpacingAroundTag": + case "JSDOcTag": + case "fnOverload": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "OneLineNoComment": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "MultiLineCommentBeforeTag": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "MultiSeparateLinesCommentBeforeTag": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "EmptyLineBetweenTagAndFctn": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "SpacingAroundTag": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "JSDOcTag": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "fnOverload": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..97e8ebc9 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_AmbrosiaTag_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,264 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_AmbrosiaTag"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "OneLineNoComment": + IC.postResult(rpc, PTM.Test.OneLineNoComment()); + break; + + case "MultiLineCommentBeforeTag": + IC.postResult(rpc, PTM.Test.MultiLineCommentBeforeTag()); + break; + + case "MultiSeparateLinesCommentBeforeTag": + IC.postResult(rpc, PTM.Test.MultiSeparateLinesCommentBeforeTag()); + break; + + case "EmptyLineBetweenTagAndFctn": + IC.postResult(rpc, PTM.Test.EmptyLineBetweenTagAndFctn()); + break; + + case "SpacingAroundTag": + IC.postResult(rpc, PTM.Test.SpacingAroundTag()); + break; + + case "JSDOcTag": + IC.postResult(rpc, PTM.Test.JSDOcTag()); + break; + + case "fnOverload": + { + let name: string = IC.getPostMethodArg(rpc, "name?"); + IC.postResult(rpc, PTM.Test.fnOverload(name)); + } + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("OneLineNoComment", 1, [], "void"); + Meta.publishPostMethod("MultiLineCommentBeforeTag", 1, [], "void"); + Meta.publishPostMethod("MultiSeparateLinesCommentBeforeTag", 1, [], "void"); + Meta.publishPostMethod("EmptyLineBetweenTagAndFctn", 1, [], "void"); + Meta.publishPostMethod("SpacingAroundTag", 1, [], "void"); + Meta.publishPostMethod("JSDOcTag", 1, [], "void"); + Meta.publishPostMethod("fnOverload", 1, ["name?: string"], "void"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..0241ef43 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,52 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** +Test when missing @param rawParams + */ +export namespace Test +{ + /** + * Method to test custom serialized parameters. + * @param rawParams A custom serialization (byte array) of all required parameters. Contact the 'server' instance publisher (Darren Gehring [darrenge@microsoft.com]) for details of the serialization format. + */ + export function takesCustomSerializedParams_Fork(rawParams: Uint8Array): void + { + IC.callFork(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams A custom serialization (byte array) of all required parameters. Contact the 'server' instance publisher (Darren Gehring [darrenge@microsoft.com]) for details of the serialization format. + */ + export function takesCustomSerializedParams_Impulse(rawParams: Uint8Array): void + { + IC.callImpulse(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams A custom serialization (byte array) of all required parameters. Contact the 'server' instance publisher (Darren Gehring [darrenge@microsoft.com]) for details of the serialization format. + */ + export function takesCustomSerializedParams_EnqueueFork(rawParams: Uint8Array): void + { + IC.queueFork(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams A custom serialization (byte array) of all required parameters. Contact the 'server' instance publisher (Darren Gehring [darrenge@microsoft.com]) for details of the serialization format. + */ + export function takesCustomSerializedParams_EnqueueImpulse(rawParams: Uint8Array): void + { + IC.queueImpulse(DESTINATION_INSTANCE_NAME, 2, rawParams); + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..37194ea8 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParamNoRawParam_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,234 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + case 2: + { + const rawParams: Uint8Array = rpc.rawParams; + PTM.Test.takesCustomSerializedParams(rawParams); + } + break; + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishMethod(2, "takesCustomSerializedParams", ["rawParams: Uint8Array"]); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..a3fe014a --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,49 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +export namespace Test +{ + /** + * Method to test custom serialized parameters. + * @param rawParams Description of the format of the custom serialized byte array. + */ + export function takesCustomSerializedParams_Fork(rawParams: Uint8Array): void + { + IC.callFork(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams Description of the format of the custom serialized byte array. + */ + export function takesCustomSerializedParams_Impulse(rawParams: Uint8Array): void + { + IC.callImpulse(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams Description of the format of the custom serialized byte array. + */ + export function takesCustomSerializedParams_EnqueueFork(rawParams: Uint8Array): void + { + IC.queueFork(DESTINATION_INSTANCE_NAME, 2, rawParams); + } + + /** + * Method to test custom serialized parameters. + * @param rawParams Description of the format of the custom serialized byte array. + */ + export function takesCustomSerializedParams_EnqueueImpulse(rawParams: Uint8Array): void + { + IC.queueImpulse(DESTINATION_INSTANCE_NAME, 2, rawParams); + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..90f40798 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_CustomSerialParam_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,234 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_CustomSerialParam"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + case 2: + { + const rawParams: Uint8Array = rpc.rawParams; + PTM.Test.takesCustomSerializedParams(rawParams); + } + break; + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishMethod(2, "takesCustomSerializedParams", ["rawParams: Uint8Array"]); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParam.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParam.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_CustomSerialParam.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..5e52c41b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,65 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* */ +export function unused_Post(callContextData: any): number +{ + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "unused", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); +} + +/** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* */ +export function unused_PostByImpulse(callContextData: any): void +{ + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "unused", 1, POST_TIMEOUT_IN_MS, callContextData); +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "unused": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "unused": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..74c5d613 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlerWarnings_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,231 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_EventHandlerWarnings"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "unused": + IC.postResult(rpc, PTM.unused()); + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("unused", 1, [], "void"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..0fa81115 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,76 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +export namespace Test +{ + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * Fake Event Handler due to case in the name so this will be generated + */ + export function onbecomingprimary_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "onbecomingprimary", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * Fake Event Handler due to case in the name so this will be generated + */ + export function onbecomingprimary_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "onbecomingprimary", 1, POST_TIMEOUT_IN_MS, callContextData); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "onbecomingprimary": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "onbecomingprimary": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..5e612d54 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_EventHandlers_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,234 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_EventHandlers"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_EventHandlers.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "onbecomingprimary": + IC.postResult(rpc, PTM.Test.onbecomingprimary()); + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("onbecomingprimary", 1, [], "void"); + PTM.onICStarting(); + break; + + case Messages.AppEventType.ICStarted: + PTM.onICStarted(); + break; + + case Messages.AppEventType.ICStopped: + { + const exitCode: number = appEvent.args[0] as number; + PTM.onICStopped(exitCode); + } + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + PTM.onICReadyForSelfCallRpc(); + break; + + case Messages.AppEventType.RecoveryComplete: + PTM.Test.onRecoveryComplete(); + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlers.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlers.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + PTM.onIncomingCheckpointStreamSize(); + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + PTM.Test.onBecomingPrimary(); + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_EventHandlers.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..54b17362 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,15 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #1 + */ +export type NameToNumberDictionary = Map; \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..2b2698fb --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType1_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,229 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_GenType1"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_GenType1.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("NameToNumberDictionary", "Map"); + // Code-gen: Published methods will go here + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType1.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType1.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_GenType1.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..91bb4455 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,43 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #2 + */ +export class EmployeeWithGenerics +{ + firstNames: Set<{ name: string, nickNames: NickNames }>; + lastName: string; + birthYear: number; + + constructor(firstNames: Set<{ name: string, nickNames: NickNames }>, lastName: string, birthYear: number) + { + this.firstNames = firstNames; + this.lastName = lastName; + this.birthYear = birthYear; + } +} + +/** + * Test for a literal-object array type; this should generate a 'NickNames_Element' class and then redefine the type of NickNames as Nicknames_Element[]. + * This is done to makes it easier for the consumer to create a NickNames instance. + */ +export type NickNames = NickNames_Element[]; + +export class NickNames_Element +{ + name: string; + + constructor(name: string) + { + this.name = name; + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..763096a8 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_GenType2_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,230 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_GenType2"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_GenType2.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("EmployeeWithGenerics", "{ firstNames: Set<{ name: string, nickNames: NickNames }>, lastName: string, birthYear: number }"); + Meta.publishType("NickNames", "{ name: string }[]"); + // Code-gen: Published methods will go here + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType2.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType2.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_GenType2.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..22016410 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,36 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +export namespace Foo +{ + export namespace Bar + { + /** + * The Baziest Baz... + * ...ever! + */ + export namespace Baz + { + /** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #1 + */ + export type NameToNumberDictionary = Map; + } + } + + export namespace Woo + { + export namespace Hoo + { + export type NumberToNameDictionary = Map; + } + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..12a1e6ad --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment2_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,230 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_JSDocComment2"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_JSDocComment2.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("NameToNumberDictionary", "Map"); + Meta.publishType("NumberToNameDictionary", "Map"); + // Code-gen: Published methods will go here + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment2.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment2.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment2.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..4bcf72b6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,69 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** Some static methods. */ +export namespace StaticStuff +{ + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* */ + export function hello_Post(callContextData: any, name: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "hello", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name", name)); + return (callID); + } + + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* */ + export function hello_PostByImpulse(callContextData: any, name: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "hello", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name", name)); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "hello": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "hello": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..b25849f1 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_JSDocComment_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,234 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_JSDocComment"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_JSDocComment.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "hello": + { + let name: string = IC.getPostMethodArg(rpc, "name"); + IC.postResult(rpc, PTM.StaticStuff.hello(name)); + } + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("hello", 1, ["name: string"], "void"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_JSDocComment.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..474606c7 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,26 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** + * Test for a literal-object array type; this should generate a 'NickNames_Element' class and then redefine the type of NickNames as Nicknames_Element[]. + * This is done to makes it easier for the consumer to create a NickNames instance. + */ +export type NickNames = NickNames_Element[]; + +export class NickNames_Element +{ + name: string; + + constructor(name: string) + { + this.name = name; + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..d5c9e521 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_LitObjArray_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,229 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_LitObjArray"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_LitObjArray.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + // Code-gen: Post method handlers will go here + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("NickNames", "{ name: string }[]"); + // Code-gen: Published methods will go here + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_LitObjArray.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_LitObjArray.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_LitObjArray.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..0525b6ef --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,80 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** +Test File of misc tests. If find a theme or grouping then move out of this file into separate file + */ +export namespace Test +{ + /** + * *Note: The result ({ r1: string, r2: string }) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * Correctly handle line-breaks and comments + */ + export function myComplexReturnFunction_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "myComplexReturnFunction", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result ({ r1: string, r2: string }) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * Correctly handle line-breaks and comments + */ + export function myComplexReturnFunction_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "myComplexReturnFunction", 1, POST_TIMEOUT_IN_MS, callContextData); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "myComplexReturnFunction": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "myComplexReturnFunction": + const myComplexReturnFunction_Result: { r1: string, r2: string } = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..0eefc80c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_MiscTests_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,231 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_MiscTests"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_MiscTests.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "myComplexReturnFunction": + IC.postResult<{ r1: string, r2: string }>(rpc, PTM.Test.myComplexReturnFunction()); + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("myComplexReturnFunction", 1, [], "{ r1: string, r2: string }"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_MiscTests.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_MiscTests.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_MiscTests.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..0c4a4432 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,68 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +export namespace StaticStuff +{ + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* */ + export function hello_Post(callContextData: any, name: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "hello", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name", name)); + return (callID); + } + + /** *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* */ + export function hello_PostByImpulse(callContextData: any, name: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "hello", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("name", name)); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "hello": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "hello": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..0bd91c25 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_StaticMethod_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,234 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_StaticMethod"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_StaticMethod.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "hello": + { + let name: string = IC.getPostMethodArg(rpc, "name"); + IC.postResult(rpc, PTM.StaticStuff.hello(name)); + } + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + // Code-gen: Published types will go here + Meta.publishPostMethod("hello", 1, ["name: string"], "void"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_StaticMethod.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_StaticMethod.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_StaticMethod.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedConsumerInterface.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedConsumerInterface.g.ts.cmp new file mode 100644 index 00000000..c6a88f2f --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedConsumerInterface.g.ts.cmp @@ -0,0 +1,316 @@ +// Generated consumer-side API for the 'server' Ambrosia Node instance. +// Publisher: Darren Gehring [darrenge@microsoft.com]. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import Ambrosia = require("ambrosia-node"); +import IC = Ambrosia.IC; +import Utils = Ambrosia.Utils; + +let DESTINATION_INSTANCE_NAME: string = "server"; +let POST_TIMEOUT_IN_MS: number = 8000; // -1 = Infinite + +/** +Test File to test all the Types for typescripts +Has the basic types + */ +export namespace Test +{ + /*********** Enum type (numeric enum - strings as number) as return */ + export enum PrintMedia { Newspaper = 1, Newsletter = 2, Magazine = 3, Book = 4 } + + /********** Enum type (Reverse Mapped enum - can access the value of a member and also a member name from its value) */ + export enum PrintMediaReverse { NewspaperReverse = 1, NewsletterReverse = 2, MagazineReverse = 3, BookReverse = 4 } + + export enum MyEnumAA { aa = -1, bb = -123, cc = 123, dd = 0 } + + export enum MyEnumBBB { aaa = -1, bbb = 0 } + + /*************** Complex Type */ + export class Name + { + first: string; + last: string; + + constructor(first: string, last: string) + { + this.first = first; + this.last = last; + } + } + + /************** Example of a type that references another type *************. + */ + export type Names = Name[]; + + /************** Example of a nested complex type.************* + */ + export class Nested + { + abc: { a: Uint8Array, b: { c: Names } }; + + constructor(abc: { a: Uint8Array, b: { c: Names } }) + { + this.abc = abc; + } + } + + /** + * Type with missing type information + */ + export class typeWithMissingType + { + p1: any; + p2: number; + + constructor(p1: any, p2: number) + { + this.p1 = p1; + this.p2 = p2; + } + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * *********** Primitives - bool, string, number, array + */ + export function BasicTypes_Post(callContextData: any, isFalse: boolean, height: number, mystring?: string, mystring2?: string, my_array?: number[], notSure?: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "BasicTypes", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("isFalse", isFalse), + IC.arg("height", height), + IC.arg("mystring?", mystring), + IC.arg("mystring2?", mystring2), + IC.arg("my_array?", my_array), + IC.arg("notSure?", notSure)); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * *********** Primitives - bool, string, number, array + */ + export function BasicTypes_PostByImpulse(callContextData: any, isFalse: boolean, height: number, mystring?: string, mystring2?: string, my_array?: number[], notSure?: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "BasicTypes", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("isFalse", isFalse), + IC.arg("height", height), + IC.arg("mystring?", mystring), + IC.arg("mystring2?", mystring2), + IC.arg("my_array?", my_array), + IC.arg("notSure?", notSure)); + } + + /** + * *Note: The result (PrintMedia) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ******* Function using / returning Numeric Enum + */ + export function getMedia_Post(callContextData: any, mediaName: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "getMedia", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("mediaName", mediaName)); + return (callID); + } + + /** + * *Note: The result (PrintMedia) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ******* Function using / returning Numeric Enum + */ + export function getMedia_PostByImpulse(callContextData: any, mediaName: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "getMedia", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("mediaName", mediaName)); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * *********** Void type + */ + export function warnUser_Post(callContextData: any): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "warnUser", 1, POST_TIMEOUT_IN_MS, callContextData); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * *********** Void type + */ + export function warnUser_PostByImpulse(callContextData: any): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "warnUser", 1, POST_TIMEOUT_IN_MS, callContextData); + } + + /** + * *Note: The result (Names) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ************ Example of a [post] method that uses custom types. + */ + export function makeName_Post(callContextData: any, firstName?: string, lastName?: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "makeName", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("firstName?", firstName), + IC.arg("lastName?", lastName)); + return (callID); + } + + /** + * *Note: The result (Names) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ************ Example of a [post] method that uses custom types. + */ + export function makeName_PostByImpulse(callContextData: any, firstName?: string, lastName?: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "makeName", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("firstName?", firstName), + IC.arg("lastName?", lastName)); + } + + /** + * *Note: The result (number) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ******* Function returning number + */ + export function return_number_Post(callContextData: any, strvalue: string): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "return_number", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("strvalue", strvalue)); + return (callID); + } + + /** + * *Note: The result (number) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ******* Function returning number + */ + export function return_number_PostByImpulse(callContextData: any, strvalue: string): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "return_number", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("strvalue", strvalue)); + } + + /** + * *Note: The result (string) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ******* Function returning string + */ + export function returnstring_Post(callContextData: any, numvalue: number): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "returnstring", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("numvalue", numvalue)); + return (callID); + } + + /** + * *Note: The result (string) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ******* Function returning string + */ + export function returnstring_PostByImpulse(callContextData: any, numvalue: number): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "returnstring", 1, POST_TIMEOUT_IN_MS, callContextData, IC.arg("numvalue", numvalue)); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start(). Returns the post method callID.* + * + * ******* Function with missing types **** + * Function with missing type information + */ + export function fnWithMissingType_Post(callContextData: any, p1: any, p2: number): number + { + const callID = IC.postFork(DESTINATION_INSTANCE_NAME, "fnWithMissingType", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("p1", p1), + IC.arg("p2", p2)); + return (callID); + } + + /** + * *Note: The result (void) produced by this post method is received via the PostResultDispatcher provided to IC.start().* + * + * ******* Function with missing types **** + * Function with missing type information + */ + export function fnWithMissingType_PostByImpulse(callContextData: any, p1: any, p2: number): void + { + IC.postByImpulse(DESTINATION_INSTANCE_NAME, "fnWithMissingType", 1, POST_TIMEOUT_IN_MS, callContextData, + IC.arg("p1", p1), + IC.arg("p2", p2)); + } +} + +/** + * Handler for the results of previously called post methods (in Ambrosia, only 'post' methods return values). See Messages.PostResultDispatcher.\ + * Must return true only if the result (or error) was handled. + */ +export function postResultDispatcher(senderInstanceName: string, methodName: string, methodVersion: number, callID: number, callContextData: any, result: any, errorMsg: string): boolean +{ + const sender: string = IC.isSelf(senderInstanceName) ? "local" : `'${senderInstanceName}'`; + let handled: boolean = true; + + if (senderInstanceName !== DESTINATION_INSTANCE_NAME) + { + return (false); // Not handled (this post result is from a different instance than the one this consumer-side file is for) + } + + if (errorMsg) + { + switch (methodName) + { + case "BasicTypes": + case "getMedia": + case "warnUser": + case "makeName": + case "return_number": + case "returnstring": + case "fnWithMissingType": + Utils.log(`Error: ${errorMsg}`); + break; + default: + handled = false; + break; + } + } + else + { + switch (methodName) + { + case "BasicTypes": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "getMedia": + const getMedia_Result: Test.PrintMedia = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "warnUser": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "makeName": + const makeName_Result: Test.Names = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "return_number": + const return_number_Result: number = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "returnstring": + const returnstring_Result: string = result; + // TODO: Handle the result, optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + case "fnWithMissingType": + // TODO: Handle the method completion (it returns void), optionally using the callContextData passed in the call + Utils.log(`Post method '${methodName}' from ${sender} IC succeeded`); + break; + default: + handled = false; + break; + } + } + return (handled); +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedPublisherFramework.g.ts.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedPublisherFramework.g.ts.cmp new file mode 100644 index 00000000..2311e9db --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/JS_CodeGen_Cmp/TS_Types_GeneratedPublisherFramework.g.ts.cmp @@ -0,0 +1,293 @@ +// Generated publisher-side framework for the 'server' Ambrosia Node instance. +// Note: This file was generated +// Note [to publisher]: You can edit this file, but to avoid losing your changes be sure to specify a 'mergeType' other than 'None' (the default is 'Annotate') when re-running emitTypeScriptFile[FromSource](). +import * as PTM from "./JS_CodeGen_TestFiles/TS_Types"; // PTM = "Published Types and Methods", but this file can also include app-state and app-event handlers +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import IC = Ambrosia.IC; +import Messages = Ambrosia.Messages; +import Meta = Ambrosia.Meta; +import Streams = Ambrosia.Streams; + +// TODO: It's recommended that you move this namespace to your input file (./JS_CodeGen_TestFiles/TS_Types.ts) then re-run code-gen +export namespace State +{ + export class AppState extends Ambrosia.AmbrosiaAppState + { + // TODO: Define your application state here + + /** + * @param restoredAppState Supplied only when loading (restoring) a checkpoint.\ + * **WARNING:** restoredAppState will be an object literal, so you must use this to reinstantiate any members that are (or contain) class references. + */ + constructor(restoredAppState?: AppState) + { + super(restoredAppState); + + if (restoredAppState) + { + // TODO: Re-initialize your application state from restoredAppState here + // WARNING: You MUST reinstantiate all members that are (or contain) class references because restoredAppState is data-only + } + else + { + // TODO: Initialize your application state here + } + } + } + + /** + * Only assign this using the return value of IC.start(), the return value of the upgrade() method of your AmbrosiaAppState + * instance, and [if not using the generated checkpointConsumer()] in the 'onFinished' callback of an IncomingCheckpoint object. + */ + export let _appState: AppState = null; +} + +/** Returns an OutgoingCheckpoint object used to serialize app state to a checkpoint. */ +export function checkpointProducer(): Streams.OutgoingCheckpoint +{ + function onCheckpointSent(error?: Error): void + { + Utils.log(`checkpointProducer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint saved"}`) + } + return (Streams.simpleCheckpointProducer(State._appState, onCheckpointSent)); +} + +/** Returns an IncomingCheckpoint object used to receive a checkpoint of app state. */ +export function checkpointConsumer(): Streams.IncomingCheckpoint +{ + function onCheckpointReceived(appState: Ambrosia.AmbrosiaAppState, error?: Error): void + { + if (!error) + { + State._appState = appState as State.AppState; + } + Utils.log(`checkpointConsumer: ${error ? `Failed (reason: ${error.message})` : "Checkpoint loaded"}`); + } + return (Streams.simpleCheckpointConsumer(State.AppState, onCheckpointReceived)); +} + +/** This method responds to incoming Ambrosia messages (RPCs and AppEvents). */ +export function messageDispatcher(message: Messages.DispatchedMessage): void +{ + // WARNING! Rules for Message Handling: + // + // Rule 1: Messages must be handled - to completion - in the order received. For application (RPC) messages only, if there are messages that are known to + // be commutative then this rule can be relaxed - but only for RPC messages. + // Reason: Using Ambrosia requires applications to have deterministic execution. Further, system messages (like TakeCheckpoint) from the IC rely on being + // handled in the order they are sent to the app. This means being extremely careful about using non-synchronous code (like awaitable operations + // or callbacks) inside message handlers: the safest path is to always only use synchronous code. + // + // Rule 2: Before a TakeCheckpoint message can be handled, all handlers for previously received messages must have completed (ie. finished executing). + // If Rule #1 is followed, the app is automatically in compliance with Rule #2. + // Reason: Unless your application has a way to capture (and rehydrate) runtime execution state (specifically the message handler stack) in the serialized + // application state (checkpoint), recovery of the checkpoint will not be able to complete the in-flight message handlers. But if there are no + // in-flight handlers at the time the checkpoint is taken (because they all completed), then the problem of how to complete them during recovery is moot. + // + // Rule 3: Avoid sending too many messages in a single message handler. + // Reason: Because a message handler always has to run to completion (see Rule #1), if it runs for too long it can monopolize the system leading to performance issues. + // Further, this becomes a very costly message to have to replay during recovery. So instead, when an message handler needs to send a large sequence (series) + // of independent messages, it should be designed to be restartable so that the sequence can pick up where it left off (rather than starting over) when resuming + // execution (ie. after loading a checkpoint that occurred during the long-running - but incomplete - sequence). Restartability is achieved by sending a + // 'sequence continuation' message at the end of each batch, which describes the remaining work to be done. Because the handler for the 'sequence continuation' + // message only ever sends the next batch plus the 'sequence continuation' message, it can run to completion quickly, which both keeps the system responsive + // (by allowing interleaving I/O) while also complying with Rule #1. + // In addition to this "contination messasage" technique for sending a series, if any single message handler has to send a large number of mesages it should be + // sent in batches using either explicit batches (IC.enqueueFork + IC.flushQueue) or implicit batches (IC.callFork / IC.postFork) inside a setImmediate() callback. + // This asynchrony is necessary to allow I/O with the IC to interleave, and is one of the few allowable exceptions to the "always only use asynchronous code" + // dictate in Rule #1. Interleaving I/O allows the instance to service self-calls, and allows checkpoints to be taken between batches. + + dispatcher(message); +} + +/** + * Synchronous Ambrosia message dispatcher. + * + * **WARNING:** Avoid using any asynchronous features (async/await, promises, callbacks, timers, events, etc.). See "Rules for Message Handling" above. + */ +function dispatcher(message: Messages.DispatchedMessage): void +{ + const loggingPrefix: string = "Dispatcher"; + + try + { + switch (message.type) + { + case Messages.DispatchedMessageType.RPC: + let rpc: Messages.IncomingRPC = message as Messages.IncomingRPC; + + switch (rpc.methodID) + { + case IC.POST_METHOD_ID: + try + { + let methodName: string = IC.getPostMethodName(rpc); + let methodVersion: number = IC.getPostMethodVersion(rpc); // Use this to do version-specific method behavior + + switch (methodName) + { + case "BasicTypes": + { + let isFalse: boolean = IC.getPostMethodArg(rpc, "isFalse"); + let height: number = IC.getPostMethodArg(rpc, "height"); + let mystring: string = IC.getPostMethodArg(rpc, "mystring?"); + let mystring2: string = IC.getPostMethodArg(rpc, "mystring2?"); + let my_array: number[] = IC.getPostMethodArg(rpc, "my_array?"); + let notSure: any = IC.getPostMethodArg(rpc, "notSure?"); + IC.postResult(rpc, PTM.Test.BasicTypes(isFalse, height, mystring, mystring2, my_array, notSure)); + } + break; + + case "getMedia": + { + let mediaName: string = IC.getPostMethodArg(rpc, "mediaName"); + IC.postResult(rpc, PTM.Test.getMedia(mediaName)); + } + break; + + case "warnUser": + IC.postResult(rpc, PTM.Test.warnUser()); + break; + + case "makeName": + { + let firstName: string = IC.getPostMethodArg(rpc, "firstName?"); + let lastName: string = IC.getPostMethodArg(rpc, "lastName?"); + IC.postResult(rpc, PTM.Test.makeName(firstName, lastName)); + } + break; + + case "return_number": + { + let strvalue: string = IC.getPostMethodArg(rpc, "strvalue"); + IC.postResult(rpc, PTM.Test.return_number(strvalue)); + } + break; + + case "returnstring": + { + let numvalue: number = IC.getPostMethodArg(rpc, "numvalue"); + IC.postResult(rpc, PTM.Test.returnstring(numvalue)); + } + break; + + case "fnWithMissingType": + { + let p1: any = IC.getPostMethodArg(rpc, "p1"); + let p2: number = IC.getPostMethodArg(rpc, "p2"); + IC.postResult(rpc, PTM.Test.fnWithMissingType(p1, p2)); + } + break; + + default: + { + let errorMsg: string = `Post method '${methodName}' is not implemented`; + Utils.log(`(${errorMsg})`, loggingPrefix) + IC.postError(rpc, new Error(errorMsg)); + } + break; + } + } + catch (error) + { + Utils.log(error); + IC.postError(rpc, error); + } + break; + + // Code-gen: Fork/Impulse method handlers will go here + + default: + Utils.log(`Error: Method dispatch failed (reason: No method is associated with methodID ${rpc.methodID})`); + break; + } + break; + + case Messages.DispatchedMessageType.AppEvent: + let appEvent: Messages.AppEvent = message as Messages.AppEvent; + + switch (appEvent.eventType) + { + case Messages.AppEventType.ICStarting: + Meta.publishType("PrintMedia", "number"); + Meta.publishType("PrintMediaReverse", "number"); + Meta.publishType("MyEnumAA", "number"); + Meta.publishType("MyEnumBBB", "number"); + Meta.publishType("Name", "{ first: string, last: string }"); + Meta.publishType("Names", "Name[]"); + Meta.publishType("Nested", "{ abc: { a: Uint8Array, b: { c: Names } } }"); + Meta.publishType("typeWithMissingType", "{ p1: any, p2: number }"); + Meta.publishPostMethod("BasicTypes", 1, ["isFalse: boolean", "height: number", "mystring?: string", "mystring2?: string", "my_array?: number[]", "notSure?: any"], "void"); + Meta.publishPostMethod("getMedia", 1, ["mediaName: string"], "PrintMedia"); + Meta.publishPostMethod("warnUser", 1, [], "void"); + Meta.publishPostMethod("makeName", 1, ["firstName?: string", "lastName?: string"], "Names"); + Meta.publishPostMethod("return_number", 1, ["strvalue: string"], "number"); + Meta.publishPostMethod("returnstring", 1, ["numvalue: number"], "string"); + Meta.publishPostMethod("fnWithMissingType", 1, ["p1: any", "p2: number"], "void"); + // TODO: Add an exported [non-async] function 'onICStarting(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStarted: + // TODO: Add an exported [non-async] function 'onICStarted(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICStopped: + // TODO: Add an exported [non-async] function 'onICStopped(exitCode: number): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.ICReadyForSelfCallRpc: + // TODO: Add an exported [non-async] function 'onICReadyForSelfCallRpc(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.RecoveryComplete: + // TODO: Add an exported [non-async] function 'onRecoveryComplete(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeState: + // TODO: Add an exported [non-async] function 'onUpgradeState(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_Types.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling _appState.upgrade(), for example: + // _appState = _appState.upgrade(AppStateVNext); + break; + + case Messages.AppEventType.UpgradeCode: + // TODO: Add an exported [non-async] function 'onUpgradeCode(upgradeMode: Messages.AppUpgradeMode): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + // Note: You will need to import Ambrosia to ../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_Types.ts in order to reference the 'Messages' namespace. + // Upgrading is performed by calling IC.upgrade(), passing the new handlers from the "upgraded" PublisherFramework.g.ts, + // which should be part of your app (alongside your original PublisherFramework.g.ts). + break; + + case Messages.AppEventType.IncomingCheckpointStreamSize: + // TODO: Add an exported [non-async] function 'onIncomingCheckpointStreamSize(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.FirstStart: + // TODO: Add an exported [non-async] function 'onFirstStart(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.BecomingPrimary: + // TODO: Add an exported [non-async] function 'onBecomingPrimary(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointLoaded: + // TODO: Add an exported [non-async] function 'onCheckpointLoaded(checkpointSizeInBytes: number): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.CheckpointSaved: + // TODO: Add an exported [non-async] function 'onCheckpointSaved(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + + case Messages.AppEventType.UpgradeComplete: + // TODO: Add an exported [non-async] function 'onUpgradeComplete(): void' to ./JS_CodeGen_TestFiles/TS_Types.ts, then (after the next code-gen) a call to it will be generated here + break; + } + break; + } + } + catch (error) + { + let messageName: string = (message.type === Messages.DispatchedMessageType.AppEvent) ? `AppEvent:${Messages.AppEventType[(message as Messages.AppEvent).eventType]}` : Messages.DispatchedMessage[message.type]; + Utils.log(`Error: Failed to process ${messageName} message`); + Utils.log(error); + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob.cmp index a7991e38..e88cc94b 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob.cmp @@ -9,21 +9,5 @@ Service Received 3072 MB so far Service Received 4096 MB so far *X* 4096 0.0451691721682756 Service Received 5120 MB so far -*X* 2048 0.044631104418191 -Service Received 6144 MB so far -*X* 1024 0.0419209925952016 -Service Received 7168 MB so far -*X* 512 0.0446787974456828 -Service Received 8192 MB so far -*X* 256 0.0412141830203171 -Service Received 9216 MB so far -*X* 128 0.0411807597823824 -Service Received 10240 MB so far -*X* 64 0.0379665717699799 -Service Received 11264 MB so far -*X* 32 0.0352991449512828 -Service Received 12288 MB so far -*X* 16 0.0189336790163664 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob_Verify.cmp index a7991e38..e88cc94b 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_ClientJob_Verify.cmp @@ -9,21 +9,5 @@ Service Received 3072 MB so far Service Received 4096 MB so far *X* 4096 0.0451691721682756 Service Received 5120 MB so far -*X* 2048 0.044631104418191 -Service Received 6144 MB so far -*X* 1024 0.0419209925952016 -Service Received 7168 MB so far -*X* 512 0.0446787974456828 -Service Received 8192 MB so far -*X* 256 0.0412141830203171 -Service Received 9216 MB so far -*X* 128 0.0411807597823824 -Service Received 10240 MB so far -*X* 64 0.0379665717699799 -Service Received 11264 MB so far -*X* 32 0.0352991449512828 -Service Received 12288 MB so far -*X* 16 0.0189336790163664 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server1.cmp index ec893ff8..317ef47d 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server1.cmp @@ -6,13 +6,5 @@ Received 2048 MB so far Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far -Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server2_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server2_Restarted.cmp index 3fd5a103..c2e92c3f 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server2_Restarted.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server2_Restarted.cmp @@ -9,23 +9,6 @@ Received 3072 MB so far Received 4096 MB so far *X* At checkpoint, received 420427 messages Received 5120 MB so far -*X* At checkpoint, received 820546 messages -Received 6144 MB so far -*X* At checkpoint, received 1581824 messages -Received 7168 MB so far -*X* At checkpoint, received 3014001 messages -Received 8192 MB so far -*X* At checkpoint, received 5697009 messages -Received 9216 MB so far -*X* At checkpoint, received 10556921 messages -Received 10240 MB so far -*X* At checkpoint, received 19006666 messages -*X* At checkpoint, received 32911747 messages -Received 11264 MB so far -*X* At checkpoint, received 58685297 messages -Received 12288 MB so far -*X* At checkpoint, received 98001605 messages -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE *X* At checkpoint, received 134201344 messages diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server3.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server3.cmp index e39e8422..9b5faf41 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server3.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server3.cmp @@ -4,13 +4,5 @@ Received 2048 MB so far Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far -Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server_Verify.cmp index e39e8422..9b5faf41 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillcheckpoint_Server_Verify.cmp @@ -4,13 +4,5 @@ Received 2048 MB so far Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far -Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob.cmp index b1217e69..8b6759ac 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob.cmp @@ -11,19 +11,5 @@ Service Received 4096 MB so far Service Received 5120 MB so far *X* 2048 0.0438669371911439 Service Received 6144 MB so far -*X* 1024 0.0416419896236157 -Service Received 7168 MB so far -*X* 512 0.0422990703742958 -Service Received 8192 MB so far -*X* 256 0.0420296870558185 -Service Received 9216 MB so far -*X* 128 0.0396254785217365 -Service Received 10240 MB so far -*X* 64 0.0368080119970268 -Service Received 11264 MB so far -*X* 32 0.0357323424154478 -Service Received 12288 MB so far -*X* 16 0.020614544643097 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob_Verify.cmp index b1217e69..8b6759ac 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_ClientJob_Verify.cmp @@ -11,19 +11,5 @@ Service Received 4096 MB so far Service Received 5120 MB so far *X* 2048 0.0438669371911439 Service Received 6144 MB so far -*X* 1024 0.0416419896236157 -Service Received 7168 MB so far -*X* 512 0.0422990703742958 -Service Received 8192 MB so far -*X* 256 0.0420296870558185 -Service Received 9216 MB so far -*X* 128 0.0396254785217365 -Service Received 10240 MB so far -*X* 64 0.0368080119970268 -Service Received 11264 MB so far -*X* 32 0.0357323424154478 -Service Received 12288 MB so far -*X* 16 0.020614544643097 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server1.cmp index ec893ff8..7ca6907b 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server1.cmp @@ -7,12 +7,5 @@ Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server2.cmp index b67c59e6..dc77f9b4 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server2.cmp @@ -11,21 +11,6 @@ Received 4096 MB so far Received 5120 MB so far *X* At checkpoint, received 822076 messages Received 6144 MB so far -*X* At checkpoint, received 1584903 messages -Received 7168 MB so far -*X* At checkpoint, received 3032207 messages -Received 8192 MB so far -*X* At checkpoint, received 5735455 messages -Received 9216 MB so far -*X* At checkpoint, received 10626311 messages -Received 10240 MB so far -*X* At checkpoint, received 19132276 messages -*X* At checkpoint, received 33094205 messages -Received 11264 MB so far -*X* At checkpoint, received 59042796 messages -Received 12288 MB so far -*X* At checkpoint, received 98813567 messages -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE *X* At checkpoint, received 134201344 messages diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server3_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server3_Restarted.cmp index f1d5152c..8adbee0e 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server3_Restarted.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server3_Restarted.cmp @@ -4,12 +4,5 @@ Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server_Verify.cmp index e39e8422..fb9f4231 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/activeactivekillsecondary_Server_Verify.cmp @@ -5,12 +5,5 @@ Received 3072 MB so far Received 4096 MB so far Received 5120 MB so far Received 6144 MB so far -Received 7168 MB so far -Received 8192 MB so far -Received 9216 MB so far -Received 10240 MB so far -Received 11264 MB so far -Received 12288 MB so far -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 6442450944 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB1.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB2.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/basictest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob.cmp new file mode 100644 index 00000000..1cef491d --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob.cmp @@ -0,0 +1,22 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0554787710870787 +Service Received 1024 MB so far +Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host. + at System.Net.Sockets.NetworkStream.EndRead(IAsyncResult asyncResult) + at System.Threading.Tasks.TaskFactory`1.FromAsyncTrimPromise`1.Complete(TInstance thisRef, Func`3 endMethod, IAsyncResult asyncResult, Boolean requiresSynchronization) +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at Ambrosia.StreamCommunicator.d__26.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task) + at Ambrosia.StreamCommunicator.d__5.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at Ambrosia.Immortal.d__34.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task) + at Ambrosia.Immortal.<>c__DisplayClass33_0.<b__0>d.MoveNext() diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Restarted.cmp new file mode 100644 index 00000000..fb11570e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Restarted.cmp @@ -0,0 +1,30 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* 65536 0.0492443801956299 +Service Received 1024 MB so far +*X* 32768 0.0297413895762521 +Service Received 2048 MB so far +*X* 16384 0.0705023508233356 +Service Received 3072 MB so far +*X* 8192 0.0694232390196647 +Service Received 4096 MB so far +*X* 4096 0.0668990463019137 +Service Received 5120 MB so far +*X* 2048 0.0675542447750237 +Service Received 6144 MB so far +*X* 1024 0.0727858518395365 +Service Received 7168 MB so far +*X* 512 0.0667275088091989 +Service Received 8192 MB so far +*X* 256 0.0690039381582566 +Service Received 9216 MB so far +*X* 128 0.0628656256932114 +Service Received 10240 MB so far +*X* 64 0.045170846462861 +Service Received 11264 MB so far +*X* 32 0.0257465263237248 +Service Received 12288 MB so far +*X* 16 0.0140141526797762 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Verify.cmp new file mode 100644 index 00000000..c36bc970 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0028881205576843 +Service Received 1024 MB so far +*X* 32768 0.00291145251251637 +Service Received 2048 MB so far +*X* 16384 0.00308327571400177 +Service Received 3072 MB so far +*X* 8192 0.00308822802592757 +Service Received 4096 MB so far +*X* 4096 0.00309216507309636 +Service Received 5120 MB so far +*X* 2048 0.00308936703975461 +Service Received 6144 MB so far +*X* 1024 0.00309459465591775 +Service Received 7168 MB so far +*X* 512 0.00309970663024979 +Service Received 8192 MB so far +*X* 256 0.00309348320545075 +Service Received 9216 MB so far +*X* 128 0.00306559699583659 +Service Received 10240 MB so far +*X* 64 0.00296265299221154 +Service Received 11264 MB so far +*X* 32 0.0027722750766569 +Service Received 12288 MB so far +*X* 16 0.00250059008362161 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server.cmp new file mode 100644 index 00000000..529f500e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server.cmp @@ -0,0 +1,29 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Server in Entry Point +*X* At checkpoint, received 15356 messages +*X* At checkpoint, received 15356 messages +Received 1024 MB so far +*X* At checkpoint, received 44775 messages +*X* At checkpoint, received 44775 messages +Received 2048 MB so far +Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host. + at System.Net.Sockets.NetworkStream.EndRead(IAsyncResult asyncResult) + at System.Threading.Tasks.TaskFactory`1.FromAsyncTrimPromise`1.Complete(TInstance thisRef, Func`3 endMethod, IAsyncResult asyncResult, Boolean requiresSynchronization) +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at Ambrosia.StreamCommunicator.d__26.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task) + at Ambrosia.StreamCommunicator.d__5.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at Ambrosia.Immortal.d__34.MoveNext() +--- End of stack trace from previous location where exception was thrown --- + at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw() + at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task) + at Ambrosia.Immortal.<>c__DisplayClass33_0.<b__0>d.MoveNext() diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Restarted.cmp new file mode 100644 index 00000000..f33c1460 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Restarted.cmp @@ -0,0 +1,52 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Received 1024 MB so far +*X* At checkpoint, received 48083 messages +*X* At checkpoint, received 48083 messages +*X* becoming primary +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:8/3/2020 5:15:39 PM +*X* At checkpoint, received 108015 messages +*X* At checkpoint, received 108015 messages +Received 3072 MB so far +*X* At checkpoint, received 223509 messages +*X* At checkpoint, received 223509 messages +Received 4096 MB so far +*X* At checkpoint, received 445468 messages +*X* At checkpoint, received 445468 messages +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:8/3/2020 5:16:26 PM +*X* At checkpoint, received 871593 messages +*X* At checkpoint, received 871593 messages +Received 6144 MB so far +*X* At checkpoint, received 1687632 messages +*X* At checkpoint, received 1687632 messages +Received 7168 MB so far +*X* At checkpoint, received 3226291 messages +*X* At checkpoint, received 3226291 messages +Received 8192 MB so far +*X* At checkpoint, received 6097292 messages +*X* At checkpoint, received 6097292 messages +*X* I'm healthy after 9000 checks at time:8/3/2020 5:17:13 PM +Received 9216 MB so far +*X* At checkpoint, received 11328316 messages +*X* At checkpoint, received 11328316 messages +Received 10240 MB so far +*X* At checkpoint, received 20391111 messages +*X* At checkpoint, received 20391111 messages +Received 11264 MB so far +*X* At checkpoint, received 34930417 messages +*X* At checkpoint, received 34930417 messages +*X* I'm healthy after 12000 checks at time:8/3/2020 5:18:00 PM +*X* At checkpoint, received 61209028 messages +*X* At checkpoint, received 61209028 messages +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:8/3/2020 5:18:46 PM +*X* At checkpoint, received 102256717 messages +*X* At checkpoint, received 102256717 messages +*X* I'm healthy after 18000 checks at time:8/3/2020 5:19:33 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE +*X* I'm healthy after 21000 checks at time:8/3/2020 5:20:20 PM +*X* I'm healthy after 24000 checks at time:8/3/2020 5:21:07 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Verify.cmp new file mode 100644 index 00000000..1fe0d97c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/clientsideupgrade_Server_Verify.cmp @@ -0,0 +1,24 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:8/3/2020 6:20:09 PM +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:8/3/2020 6:20:56 PM +Received 9216 MB so far +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:8/3/2020 6:21:43 PM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:8/3/2020 6:22:30 PM +*X* I'm healthy after 18000 checks at time:8/3/2020 6:23:17 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE +*X* I'm healthy after 3000 checks at time:8/3/2020 1:26:24 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB1.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB2.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekilljob_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/doublekillserver_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob.cmp new file mode 100644 index 00000000..c31ef967 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob.cmp @@ -0,0 +1,9 @@ +Bytes per RPC Throughput (GB/sec) +*X* 32768 0.0458349165095453 +Service Received 1024 MB so far +*X* 16384 0.0683859566347005 +Service Received 2048 MB so far +*X* 8192 0.067083143868174 +Service Received 3072 MB so far +Bytes received: 3221225472 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob_Verify.cmp new file mode 100644 index 00000000..0fc7ea31 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_ClientJob_Verify.cmp @@ -0,0 +1,11 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 32768 0.0119011168476954 +Service Received 1024 MB so far +*X* 16384 0.0129785053576334 +Service Received 2048 MB so far +*X* 8192 0.0128619255825449 +Service Received 3072 MB so far +Bytes received: 3221225472 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server.cmp new file mode 100644 index 00000000..9d5fcda3 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server.cmp @@ -0,0 +1,17 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 30752 messages +*X* At checkpoint, received 30752 messages +Received 1024 MB so far +*X* At checkpoint, received 90107 messages +*X* At checkpoint, received 90107 messages +Received 2048 MB so far +*X* At checkpoint, received 204249 messages +*X* At checkpoint, received 204249 messages +Received 3072 MB so far +Bytes received: 3221225472 +DONE +*X* I'm healthy after 3000 checks at time:9/3/2020 2:50:20 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server_Verify.cmp new file mode 100644 index 00000000..308d387d --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocbasictest_Server_Verify.cmp @@ -0,0 +1,10 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +Received 3072 MB so far +Bytes received: 3221225472 +DONE +*X* I'm healthy after 3000 checks at time:9/3/2020 2:50:20 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob.cmp new file mode 100644 index 00000000..e6eccbca --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob.cmp @@ -0,0 +1,5 @@ +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0683427535617988 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob_Verify.cmp new file mode 100644 index 00000000..32daae6e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_ClientJob_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0233522015774931 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server.cmp new file mode 100644 index 00000000..fbd8f1ff --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server.cmp @@ -0,0 +1,13 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 969549 messages +*X* At checkpoint, received 969549 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server_Verify.cmp new file mode 100644 index 00000000..8a34a3fc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientpipeservertcp_Server_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob.cmp new file mode 100644 index 00000000..d8e8cf5b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob.cmp @@ -0,0 +1,5 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0336476771110761 +Service Received 1024 MB so far +FATAL ERROR 0: Migrating or upgrading. Must commit suicide since I'm the primary +KILLING WORKER: diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Restarted.cmp new file mode 100644 index 00000000..f46671ad --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Restarted.cmp @@ -0,0 +1,16 @@ +*X* 65536 0.0281739434928964 +*X* 32768 0.0321716045741883 +*X* 16384 0.0695161638845232 +*X* 8192 0.0712751262638862 +*X* 4096 0.0683567060177539 +*X* 2048 0.0688366758725166 +*X* 1024 0.0668800300136173 +*X* 512 0.0696207003673975 +*X* 256 0.0661062767076795 +*X* 128 0.0615530399498372 +*X* 64 0.0425935232058608 +*X* 32 0.021912892190891 +*X* 16 0.0152843104979983 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Verify.cmp new file mode 100644 index 00000000..2abb2e28 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00297350567225481 +Service Received 1024 MB so far +*X* 32768 0.00320818919499377 +Service Received 2048 MB so far +*X* 16384 0.0033941894577415 +Service Received 3072 MB so far +*X* 8192 0.00339420578470392 +Service Received 4096 MB so far +*X* 4096 0.0033874571595322 +Service Received 5120 MB so far +*X* 2048 0.00338400770950051 +Service Received 6144 MB so far +*X* 1024 0.00338311283906682 +Service Received 7168 MB so far +*X* 512 0.00339199732211309 +Service Received 8192 MB so far +*X* 256 0.00338845418270876 +Service Received 9216 MB so far +*X* 128 0.00338351109612652 +Service Received 10240 MB so far +*X* 64 0.0033765012923346 +Service Received 11264 MB so far +*X* 32 0.00331155540032647 +Service Received 12288 MB so far +*X* 16 0.00331913020870539 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server.cmp new file mode 100644 index 00000000..5840be9d --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server.cmp @@ -0,0 +1,10 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15371 messages +*X* At checkpoint, received 15371 messages +Received 1024 MB so far +FATAL ERROR 0: Migrating or upgrading. Must commit suicide since I'm the primary +KILLING WORKER: diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Restarted.cmp new file mode 100644 index 00000000..e59c1901 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Restarted.cmp @@ -0,0 +1,52 @@ +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* At checkpoint, received 27859 messages +*X* At checkpoint, received 27859 messages +*X* becoming primary +*X* I'm healthy after 3000 checks at time:9/3/2020 5:01:09 PM +Received 2048 MB so far +*X* At checkpoint, received 67603 messages +*X* At checkpoint, received 67603 messages +Received 3072 MB so far +*X* At checkpoint, received 142574 messages +*X* At checkpoint, received 142574 messages +Received 4096 MB so far +*X* At checkpoint, received 284906 messages +*X* At checkpoint, received 284906 messages +*X* I'm healthy after 6000 checks at time:9/3/2020 5:01:56 PM +Received 5120 MB so far +*X* At checkpoint, received 550066 messages +*X* At checkpoint, received 550066 messages +Received 6144 MB so far +*X* At checkpoint, received 1047081 messages +*X* At checkpoint, received 1047081 messages +*X* At checkpoint, received 2018377 messages +*X* At checkpoint, received 2018377 messages +Received 7168 MB so far +*X* At checkpoint, received 3886160 messages +*X* At checkpoint, received 3886160 messages +*X* I'm healthy after 9000 checks at time:9/3/2020 5:02:43 PM +Received 8192 MB so far +*X* At checkpoint, received 7395089 messages +*X* At checkpoint, received 7395089 messages +Received 9216 MB so far +*X* At checkpoint, received 13838732 messages +*X* At checkpoint, received 13838732 messages +Received 10240 MB so far +*X* At checkpoint, received 25136146 messages +*X* At checkpoint, received 25136146 messages +*X* I'm healthy after 12000 checks at time:9/3/2020 5:03:30 PM +Received 11264 MB so far +*X* At checkpoint, received 43981214 messages +*X* At checkpoint, received 43981214 messages +*X* I'm healthy after 15000 checks at time:9/3/2020 5:04:17 PM +Received 12288 MB so far +*X* At checkpoint, received 72610475 messages +*X* At checkpoint, received 72610475 messages +*X* I'm healthy after 18000 checks at time:9/3/2020 5:05:04 PM +*X* At checkpoint, received 118017247 messages +*X* At checkpoint, received 118017247 messages +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Verify.cmp new file mode 100644 index 00000000..9fb721e2 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclientsideupgrade_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:9/3/2020 5:01:09 PM +Received 2048 MB so far +Received 3072 MB so far +Received 4096 MB so far +*X* I'm healthy after 6000 checks at time:9/3/2020 5:01:56 PM +Received 5120 MB so far +Received 6144 MB so far +Received 7168 MB so far +*X* I'm healthy after 9000 checks at time:9/3/2020 5:02:43 PM +Received 8192 MB so far +Received 9216 MB so far +Received 10240 MB so far +*X* I'm healthy after 12000 checks at time:9/3/2020 5:03:30 PM +Received 11264 MB so far +*X* I'm healthy after 15000 checks at time:9/3/2020 5:04:17 PM +Received 12288 MB so far +*X* I'm healthy after 18000 checks at time:9/3/2020 5:05:04 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob.cmp new file mode 100644 index 00000000..99beb8bc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob.cmp @@ -0,0 +1,8 @@ +Bytes per RPC Throughput (GB/sec) +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* 1024 0.0511635269051311 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob_Verify.cmp new file mode 100644 index 00000000..a21c62b5 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_ClientJob_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0190695561201352 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server.cmp new file mode 100644 index 00000000..3f54dfe8 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server.cmp @@ -0,0 +1,10 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 973571 messages +*X* At checkpoint, received 973571 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server_Verify.cmp new file mode 100644 index 00000000..8a34a3fc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocclienttcpserverpipe_Server_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob.cmp new file mode 100644 index 00000000..eb1bc194 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob.cmp @@ -0,0 +1 @@ +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Restarted.cmp new file mode 100644 index 00000000..351557b0 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Restarted.cmp @@ -0,0 +1,29 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0168222042626127 +Service Received 1024 MB so far +*X* 32768 0.0381830688994807 +Service Received 2048 MB so far +*X* 16384 0.0357962368518722 +Service Received 3072 MB so far +*X* 8192 0.0699934350357543 +Service Received 4096 MB so far +*X* 4096 0.0663715336952148 +Service Received 5120 MB so far +*X* 2048 0.0655843568821575 +Service Received 6144 MB so far +*X* 1024 0.0706760561974724 +Service Received 7168 MB so far +*X* 512 0.0687282356148521 +Service Received 8192 MB so far +*X* 256 0.0683991295225821 +Service Received 9216 MB so far +*X* 128 0.0634635758009806 +Service Received 10240 MB so far +*X* 64 0.0422116409363851 +Service Received 11264 MB so far +*X* 32 0.0198444085652246 +Service Received 12288 MB so far +*X* 16 0.0137877176234564 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Verify.cmp new file mode 100644 index 00000000..40964c3b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00287215656898183 +Service Received 1024 MB so far +*X* 32768 0.00304556880272303 +Service Received 2048 MB so far +*X* 16384 0.00304792754200868 +Service Received 3072 MB so far +*X* 8192 0.00305015859967624 +Service Received 4096 MB so far +*X* 4096 0.00304178979271551 +Service Received 5120 MB so far +*X* 2048 0.00304427440851334 +Service Received 6144 MB so far +*X* 1024 0.00305629056431662 +Service Received 7168 MB so far +*X* 512 0.00305121484189108 +Service Received 8192 MB so far +*X* 256 0.00305696850898801 +Service Received 9216 MB so far +*X* 128 0.00305577750185336 +Service Received 10240 MB so far +*X* 64 0.00303814349448794 +Service Received 11264 MB so far +*X* 32 0.00297623978876348 +Service Received 12288 MB so far +*X* 16 0.00303406079182434 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server.cmp new file mode 100644 index 00000000..f25ed92e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server.cmp @@ -0,0 +1,5 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Restarted.cmp new file mode 100644 index 00000000..c728233b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Restarted.cmp @@ -0,0 +1,54 @@ +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 4455 messages +*X* At checkpoint, received 4455 messages +*X* becoming primary +Received 1024 MB so far +*X* At checkpoint, received 23052 messages +*X* At checkpoint, received 23052 messages +Received 2048 MB so far +*X* At checkpoint, received 58223 messages +*X* At checkpoint, received 58223 messages +*X* I'm healthy after 3000 checks at time:9/4/2020 10:41:41 AM +Received 3072 MB so far +*X* At checkpoint, received 124278 messages +*X* At checkpoint, received 124278 messages +Received 4096 MB so far +*X* At checkpoint, received 248437 messages +*X* At checkpoint, received 248437 messages +*X* At checkpoint, received 492727 messages +*X* At checkpoint, received 492727 messages +Received 5120 MB so far +*X* At checkpoint, received 964863 messages +*X* At checkpoint, received 964863 messages +*X* I'm healthy after 6000 checks at time:9/4/2020 10:42:28 AM +Received 6144 MB so far +*X* At checkpoint, received 1870426 messages +*X* At checkpoint, received 1870426 messages +Received 7168 MB so far +*X* At checkpoint, received 3589346 messages +*X* At checkpoint, received 3589346 messages +Received 8192 MB so far +*X* At checkpoint, received 6808898 messages +*X* At checkpoint, received 6808898 messages +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 10:43:15 AM +*X* At checkpoint, received 12689928 messages +*X* At checkpoint, received 12689928 messages +Received 10240 MB so far +*X* At checkpoint, received 22991913 messages +*X* At checkpoint, received 22991913 messages +Received 11264 MB so far +*X* At checkpoint, received 39919275 messages +*X* At checkpoint, received 39919275 messages +*X* I'm healthy after 12000 checks at time:9/4/2020 10:44:02 AM +*X* At checkpoint, received 66395697 messages +*X* At checkpoint, received 66395697 messages +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/4/2020 10:44:49 AM +*X* At checkpoint, received 111477694 messages +*X* At checkpoint, received 111477694 messages +*X* I'm healthy after 18000 checks at time:9/4/2020 10:45:42 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Verify.cmp new file mode 100644 index 00000000..8c5fff71 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekilljob_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 10:41:41 AM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:9/4/2020 10:42:28 AM +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 10:43:15 AM +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/4/2020 10:44:02 AM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/4/2020 10:44:49 AM +*X* I'm healthy after 18000 checks at time:9/4/2020 10:45:42 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob.cmp new file mode 100644 index 00000000..eb1bc194 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob.cmp @@ -0,0 +1 @@ +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Restarted.cmp new file mode 100644 index 00000000..dcdd1f79 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Restarted.cmp @@ -0,0 +1,29 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0159934653515423 +Service Received 1024 MB so far +*X* 32768 0.0691507884182194 +Service Received 2048 MB so far +*X* 16384 0.0691934447431287 +Service Received 3072 MB so far +*X* 8192 0.0705781281740308 +Service Received 4096 MB so far +*X* 4096 0.0702365804022859 +Service Received 5120 MB so far +*X* 2048 0.0632966708888078 +Service Received 6144 MB so far +*X* 1024 0.0577749430822926 +Service Received 7168 MB so far +*X* 512 0.06793564241917 +Service Received 8192 MB so far +*X* 256 0.0650272249807963 +Service Received 9216 MB so far +*X* 128 0.0648693236665932 +Service Received 10240 MB so far +*X* 64 0.0452493648833082 +Service Received 11264 MB so far +*X* 32 0.0267392267314574 +Service Received 12288 MB so far +*X* 16 0.0168747188724569 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Verify.cmp new file mode 100644 index 00000000..fd0eff6f --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0030889331064103 +Service Received 1024 MB so far +*X* 32768 0.00332978133792789 +Service Received 2048 MB so far +*X* 16384 0.00332431438087773 +Service Received 3072 MB so far +*X* 8192 0.00331928210517504 +Service Received 4096 MB so far +*X* 4096 0.00331534947098939 +Service Received 5120 MB so far +*X* 2048 0.00330864617758339 +Service Received 6144 MB so far +*X* 1024 0.00332784800486059 +Service Received 7168 MB so far +*X* 512 0.00334629445869543 +Service Received 8192 MB so far +*X* 256 0.00333592172392578 +Service Received 9216 MB so far +*X* 128 0.00332013674486516 +Service Received 10240 MB so far +*X* 64 0.00329841369383626 +Service Received 11264 MB so far +*X* 32 0.00325294509043326 +Service Received 12288 MB so far +*X* 16 0.0031830245400166 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server.cmp new file mode 100644 index 00000000..f25ed92e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server.cmp @@ -0,0 +1,5 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Restarted.cmp new file mode 100644 index 00000000..780ceae6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Restarted.cmp @@ -0,0 +1,54 @@ +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 6279 messages +*X* At checkpoint, received 6279 messages +*X* becoming primary +Received 1024 MB so far +*X* At checkpoint, received 26891 messages +*X* At checkpoint, received 26891 messages +Received 2048 MB so far +*X* At checkpoint, received 65919 messages +*X* At checkpoint, received 65919 messages +*X* I'm healthy after 3000 checks at time:9/4/2020 11:12:39 AM +Received 3072 MB so far +*X* At checkpoint, received 139421 messages +*X* At checkpoint, received 139421 messages +Received 4096 MB so far +*X* At checkpoint, received 276974 messages +*X* At checkpoint, received 276974 messages +Received 5120 MB so far +*X* At checkpoint, received 534704 messages +*X* At checkpoint, received 534704 messages +*X* I'm healthy after 6000 checks at time:9/4/2020 11:13:26 AM +*X* At checkpoint, received 1023204 messages +*X* At checkpoint, received 1023204 messages +Received 6144 MB so far +*X* At checkpoint, received 1987058 messages +*X* At checkpoint, received 1987058 messages +Received 7168 MB so far +*X* At checkpoint, received 3831086 messages +*X* At checkpoint, received 3831086 messages +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 11:14:13 AM +*X* At checkpoint, received 7283197 messages +*X* At checkpoint, received 7283197 messages +Received 9216 MB so far +*X* At checkpoint, received 13613792 messages +*X* At checkpoint, received 13613792 messages +Received 10240 MB so far +*X* At checkpoint, received 24713010 messages +*X* At checkpoint, received 24713010 messages +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/4/2020 11:15:00 AM +*X* At checkpoint, received 43137234 messages +*X* At checkpoint, received 43137234 messages +Received 12288 MB so far +*X* At checkpoint, received 71168845 messages +*X* At checkpoint, received 71168845 messages +*X* I'm healthy after 15000 checks at time:9/4/2020 11:15:47 AM +*X* At checkpoint, received 116608469 messages +*X* At checkpoint, received 116608469 messages +*X* I'm healthy after 18000 checks at time:9/4/2020 11:16:34 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Verify.cmp new file mode 100644 index 00000000..5dc13219 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocdoublekillserver_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 11:12:39 AM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:9/4/2020 11:13:26 AM +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 11:14:13 AM +Received 9216 MB so far +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/4/2020 11:15:00 AM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/4/2020 11:15:47 AM +*X* I'm healthy after 18000 checks at time:9/4/2020 11:16:34 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob.cmp new file mode 100644 index 00000000..65692568 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob.cmp @@ -0,0 +1,23 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0113017096345932 +Service Received 1024 MB so far +*X* 32768 0.0196826952308186 +Service Received 2048 MB so far +*X* 16384 0.0193044599228891 +Service Received 3072 MB so far +*X* 8192 0.0197764414106786 +Service Received 4096 MB so far +*X* 4096 0.0196165097112453 +Service Received 5120 MB so far +*X* 2048 0.0194600939763355 +Service Received 6144 MB so far +*X* 1024 0.0192040590805426 +Service Received 7168 MB so far +*X* 512 0.0195024220682044 +Service Received 8192 MB so far +*X* 256 0.0194184392597997 +Service Received 9216 MB so far +*X* 128 0.018656386694121 +Service Received 10240 MB so far +Bytes received: 10737418240 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob_Verify.cmp new file mode 100644 index 00000000..d7487650 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_ClientJob_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00267768217558065 +Service Received 1024 MB so far +*X* 32768 0.00281806130025493 +Service Received 2048 MB so far +*X* 16384 0.00297281016364982 +Service Received 3072 MB so far +*X* 8192 0.00314630068175585 +Service Received 4096 MB so far +*X* 4096 0.00333100723990227 +Service Received 5120 MB so far +*X* 2048 0.00354448958974192 +Service Received 6144 MB so far +*X* 1024 0.00376534710767592 +Service Received 7168 MB so far +*X* 512 0.00392421859532876 +Service Received 8192 MB so far +*X* 256 0.00409353717042727 +Service Received 9216 MB so far +*X* 128 0.00426074749172661 +Service Received 10240 MB so far +Bytes received: 10737418240 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server.cmp new file mode 100644 index 00000000..ae4a0d64 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server.cmp @@ -0,0 +1,40 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15354 messages +*X* At checkpoint, received 15354 messages +Received 1024 MB so far +*X* At checkpoint, received 44986 messages +*X* At checkpoint, received 44986 messages +Received 2048 MB so far +*X* At checkpoint, received 101963 messages +*X* At checkpoint, received 101963 messages +*X* I'm healthy after 3000 checks at time:9/4/2020 9:48:35 AM +Received 3072 MB so far +*X* At checkpoint, received 211973 messages +*X* At checkpoint, received 211973 messages +Received 4096 MB so far +*X* At checkpoint, received 422940 messages +*X* At checkpoint, received 422940 messages +Received 5120 MB so far +*X* At checkpoint, received 826556 messages +*X* At checkpoint, received 826556 messages +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/4/2020 9:50:03 AM +*X* At checkpoint, received 1592341 messages +*X* At checkpoint, received 1592341 messages +Received 7168 MB so far +*X* At checkpoint, received 3036125 messages +*X* At checkpoint, received 3036125 messages +Received 8192 MB so far +*X* At checkpoint, received 5720424 messages +*X* At checkpoint, received 5720424 messages +Received 9216 MB so far +*X* At checkpoint, received 10602023 messages +*X* At checkpoint, received 10602023 messages +*X* I'm healthy after 9000 checks at time:9/4/2020 9:51:41 AM +Received 10240 MB so far +Bytes received: 10737418240 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server_Verify.cmp new file mode 100644 index 00000000..49367ea4 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantcheckpointtest_Server_Verify.cmp @@ -0,0 +1,19 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 9:48:35 AM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/4/2020 9:50:03 AM +Received 7168 MB so far +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 9:51:41 AM +Received 10240 MB so far +Bytes received: 10737418240 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_ClientJob - Copy.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob.cmp similarity index 59% rename from AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_ClientJob - Copy.cmp rename to AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob.cmp index ef60092e..ba4e34a4 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/giantmessagetest_ClientJob - Copy.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob.cmp @@ -1,13 +1,13 @@ Bytes per RPC Throughput (GB/sec) -*X* 67108864 0.0751094957495461 +*X* 67108864 0.357014242761902 Service Received 1024 MB so far -*X* 33554432 0.0547176475001053 +*X* 33554432 0.0309030953714642 Service Received 2048 MB so far -*X* 16777216 0.062500748837097 +*X* 16777216 0.0375077986683839 Service Received 3072 MB so far -*X* 8388608 0.0753160684000047 +*X* 8388608 0.0760510515803253 Service Received 4096 MB so far -*X* 4194304 0.0933986349191755 +*X* 4194304 0.0593152363219554 Service Received 5120 MB so far Bytes received: 5368709120 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob_Verify.cmp new file mode 100644 index 00000000..a73f5e0c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_ClientJob_Verify.cmp @@ -0,0 +1,15 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 67108864 0.00847485759336604 +Service Received 1024 MB so far +*X* 33554432 0.00765610706215592 +Service Received 2048 MB so far +*X* 16777216 0.00834541826397625 +Service Received 3072 MB so far +*X* 8388608 0.00949414812325196 +Service Received 4096 MB so far +*X* 4194304 0.00936390723457271 +Service Received 5120 MB so far +Bytes received: 5368709120 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server.cmp new file mode 100644 index 00000000..0230e813 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server.cmp @@ -0,0 +1,23 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15 messages +*X* At checkpoint, received 15 messages +Received 1024 MB so far +*X* At checkpoint, received 44 messages +*X* At checkpoint, received 44 messages +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 10:19:14 AM +*X* At checkpoint, received 100 messages +*X* At checkpoint, received 100 messages +Received 3072 MB so far +*X* At checkpoint, received 208 messages +*X* At checkpoint, received 208 messages +Received 4096 MB so far +*X* At checkpoint, received 417 messages +*X* At checkpoint, received 417 messages +Received 5120 MB so far +Bytes received: 5368709120 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server_Verify.cmp new file mode 100644 index 00000000..b10f7596 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocgiantmessagetest_Server_Verify.cmp @@ -0,0 +1,12 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 10:19:14 AM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +Bytes received: 5368709120 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob.cmp new file mode 100644 index 00000000..eb1bc194 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob.cmp @@ -0,0 +1 @@ +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted.cmp new file mode 100644 index 00000000..eb1bc194 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted.cmp @@ -0,0 +1 @@ +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted_Again.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted_Again.cmp new file mode 100644 index 00000000..47fca505 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Restarted_Again.cmp @@ -0,0 +1,28 @@ +*X* 65536 0.0253239207392581 +Service Received 1024 MB so far +*X* 32768 0.0667824937786597 +Service Received 2048 MB so far +*X* 16384 0.0637265013544717 +Service Received 3072 MB so far +*X* 8192 0.0667861765223829 +Service Received 4096 MB so far +*X* 4096 0.0710138026332557 +Service Received 5120 MB so far +*X* 2048 0.0687377152384936 +Service Received 6144 MB so far +*X* 1024 0.0698858170578001 +Service Received 7168 MB so far +*X* 512 0.0689082169699797 +Service Received 8192 MB so far +*X* 256 0.0637590044291595 +Service Received 9216 MB so far +*X* 128 0.0650615661660546 +Service Received 10240 MB so far +*X* 64 0.0494704490202125 +Service Received 11264 MB so far +*X* 32 0.0296159759188472 +Service Received 12288 MB so far +*X* 16 0.0147326531436311 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Verify.cmp new file mode 100644 index 00000000..41adfc6d --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00303549121646285 +Service Received 1024 MB so far +*X* 32768 0.00328632865471561 +Service Received 2048 MB so far +*X* 16384 0.00329072038012138 +Service Received 3072 MB so far +*X* 8192 0.00329254612579396 +Service Received 4096 MB so far +*X* 4096 0.00330708850494249 +Service Received 5120 MB so far +*X* 2048 0.00329986430126427 +Service Received 6144 MB so far +*X* 1024 0.00330391402625493 +Service Received 7168 MB so far +*X* 512 0.00329496949389398 +Service Received 8192 MB so far +*X* 256 0.00328378620754638 +Service Received 9216 MB so far +*X* 128 0.00328368619916626 +Service Received 10240 MB so far +*X* 64 0.0032812345678924 +Service Received 11264 MB so far +*X* 32 0.00323532095444406 +Service Received 12288 MB so far +*X* 16 0.0031499541355023 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server.cmp new file mode 100644 index 00000000..3289bf11 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server.cmp @@ -0,0 +1,57 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15295 messages +*X* At checkpoint, received 15295 messages +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:10/1/2020 8:58:38 AM +*X* At checkpoint, received 44736 messages +*X* At checkpoint, received 44736 messages +Received 2048 MB so far +*X* At checkpoint, received 101485 messages +*X* At checkpoint, received 101485 messages +*X* I'm healthy after 6000 checks at time:10/1/2020 8:59:26 AM +Received 3072 MB so far +*X* At checkpoint, received 210202 messages +*X* At checkpoint, received 210202 messages +Received 4096 MB so far +*X* I'm healthy after 9000 checks at time:10/1/2020 9:00:13 AM +*X* At checkpoint, received 419326 messages +*X* At checkpoint, received 419326 messages +Received 5120 MB so far +*X* At checkpoint, received 818385 messages +*X* At checkpoint, received 818385 messages +Received 6144 MB so far +*X* I'm healthy after 12000 checks at time:10/1/2020 9:01:00 AM +*X* At checkpoint, received 1580223 messages +*X* At checkpoint, received 1580223 messages +Received 7168 MB so far +*X* At checkpoint, received 3018947 messages +*X* At checkpoint, received 3018947 messages +Received 8192 MB so far +*X* I'm healthy after 15000 checks at time:10/1/2020 9:01:47 AM +*X* At checkpoint, received 5708293 messages +*X* At checkpoint, received 5708293 messages +Received 9216 MB so far +*X* At checkpoint, received 10595429 messages +*X* At checkpoint, received 10595429 messages +*X* I'm healthy after 18000 checks at time:10/1/2020 9:02:34 AM +Received 10240 MB so far +*X* At checkpoint, received 19021210 messages +*X* At checkpoint, received 19021210 messages +*X* At checkpoint, received 33003324 messages +*X* At checkpoint, received 33003324 messages +Received 11264 MB so far +*X* I'm healthy after 21000 checks at time:10/1/2020 9:03:21 AM +*X* At checkpoint, received 58838590 messages +*X* At checkpoint, received 58838590 messages +Received 12288 MB so far +*X* I'm healthy after 24000 checks at time:10/1/2020 9:04:09 AM +*X* At checkpoint, received 98634371 messages +*X* At checkpoint, received 98634371 messages +*X* I'm healthy after 27000 checks at time:10/1/2020 9:05:00 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server_Verify.cmp new file mode 100644 index 00000000..a9e95d4e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockilljobtest_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:9/4/2020 12:25:20 PM +Received 2048 MB so far +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:9/4/2020 12:26:07 PM +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/4/2020 12:26:54 PM +Received 9216 MB so far +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/4/2020 12:27:41 PM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/4/2020 12:28:30 PM +*X* I'm healthy after 18000 checks at time:9/4/2020 12:29:22 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob.cmp new file mode 100644 index 00000000..a3ef9786 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob.cmp @@ -0,0 +1,29 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0202977421808515 +Service Received 1024 MB so far +*X* 32768 0.0369283467913277 +Service Received 2048 MB so far +*X* 16384 0.0693274063022772 +Service Received 3072 MB so far +*X* 8192 0.0694748049342007 +Service Received 4096 MB so far +*X* 4096 0.0694547951382199 +Service Received 5120 MB so far +*X* 2048 0.0709840565904775 +Service Received 6144 MB so far +*X* 1024 0.0693809719368053 +Service Received 7168 MB so far +*X* 512 0.0690027458883696 +Service Received 8192 MB so far +*X* 256 0.0640402324306935 +Service Received 9216 MB so far +*X* 128 0.0598831264613841 +Service Received 10240 MB so far +*X* 64 0.0404083862239374 +Service Received 11264 MB so far +*X* 32 0.0209215774961487 +Service Received 12288 MB so far +*X* 16 0.0125384733965781 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob_Verify.cmp new file mode 100644 index 00000000..b23fbc59 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00284036922386733 +Service Received 1024 MB so far +*X* 32768 0.00300752972450123 +Service Received 2048 MB so far +*X* 16384 0.00300255315652176 +Service Received 3072 MB so far +*X* 8192 0.00300052571490925 +Service Received 4096 MB so far +*X* 4096 0.00299514959574021 +Service Received 5120 MB so far +*X* 2048 0.00299736706002082 +Service Received 6144 MB so far +*X* 1024 0.00299545840039982 +Service Received 7168 MB so far +*X* 512 0.00299419402304451 +Service Received 8192 MB so far +*X* 256 0.00297631990270158 +Service Received 9216 MB so far +*X* 128 0.00297295429775793 +Service Received 10240 MB so far +*X* 64 0.00297314192883434 +Service Received 11264 MB so far +*X* 32 0.00293624729380548 +Service Received 12288 MB so far +*X* 16 0.00286218871348242 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server.cmp new file mode 100644 index 00000000..f25ed92e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server.cmp @@ -0,0 +1,5 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Restarted.cmp new file mode 100644 index 00000000..17a91164 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Restarted.cmp @@ -0,0 +1,54 @@ +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 10951 messages +*X* At checkpoint, received 10951 messages +*X* becoming primary +Received 1024 MB so far +*X* At checkpoint, received 36053 messages +*X* At checkpoint, received 36053 messages +Received 2048 MB so far +*X* At checkpoint, received 84213 messages +*X* At checkpoint, received 84213 messages +*X* I'm healthy after 3000 checks at time:9/8/2020 3:28:13 PM +Received 3072 MB so far +*X* At checkpoint, received 175798 messages +*X* At checkpoint, received 175798 messages +Received 4096 MB so far +*X* At checkpoint, received 349592 messages +*X* At checkpoint, received 349592 messages +Received 5120 MB so far +*X* At checkpoint, received 678366 messages +*X* At checkpoint, received 678366 messages +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/8/2020 3:29:00 PM +*X* At checkpoint, received 1298652 messages +*X* At checkpoint, received 1298652 messages +Received 7168 MB so far +*X* At checkpoint, received 2456522 messages +*X* At checkpoint, received 2456522 messages +Received 8192 MB so far +*X* At checkpoint, received 4575314 messages +*X* At checkpoint, received 4575314 messages +Received 9216 MB so far +*X* At checkpoint, received 8406531 messages +*X* At checkpoint, received 8406531 messages +*X* I'm healthy after 9000 checks at time:9/8/2020 3:29:47 PM +*X* At checkpoint, received 15794894 messages +*X* At checkpoint, received 15794894 messages +Received 10240 MB so far +*X* At checkpoint, received 28910744 messages +*X* At checkpoint, received 28910744 messages +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/8/2020 3:30:34 PM +*X* At checkpoint, received 51153003 messages +*X* At checkpoint, received 51153003 messages +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/8/2020 3:31:21 PM +*X* At checkpoint, received 84972166 messages +*X* At checkpoint, received 84972166 messages +*X* I'm healthy after 18000 checks at time:9/8/2020 3:32:08 PM +*X* At checkpoint, received 130349418 messages +*X* At checkpoint, received 130349418 messages +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Verify.cmp new file mode 100644 index 00000000..7e1786ad --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprockillservertest_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/8/2020 3:28:13 PM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/8/2020 3:29:00 PM +Received 7168 MB so far +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/8/2020 3:29:47 PM +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/8/2020 3:30:34 PM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/8/2020 3:31:21 PM +*X* I'm healthy after 18000 checks at time:9/8/2020 3:32:08 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob.cmp new file mode 100644 index 00000000..eb1bc194 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob.cmp @@ -0,0 +1 @@ +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Restarted.cmp new file mode 100644 index 00000000..d4b12540 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Restarted.cmp @@ -0,0 +1,29 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0178803877763325 +Service Received 1024 MB so far +*X* 32768 0.0320146467264891 +Service Received 2048 MB so far +*X* 16384 0.0360346132206953 +Service Received 3072 MB so far +*X* 8192 0.0715985033163424 +Service Received 4096 MB so far +*X* 4096 0.0679328978399811 +Service Received 5120 MB so far +*X* 2048 0.0702215635689236 +Service Received 6144 MB so far +*X* 1024 0.0668205320785328 +Service Received 7168 MB so far +*X* 512 0.0651556540558463 +Service Received 8192 MB so far +*X* 256 0.0657289628226667 +Service Received 9216 MB so far +*X* 128 0.064034135419364 +Service Received 10240 MB so far +*X* 64 0.0419731530562905 +Service Received 11264 MB so far +*X* 32 0.0268798285815271 +Service Received 12288 MB so far +*X* 16 0.0128567774546708 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Verify.cmp new file mode 100644 index 00000000..43dd2306 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00213166998771756 +Service Received 1024 MB so far +*X* 32768 0.00237720470867314 +Service Received 2048 MB so far +*X* 16384 0.00246027601866669 +Service Received 3072 MB so far +*X* 8192 0.0025416320808998 +Service Received 4096 MB so far +*X* 4096 0.00262063260772365 +Service Received 5120 MB so far +*X* 2048 0.00271062642638516 +Service Received 6144 MB so far +*X* 1024 0.00281217735687059 +Service Received 7168 MB so far +*X* 512 0.00291707197473504 +Service Received 8192 MB so far +*X* 256 0.00302272307040026 +Service Received 9216 MB so far +*X* 128 0.00312832878133601 +Service Received 10240 MB so far +*X* 64 0.00324384190811947 +Service Received 11264 MB so far +*X* 32 0.00331294326741953 +Service Received 12288 MB so far +*X* 16 0.00328633423722237 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server.cmp new file mode 100644 index 00000000..6b9fc6ec --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server.cmp @@ -0,0 +1,8 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15295 messages +*X* At checkpoint, received 15295 messages +*X* I'm healthy after 3000 checks at time:10/5/2020 2:37:08 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Restarted.cmp new file mode 100644 index 00000000..84764eac --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Restarted.cmp @@ -0,0 +1,49 @@ +*X* Press enter to terminate program. +Received 1024 MB so far +*X* At checkpoint, received 43277 messages +*X* At checkpoint, received 43277 messages +*X* becoming primary +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/11/2020 10:41:35 AM +*X* At checkpoint, received 98468 messages +*X* At checkpoint, received 98468 messages +Received 3072 MB so far +*X* At checkpoint, received 204460 messages +*X* At checkpoint, received 204460 messages +Received 4096 MB so far +*X* At checkpoint, received 408341 messages +*X* At checkpoint, received 408341 messages +Received 5120 MB so far +*X* At checkpoint, received 796887 messages +*X* At checkpoint, received 796887 messages +*X* I'm healthy after 6000 checks at time:9/11/2020 10:42:21 AM +Received 6144 MB so far +*X* At checkpoint, received 1532478 messages +*X* At checkpoint, received 1532478 messages +Received 7168 MB so far +*X* At checkpoint, received 2921897 messages +*X* At checkpoint, received 2921897 messages +Received 8192 MB so far +*X* At checkpoint, received 5501043 messages +*X* At checkpoint, received 5501043 messages +*X* I'm healthy after 9000 checks at time:9/11/2020 10:43:08 AM +Received 9216 MB so far +*X* At checkpoint, received 10191366 messages +*X* At checkpoint, received 10191366 messages +Received 10240 MB so far +*X* At checkpoint, received 18327934 messages +*X* At checkpoint, received 18327934 messages +*X* At checkpoint, received 32282325 messages +*X* At checkpoint, received 32282325 messages +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/11/2020 10:43:55 AM +*X* At checkpoint, received 57474086 messages +*X* At checkpoint, received 57474086 messages +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/11/2020 10:44:42 AM +*X* At checkpoint, received 95972570 messages +*X* At checkpoint, received 95972570 messages +*X* I'm healthy after 18000 checks at time:9/11/2020 10:45:34 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Verify.cmp new file mode 100644 index 00000000..78730988 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmigrateclient_Server_Verify.cmp @@ -0,0 +1,28 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* I'm healthy after 3000 checks at time:10/1/2020 11:02:03 PM +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 6000 checks at time:10/1/2020 11:02:51 PM +Received 3072 MB so far +Received 4096 MB so far +*X* I'm healthy after 9000 checks at time:10/1/2020 11:03:39 PM +Received 5120 MB so far +Received 6144 MB so far +*X* I'm healthy after 12000 checks at time:10/1/2020 11:04:26 PM +Received 7168 MB so far +*X* I'm healthy after 15000 checks at time:10/1/2020 11:05:13 PM +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 18000 checks at time:10/1/2020 11:06:00 PM +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 21000 checks at time:10/1/2020 11:06:47 PM +Received 12288 MB so far +*X* I'm healthy after 24000 checks at time:10/1/2020 11:07:34 PM +*X* I'm healthy after 27000 checks at time:10/1/2020 11:08:22 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob0.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob0.cmp new file mode 100644 index 00000000..bd39e912 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob0.cmp @@ -0,0 +1,18 @@ +Bytes per RPC Throughput (GB/sec) +Service Received 1024 MB so far +Service Received 2048 MB so far +Service Received 3072 MB so far +*X* 65536 0.00623661107962322 +Service Received 4096 MB so far +Service Received 5120 MB so far +Service Received 6144 MB so far +Service Received 7168 MB so far +*X* 32768 0.00638966630041866 +Service Received 8192 MB so far +Service Received 9216 MB so far +Service Received 10240 MB so far +Service Received 11264 MB so far +*X* 16384 0.0065423278076004 +Service Received 12288 MB so far +Bytes received: 12884901888 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob1.cmp new file mode 100644 index 00000000..9f50b4f9 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob1.cmp @@ -0,0 +1,18 @@ +Bytes per RPC Throughput (GB/sec) +Service Received 1024 MB so far +Service Received 2048 MB so far +Service Received 3072 MB so far +*X* 65536 0.00632565195879082 +Service Received 4096 MB so far +Service Received 5120 MB so far +Service Received 6144 MB so far +Service Received 7168 MB so far +*X* 32768 0.00692991790241298 +Service Received 8192 MB so far +Service Received 9216 MB so far +Service Received 10240 MB so far +Service Received 11264 MB so far +*X* 16384 0.00683293426924544 +Service Received 12288 MB so far +Bytes received: 12884901888 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob2.cmp new file mode 100644 index 00000000..293cd682 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob2.cmp @@ -0,0 +1,18 @@ +Bytes per RPC Throughput (GB/sec) +Service Received 1024 MB so far +Service Received 2048 MB so far +Service Received 3072 MB so far +Service Received 4096 MB so far +*X* 65536 0.00616695777400265 +Service Received 5120 MB so far +Service Received 6144 MB so far +Service Received 7168 MB so far +Service Received 8192 MB so far +*X* 32768 0.00625427474597289 +Service Received 9216 MB so far +Service Received 10240 MB so far +Service Received 11264 MB so far +*X* 16384 0.00709261257886558 +Service Received 12288 MB so far +Bytes received: 12884901888 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob3.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob3.cmp new file mode 100644 index 00000000..a5d73332 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob3.cmp @@ -0,0 +1,18 @@ +Bytes per RPC Throughput (GB/sec) +Service Received 1024 MB so far +Service Received 2048 MB so far +Service Received 3072 MB so far +*X* 65536 0.00687014443353342 +Service Received 4096 MB so far +Service Received 5120 MB so far +Service Received 6144 MB so far +Service Received 7168 MB so far +*X* 32768 0.00711325292737477 +Service Received 8192 MB so far +Service Received 9216 MB so far +Service Received 10240 MB so far +Service Received 11264 MB so far +*X* 16384 0.00699940419881561 +Service Received 12288 MB so far +Bytes received: 12884901888 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob_Verify.cmp new file mode 100644 index 00000000..e8f996d1 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_ClientJob_Verify.cmp @@ -0,0 +1,20 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +Service Received 1024 MB so far +Service Received 2048 MB so far +Service Received 3072 MB so far +*X* 65536 0.00183147181280669 +Service Received 4096 MB so far +Service Received 5120 MB so far +Service Received 6144 MB so far +Service Received 7168 MB so far +*X* 32768 0.00226328852883228 +Service Received 8192 MB so far +Service Received 9216 MB so far +Service Received 10240 MB so far +Service Received 11264 MB so far +*X* 16384 0.00293864168211097 +Service Received 12288 MB so far +Bytes received: 12884901888 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server.cmp new file mode 100644 index 00000000..e0f842c0 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server.cmp @@ -0,0 +1,52 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* I'm healthy after 3000 checks at time:9/8/2020 4:05:16 PM +*X* At checkpoint, received 3735 messages +*X* At checkpoint, received 3735 messages +Received 1024 MB so far +*X* I'm healthy after 6000 checks at time:9/8/2020 4:06:03 PM +*X* At checkpoint, received 7530 messages +*X* At checkpoint, received 7530 messages +Received 2048 MB so far +*X* At checkpoint, received 10665 messages +*X* At checkpoint, received 10665 messages +Received 3072 MB so far +*X* I'm healthy after 9000 checks at time:9/8/2020 4:06:52 PM +*X* At checkpoint, received 15390 messages +*X* At checkpoint, received 15390 messages +Received 4096 MB so far +*X* I'm healthy after 12000 checks at time:9/8/2020 4:07:39 PM +*X* At checkpoint, received 23196 messages +*X* At checkpoint, received 23196 messages +Received 5120 MB so far +*X* I'm healthy after 15000 checks at time:9/8/2020 4:08:27 PM +*X* At checkpoint, received 26358 messages +*X* At checkpoint, received 26358 messages +Received 6144 MB so far +*X* At checkpoint, received 33364 messages +*X* At checkpoint, received 33364 messages +*X* I'm healthy after 18000 checks at time:9/8/2020 4:09:16 PM +Received 7168 MB so far +*X* At checkpoint, received 40618 messages +*X* At checkpoint, received 40618 messages +Received 8192 MB so far +*X* I'm healthy after 21000 checks at time:9/8/2020 4:10:04 PM +*X* At checkpoint, received 61041 messages +*X* At checkpoint, received 61041 messages +Received 9216 MB so far +*X* At checkpoint, received 70176 messages +*X* At checkpoint, received 70176 messages +*X* I'm healthy after 24000 checks at time:9/8/2020 4:10:53 PM +Received 10240 MB so far +*X* At checkpoint, received 93045 messages +*X* At checkpoint, received 93045 messages +*X* I'm healthy after 27000 checks at time:9/8/2020 4:11:42 PM +Received 11264 MB so far +*X* At checkpoint, received 109488 messages +*X* At checkpoint, received 109488 messages +*X* I'm healthy after 30000 checks at time:9/8/2020 4:12:30 PM +Received 12288 MB so far +Bytes received: 12884901888 diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server_Verify.cmp new file mode 100644 index 00000000..3a46d6fd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocmultipleclientsperserver_Server_Verify.cmp @@ -0,0 +1,27 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* I'm healthy after 3000 checks at time:9/8/2020 4:05:16 PM +Received 1024 MB so far +*X* I'm healthy after 6000 checks at time:9/8/2020 4:06:03 PM +Received 2048 MB so far +Received 3072 MB so far +*X* I'm healthy after 9000 checks at time:9/8/2020 4:06:52 PM +Received 4096 MB so far +*X* I'm healthy after 12000 checks at time:9/8/2020 4:07:39 PM +Received 5120 MB so far +*X* I'm healthy after 15000 checks at time:9/8/2020 4:08:27 PM +Received 6144 MB so far +*X* I'm healthy after 18000 checks at time:9/8/2020 4:09:16 PM +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 21000 checks at time:9/8/2020 4:10:04 PM +Received 9216 MB so far +*X* I'm healthy after 24000 checks at time:9/8/2020 4:10:53 PM +Received 10240 MB so far +*X* I'm healthy after 27000 checks at time:9/8/2020 4:11:42 PM +Received 11264 MB so far +*X* I'm healthy after 30000 checks at time:9/8/2020 4:12:30 PM +Received 12288 MB so far +Bytes received: 12884901888 diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_ClientJob.cmp new file mode 100644 index 00000000..62102006 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_ClientJob.cmp @@ -0,0 +1,5 @@ +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0283369937881201 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_Server.cmp new file mode 100644 index 00000000..a48c3c7c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeclientonly_Server.cmp @@ -0,0 +1,12 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 975898 messages +*X* At checkpoint, received 975898 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_ClientJob.cmp new file mode 100644 index 00000000..785f1a9a --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_ClientJob.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0683573172044335 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_Server.cmp new file mode 100644 index 00000000..08f1adea --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocpipeserveronly_Server.cmp @@ -0,0 +1,10 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 969265 messages +*X* At checkpoint, received 969265 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_ClientJob.cmp new file mode 100644 index 00000000..08a4b2f2 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_ClientJob.cmp @@ -0,0 +1,9 @@ +*X* ImmortalCoordinator -i=inproctcpclientonlyclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0317853152676239 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_Server.cmp new file mode 100644 index 00000000..8284e71f --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpclientonly_Server.cmp @@ -0,0 +1,12 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 970662 messages +*X* At checkpoint, received 970662 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob.cmp new file mode 100644 index 00000000..20ddbde6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob.cmp @@ -0,0 +1,5 @@ +*X* ImmortalCoordinator -i=inproctcpkilljobtestclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted.cmp new file mode 100644 index 00000000..20ddbde6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted.cmp @@ -0,0 +1,5 @@ +*X* ImmortalCoordinator -i=inproctcpkilljobtestclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted_Again.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted_Again.cmp new file mode 100644 index 00000000..4d037cff --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Restarted_Again.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* 65536 0.0234134330212378 +Service Received 1024 MB so far +*X* 32768 0.0453701277326076 +Service Received 2048 MB so far +*X* 16384 0.0679433503020945 +Service Received 3072 MB so far +*X* 8192 0.06769956112744 +Service Received 4096 MB so far +*X* 4096 0.0720971416122106 +Service Received 5120 MB so far +*X* 2048 0.0679341411110316 +Service Received 6144 MB so far +*X* 1024 0.0690021445314503 +Service Received 7168 MB so far +*X* 512 0.0672352862400445 +Service Received 8192 MB so far +*X* 256 0.0643784443071252 +Service Received 9216 MB so far +*X* 128 0.056534957421347 +Service Received 10240 MB so far +*X* 64 0.0301993259093706 +Service Received 11264 MB so far +*X* 32 0.0159338152653853 +Service Received 12288 MB so far +*X* 16 0.00974523739236517 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Verify.cmp new file mode 100644 index 00000000..06a33e79 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00309767012000673 +Service Received 1024 MB so far +*X* 32768 0.0032051603604886 +Service Received 2048 MB so far +*X* 16384 0.0033427386745749 +Service Received 3072 MB so far +*X* 8192 0.00335159598771221 +Service Received 4096 MB so far +*X* 4096 0.00335880841504655 +Service Received 5120 MB so far +*X* 2048 0.00335744374230585 +Service Received 6144 MB so far +*X* 1024 0.00335685159787588 +Service Received 7168 MB so far +*X* 512 0.00334274086577849 +Service Received 8192 MB so far +*X* 256 0.00334290015307852 +Service Received 9216 MB so far +*X* 128 0.00332261480612167 +Service Received 10240 MB so far +*X* 64 0.00324813892391 +Service Received 11264 MB so far +*X* 32 0.00313098330616278 +Service Received 12288 MB so far +*X* 16 0.00300706494904524 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server.cmp new file mode 100644 index 00000000..1cc99bc0 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server.cmp @@ -0,0 +1,59 @@ +*X* ImmortalCoordinator -i=inproctcpkilljobtestserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15257 messages +*X* At checkpoint, received 15257 messages +Received 1024 MB so far +*X* At checkpoint, received 44819 messages +*X* At checkpoint, received 44819 messages +*X* I'm healthy after 3000 checks at time:9/10/2020 3:30:28 PM +Received 2048 MB so far +*X* At checkpoint, received 101728 messages +*X* At checkpoint, received 101728 messages +Received 3072 MB so far +*X* At checkpoint, received 211665 messages +*X* At checkpoint, received 211665 messages +Received 4096 MB so far +*X* At checkpoint, received 422408 messages +*X* At checkpoint, received 422408 messages +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:9/10/2020 3:31:15 PM +*X* At checkpoint, received 826587 messages +*X* At checkpoint, received 826587 messages +Received 6144 MB so far +*X* At checkpoint, received 1592820 messages +*X* At checkpoint, received 1592820 messages +Received 7168 MB so far +*X* At checkpoint, received 3045907 messages +*X* At checkpoint, received 3045907 messages +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/10/2020 3:32:02 PM +*X* At checkpoint, received 5737123 messages +*X* At checkpoint, received 5737123 messages +Received 9216 MB so far +*X* At checkpoint, received 10614620 messages +*X* At checkpoint, received 10614620 messages +Received 10240 MB so far +*X* At checkpoint, received 19036308 messages +*X* At checkpoint, received 19036308 messages +*X* I'm healthy after 12000 checks at time:9/10/2020 3:32:48 PM +*X* At checkpoint, received 32905060 messages +*X* At checkpoint, received 32905060 messages +Received 11264 MB so far +*X* I'm healthy after 15000 checks at time:9/10/2020 3:33:35 PM +*X* At checkpoint, received 58560261 messages +*X* At checkpoint, received 58560261 messages +Received 12288 MB so far +*X* I'm healthy after 18000 checks at time:9/10/2020 3:34:22 PM +*X* At checkpoint, received 97592258 messages +*X* At checkpoint, received 97592258 messages +*X* I'm healthy after 21000 checks at time:9/10/2020 3:35:09 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server_Verify.cmp new file mode 100644 index 00000000..525cf94b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkilljobtest_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:9/10/2020 3:41:03 PM +Received 2048 MB so far +Received 3072 MB so far +Received 4096 MB so far +*X* I'm healthy after 6000 checks at time:9/10/2020 3:41:50 PM +Received 5120 MB so far +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/10/2020 3:42:36 PM +Received 9216 MB so far +Received 10240 MB so far +*X* I'm healthy after 12000 checks at time:9/10/2020 3:43:23 PM +Received 11264 MB so far +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/10/2020 3:44:10 PM +*X* I'm healthy after 18000 checks at time:9/10/2020 3:44:57 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob.cmp new file mode 100644 index 00000000..17d4d0a4 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob.cmp @@ -0,0 +1,33 @@ +*X* ImmortalCoordinator -i=inproctcpkillservertestclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0209787157460421 +Service Received 1024 MB so far +*X* 32768 0.047139397070707 +Service Received 2048 MB so far +*X* 16384 0.058760675881982 +Service Received 3072 MB so far +*X* 8192 0.0693570146293988 +Service Received 4096 MB so far +*X* 4096 0.0711711058861081 +Service Received 5120 MB so far +*X* 2048 0.0708713133292059 +Service Received 6144 MB so far +*X* 1024 0.0715403557895052 +Service Received 7168 MB so far +*X* 512 0.0680870079517046 +Service Received 8192 MB so far +*X* 256 0.0629941287015279 +Service Received 9216 MB so far +*X* 128 0.0607897388282548 +Service Received 10240 MB so far +*X* 64 0.0402881165981928 +Service Received 11264 MB so far +*X* 32 0.0221065832257515 +Service Received 12288 MB so far +*X* 16 0.0154543830795573 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob_Verify.cmp new file mode 100644 index 00000000..0d5a66c1 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00311338656413632 +Service Received 1024 MB so far +*X* 32768 0.00311141308813296 +Service Received 2048 MB so far +*X* 16384 0.00317206613596753 +Service Received 3072 MB so far +*X* 8192 0.00320036453124922 +Service Received 4096 MB so far +*X* 4096 0.00320774608474855 +Service Received 5120 MB so far +*X* 2048 0.00320099192337722 +Service Received 6144 MB so far +*X* 1024 0.00320358038397101 +Service Received 7168 MB so far +*X* 512 0.00319983870457848 +Service Received 8192 MB so far +*X* 256 0.00317734623106456 +Service Received 9216 MB so far +*X* 128 0.00316120753159553 +Service Received 10240 MB so far +*X* 64 0.003026022787093 +Service Received 11264 MB so far +*X* 32 0.0027769645313727 +Service Received 12288 MB so far +*X* 16 0.00271055484823823 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server.cmp new file mode 100644 index 00000000..4a486c3c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server.cmp @@ -0,0 +1,9 @@ +*X* ImmortalCoordinator -i=inproctcpkillservertestserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Restarted.cmp new file mode 100644 index 00000000..dd406192 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Restarted.cmp @@ -0,0 +1,54 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +Received 1024 MB so far +*X* At checkpoint, received 16966 messages +*X* At checkpoint, received 16966 messages +*X* becoming primary +*X* At checkpoint, received 47523 messages +*X* At checkpoint, received 47523 messages +Received 2048 MB so far +*X* At checkpoint, received 106939 messages +*X* At checkpoint, received 106939 messages +*X* I'm healthy after 3000 checks at time:9/10/2020 3:54:59 PM +Received 3072 MB so far +*X* At checkpoint, received 221532 messages +*X* At checkpoint, received 221532 messages +Received 4096 MB so far +*X* At checkpoint, received 441344 messages +*X* At checkpoint, received 441344 messages +Received 5120 MB so far +*X* At checkpoint, received 861393 messages +*X* At checkpoint, received 861393 messages +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/10/2020 3:55:45 PM +*X* At checkpoint, received 1661378 messages +*X* At checkpoint, received 1661378 messages +Received 7168 MB so far +*X* At checkpoint, received 3175694 messages +*X* At checkpoint, received 3175694 messages +Received 8192 MB so far +*X* At checkpoint, received 6002744 messages +*X* At checkpoint, received 6002744 messages +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/10/2020 3:56:32 PM +*X* At checkpoint, received 11130344 messages +*X* At checkpoint, received 11130344 messages +Received 10240 MB so far +*X* At checkpoint, received 20081773 messages +*X* At checkpoint, received 20081773 messages +Received 11264 MB so far +*X* At checkpoint, received 34340462 messages +*X* At checkpoint, received 34340462 messages +*X* I'm healthy after 12000 checks at time:9/10/2020 3:57:19 PM +*X* At checkpoint, received 60604023 messages +*X* At checkpoint, received 60604023 messages +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/10/2020 3:58:06 PM +*X* At checkpoint, received 101208908 messages +*X* At checkpoint, received 101208908 messages +*X* I'm healthy after 18000 checks at time:9/10/2020 3:58:53 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Verify.cmp new file mode 100644 index 00000000..20008c46 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpkillservertest_Server_Verify.cmp @@ -0,0 +1,25 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/10/2020 3:54:59 PM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +Received 6144 MB so far +*X* I'm healthy after 6000 checks at time:9/10/2020 3:55:45 PM +Received 7168 MB so far +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 9000 checks at time:9/10/2020 3:56:32 PM +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 12000 checks at time:9/10/2020 3:57:19 PM +Received 12288 MB so far +*X* I'm healthy after 15000 checks at time:9/10/2020 3:58:06 PM +*X* I'm healthy after 18000 checks at time:9/10/2020 3:58:53 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob.cmp new file mode 100644 index 00000000..c8ce11b2 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob.cmp @@ -0,0 +1,5 @@ +*X* ImmortalCoordinator -i=inproctcpupgradeclientclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Restarted.cmp new file mode 100644 index 00000000..3ac86e26 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Restarted.cmp @@ -0,0 +1,32 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0429049993385765 +Service Received 1024 MB so far +*X* 32768 0.0414782293425041 +Service Received 2048 MB so far +*X* 16384 0.0664449085562245 +Service Received 3072 MB so far +*X* 8192 0.0698767559198976 +Service Received 4096 MB so far +*X* 4096 0.0686197640195136 +Service Received 5120 MB so far +*X* 2048 0.069737277082353 +Service Received 6144 MB so far +*X* 1024 0.0656210103040932 +Service Received 7168 MB so far +*X* 512 0.06863385761456 +Service Received 8192 MB so far +*X* 256 0.0676495913257746 +Service Received 9216 MB so far +*X* 128 0.0638770706726584 +Service Received 10240 MB so far +*X* 64 0.0334457876592361 +Service Received 11264 MB so far +*X* 32 0.0182967267488847 +Service Received 12288 MB so far +*X* 16 0.0108875905229116 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Verify.cmp new file mode 100644 index 00000000..76829b64 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00271092215419825 +Service Received 1024 MB so far +*X* 32768 0.00278120585425843 +Service Received 2048 MB so far +*X* 16384 0.00285986661044474 +Service Received 3072 MB so far +*X* 8192 0.00286853773152103 +Service Received 4096 MB so far +*X* 4096 0.00287333711648391 +Service Received 5120 MB so far +*X* 2048 0.00287860983419633 +Service Received 6144 MB so far +*X* 1024 0.00288173040422886 +Service Received 7168 MB so far +*X* 512 0.0028892451401983 +Service Received 8192 MB so far +*X* 256 0.00286911466928741 +Service Received 9216 MB so far +*X* 128 0.00280936269653303 +Service Received 10240 MB so far +*X* 64 0.00260867346733422 +Service Received 11264 MB so far +*X* 32 0.00240781951503656 +Service Received 12288 MB so far +*X* 16 0.00238183030432107 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server.cmp new file mode 100644 index 00000000..f09c4d2c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server.cmp @@ -0,0 +1,11 @@ +*X* ImmortalCoordinator -i=inproctcpupgradeclientserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 15334 messages +*X* At checkpoint, received 15334 messages diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Restarted.cmp new file mode 100644 index 00000000..c26af617 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Restarted.cmp @@ -0,0 +1,55 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +Received 1024 MB so far +*X* At checkpoint, received 29027 messages +*X* At checkpoint, received 29027 messages +*X* becoming primary +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/11/2020 10:53:23 AM +*X* At checkpoint, received 70076 messages +*X* At checkpoint, received 70076 messages +Received 3072 MB so far +*X* At checkpoint, received 147615 messages +*X* At checkpoint, received 147615 messages +Received 4096 MB so far +*X* At checkpoint, received 294281 messages +*X* At checkpoint, received 294281 messages +Received 5120 MB so far +*X* At checkpoint, received 570795 messages +*X* At checkpoint, received 570795 messages +*X* I'm healthy after 6000 checks at time:9/11/2020 10:54:10 AM +Received 6144 MB so far +*X* At checkpoint, received 1081867 messages +*X* At checkpoint, received 1081867 messages +*X* At checkpoint, received 2053584 messages +*X* At checkpoint, received 2053584 messages +Received 7168 MB so far +*X* At checkpoint, received 3950252 messages +*X* At checkpoint, received 3950252 messages +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/11/2020 10:54:57 AM +*X* At checkpoint, received 7528963 messages +*X* At checkpoint, received 7528963 messages +Received 9216 MB so far +*X* At checkpoint, received 14115800 messages +*X* At checkpoint, received 14115800 messages +Received 10240 MB so far +*X* At checkpoint, received 25641389 messages +*X* At checkpoint, received 25641389 messages +*X* I'm healthy after 12000 checks at time:9/11/2020 10:55:43 AM +Received 11264 MB so far +*X* At checkpoint, received 44833736 messages +*X* At checkpoint, received 44833736 messages +*X* I'm healthy after 15000 checks at time:9/11/2020 10:56:30 AM +Received 12288 MB so far +*X* At checkpoint, received 73940140 messages +*X* At checkpoint, received 73940140 messages +*X* I'm healthy after 18000 checks at time:9/11/2020 10:57:17 AM +*X* At checkpoint, received 119255205 messages +*X* At checkpoint, received 119255205 messages +*X* I'm healthy after 21000 checks at time:9/11/2020 10:58:04 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Verify.cmp new file mode 100644 index 00000000..68c3e03e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpmigrateclient_Server_Verify.cmp @@ -0,0 +1,26 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Received 2048 MB so far +*X* I'm healthy after 3000 checks at time:9/11/2020 10:53:23 AM +Received 3072 MB so far +Received 4096 MB so far +Received 5120 MB so far +*X* I'm healthy after 6000 checks at time:9/11/2020 10:54:10 AM +Received 6144 MB so far +Received 7168 MB so far +Received 8192 MB so far +*X* I'm healthy after 9000 checks at time:9/11/2020 10:54:57 AM +Received 9216 MB so far +Received 10240 MB so far +*X* I'm healthy after 12000 checks at time:9/11/2020 10:55:43 AM +Received 11264 MB so far +*X* I'm healthy after 15000 checks at time:9/11/2020 10:56:30 AM +Received 12288 MB so far +*X* I'm healthy after 18000 checks at time:9/11/2020 10:57:17 AM +*X* I'm healthy after 21000 checks at time:9/11/2020 10:58:04 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_ClientJob.cmp new file mode 100644 index 00000000..3e61fac5 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_ClientJob.cmp @@ -0,0 +1,8 @@ +*X* ImmortalCoordinator -i=unittestinproctcpserver -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* 1024 0.0705962372530287 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_Server.cmp new file mode 100644 index 00000000..51d1d5b6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpsavelogtoblob_Server.cmp @@ -0,0 +1,14 @@ +*X* ImmortalCoordinator -i=unittestinproctcpserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 972166 messages +*X* At checkpoint, received 972166 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_ClientJob.cmp new file mode 100644 index 00000000..c15ddb79 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_ClientJob.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0413099726099184 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_Server.cmp new file mode 100644 index 00000000..7e7bb0fa --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpserveronly_Server.cmp @@ -0,0 +1,12 @@ +*X* ImmortalCoordinator -i=unittestinproctcpserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 970708 messages +*X* At checkpoint, received 970708 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_ClientJob.cmp new file mode 100644 index 00000000..d40c0ca0 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_ClientJob.cmp @@ -0,0 +1,33 @@ +*X* ImmortalCoordinator -i=inproctcpupgradeserverclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.019883684383327 +Service Received 1024 MB so far +*X* 32768 0.0364469877087673 +Service Received 2048 MB so far +*X* 16384 0.0355700420796443 +Service Received 3072 MB so far +*X* 8192 0.0368427694801188 +Service Received 4096 MB so far +*X* 4096 0.0369998844937606 +Service Received 5120 MB so far +*X* 2048 0.0378420832081943 +Service Received 6144 MB so far +*X* 1024 0.0372031441150917 +Service Received 7168 MB so far +*X* 512 0.0358574703562259 +Service Received 8192 MB so far +*X* 256 0.0353717489816668 +Service Received 9216 MB so far +*X* 128 0.033583917269217 +Service Received 10240 MB so far +*X* 64 0.0280063464397489 +Service Received 11264 MB so far +*X* 32 0.014804647684635 +Service Received 12288 MB so far +*X* 16 0.00945081139359995 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_Server_upgraded.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_Server_upgraded.cmp new file mode 100644 index 00000000..8f25b593 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inproctcpupgradeserver_Server_upgraded.cmp @@ -0,0 +1,64 @@ +*X* ImmortalCoordinator -i=inproctcpupgradeserverserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, upgraded service received 7096 messages +*X* At checkpoint, upgraded service received 7096 messages +becoming upgraded primary +*X* At checkpoint, upgraded service received 7096 messages +*X* At checkpoint, upgraded service received 7096 messages +Received 1024 MB so far +*X* At checkpoint, upgraded service received 27656 messages +*X* At checkpoint, upgraded service received 27656 messages +*X* I'm healthy after 3000 checks at time:9/17/2020 10:29:49 AM +Received 2048 MB so far +*X* At checkpoint, upgraded service received 67551 messages +*X* At checkpoint, upgraded service received 67551 messages +Received 3072 MB so far +*X* At checkpoint, upgraded service received 142712 messages +*X* At checkpoint, upgraded service received 142712 messages +*X* I'm healthy after 6000 checks at time:9/17/2020 10:30:36 AM +Received 4096 MB so far +*X* At checkpoint, upgraded service received 284037 messages +*X* At checkpoint, upgraded service received 284037 messages +Received 5120 MB so far +*X* I'm healthy after 9000 checks at time:9/17/2020 10:31:23 AM +*X* At checkpoint, upgraded service received 549952 messages +*X* At checkpoint, upgraded service received 549952 messages +Received 6144 MB so far +*X* At checkpoint, upgraded service received 1043457 messages +*X* At checkpoint, upgraded service received 1043457 messages +*X* I'm healthy after 12000 checks at time:9/17/2020 10:32:09 AM +*X* At checkpoint, upgraded service received 2012822 messages +*X* At checkpoint, upgraded service received 2012822 messages +Received 7168 MB so far +*X* At checkpoint, upgraded service received 3873225 messages +*X* At checkpoint, upgraded service received 3873225 messages +Received 8192 MB so far +*X* I'm healthy after 15000 checks at time:9/17/2020 10:32:56 AM +*X* At checkpoint, upgraded service received 7391883 messages +*X* At checkpoint, upgraded service received 7391883 messages +Received 9216 MB so far +*X* At checkpoint, upgraded service received 13837489 messages +*X* At checkpoint, upgraded service received 13837489 messages +Received 10240 MB so far +*X* I'm healthy after 18000 checks at time:9/17/2020 10:33:43 AM +*X* At checkpoint, upgraded service received 25124644 messages +*X* At checkpoint, upgraded service received 25124644 messages +Received 11264 MB so far +*X* I'm healthy after 21000 checks at time:9/17/2020 10:34:30 AM +*X* At checkpoint, upgraded service received 43869329 messages +*X* At checkpoint, upgraded service received 43869329 messages +*X* I'm healthy after 24000 checks at time:9/17/2020 10:35:17 AM +Received 12288 MB so far +*X* At checkpoint, upgraded service received 72236703 messages +*X* At checkpoint, upgraded service received 72236703 messages +*X* I'm healthy after 27000 checks at time:9/17/2020 10:36:04 AM +*X* At checkpoint, upgraded service received 117434245 messages +*X* At checkpoint, upgraded service received 117434245 messages +*X* I'm healthy after 30000 checks at time:9/17/2020 10:36:51 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob.cmp new file mode 100644 index 00000000..ff03b429 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob.cmp @@ -0,0 +1,11 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0268049854667524 +Service Received 1024 MB so far +*X* 32768 0.0371572217956965 +Service Received 2048 MB so far +*X* 16384 0.0376291791375567 +Service Received 3072 MB so far +*X* 8192 0.0361280147067032 +Service Received 4096 MB so far +Bytes received: 4294967296 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob_Verify.cmp new file mode 100644 index 00000000..26405236 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_ClientJob_Verify.cmp @@ -0,0 +1,13 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00569467587338279 +Service Received 1024 MB so far +*X* 32768 0.00649811091565609 +Service Received 2048 MB so far +*X* 16384 0.00705336233832041 +Service Received 3072 MB so far +*X* 8192 0.00780639551458378 +Service Received 4096 MB so far +Bytes received: 4294967296 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_Verify.cmp new file mode 100644 index 00000000..a2570cf3 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_Verify.cmp @@ -0,0 +1,12 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:10/1/2020 5:59:01 PM +Received 2048 MB so far +Received 3072 MB so far +*X* I'm healthy after 6000 checks at time:10/1/2020 5:59:48 PM +Received 4096 MB so far +Bytes received: 4294967296 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_upgraded.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_upgraded.cmp new file mode 100644 index 00000000..94207124 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradeafterserverdone_Server_upgraded.cmp @@ -0,0 +1,9 @@ +*X* Press enter to terminate program. +Received 4096 MB so far +Bytes received: 4294967296 +DONE +*X* At checkpoint, upgraded service received 245760 messages +*X* At checkpoint, upgraded service received 245760 messages +becoming upgraded primary +*X* At checkpoint, upgraded service received 245760 messages +*X* At checkpoint, upgraded service received 245760 messages diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_ClientJob.cmp new file mode 100644 index 00000000..ea5e35d2 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_ClientJob.cmp @@ -0,0 +1,29 @@ +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0199926148880013 +Service Received 1024 MB so far +*X* 32768 0.0367999240861726 +Service Received 2048 MB so far +*X* 16384 0.0361683835153319 +Service Received 3072 MB so far +*X* 8192 0.0376331044329863 +Service Received 4096 MB so far +*X* 4096 0.0361563323884177 +Service Received 5120 MB so far +*X* 2048 0.0353423488932218 +Service Received 6144 MB so far +*X* 1024 0.036961921650898 +Service Received 7168 MB so far +*X* 512 0.0347171449603174 +Service Received 8192 MB so far +*X* 256 0.0360966970883253 +Service Received 9216 MB so far +*X* 128 0.0333865737896699 +Service Received 10240 MB so far +*X* 64 0.0324895051831791 +Service Received 11264 MB so far +*X* 32 0.0211446577143724 +Service Received 12288 MB so far +*X* 16 0.0139449802088797 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_Server_upgraded.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_Server_upgraded.cmp new file mode 100644 index 00000000..27269136 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/inprocupgradebeforeserverdone_Server_upgraded.cmp @@ -0,0 +1,59 @@ +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, upgraded service received 5115 messages +*X* At checkpoint, upgraded service received 5115 messages +becoming upgraded primary +*X* At checkpoint, upgraded service received 5115 messages +*X* At checkpoint, upgraded service received 5115 messages +Received 1024 MB so far +*X* At checkpoint, upgraded service received 24400 messages +*X* At checkpoint, upgraded service received 24400 messages +*X* I'm healthy after 3000 checks at time:9/17/2020 9:54:30 AM +Received 2048 MB so far +*X* At checkpoint, upgraded service received 61121 messages +*X* At checkpoint, upgraded service received 61121 messages +Received 3072 MB so far +*X* At checkpoint, upgraded service received 130112 messages +*X* At checkpoint, upgraded service received 130112 messages +*X* I'm healthy after 6000 checks at time:9/17/2020 9:55:17 AM +Received 4096 MB so far +*X* At checkpoint, upgraded service received 260209 messages +*X* At checkpoint, upgraded service received 260209 messages +*X* I'm healthy after 9000 checks at time:9/17/2020 9:56:04 AM +*X* At checkpoint, upgraded service received 504096 messages +*X* At checkpoint, upgraded service received 504096 messages +Received 5120 MB so far +*X* At checkpoint, upgraded service received 986916 messages +*X* At checkpoint, upgraded service received 986916 messages +Received 6144 MB so far +*X* I'm healthy after 12000 checks at time:9/17/2020 9:56:51 AM +*X* At checkpoint, upgraded service received 1911638 messages +*X* At checkpoint, upgraded service received 1911638 messages +Received 7168 MB so far +*X* At checkpoint, upgraded service received 3671549 messages +*X* At checkpoint, upgraded service received 3671549 messages +Received 8192 MB so far +*X* I'm healthy after 15000 checks at time:9/17/2020 9:57:38 AM +*X* At checkpoint, upgraded service received 6990272 messages +*X* At checkpoint, upgraded service received 6990272 messages +Received 9216 MB so far +*X* At checkpoint, upgraded service received 13046270 messages +*X* At checkpoint, upgraded service received 13046270 messages +*X* I'm healthy after 18000 checks at time:9/17/2020 9:58:25 AM +Received 10240 MB so far +*X* At checkpoint, upgraded service received 23660623 messages +*X* At checkpoint, upgraded service received 23660623 messages +Received 11264 MB so far +*X* At checkpoint, upgraded service received 41296525 messages +*X* At checkpoint, upgraded service received 41296525 messages +*X* I'm healthy after 21000 checks at time:9/17/2020 9:59:12 AM +Received 12288 MB so far +*X* At checkpoint, upgraded service received 68026356 messages +*X* At checkpoint, upgraded service received 68026356 messages +*X* I'm healthy after 24000 checks at time:9/17/2020 9:59:59 AM +*X* At checkpoint, upgraded service received 113467226 messages +*X* At checkpoint, upgraded service received 113467226 messages +*X* I'm healthy after 27000 checks at time:9/17/2020 10:00:46 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted.cmp index 262c95d8..2bb3f0ab 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted.cmp @@ -1,29 +1,2 @@ +*X* Trying to connect IC and Language Binding Bytes per RPC Throughput (GB/sec) -*X* 65536 0.0273828010297083 -Service Received 1024 MB so far -Service Received 2048 MB so far -*X* 32768 0.0709177553954565 -Service Received 3072 MB so far -*X* 16384 0.0717941152689843 -Service Received 4096 MB so far -*X* 8192 0.0726432838832339 -*X* 4096 0.0708769724033704 -Service Received 5120 MB so far -*X* 2048 0.0727033736742785 -Service Received 6144 MB so far -*X* 1024 0.0726175684424032 -Service Received 7168 MB so far -Service Received 8192 MB so far -*X* 512 0.0709311429758552 -*X* 256 0.0713231837827627 -Service Received 9216 MB so far -Service Received 10240 MB so far -*X* 128 0.066423578510511 -*X* 64 0.0626573117545812 -Service Received 11264 MB so far -Service Received 12288 MB so far -*X* 32 0.0574327695589092 -*X* 16 0.0353197351340568 -Service Received 13312 MB so far -Bytes received: 13958643712 -DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted_Again.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted_Again.cmp new file mode 100644 index 00000000..a29562d8 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killjobtest_ClientJob_Restarted_Again.cmp @@ -0,0 +1,30 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* 65536 0.0236803162235329 +Service Received 1024 MB so far +*X* 32768 0.069665824049236 +Service Received 2048 MB so far +*X* 16384 0.0735720516373684 +Service Received 3072 MB so far +*X* 8192 0.0704808571278528 +Service Received 4096 MB so far +*X* 4096 0.0678463136889375 +Service Received 5120 MB so far +*X* 2048 0.0689074971287969 +Service Received 6144 MB so far +*X* 1024 0.067924556086875 +Service Received 7168 MB so far +*X* 512 0.0659925788705357 +Service Received 8192 MB so far +*X* 256 0.0679938383983643 +Service Received 9216 MB so far +*X* 128 0.0643013455294467 +Service Received 10240 MB so far +*X* 64 0.0556072588759292 +Service Received 11264 MB so far +*X* 32 0.0294555285172786 +Service Received 12288 MB so far +*X* 16 0.0190104081109929 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/killservertest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob.cmp new file mode 100644 index 00000000..0755ceae --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob.cmp @@ -0,0 +1,12 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host.. + at System.Net.Sockets.Socket.AwaitableSocketAsyncEventArgs.ThrowException(SocketError error, CancellationToken cancellationToken) + at System.Net.Sockets.Socket.AwaitableSocketAsyncEventArgs.GetResult(Int16 token) + at System.Threading.Tasks.ValueTask`1.ValueTaskSourceAsTask.<>c.<.cctor>b__4_0(Object state) +--- End of stack trace from previous location where exception was thrown --- + at Ambrosia.StreamCommunicator.ReadAllRequiredBytesAsync(Stream stream, Byte[] buffer, Int32 offset, Int32 count, CancellationToken ct) + at Ambrosia.StreamCommunicator.ReadIntFixedAsync(Stream stream, CancellationToken ct) + at Ambrosia.Immortal.Dispatch(Int32 bytesToRead) + at Ambrosia.Immortal.<>c__DisplayClass33_0.<b__0>d.MoveNext() diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Restarted.cmp new file mode 100644 index 00000000..27fd6bf9 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Restarted.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.0200299767028937 +Service Received 1024 MB so far +*X* 32768 0.0323557983362098 +Service Received 2048 MB so far +*X* 16384 0.0371632391302329 +Service Received 3072 MB so far +*X* 8192 0.0371775788877274 +Service Received 4096 MB so far +*X* 4096 0.0370883834313388 +Service Received 5120 MB so far +*X* 2048 0.037139796526505 +Service Received 6144 MB so far +*X* 1024 0.0374765437591809 +Service Received 7168 MB so far +*X* 512 0.0356971436909057 +Service Received 8192 MB so far +*X* 256 0.0361775349142877 +Service Received 9216 MB so far +*X* 128 0.0334792598295425 +Service Received 10240 MB so far +*X* 64 0.0293757011943155 +Service Received 11264 MB so far +*X* 32 0.0202221391060848 +Service Received 12288 MB so far +*X* 16 0.0122738566912618 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Verify.cmp new file mode 100644 index 00000000..c9a47859 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_ClientJob_Verify.cmp @@ -0,0 +1,31 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 65536 0.00210209328751363 +Service Received 1024 MB so far +*X* 32768 0.0022988308597278 +Service Received 2048 MB so far +*X* 16384 0.00239575728767967 +Service Received 3072 MB so far +*X* 8192 0.00247131760796524 +Service Received 4096 MB so far +*X* 4096 0.00255890552867392 +Service Received 5120 MB so far +*X* 2048 0.00264473649752394 +Service Received 6144 MB so far +*X* 1024 0.00272849513253126 +Service Received 7168 MB so far +*X* 512 0.00283268735347629 +Service Received 8192 MB so far +*X* 256 0.00293614394404815 +Service Received 9216 MB so far +*X* 128 0.00304401080222147 +Service Received 10240 MB so far +*X* 64 0.0031265840603648 +Service Received 11264 MB so far +*X* 32 0.00311602315309029 +Service Received 12288 MB so far +*X* 16 0.00267055259486893 +Service Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server.cmp new file mode 100644 index 00000000..5917d5a5 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server.cmp @@ -0,0 +1,15 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Server in Entry Point +Unable to read data from the transport connection: An existing connection was forcibly closed by the remote host.. + at System.Net.Sockets.Socket.AwaitableSocketAsyncEventArgs.ThrowException(SocketError error, CancellationToken cancellationToken) + at System.Net.Sockets.Socket.AwaitableSocketAsyncEventArgs.GetResult(Int16 token) + at System.Threading.Tasks.ValueTask`1.ValueTaskSourceAsTask.<>c.<.cctor>b__4_0(Object state) +--- End of stack trace from previous location where exception was thrown --- + at Ambrosia.StreamCommunicator.ReadAllRequiredBytesAsync(Stream stream, Byte[] buffer, Int32 offset, Int32 count, CancellationToken ct) + at Ambrosia.StreamCommunicator.ReadIntFixedAsync(Stream stream, CancellationToken ct) + at Ambrosia.Immortal.Dispatch(Int32 bytesToRead) + at Ambrosia.Immortal.<>c__DisplayClass33_0.<b__0>d.MoveNext() diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Restarted.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Restarted.cmp new file mode 100644 index 00000000..73a2f1f3 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Restarted.cmp @@ -0,0 +1,57 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* At checkpoint, received 17480 messages +*X* At checkpoint, received 17480 messages +*X* becoming primary +*X* I'm healthy after 3000 checks at time:10/1/2020 10:54:44 AM +*X* At checkpoint, received 48019 messages +*X* At checkpoint, received 48019 messages +Received 2048 MB so far +*X* At checkpoint, received 107947 messages +*X* At checkpoint, received 107947 messages +Received 3072 MB so far +*X* I'm healthy after 6000 checks at time:10/1/2020 10:55:31 AM +*X* At checkpoint, received 223542 messages +*X* At checkpoint, received 223542 messages +Received 4096 MB so far +*X* At checkpoint, received 445723 messages +*X* At checkpoint, received 445723 messages +*X* I'm healthy after 9000 checks at time:10/1/2020 10:56:18 AM +Received 5120 MB so far +*X* At checkpoint, received 872676 messages +*X* At checkpoint, received 872676 messages +Received 6144 MB so far +*X* I'm healthy after 12000 checks at time:10/1/2020 10:57:05 AM +*X* At checkpoint, received 1689319 messages +*X* At checkpoint, received 1689319 messages +Received 7168 MB so far +*X* At checkpoint, received 3234125 messages +*X* At checkpoint, received 3234125 messages +Received 8192 MB so far +*X* I'm healthy after 15000 checks at time:10/1/2020 10:57:53 AM +*X* At checkpoint, received 6128565 messages +*X* At checkpoint, received 6128565 messages +Received 9216 MB so far +*X* At checkpoint, received 11376107 messages +*X* At checkpoint, received 11376107 messages +*X* I'm healthy after 18000 checks at time:10/1/2020 10:58:39 AM +Received 10240 MB so far +*X* At checkpoint, received 20496833 messages +*X* At checkpoint, received 20496833 messages +Received 11264 MB so far +*X* At checkpoint, received 35132132 messages +*X* At checkpoint, received 35132132 messages +*X* I'm healthy after 21000 checks at time:10/1/2020 10:59:26 AM +*X* At checkpoint, received 61390049 messages +*X* At checkpoint, received 61390049 messages +Received 12288 MB so far +*X* I'm healthy after 24000 checks at time:10/1/2020 11:00:13 AM +*X* At checkpoint, received 102662457 messages +*X* At checkpoint, received 102662457 messages +*X* I'm healthy after 27000 checks at time:10/1/2020 11:01:00 AM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Verify.cmp new file mode 100644 index 00000000..c873fb0e --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/migrateclient_Server_Verify.cmp @@ -0,0 +1,29 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +*X* I'm healthy after 3000 checks at time:10/2/2020 1:44:49 PM +Received 2048 MB so far +*X* I'm healthy after 6000 checks at time:10/2/2020 1:45:38 PM +Received 3072 MB so far +Received 4096 MB so far +*X* I'm healthy after 9000 checks at time:10/2/2020 1:46:24 PM +Received 5120 MB so far +Received 6144 MB so far +*X* I'm healthy after 12000 checks at time:10/2/2020 1:47:11 PM +Received 7168 MB so far +*X* I'm healthy after 15000 checks at time:10/2/2020 1:47:58 PM +Received 8192 MB so far +Received 9216 MB so far +*X* I'm healthy after 18000 checks at time:10/2/2020 1:48:45 PM +Received 10240 MB so far +Received 11264 MB so far +*X* I'm healthy after 21000 checks at time:10/2/2020 1:49:32 PM +*X* I'm healthy after 24000 checks at time:10/2/2020 1:50:19 PM +Received 12288 MB so far +*X* I'm healthy after 27000 checks at time:10/2/2020 1:51:06 PM +*X* I'm healthy after 30000 checks at time:10/2/2020 1:51:53 PM +Received 13312 MB so far +Bytes received: 13958643712 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server.cmp index a1c7a84d..b8c41ab5 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server.cmp @@ -102,4 +102,3 @@ Received 11264 MB so far *X* I'm healthy after 228000 checks at time:10/31/2018 11:37:51 AM Received 12288 MB so far Bytes received: 12884901888 -DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server_Verify.cmp index 0a2110ab..4d4e17e1 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/multipleclientsperserver_Server_Verify.cmp @@ -91,4 +91,3 @@ Received 11264 MB so far *X* I'm healthy after 237000 checks at time:11/1/2018 1:49:07 PM Received 12288 MB so far Bytes received: 12884901888 -DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB1.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB1.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB2.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_AMB2.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob.cmp new file mode 100644 index 00000000..ce42a64a --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0520278524032053 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob_Verify.cmp new file mode 100644 index 00000000..1c044725 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_ClientJob_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0174801610834601 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server.cmp new file mode 100644 index 00000000..3615fe31 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server.cmp @@ -0,0 +1,11 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Server in Entry Point +*X* At checkpoint, received 972895 messages +*X* At checkpoint, received 972895 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server_Verify.cmp new file mode 100644 index 00000000..c8c1e275 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/overrideoptions_Server_Verify.cmp @@ -0,0 +1,6 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Server in Entry Point +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_ClientJob.cmp new file mode 100644 index 00000000..f20f044f --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_ClientJob.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.00380647997847277 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_Server.cmp new file mode 100644 index 00000000..3d9dd255 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtoblob_Server.cmp @@ -0,0 +1,11 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Server in Entry Point +*X* At checkpoint, received 978408 messages +*X* At checkpoint, received 978408 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_ClientJob.cmp new file mode 100644 index 00000000..77c45062 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_ClientJob.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.00470526772762104 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_Server.cmp new file mode 100644 index 00000000..deaa8682 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/savelogtofileandblob_Server.cmp @@ -0,0 +1,14 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Server in Entry Point +*X* I'm healthy after 3000 checks at time:8/4/2020 5:28:23 PM +*X* I'm healthy after 6000 checks at time:8/4/2020 5:29:20 PM +*X* I'm healthy after 9000 checks at time:8/4/2020 5:30:32 PM +*X* At checkpoint, received 968215 messages +*X* At checkpoint, received 968215 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia.cmp new file mode 100644 index 00000000..ed674d48 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia.cmp @@ -0,0 +1,42 @@ +Missing or illegal runtime mode. +Usage: Ambrosia.exe RegisterInstance [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -cs, --createService=VALUE [A - AutoRecovery | N - NoRecovery | Y - + AlwaysRecover]. + -ps, --pauseAtStart Is pause at start enabled. + -npl, --noPersistLogs Is persistent logging disabled. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -aa, --activeActive Is active-active enabled. + -cv, --currentVersion=VALUE The current version #. + -uv, --upgradeVersion=VALUE The upgrade version #. + -h, --help show this message and exit +Usage: Ambrosia.exe AddReplica [OPTIONS] +Options: + -r, --replicaNum=VALUE The replica # [REQUIRED]. + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -cs, --createService=VALUE [A - AutoRecovery | N - NoRecovery | Y - + AlwaysRecover]. + -ps, --pauseAtStart Is pause at start enabled. + -npl, --noPersistLogs Is persistent logging disabled. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -aa, --activeActive Is active-active enabled. + -cv, --currentVersion=VALUE The current version #. + -uv, --upgradeVersion=VALUE The upgrade version #. + -h, --help show this message and exit +Usage: Ambrosia.exe DebugInstance [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -c, --checkpoint=VALUE The checkpoint # to load. + -cv, --currentVersion=VALUE The version # to debug. + -tu, --testingUpgrade Is testing upgrade. + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia_Core.cmp new file mode 100644 index 00000000..6c87b0ed --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpambrosia_Core.cmp @@ -0,0 +1,42 @@ +Missing or illegal runtime mode. +Usage: dotnet Ambrosia.dll RegisterInstance [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -cs, --createService=VALUE [A - AutoRecovery | N - NoRecovery | Y - + AlwaysRecover]. + -ps, --pauseAtStart Is pause at start enabled. + -npl, --noPersistLogs Is persistent logging disabled. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -aa, --activeActive Is active-active enabled. + -cv, --currentVersion=VALUE The current version #. + -uv, --upgradeVersion=VALUE The upgrade version #. + -h, --help show this message and exit +Usage: dotnet Ambrosia.dll AddReplica [OPTIONS] +Options: + -r, --replicaNum=VALUE The replica # [REQUIRED]. + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -cs, --createService=VALUE [A - AutoRecovery | N - NoRecovery | Y - + AlwaysRecover]. + -ps, --pauseAtStart Is pause at start enabled. + -npl, --noPersistLogs Is persistent logging disabled. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -aa, --activeActive Is active-active enabled. + -cv, --currentVersion=VALUE The current version #. + -uv, --upgradeVersion=VALUE The upgrade version #. + -h, --help show this message and exit +Usage: dotnet Ambrosia.dll DebugInstance [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -l, --log=VALUE The service log path. + -c, --checkpoint=VALUE The checkpoint # to load. + -cv, --currentVersion=VALUE The version # to debug. + -tu, --testingUpgrade Is testing upgrade. + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord.cmp new file mode 100644 index 00000000..a2632a9a --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord.cmp @@ -0,0 +1,18 @@ +Instance name is required.Port number is required. +Worker for Common Runtime for Applications (CRA) [http://github.com/Microsoft/CRA] +Usage: ImmortalCoordinator.exe [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -p, --port=VALUE An port number [REQUIRED]. + -aa, --activeActive Is active-active enabled. + -r, --replicaNum=VALUE The replica # + -an, --assemblyName=VALUE The secure network assembly name. + -ac, --assemblyClass=VALUE The secure network assembly class. + -ip, --IPAddr=VALUE Override automatic self IP detection + -h, --help show this message and exit + -rp, --receivePort=VALUE The service receive from port override. + -sp, --sendPort=VALUE The service send to port override. + -l, --log=VALUE The service log path override. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -lst, --logStorageType=VALUE Can be set to files or blobs. Defaults to + files diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord_Core.cmp new file mode 100644 index 00000000..594f18ba --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpimmcoord_Core.cmp @@ -0,0 +1,18 @@ +Instance name is required.Port number is required. +Worker for Common Runtime for Applications (CRA) [http://github.com/Microsoft/CRA] +Usage: dotnet ImmortalCoordinator.dll [OPTIONS] +Options: + -i, --instanceName=VALUE The instance name [REQUIRED]. + -p, --port=VALUE An port number [REQUIRED]. + -aa, --activeActive Is active-active enabled. + -r, --replicaNum=VALUE The replica # + -an, --assemblyName=VALUE The secure network assembly name. + -ac, --assemblyClass=VALUE The secure network assembly class. + -ip, --IPAddr=VALUE Override automatic self IP detection + -h, --help show this message and exit + -rp, --receivePort=VALUE The service receive from port override. + -sp, --sendPort=VALUE The service send to port override. + -l, --log=VALUE The service log path override. + -lts, --logTriggerSize=VALUE Log trigger size (in MBs). + -lst, --logStorageType=VALUE Can be set to files or blobs. Defaults to + files diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob.cmp new file mode 100644 index 00000000..dc1d6180 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob.cmp @@ -0,0 +1,23 @@ +Job name is required. +Server name is required. + +Usage: Job.exe [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port. + -sp, --sendPort=VALUE The service send to port. + -icp, --ICPort=VALUE The IC port, if the IC should be run in proc. + Note that if this is specified, the + command line ports override stored + registration settings + -mms, --maxMessageSize=VALUE The maximum message size. + -n, --numOfRounds=VALUE The number of rounds. + -nds, --noDescendingSize Disable message descending size. + -c, --autoContinue Is continued automatically at start + -d, --ICDeploymentMode=VALUE IC deployment mode specification (SecondProc( + Default)/InProcDeploy/InProcManual/ + InProcTimeTravel) + -l, --log=VALUE If TTD, the service log path. + -ch, --checkpoint=VALUE If TTD, the checkpoint # to load. + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob_Core.cmp new file mode 100644 index 00000000..c2c601c6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptijob_Core.cmp @@ -0,0 +1,23 @@ +Job name is required. +Server name is required. + +Usage: dotnet Job.dll [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port. + -sp, --sendPort=VALUE The service send to port. + -icp, --ICPort=VALUE The IC port, if the IC should be run in proc. + Note that if this is specified, the + command line ports override stored + registration settings + -mms, --maxMessageSize=VALUE The maximum message size. + -n, --numOfRounds=VALUE The number of rounds. + -nds, --noDescendingSize Disable message descending size. + -c, --autoContinue Is continued automatically at start + -d, --ICDeploymentMode=VALUE IC deployment mode specification (SecondProc( + Default)/InProcDeploy/InProcManual/ + InProcTimeTravel) + -l, --log=VALUE If TTD, the service log path. + -ch, --checkpoint=VALUE If TTD, the checkpoint # to load. + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver.cmp new file mode 100644 index 00000000..dcb97637 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver.cmp @@ -0,0 +1,26 @@ +Job name is required. +Server name is required. + +Usage: Server.exe [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -nbd, --notBidirectional Disable bidirectional communication. + -icp, --ICPort=VALUE The IC port, if the IC should be run in proc. + Note that if this is specified, the + command line ports override stored + registration settings + -n, --numOfJobs=VALUE The number of jobs. + -u, --upgrading Is upgrading. + -m, --memoryUsed=VALUE Memory used. + -c, --autoContinue Is continued automatically at start + -d, --ICDeploymentMode=VALUE IC deployment mode specification (SecondProc( + Default)/InProcDeploy/InProcManual/ + InProcTimeTravel) + -l, --log=VALUE If TTD, the service log path. + -ch, --checkpoint=VALUE If TTD, the checkpoint # to load. + -cv, --currentVersion=VALUE The version # used to time travel debug ( + ignored otherwise). + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver_Core.cmp new file mode 100644 index 00000000..5199abee --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptiserver_Core.cmp @@ -0,0 +1,26 @@ +Job name is required. +Server name is required. + +Usage: dotnet Server.dll [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + -rp, --receivePort=VALUE The service receive from port [REQUIRED]. + -sp, --sendPort=VALUE The service send to port. [REQUIRED] + -nbd, --notBidirectional Disable bidirectional communication. + -icp, --ICPort=VALUE The IC port, if the IC should be run in proc. + Note that if this is specified, the + command line ports override stored + registration settings + -n, --numOfJobs=VALUE The number of jobs. + -u, --upgrading Is upgrading. + -m, --memoryUsed=VALUE Memory used. + -c, --autoContinue Is continued automatically at start + -d, --ICDeploymentMode=VALUE IC deployment mode specification (SecondProc( + Default)/InProcDeploy/InProcManual/ + InProcTimeTravel) + -l, --log=VALUE If TTD, the service log path. + -ch, --checkpoint=VALUE If TTD, the checkpoint # to load. + -cv, --currentVersion=VALUE The version # used to time travel debug ( + ignored otherwise). + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob.cmp new file mode 100644 index 00000000..9464e5d8 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob.cmp @@ -0,0 +1,15 @@ +Job name is required. +Server name is required. +Send port is required. +Receive port is required. + +Usage: Job.exe [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + --rp, --receivePort=VALUE + The service receive from port [REQUIRED]. + --sp, --sendPort=VALUE The service send to port. [REQUIRED] + -n, --numOfRounds=VALUE The number of rounds. + -c, --autoContinue Is continued automatically at start + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob_Core.cmp new file mode 100644 index 00000000..eb6b5ea6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptjob_Core.cmp @@ -0,0 +1,15 @@ +Job name is required. +Server name is required. +Send port is required. +Receive port is required. + +Usage: dotnet Job.dll [OPTIONS] +Options: + -j, --jobName=VALUE The service name of the job [REQUIRED]. + -s, --serverName=VALUE The service name of the server [REQUIRED]. + --rp, --receivePort=VALUE + The service receive from port [REQUIRED]. + --sp, --sendPort=VALUE The service send to port. [REQUIRED] + -n, --numOfRounds=VALUE The number of rounds. + -c, --autoContinue Is continued automatically at start + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver.cmp new file mode 100644 index 00000000..1f5a4688 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver.cmp @@ -0,0 +1,12 @@ +Server name is required. +Send port is required. +Receive port is required. + +Usage: Server.exe [OPTIONS] +Options: + -s, --serverName=VALUE The service name of the server [REQUIRED]. + --rp, --receivePort=VALUE + The service receive from port [REQUIRED]. + --sp, --sendPort=VALUE The service send to port. [REQUIRED] + -c, --autoContinue Is continued automatically at start + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver_Core.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver_Core.cmp new file mode 100644 index 00000000..555888e9 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/showhelpptserver_Core.cmp @@ -0,0 +1,12 @@ +Server name is required. +Send port is required. +Receive port is required. + +Usage: dotnet Server.dll [OPTIONS] +Options: + -s, --serverName=VALUE The service name of the server [REQUIRED]. + --rp, --receivePort=VALUE + The service receive from port [REQUIRED]. + --sp, --sendPort=VALUE The service send to port. [REQUIRED] + -c, --autoContinue Is continued automatically at start + -h, --help show this message and exit diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/startimmcoordlasttest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB1.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB2.cmp index 3797d308..69a8d8ae 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendrestarttest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB1.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB1.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB1.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB2.cmp index 3797d308..e365d7cd 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB2.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unitendtoendtest_AMB2.cmp @@ -1 +1 @@ -The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB1.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB1.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB2.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_AMB2.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob.cmp new file mode 100644 index 00000000..f556c498 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob.cmp @@ -0,0 +1,5 @@ +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0252311076738605 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob_Verify.cmp new file mode 100644 index 00000000..eed1cc30 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_ClientJob_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.00518975369884087 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server.cmp new file mode 100644 index 00000000..24876e33 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server.cmp @@ -0,0 +1,10 @@ +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 970280 messages +*X* At checkpoint, received 970280 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server_Verify.cmp new file mode 100644 index 00000000..8a34a3fc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinprocpipe_Server_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB1.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB1.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB1.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB2.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB2.cmp new file mode 100644 index 00000000..e365d7cd --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_AMB2.cmp @@ -0,0 +1 @@ +Ambrosia.exe Information: 0 : The CRA instance appears to be down. Restart it and this vertex will be instantiated automatically \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob.cmp new file mode 100644 index 00000000..4e93138c --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob.cmp @@ -0,0 +1,9 @@ +*X* ImmortalCoordinator -i=unittestinproctcpclientjob -p=1500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0253109624484106 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob_Verify.cmp new file mode 100644 index 00000000..f06d701b --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_ClientJob_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +Bytes per RPC Throughput (GB/sec) +*X* 1024 0.0235505638837506 +Service Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server.cmp new file mode 100644 index 00000000..2f8fb996 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server.cmp @@ -0,0 +1,14 @@ +*X* ImmortalCoordinator -i=unittestinproctcpserver -p=2500 +*X* Trying to connect IC and Language Binding +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* At checkpoint, received 0 messages +*X* At checkpoint, received 0 messages +*X* becoming primary +*X* Press enter to terminate program. +*X* Server in Entry Point +*X* At checkpoint, received 972895 messages +*X* At checkpoint, received 972895 messages +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server_Verify.cmp new file mode 100644 index 00000000..8a34a3fc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/unittestinproctcp_Server_Verify.cmp @@ -0,0 +1,7 @@ +*X* Trying to connect IC and Language Binding +*X* Trying to do second connection between IC and Language Binding +*X* Press enter to terminate program. +*X* Server in Entry Point +Received 1024 MB so far +Bytes received: 1073741824 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeactiveactiveprimaryonly_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeactiveactiveprimaryonly_ClientJob.cmp new file mode 100644 index 00000000..5c598cf5 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeactiveactiveprimaryonly_ClientJob.cmp @@ -0,0 +1,6 @@ +Bytes per RPC Throughput (GB/sec) +*X* 2500 0.0382975324366815 +Service Received 1024 MB so far +*X* 1250 0.0326551631289168 +Bytes received: 2147481250 +DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob.cmp index cd92da3b..90f18c6c 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob.cmp @@ -7,23 +7,5 @@ Service Received 2048 MB so far Service Received 3072 MB so far *X* 8192 0.0721689542769765 Service Received 4096 MB so far -*X* 4096 0.0710525552161486 -Service Received 5120 MB so far -*X* 2048 0.0696522388392265 -Service Received 6144 MB so far -*X* 1024 0.0713425649090351 -Service Received 7168 MB so far -*X* 512 0.0665708689671939 -Service Received 8192 MB so far -*X* 256 0.0675220535973721 -Service Received 9216 MB so far -*X* 128 0.0669660145734923 -Service Received 10240 MB so far -*X* 64 0.0574610386145937 -Service Received 11264 MB so far -*X* 32 0.0373536713814197 -Service Received 12288 MB so far -*X* 16 0.0216096466067523 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 4294967296 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob_Verify.cmp index 23043301..86e99c56 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_ClientJob_Verify.cmp @@ -7,23 +7,5 @@ Service Received 2048 MB so far Service Received 3072 MB so far *X* 8192 0.00263155041028176 Service Received 4096 MB so far -*X* 4096 0.00263855904980482 -Service Received 5120 MB so far -*X* 2048 0.00263386567717369 -Service Received 6144 MB so far -*X* 1024 0.00263399797853351 -Service Received 7168 MB so far -*X* 512 0.00262654222157599 -Service Received 8192 MB so far -*X* 256 0.0026258115547523 -Service Received 9216 MB so far -*X* 128 0.00259123332180528 -Service Received 10240 MB so far -*X* 64 0.00254187248726103 -Service Received 11264 MB so far -*X* 32 0.00246138566416935 -Service Received 12288 MB so far -*X* 16 0.00236375732620996 -Service Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 4294967296 DONE diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_Verify.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_Verify.cmp index 6e307a6d..4708ff0c 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_Verify.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_Verify.cmp @@ -13,54 +13,7 @@ Received 3072 MB so far *X* I'm healthy after 27000 checks at time:11/27/2018 8:23:30 AM *X* I'm healthy after 30000 checks at time:11/27/2018 8:23:36 AM Received 4096 MB so far -*X* I'm healthy after 33000 checks at time:11/27/2018 8:23:42 AM -*X* I'm healthy after 36000 checks at time:11/27/2018 8:23:48 AM -Received 5120 MB so far -*X* I'm healthy after 39000 checks at time:11/27/2018 8:23:55 AM -*X* I'm healthy after 42000 checks at time:11/27/2018 8:24:01 AM -Received 6144 MB so far -*X* I'm healthy after 45000 checks at time:11/27/2018 8:24:07 AM -*X* I'm healthy after 48000 checks at time:11/27/2018 8:24:13 AM -*X* I'm healthy after 51000 checks at time:11/27/2018 8:24:19 AM -Received 7168 MB so far -*X* I'm healthy after 54000 checks at time:11/27/2018 8:24:25 AM -*X* I'm healthy after 57000 checks at time:11/27/2018 8:24:31 AM -Received 8192 MB so far -*X* I'm healthy after 60000 checks at time:11/27/2018 8:24:37 AM -*X* I'm healthy after 63000 checks at time:11/27/2018 8:24:43 AM -Received 9216 MB so far -*X* I'm healthy after 66000 checks at time:11/27/2018 8:24:49 AM -*X* I'm healthy after 69000 checks at time:11/27/2018 8:24:55 AM -*X* I'm healthy after 72000 checks at time:11/27/2018 8:25:01 AM -Received 10240 MB so far -*X* I'm healthy after 75000 checks at time:11/27/2018 8:25:07 AM -*X* I'm healthy after 78000 checks at time:11/27/2018 8:25:13 AM -*X* I'm healthy after 81000 checks at time:11/27/2018 8:25:19 AM -*X* I'm healthy after 84000 checks at time:11/27/2018 8:25:25 AM -Received 11264 MB so far -*X* I'm healthy after 87000 checks at time:11/27/2018 8:25:31 AM -*X* I'm healthy after 90000 checks at time:11/27/2018 8:25:37 AM -*X* I'm healthy after 93000 checks at time:11/27/2018 8:25:43 AM -*X* I'm healthy after 96000 checks at time:11/27/2018 8:25:49 AM -*X* I'm healthy after 99000 checks at time:11/27/2018 8:25:55 AM -*X* I'm healthy after 102000 checks at time:11/27/2018 8:26:01 AM -*X* I'm healthy after 105000 checks at time:11/27/2018 8:26:07 AM -*X* I'm healthy after 108000 checks at time:11/27/2018 8:26:13 AM -Received 12288 MB so far -*X* I'm healthy after 111000 checks at time:11/27/2018 8:26:19 AM -*X* I'm healthy after 114000 checks at time:11/27/2018 8:26:25 AM -*X* I'm healthy after 117000 checks at time:11/27/2018 8:26:31 AM -*X* I'm healthy after 120000 checks at time:11/27/2018 8:26:37 AM -*X* I'm healthy after 123000 checks at time:11/27/2018 8:26:43 AM -*X* I'm healthy after 126000 checks at time:11/27/2018 8:26:49 AM -*X* I'm healthy after 129000 checks at time:11/27/2018 8:26:55 AM -*X* I'm healthy after 132000 checks at time:11/27/2018 8:27:01 AM -*X* I'm healthy after 135000 checks at time:11/27/2018 8:27:07 AM -*X* I'm healthy after 138000 checks at time:11/27/2018 8:27:13 AM -*X* I'm healthy after 141000 checks at time:11/27/2018 8:27:20 AM -*X* I'm healthy after 144000 checks at time:11/27/2018 8:27:26 AM -*X* I'm healthy after 147000 checks at time:11/27/2018 8:27:32 AM -Received 13312 MB so far -Bytes received: 13958643712 +Bytes received: 4294967296 DONE -*X* I'm healthy after 150000 checks at time:11/27/2018 8:27:38 AM +*X* I'm healthy after 36000 checks at time:6/14/2019 2:13:43 PM +*X* I'm healthy after 39000 checks at time:6/14/2019 2:13:49 PM diff --git a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_upgraded.cmp b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_upgraded.cmp index c8d7e1cf..2578ca1d 100644 --- a/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_upgraded.cmp +++ b/AmbrosiaTest/AmbrosiaTest/Cmp/upgradeserverafterserverdone_Server_upgraded.cmp @@ -1,13 +1,13 @@ *X* Press enter to terminate program. -*X* I'm healthy after 171000 checks at time:10/11/2018 2:48:16 PM -*X* I'm healthy after 174000 checks at time:10/11/2018 2:48:22 PM -*X* I'm healthy after 177000 checks at time:10/11/2018 2:48:28 PM -*X* I'm healthy after 180000 checks at time:10/11/2018 2:48:34 PM -*X* I'm healthy after 183000 checks at time:10/11/2018 2:48:40 PM -Received 13312 MB so far -Bytes received: 13958643712 +*X* I'm healthy after 33000 checks at time:6/14/2019 2:05:21 PM +Received 4096 MB so far +Bytes received: 4294967296 DONE -*X* I'm healthy after 117000 checks at time:10/10/2018 10:47:19 AM -*X* I'm healthy after 120000 checks at time:10/10/2018 10:47:25 AM -*X* At checkpoint, upgraded service received 134201344 messages +*X* I'm healthy after 36000 checks at time:6/14/2019 2:05:27 PM +*X* I'm healthy after 39000 checks at time:6/14/2019 2:05:33 PM +*X* At checkpoint, upgraded service received 245760 messages +*X* At checkpoint, upgraded service received 245760 messages becoming upgraded primary +*X* At checkpoint, upgraded service received 245760 messages +*X* At checkpoint, upgraded service received 245760 messages +*X* I'm healthy after 42000 checks at time:6/14/2019 2:06:02 PM diff --git a/AmbrosiaTest/AmbrosiaTest/EndToEndStressIntegration_Test.cs b/AmbrosiaTest/AmbrosiaTest/EndToEndStressIntegration_Test.cs index 532d51d2..1d994e08 100644 --- a/AmbrosiaTest/AmbrosiaTest/EndToEndStressIntegration_Test.cs +++ b/AmbrosiaTest/AmbrosiaTest/EndToEndStressIntegration_Test.cs @@ -3,6 +3,7 @@ using System.Configuration; using System.Threading; using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.IO; namespace AmbrosiaTest { @@ -71,11 +72,11 @@ public void AMB_Basic_Test() //ImmCoord1 string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; - int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1, false, 9999, 0, 0, "", "", MyUtils.logTypeFiles); //ImmCoord2 string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; - int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, false, 9999, 0, 0, "", "", MyUtils.logTypeFiles); //Client Job Call string logOutputFileName_ClientJob = testName + "_ClientJob.log"; @@ -95,9 +96,13 @@ public void AMB_Basic_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -187,9 +192,13 @@ public void AMB_GiantMessage_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -203,6 +212,7 @@ public void AMB_GiantMessage_Test() //** Test starts job and server then kills the job and restarts it and runs to completion + //** NOTE - this actually kills job once, restarts it, kills again and then restarts it again [TestMethod] public void AMB_KillJob_Test() { @@ -268,7 +278,7 @@ public void AMB_KillJob_Test() // Give it 5seconds to do something before killing it Thread.Sleep(5000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. - + //Kill job at this point as well as ImmCoord1 MyUtils.KillProcess(clientJobProcessID); MyUtils.KillProcess(ImmCoordProcessID1); @@ -281,23 +291,44 @@ public void AMB_KillJob_Test() string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted); + // Give it 5seconds to do something before killing it again + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point as well as ImmCoord1 + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + + //Restart ImmCoord1 Again + string logOutputFileName_ImmCoord1_Restarted_Again = testName + "_ImmCoord1_Restarted_Again.log"; + int ImmCoordProcessID1_Restarted_Again = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1_Restarted_Again); + + // Restart Job Process Again + string logOutputFileName_ClientJob_Restarted_Again = testName + "_ClientJob_Restarted_Again.log"; + int clientJobProcessID_Restarted_Again = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted_Again); + //Delay until client is done - also check Server just to make sure - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 15, false, testName, true); // Total bytes received - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName,true ); + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted_Again, byteSize, 15, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); // Stop things so file is freed up and can be opened in verify - MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(clientJobProcessID_Restarted_Again); MyUtils.KillProcess(serverProcessID); - MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted_Again); MyUtils.KillProcess(ImmCoordProcessID2); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client (before and after restart) MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted_Again); // Verify Server MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); @@ -395,9 +426,13 @@ public void AMB_KillServer_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2_Restarted); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Server (before and after restart) MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); @@ -513,9 +548,13 @@ public void AMB_DoubleKill_RestartJOBFirst_Test() MyUtils.KillProcess(ImmCoordProcessID1_Restarted); MyUtils.KillProcess(ImmCoordProcessID2_Restarted); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client (before and after restart) MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -629,9 +668,13 @@ public void AMB_DoubleKill_RestartSERVERFirst_Test() MyUtils.KillProcess(ImmCoordProcessID1_Restarted); MyUtils.KillProcess(ImmCoordProcessID2_Restarted); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client (before and after restart) MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -715,7 +758,7 @@ public void AMB_StartImmCoordLast_Test() int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); //Delay until client is done - also check Server just to make sure - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 45, false, testName, true); // number of bytes processed pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); // Stop things so file is freed up and can be opened in verify @@ -724,9 +767,13 @@ public void AMB_StartImmCoordLast_Test() MyUtils.KillProcess(ImmCoordProcessID1); MyUtils.KillProcess(ImmCoordProcessID2); - //Verify AMB - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); - MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); @@ -748,7 +795,7 @@ public void AMB_UpgradeServerAFTERServerDone_Test() string clientJobName = testName + "clientjob"; string serverName = testName + "server"; string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; - string byteSize = "13958643712"; + string byteSize = "4294967296"; string newUpgradedPrimary = "becoming upgraded primary"; Utilities MyUtils = new Utilities(); @@ -766,7 +813,7 @@ public void AMB_UpgradeServerAFTERServerDone_Test() AMB_PersistLogs = "Y", AMB_NewLogTriggerSize = "1000", AMB_ActiveActive = "N", - AMB_Version = "9" + AMB_Version = "0" // client always is 0 }; MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); @@ -797,14 +844,14 @@ public void AMB_UpgradeServerAFTERServerDone_Test() //Client Job Call string logOutputFileName_ClientJob = testName + "_ClientJob.log"; - int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob); + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "4", logOutputFileName_ClientJob); //Server Call string logOutputFileName_Server = testName + "_Server.log"; int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server,1, false); // Wait for client job to finish - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 30, false, testName, true); // number of bytes processed // kill Server MyUtils.KillProcess(serverProcessID); @@ -836,8 +883,8 @@ public void AMB_UpgradeServerAFTERServerDone_Test() string logOutputFileName_Server_upgraded = testName + "_Server_upgraded.log"; int serverProcessID_upgraded = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_upgraded, 1, true); - //Delay until client is done - also check Server just to make sure - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 15, false, testName, true); + //Delay until server upgrade is done + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 30, false, testName, true); // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(clientJobProcessID); @@ -855,7 +902,7 @@ public void AMB_UpgradeServerAFTERServerDone_Test() MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_upgraded); // Verify integrity of Ambrosia logs by replaying - MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB2.AMB_Version); } //** Upgrade scenario where the server is upgraded server before client is finished @@ -885,7 +932,7 @@ public void AMB_UpgradeServerBEFOREServerDone_Test() AMB_PersistLogs = "Y", AMB_NewLogTriggerSize = "1000", AMB_ActiveActive = "N", - AMB_Version = "10" + AMB_Version = "0" // client is always 0 }; MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); @@ -957,7 +1004,7 @@ public void AMB_UpgradeServerBEFOREServerDone_Test() int serverProcessID_upgraded = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_upgraded, 1, true); //Delay until client is done - also check Server just to make sure - bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 25, false, testName, true); // number of bytes processed pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 15, false, testName, true); // Stop things so file is freed up and can be opened in verify @@ -972,8 +1019,203 @@ public void AMB_UpgradeServerBEFOREServerDone_Test() // Verify Server MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_upgraded); + // Verify integrity of Ambrosia logs by replaying + // Do not verify log file through replay / ttd - doesn't work when log files span different versions + // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB2.AMB_Version); + } + //** Upgrade scenario where the server is upgraded before client is finished but the + //** Primary is not killed and it is automatically killed + [TestMethod] + public void AMB_UpgradeActiveActivePrimaryOnly_Test() + { + string testName = "upgradeactiveactiveprimaryonly"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "2147481250"; + string newPrimary = "NOW I'm Primary"; + string serverUpgradePrimary = "becoming upgraded primary"; + string upgradingImmCoordPrimary = "Migrating or upgrading. Must commit suicide since I'm the primary"; + string serverKilledMessage = "connection was forcibly closed"; + string immCoordKilledMessage = "KILLING WORKER:"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - primary -- in actuality, this is replica #0 + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "10" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 - check pointer + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ReplicaNumber = "1", + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "10" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.AddReplica); + + //AMB3 - active secondary + string logOutputFileName_AMB3 = testName + "_AMB3.log"; + AMB_Settings AMB3 = new AMB_Settings + { + AMB_ReplicaNumber = "2", + AMB_ServiceName = serverName, + AMB_PortAppReceives = "3000", + AMB_PortAMBSends = "3001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "10" + }; + MyUtils.CallAMB(AMB3, logOutputFileName_AMB3, AMB_ModeConsts.AddReplica); + + //AMB4 - Job + string logOutputFileName_AMB4 = testName + "_AMB4.log"; + AMB_Settings AMB4 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "4000", + AMB_PortAMBSends = "4001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB4, logOutputFileName_AMB4, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(serverName, 1500, logOutputFileName_ImmCoord1, true, 0); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, true, 1); + + //ImmCoord3 + string logOutputFileName_ImmCoord3 = testName + "_ImmCoord3.log"; + int ImmCoordProcessID3 = MyUtils.StartImmCoord(serverName, 3500, logOutputFileName_ImmCoord3, true, 2); + + //ImmCoord4 + string logOutputFileName_ImmCoord4 = testName + "_ImmCoord4.log"; + int ImmCoordProcessID4 = MyUtils.StartImmCoord(clientJobName, 4500, logOutputFileName_ImmCoord4); + + //Server Call - primary + string logOutputFileName_Server1 = testName + "_Server1.log"; + int serverProcessID1 = MyUtils.StartPerfServer("1001", "1000", clientJobName, serverName, logOutputFileName_Server1, 1, false); + Thread.Sleep(1000); // give a second to make it a primary + + //Server Call - checkpointer + string logOutputFileName_Server2 = testName + "_Server2.log"; + int serverProcessID2 = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server2, 1, false); + Thread.Sleep(1000); // give a second + + //Server Call - active secondary + string logOutputFileName_Server3 = testName + "_Server3.log"; + int serverProcessID3 = MyUtils.StartPerfServer("3001", "3000", clientJobName, serverName, logOutputFileName_Server3, 1, false); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("4001", "4000", clientJobName, serverName, "2500", "2", logOutputFileName_ClientJob); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //** Do not kill any processes - since active / active, the various nodes will be killed after successfully updated + + // Run AMB again with new version # upped by 1 (11) + string logOutputFileName_AMB1_Upgraded = testName + "_AMB1_Upgraded.log"; + AMB_Settings AMB1_Upgraded = new AMB_Settings + { + AMB_ReplicaNumber = "3", + AMB_ServiceName = serverName, + AMB_PortAppReceives = "5000", + AMB_PortAMBSends = "5001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "Y", + AMB_Version = "10", + AMB_UpgradeToVersion = "11" + }; + MyUtils.CallAMB(AMB1_Upgraded, logOutputFileName_AMB1_Upgraded, AMB_ModeConsts.AddReplica); + + // start Immortal Coord for server again + string logOutputFileName_ImmCoord1_Upgraded = testName + "_ImmCoord1_Upgraded.log"; + int ImmCoordProcessID1_upgraded = MyUtils.StartImmCoord(serverName, 5500, logOutputFileName_ImmCoord1_Upgraded, true, 3); + + // start server again but with Upgrade = true + string logOutputFileName_Server1_upgraded = testName + "_Server1_upgraded.log"; + int serverProcessID_upgraded = MyUtils.StartPerfServer("5001", "5000", clientJobName, serverName, logOutputFileName_Server1_upgraded, 1, true); + + //** Upgraded service running at this point ... doing logs but no checkpointer + //** Because checkpointer and secondary were not upgraded so they were stopped which means nothing to take the checkpoint or be secondary + + //Delay until finished ... looking at the most recent primary (server3) but also verify others hit done too + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 10, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_upgraded, byteSize, 5, false, testName, true); + + // Also verify ImmCoord has the string to show it is it killed itself and others killed off too + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord1, upgradingImmCoordPrimary, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord1_Upgraded, newPrimary, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord2, immCoordKilledMessage, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord3, immCoordKilledMessage, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1, serverKilledMessage, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1, serverKilledMessage, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server2, serverKilledMessage, 5, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server1_upgraded, serverUpgradePrimary, 5, false, testName, true,false); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(serverProcessID_upgraded); + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(ImmCoordProcessID1_upgraded); + MyUtils.KillProcess(ImmCoordProcessID4); + + MyUtils.KillProcess(serverProcessID2); // This should be dead anyways + MyUtils.KillProcess(serverProcessID3); // This should be dead anyways + MyUtils.KillProcess(ImmCoordProcessID2); // This should be dead anyways + MyUtils.KillProcess(ImmCoordProcessID3); // This should be dead anyways + + // Verify cmp files for client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + } + + + //** Multiple clientscenario where many clients connect to a server [TestMethod] public void AMB_MultipleClientsPerServer_Test() @@ -1107,7 +1349,7 @@ public void AMB_MultipleClientsPerServer_Test() pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob2, byteSize, 15, false, testName, true); pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob3, byteSize, 15, false, testName, true); - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true,false); // don't check for DONE sometimes not getting it ... not big deal // Stop things so file is freed up and can be opened in verify MyUtils.KillProcess(serverProcessID); @@ -1123,7 +1365,6 @@ public void AMB_MultipleClientsPerServer_Test() MyUtils.KillProcess(ImmCoordProcessID2); MyUtils.KillProcess(ImmCoordProcessID3); - // Verify Client MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob0); MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob1); @@ -1133,11 +1374,9 @@ public void AMB_MultipleClientsPerServer_Test() // Verify Server MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); - // Not easy to do unless modify verify log file call due to break down of log files with multiclient names - // Verify integrity of Ambrosia logs by replaying every client ... - MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, "0", "1"); - MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, "0", "2"); - MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, "0", "3"); + // Verify log files + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "4",false,false); // dont check for DONE string as but in PTI that is won't fix + } //** Basically same as the basic test but using large check points - change is in the call to server @@ -1229,9 +1468,411 @@ public void AMB_GiantCheckPoint_Test() } + //** The settings receive port, send port, log location and IP Addr, can now be overridden on the command line when starting the IC. + [TestMethod] + public void AMB_OverrideOptions_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "overrideoptions"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir_Invalid = "C:\\Junk\\"; // give invalid so know valid one overrode it + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + int overrideJobReceivePort = 3000; + int overrideJobSendPort = 3001; + int overrideServerReceivePort = 4000; + int overrideServerSendPort = 4001; + string overrideIPAddress = "99.999.6.11"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "8000", // set to invalid so has to change to valid + AMB_PortAMBSends = "8001", + AMB_ServiceLogPath = ambrosiaLogDir_Invalid, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "9000", + AMB_PortAMBSends = "9001", + AMB_ServiceLogPath = ambrosiaLogDir_Invalid, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord -- WILL FAIL due to invalid IP but this will show that it is actually being set. + string logOutputFileName_ImmCoord_Bad = testName + "_ImmCoord_Bad.log"; + int ImmCoordProcessID_Bad = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord_Bad, false, 9999, overrideJobReceivePort, overrideJobSendPort, ambrosiaLogDir, overrideIPAddress); + + //ImmCoord1 -- Call again but let it auto pick IP which will pass + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1, false, 9999, overrideJobReceivePort, overrideJobSendPort, ambrosiaLogDir); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, false, 9999, overrideServerReceivePort, overrideServerSendPort, ambrosiaLogDir); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob(overrideJobSendPort.ToString(), overrideJobReceivePort.ToString(), clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer(overrideServerSendPort.ToString(), overrideServerReceivePort.ToString(), clientJobName, serverName, logOutputFileName_Server, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + MyUtils.KillProcess(ImmCoordProcessID2); + MyUtils.KillProcess(ImmCoordProcessID_Bad); // should be killed anyways but just make sure + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + //Verify AMB + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_AMB2); + } + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // verify ImmCoord has the string to show it failed because of bad IP ... + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord_Bad, overrideIPAddress, 5, false, testName, true,false); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + + } + + //** Similar to Double Kill restart but it doesn't actually kill it. It just restarts it and it + //** Takes on the new restarted process and original process dies. It is a way to do client migration + [TestMethod] + public void AMB_MigrateClient_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "migrateclient"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + string killJobMessage = "Migrating or upgrading. Must commit suicide since I'm the primary"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false); + + // Give it 2 seconds to do something before killing it + Thread.Sleep(2500); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // DO NOT Kill both Job (and ImmCoord) and Server (and ImmCoord) + // This is main part of test - start Job and Server so it takes over and then Orig Job and Server stop then +// MyUtils.KillProcess(clientJobProcessID); + // MyUtils.KillProcess(serverProcessID); + // MyUtils.KillProcess(ImmCoordProcessID1); + // MyUtils.KillProcess(ImmCoordProcessID2); + + // Restart Job / ImmCoord1 + string logOutputFileName_ImmCoord1_Restarted = testName + "_ImmCoord1_Restarted.log"; + int ImmCoordProcessID1_Restarted = MyUtils.StartImmCoord(clientJobName, 3500, logOutputFileName_ImmCoord1_Restarted); + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted); + + // just give a rest + Thread.Sleep(4000); + + // Restart Server / ImmCoord2 + string logOutputFileName_ImmCoord2_Restarted = testName + "_ImmCoord2_Restarted.log"; + int ImmCoordProcessID2_Restarted = MyUtils.StartImmCoord(serverName, 4500, logOutputFileName_ImmCoord2_Restarted); + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 25, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 20, false, testName, true); + + // verify actually killed first one + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ImmCoord1, killJobMessage, 5, false, testName, true,false); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(serverProcessID_Restarted); + MyUtils.KillProcess(ImmCoordProcessID1_Restarted); + MyUtils.KillProcess(ImmCoordProcessID2_Restarted); + + // Verify Client (before and after restart) + //MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); // causes exception when kill process and that exception can change once in a while so not worth it to verify vs a cmp file + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + + // Verify Server + //MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); // causes exception when kill process and that exception can change once in a while so not worth it to verify vs a cmp file + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Basic test that saves logs to blobs instead of to log files + [TestMethod] + public void AMB_SaveLogsToBlob_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "savelogtoblob"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaBlobLoc = "";// this is where you specify the name of the blob - blank is default + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaBlobLoc, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaBlobLoc, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1,false,9999,0,0,"","", MyUtils.logTypeBlobs); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, false, 9999, 0, 0, "", "", MyUtils.logTypeBlobs); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + MyUtils.KillProcess(ImmCoordProcessID2); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + //** Not sure how to verify if the blob exists ... probably safe assumption that if client and server get the data, + //** Then safe to say that blob worked. + } + + + //** This saves client info to blob but server info to a file + [TestMethod] + public void AMB_SaveLogsToFileAndBlob_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "savelogtofileandblob"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaBlobLoc = testName + "blobstore\\"; // specify the name of the blob instead of taking default by making blank + string ambrosiaFileLoc = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaBlobLoc, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaFileLoc, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1, false, 9999, 0, 0, "", "", MyUtils.logTypeBlobs); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2, false, 9999, 0, 0, "", "", MyUtils.logTypeFiles); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + MyUtils.KillProcess(ImmCoordProcessID2); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + //** Not sure how to verify if the blob exists ... probably safe assumption that if client and server get the data, + //** Then safe to say that blob worked. + } + + [TestCleanup()] public void Cleanup() { + + // Cleans up the bad IP file - it is just created in the local directory + string BadIPFileDirectory = "99.999.6.11overrideoptionsclientjob_0"; + if (Directory.Exists(BadIPFileDirectory)) + { + Directory.Delete(BadIPFileDirectory, true); + } + // Kill all ImmortalCoordinators, Job and Server exes Utilities MyUtils = new Utilities(); MyUtils.TestCleanup(); diff --git a/AmbrosiaTest/AmbrosiaTest/InProc_Pipe_Test.cs b/AmbrosiaTest/AmbrosiaTest/InProc_Pipe_Test.cs new file mode 100644 index 00000000..8830aa35 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/InProc_Pipe_Test.cs @@ -0,0 +1,1289 @@ +using System; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Configuration; + + +namespace AmbrosiaTest +{ + /// + /// Summary description for InProc_Test + /// + [TestClass] + public class InProc_Pipe_Test + { + //************* Init Code ***************** + // NOTE: Need this bit of code at the top of every "[TestClass]" (per .cs test file) to get context \ details of the current test running + // NOTE: Make sure all names be "Azure Safe". No capital letters and no underscore. + [TestInitialize()] + public void Initialize() + { + Utilities MyUtils = new Utilities(); + MyUtils.TestInitialize(); + } + //************* Init Code ***************** + + + private TestContext testContextInstance; + + /// + ///Gets or sets the test context which provides + ///information about and functionality for the current test run. + /// + public TestContext TestContext + { + get + { + return testContextInstance; + } + set + { + testContextInstance = value; + } + } + + + //** Simple end to end where Client is InProc Pipe and Server is two proc + [TestMethod] + public void AMB_InProc_Pipe_ClientOnly_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocpipeclientonly"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeInProc, "1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID2); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + // To verify Server in one location and client in another would take bigger code change + // Not that crucial to do ... but TO DO: make it so verify log in two different places. + //MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Simple end to end where Server is InProc Pipe and Client is two proc + [TestMethod] + public void AMB_InProc_Pipe_ServerOnly_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocpipeserveronly"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeSecondProc); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + // To verify Server in one location and client in another would take bigger code change + // Not that crucial to do ... but TO DO: make it so verify log in two different places. + //MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Basic end to end test starts job and server and runs a bunch of bytes through + //** Only a few rounds but more extensive then unit tests + [TestMethod] + public void AMB_InProc_Basic_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocbasictest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "3221225472"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "32768", "3", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Similar to Double Kill restart but it doesn't actually kill it. It just restarts it and it + //** takes on the new restarted process and original process dies. It is a way to do client migration + [TestMethod] + public void AMB_InProc_MigrateClient_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocmigrateclient"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + // string killJobMessage = "Migrating or upgrading. Must commit suicide since I'm the primary"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc, "2500"); + + // Give it 3 seconds to do something before killing it + Thread.Sleep(3000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // DO NOT Kill both Job and Server + // This is main part of test - get it to have Job and Server take over and run + // Orig Job and Server stop then + // MyUtils.KillProcess(clientJobProcessID); + // MyUtils.KillProcess(serverProcessID); + + // Restart Job + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted,MyUtils.deployModeInProc,"3500"); + + // just give a rest + Thread.Sleep(2000); + + // Restart Server + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false,0,MyUtils.deployModeInProc,"4500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 20, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 20, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + + // Verify Server + //MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // check message - comes from Imm Coord so won't show in Job for InProc + //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, killJobMessage, 5, false, testName, true,false); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Basically same as the basic test but using large check points - change is in the call to server + //** See memory usage spike when checkpoint size is bigger + [TestMethod] + public void AMB_InProc_GiantCheckPoint_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocgiantcheckpointtest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + long giantCheckpointSize = 2000483648;// 2147483648; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "10", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, giantCheckpointSize, MyUtils.deployModeInProc, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** This test does 5 rounds of messages starting with 64MB and cutting in half each time + //** Basically same as the basic test but passing giant message - the difference is in the job.exe call and that is it + [TestMethod] + public void AMB_InProc_GiantMessage_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocgiantmessagetest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "5368709120"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "67108864", "5", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0, MyUtils.deployModeInProc, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Test starts Job and Server then kills both Job and Server + // restarts both with JOB restarted first + [TestMethod] + public void AMB_InProc_DoubleKill_RestartJOBFirst_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inprocdoublekilljob"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // Kill both Job and Server + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Actual test part here -- restarting JOB first before restarting Server + // Restart Job + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted,MyUtils.deployModeInProc,"1500"); + + // just give a rest + Thread.Sleep(3000); + + // Restart Server + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false,0,MyUtils.deployModeInProc,"2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 20, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 20, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Test starts Job and Server then kills both Job and Server + // restarts both with SERVER restarted first + [TestMethod] + public void AMB_InProc_DoubleKill_RestartSERVERFirst_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inprocdoublekillserver"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // Kill both Job and Server + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Actual test part here -- restarting SERVER first before restarting Job + // Restart Server + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // just give a rest + Thread.Sleep(3000); + + // Restart Job + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted,MyUtils.deployModeInProc,"1500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 20, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 20, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Test starts job and server then kills the job and restarts it and runs to completion + //** NOTE - this actually kills job once, restarts it, kills again and then restarts it again + [TestMethod] + public void AMB_InProc_KillJob_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inprockilljobtest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProc, "2500"); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob, MyUtils.deployModeInProc, "1500"); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point + MyUtils.KillProcess(clientJobProcessID); + + // Restart Job Process + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted,MyUtils.deployModeInProc,"1500"); + + // Give it 5 seconds to do something before killing it again + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point + MyUtils.KillProcess(clientJobProcessID_Restarted); + + // Restart Job Process Again + string logOutputFileName_ClientJob_Restarted_Again = testName + "_ClientJob_Restarted_Again.log"; + int clientJobProcessID_Restarted_Again = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted_Again,MyUtils.deployModeInProc,"1500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted_Again, byteSize, 25, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted_Again); + MyUtils.KillProcess(serverProcessID); + + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted_Again); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Test starts job and server then kills the server and restarts it and runs to completion + [TestMethod] + public void AMB_InProc_KillServer_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inprockillservertest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", // NOTE: if put this to "Y" then when kill it, it will become a checkpointer which never becomes primary + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // Give it 10 seconds to do something before killing it + Thread.Sleep(10000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill Server at this point as well as ImmCoord2 + MyUtils.KillProcess(serverProcessID); + + // Restart Server Process + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false,0,MyUtils.deployModeInProc,"2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 25, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Server (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Multiple clientscenario where many clients connect to a server + [TestMethod] + public void AMB_InProc_MultipleClientsPerServer_Test() + { + + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocmultipleclientsperserver"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "12884901888"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Server + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 - Job 1 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = clientJobName + "0", + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //AMB3 - Job 2 + string logOutputFileName_AMB3 = testName + "_AMB3.log"; + AMB_Settings AMB3 = new AMB_Settings + { + AMB_ServiceName = clientJobName + "1", + AMB_PortAppReceives = "3000", + AMB_PortAMBSends = "3001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB3, logOutputFileName_AMB3, AMB_ModeConsts.RegisterInstance); + + //AMB4 - Job 3 + string logOutputFileName_AMB4 = testName + "_AMB4.log"; + AMB_Settings AMB4 = new AMB_Settings + { + AMB_ServiceName = clientJobName + "2", + AMB_PortAppReceives = "4000", + AMB_PortAMBSends = "4001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB4, logOutputFileName_AMB4, AMB_ModeConsts.RegisterInstance); + + //AMB5 - job 4 + string logOutputFileName_AMB5 = testName + "_AMB5.log"; + AMB_Settings AMB5 = new AMB_Settings + { + AMB_ServiceName = clientJobName + "3", + AMB_PortAppReceives = "5000", + AMB_PortAMBSends = "5001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB5, logOutputFileName_AMB5, AMB_ModeConsts.RegisterInstance); + + // Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("1001", "1000", clientJobName, serverName, logOutputFileName_Server, 4, false,0, MyUtils.deployModeInProc, "1500"); + + // Client call + // For multiple clients, you have a "root" name and each of the client names are then root name + instance number starting at 0 + string logOutputFileName_ClientJob0 = testName + "_ClientJob0.log"; + int clientJobProcessID0 = MyUtils.StartPerfClientJob("2001", "2000", clientJobName + "0", serverName, "65536", "3", logOutputFileName_ClientJob0,MyUtils.deployModeInProc,"2500"); + + string logOutputFileName_ClientJob1 = testName + "_ClientJob1.log"; + int clientJobProcessID1 = MyUtils.StartPerfClientJob("3001", "3000", clientJobName + "1", serverName, "65536", "3", logOutputFileName_ClientJob1, MyUtils.deployModeInProc, "3500"); + + string logOutputFileName_ClientJob2 = testName + "_ClientJob2.log"; + int clientJobProcessID2 = MyUtils.StartPerfClientJob("4001", "4000", clientJobName + "2", serverName, "65536", "3", logOutputFileName_ClientJob2, MyUtils.deployModeInProc, "4500"); + + string logOutputFileName_ClientJob3 = testName + "_ClientJob3.log"; + int clientJobProcessID3 = MyUtils.StartPerfClientJob("5001", "5000", clientJobName + "3", serverName, "65536", "3", logOutputFileName_ClientJob3, MyUtils.deployModeInProc, "5500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob0, byteSize, 25, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob1, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob2, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob3, byteSize, 15, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true,false); // don't check for DONE sometimes not getting it ... not big deal + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(serverProcessID); + + MyUtils.KillProcess(clientJobProcessID0); + MyUtils.KillProcess(clientJobProcessID1); + MyUtils.KillProcess(clientJobProcessID2); + MyUtils.KillProcess(clientJobProcessID3); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob0); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob1); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob2); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob3); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify log files + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version, "4",false,false); + + } + + + //** Upgrade scenario where the server is upgraded after server is finished - all done InProc + [TestMethod] + public void AMB_InProc_UpgradeServerAFTERServerDone_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocupgradeafterserverdone"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "4294967296"; + string newUpgradedPrimary = "becoming upgraded primary"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" // client is always 0 + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "9" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "4", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // Wait for client job to finish + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 30, false, testName, true); // number of bytes processed + + // kill Server + MyUtils.KillProcess(serverProcessID); + + // Run AMB again with new version # upped by 9 (10) + string logOutputFileName_AMB2_Upgraded = testName + "_AMB2_Upgraded.log"; + AMB_Settings AMB2_Upgraded = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "9", + AMB_UpgradeToVersion = "10" + }; + MyUtils.CallAMB(AMB2_Upgraded, logOutputFileName_AMB2_Upgraded, AMB_ModeConsts.RegisterInstance); + + // start server again but with Upgrade = true + string logOutputFileName_Server_upgraded = testName + "_Server_upgraded.log"; + int serverProcessID_upgraded = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_upgraded, 1, true,0,MyUtils.deployModeInProc,"2500"); + + //Delay until server upgrade is done + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 30, false, testName, true); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, newUpgradedPrimary, 5, false, testName, true, false); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_upgraded); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_upgraded); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB2.AMB_Version); + } + + + //** Upgrade scenario where the server is upgraded server before client is finished - all done InProc + [TestMethod] + public void AMB_InProc_UpgradeServerBEFOREServerDone_Test() + { + + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocupgradebeforeserverdone"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" // client is always 0 + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "10" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob,MyUtils.deployModeInProc,"1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false,0,MyUtils.deployModeInProc,"2500"); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // kill Server + MyUtils.KillProcess(serverProcessID); + + // Run AMB again with new version # upped by 1 (11) + string logOutputFileName_AMB2_Upgraded = testName + "_AMB2_Upgraded.log"; + AMB_Settings AMB2_Upgraded = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "10", + AMB_UpgradeToVersion = "11" + }; + MyUtils.CallAMB(AMB2_Upgraded, logOutputFileName_AMB2_Upgraded, AMB_ModeConsts.RegisterInstance); + + // start server again but with Upgrade = true + string logOutputFileName_Server_upgraded = testName + "_Server_upgraded.log"; + int serverProcessID_upgraded = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_upgraded, 1, true,0,MyUtils.deployModeInProc,"2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 25, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_upgraded); + + // Verify Client + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_upgraded); + + // Verify integrity of Ambrosia logs by replaying and TTD + // Do not verify log file through replay / ttd - doesn't work when log files span different versions + // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB2.AMB_Version); + + } + + + [TestCleanup()] + public void Cleanup() + { + // Kill all ImmortalCoordinators, Job and Server exes + Utilities MyUtils = new Utilities(); + MyUtils.InProcPipeTestCleanup(); + } + + + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/InProc_TCP_Test.cs b/AmbrosiaTest/AmbrosiaTest/InProc_TCP_Test.cs new file mode 100644 index 00000000..7c9478bc --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/InProc_TCP_Test.cs @@ -0,0 +1,795 @@ +using System; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Configuration; + + +namespace AmbrosiaTest +{ + /// + /// Summary description for InProc_Test + /// + [TestClass] + public class InProc_TCP_Test + { + //************* Init Code ***************** + // NOTE: Need this bit of code at the top of every "[TestClass]" (per .cs test file) to get context \ details of the current test running + // NOTE: Make sure all names be "Azure Safe". No capital letters and no underscore. + [TestInitialize()] + public void Initialize() + { + Utilities MyUtils = new Utilities(); + MyUtils.TestInitialize(); + } + //************* Init Code ***************** + + + private TestContext testContextInstance; + + /// + ///Gets or sets the test context which provides + ///information about and functionality for the current test run. + /// + public TestContext TestContext + { + get + { + return testContextInstance; + } + set + { + testContextInstance = value; + } + } + + + //** Basic end to end test for the InProc TCP feature where Client is InProc and Server is Two Proc + [TestMethod] + public void AMB_InProc_TCP_ClientOnly_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inproctcpclientonly"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord2 + string logOutputFileName_ImmCoord2 = testName + "_ImmCoord2.log"; + int ImmCoordProcessID2 = MyUtils.StartImmCoord(serverName, 2500, logOutputFileName_ImmCoord2); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID2); + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + // Unable to verify when client files in different location than server log - TO DO: modify method to do this + // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + + //** Basic end to end test for the InProc TCP feature where Server is InProc and Client is Two Proc + [TestMethod] + public void AMB_InProc_TCP_ServerOnly_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inproctcpserveronly"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //ImmCoord1 + string logOutputFileName_ImmCoord1 = testName + "_ImmCoord1.log"; + int ImmCoordProcessID1 = MyUtils.StartImmCoord(clientJobName, 1500, logOutputFileName_ImmCoord1); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeSecondProc); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + MyUtils.KillProcess(ImmCoordProcessID1); + + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + if (MyUtils.NetFrameworkTestRun) + { + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + // Unable to verify when client files in different location than server log - TO DO: modify method to do this + // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + //** Basic end to end test for the InProc where client is Pipe and Server is TCP. + [TestMethod] + public void AMB_InProc_ClientTCP_ServerPipe_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocclienttcpserverpipe"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProc, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + if (MyUtils.NetFrameworkTestRun) + { + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Basic end to end test for the InProc where client is Pipe and Server is TCP. + [TestMethod] + public void AMB_InProc_ClientPipe_ServerTCP_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inprocclientpipeservertcp"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "1073741824"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "1024", "1", logOutputFileName_ClientJob, MyUtils.deployModeInProc, "1500"); + + // Give it a few seconds to start + Thread.Sleep(2000); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 5, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 5, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID); + + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + if (MyUtils.NetFrameworkTestRun) + { + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Test starts job and server then kills the job and restarts it and runs to completion + //** NOTE - this actually kills job once, restarts it, kills again and then restarts it again + [TestMethod] + public void AMB_InProc_TCP_KillJob_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inproctcpkilljobtest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + // Give it 5seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point + MyUtils.KillProcess(clientJobProcessID); + + // Restart Job Process + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted, MyUtils.deployModeInProcManual, "1500"); + + // Give it 5 seconds to do something before killing it again + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill job at this point + MyUtils.KillProcess(clientJobProcessID_Restarted); + + // Restart Job Process Again + string logOutputFileName_ClientJob_Restarted_Again = testName + "_ClientJob_Restarted_Again.log"; + int clientJobProcessID_Restarted_Again = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted_Again, MyUtils.deployModeInProcManual, "1500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted_Again, byteSize, 15, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted_Again); + MyUtils.KillProcess(serverProcessID); + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted_Again); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Test starts job and server then kills the server and restarts it and runs to completion + [TestMethod] + public void AMB_InProc_TCP_KillServer_Test() + { + //NOTE - the Cleanup has test name hard coded so if this changes, update Cleanup section too + string testName = "inproctcpkillservertest"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", // NOTE: if put this to "Y" then when kill it, it will become a checkpointer which never becomes primary + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + // Give it 10 seconds to do something before killing it + Thread.Sleep(10000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + //Kill Server at this point as well as ImmCoord2 + MyUtils.KillProcess(serverProcessID); + + // Restart Server Process + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 25, false, testName, true); // Total Bytes received needs to be accurate + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Server (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // .netcore has slightly different cmp file - not crucial to try to have separate files + if (MyUtils.NetFrameworkTestRun) + { + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + //** Upgrade scenario where the server is upgraded to diff server before client is finished - all done InProc TCP + [TestMethod] + public void AMB_InProc_TCP_UpgradeServer_Test() + { + + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inproctcpupgradeserver"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" // Client is always 0 + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "10" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + // Give it 5 seconds to do something before killing it + Thread.Sleep(5000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // kill Server + MyUtils.KillProcess(serverProcessID); + + // Run AMB again with new version # upped by 1 (11) + string logOutputFileName_AMB2_Upgraded = testName + "_AMB2_Upgraded.log"; + AMB_Settings AMB2_Upgraded = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "10", + AMB_UpgradeToVersion = "11" + }; + MyUtils.CallAMB(AMB2_Upgraded, logOutputFileName_AMB2_Upgraded, AMB_ModeConsts.RegisterInstance); + + // start server again but with Upgrade = true + string logOutputFileName_Server_upgraded = testName + "_Server_upgraded.log"; + int serverProcessID_upgraded = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_upgraded, 1, true, 0, MyUtils.deployModeInProcManual, "2500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, byteSize, 25, false, testName, true); // number of bytes processed + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_upgraded, byteSize, 15, false, testName, true); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID); + MyUtils.KillProcess(serverProcessID_upgraded); + + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + if (MyUtils.NetFrameworkTestRun) + { + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_upgraded); + + // Verify integrity of Ambrosia logs by replaying + // Do not verify log file through replay / ttd - doesn't work when log files span different versions + // MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB2.AMB_Version); + } + + + //** Similar to Double Kill restart but it doesn't actually kill it. It just restarts it and it + //** takes on the new restarted process and original process dies. It is a way to do client migration + [TestMethod] + public void AMB_InProc_TCP_MigrateClient_Test() + { + //NOTE - the Cleanup has this hard coded so if this changes, update Cleanup section too + string testName = "inproctcpmigrateclient"; + string clientJobName = testName + "clientjob"; + string serverName = testName + "server"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string byteSize = "13958643712"; + //string killJobMessage = "Migrating or upgrading. Must commit suicide since I'm the primary"; + + Utilities MyUtils = new Utilities(); + + //AMB1 - Job + string logOutputFileName_AMB1 = testName + "_AMB1.log"; + AMB_Settings AMB1 = new AMB_Settings + { + AMB_ServiceName = clientJobName, + AMB_PortAppReceives = "1000", + AMB_PortAMBSends = "1001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB1, logOutputFileName_AMB1, AMB_ModeConsts.RegisterInstance); + + //AMB2 + string logOutputFileName_AMB2 = testName + "_AMB2.log"; + AMB_Settings AMB2 = new AMB_Settings + { + AMB_ServiceName = serverName, + AMB_PortAppReceives = "2000", + AMB_PortAMBSends = "2001", + AMB_ServiceLogPath = ambrosiaLogDir, + AMB_CreateService = "A", + AMB_PauseAtStart = "N", + AMB_PersistLogs = "Y", + AMB_NewLogTriggerSize = "1000", + AMB_ActiveActive = "N", + AMB_Version = "0" + }; + MyUtils.CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.RegisterInstance); + + //Client Job Call + string logOutputFileName_ClientJob = testName + "_ClientJob.log"; + int clientJobProcessID = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob, MyUtils.deployModeInProcManual, "1500"); + + //Server Call + string logOutputFileName_Server = testName + "_Server.log"; + int serverProcessID = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server, 1, false, 0, MyUtils.deployModeInProcManual, "2500"); + + // Give it 3 seconds to do something before killing it + Thread.Sleep(3000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // DO NOT Kill both Job and Server + // This is main part of test - get it to have Job and Server take over and run + // Orig Job and Server stop then + // MyUtils.KillProcess(clientJobProcessID); + // MyUtils.KillProcess(serverProcessID); + + // Restart Job + string logOutputFileName_ClientJob_Restarted = testName + "_ClientJob_Restarted.log"; + int clientJobProcessID_Restarted = MyUtils.StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Restarted, MyUtils.deployModeInProcManual, "3500"); + + // just give a rest + Thread.Sleep(2000); + + // Restart Server + string logOutputFileName_Server_Restarted = testName + "_Server_Restarted.log"; + int serverProcessID_Restarted = MyUtils.StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Restarted, 1, false, 0, MyUtils.deployModeInProcManual, "4500"); + + //Delay until client is done - also check Server just to make sure + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob_Restarted, byteSize, 20, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server_Restarted, byteSize, 20, false, testName, true); + + // verify actually killed first one - this output was from Imm Coord but not showing any more + //pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, killJobMessage, 5, false, testName, true,false); + + // Stop things so file is freed up and can be opened in verify + MyUtils.KillProcess(clientJobProcessID_Restarted); + MyUtils.KillProcess(serverProcessID_Restarted); + + // Verify Client - .net core with TCP causes extra message in output of core, so don't cmp to others + if (MyUtils.NetFrameworkTestRun) + { + // Verify Client (before and after restart) + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Restarted); + } + + // Verify Server + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server); + MyUtils.VerifyTestOutputFileToCmpFile(logOutputFileName_Server_Restarted); + + // Verify integrity of Ambrosia logs by replaying + MyUtils.VerifyAmbrosiaLogFile(testName, Convert.ToInt64(byteSize), true, true, AMB1.AMB_Version); + } + + + [TestCleanup()] + public void Cleanup() + { + // Kill all ImmortalCoordinators, Job and Server exes + Utilities MyUtils = new Utilities(); + MyUtils.InProcTCPTestCleanup(); + } + + + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Neg_Tests.cs b/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Neg_Tests.cs new file mode 100644 index 00000000..0d96a529 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Neg_Tests.cs @@ -0,0 +1,551 @@ +using System; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Configuration; +using System.IO; + + +namespace AmbrosiaTest +{ + [TestClass] + public class JS_CG_NegativeTests + { + + //************* Init Code ***************** + // NOTE: Build the javascript test app once at beginning of the class. + // NOTE: Make sure all names be "Azure Safe". No capital letters and no underscore. + + [ClassInitialize()] + public static void Class_Initialize(TestContext tc) + { + // Build the JS app first from a JS file + JS_Utilities JSUtils = new JS_Utilities(); + //*#*#*# COMMENT OUT FOR NOW - EASIER WITH TEST WRITING ETC JSUtils.BuildJSTestApp(); + //JSUtils.BuildJSTestApp(); + } + + [TestInitialize()] + public void Initialize() + { + Utilities MyUtils = new Utilities(); + MyUtils.TestInitialize(); + } + //************* Init Code ***************** + + + //************* Negative Tests ***************** + + + // ** Shotgun approach of throwing a bunch of ts files against code gen and see if any fails beyond just saying it is not annotated + [TestMethod] + public void JS_CG_Neg_AmbrosiaSrcFiles_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + Utilities MyUtils = new Utilities(); + + // get ambrosia-node source files + string AmbrosiaNodeDir = @"../../../../JSCodeGen/node_modules/ambrosia-node/src/"; + + // loop through all the Ambrosia JS src files and generate them + foreach (string currentSrcFile in Directory.GetFiles(AmbrosiaNodeDir, "*.ts")) + { + + string fileName = Path.GetFileName(currentSrcFile); + + string PrimaryErrorMessage = "Error: The input source file"; + string SecondaryErrorMessage = " does not publish any entities (exported functions, static methods, type aliases and enums annotated with an @ambrosia JSDoc tag)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(fileName, true, PrimaryErrorMessage, SecondaryErrorMessage,true); + } + } + + + [TestMethod] + public void JS_CG_Neg_AmbrosiaTagNewLine() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_AmbrosiaTagNewline.ts"; + string PrimaryErrorMessage = "Error: A newline is not allowed in the attributes of an @ambrosia tag"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_AsyncFcthn() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_AsyncFctn.ts"; + string PrimaryErrorMessage = "as a post method (reason: async functions are not supported)"; + string SecondaryErrorMessage = "Error: Unable to publish function 'ComputePI'"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_CircularReference() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_CircReference.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'CNames'"; + string SecondaryErrorMessage = "as a type (reason: Deferred expansion of type(s) failed (reason: Unable to expand type definition '{ first: string, last: string, priorNames: CNames[] }' because it has a circular reference with definition 'CName[]')) "; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_CommaAttrib() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_CommasBetweenAttrib.ts"; + string PrimaryErrorMessage = "Error: Malformed @ambrosia attribute 'publish=true version=1 doRuntimeTypeChecking=true'"; + string SecondaryErrorMessage = "expected format is: attrName=attrValue, ..."; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_GenericType() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_GenericType.ts"; + + // Consumer and Publisher error msg the same ... since part of message has path (which can differ from machine to machine) - verify first part of message in conumser string and second part in Publisher + string PrimaryErrorMessage = "Unable to publish function 'generic'"; + string SecondaryErrorMessage = "TS_GenericType.ts:8:5) as a post method (reason: Generic functions are not supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_IntersectionType() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NoIntersectionType.ts"; + + // Consumer and Publisher error msg the same ... since part of message has path (which can differ from machine to machine) - verify first part of message in conumser string and second part in Publisher + string PrimaryErrorMessage = "Error: Unable to publish type alias 'IntersectionType'"; + string SecondaryErrorMessage = "as a type (reason: The published type 'IntersectionType' has an invalid type ('FullName[] & ShortName[]'); intersection types are not supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_MethodIDInt() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_MethodIDInt.ts"; + string PrimaryErrorMessage = "Error: The value ('Hello') supplied for @ambrosia attribute 'methodID' is not an integer"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_MethodIDNeg() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_MethodIDNeg.ts"; + string PrimaryErrorMessage = "Error: The value (-2) supplied for @ambrosia"; + string SecondaryErrorMessage = "attribute 'methodID' cannot be negative"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_MethodIDOnType() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_MethodIDOnType.ts"; + string PrimaryErrorMessage = "Error: The value ('Hello') supplied for @ambrosia attribute 'methodID' is not an integer"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_NamespaceModule() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NamespaceModule.ts"; + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a module"; + string SecondaryErrorMessage = "valid targets are: function, static method, type alias, and enum"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_NestedFctn() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NestedFunction.ts"; // Cannot publish a local (nested) function + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a local function"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_NestedFctn2() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NestedFunction2.ts"; // Cannot publish a local (nested) function in a static method + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a local function"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_NoTaggedItems() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NoTaggedItems.ts"; + string PrimaryErrorMessage = "Error: The input source file (TS_NoTaggedItems.ts) does not publish any entities (exported functions, static methods, type aliases and enums annotated with an @ambrosia JSDoc tag)"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_NoFunctionComplexTypes() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NoFunctionComplexType.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'myComplexType'"; + string SecondaryErrorMessage = "as a type (reason: The published type 'myComplexType' [property 'fn'] has an invalid type ('() => void'); function types are not supported"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_NoFunctionTypes() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_NoFunctionType.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'fnType'"; + string SecondaryErrorMessage = "as a type (reason: The published type 'fnType' has an invalid type ('(p1: number) => string'); function types are not supported) "; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_OptionalProp() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_OptionalProperties.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'MyTypeWithOptionalMembers'"; + string SecondaryErrorMessage = "as a type (reason: Property 'bar' is optional; types with optional properties are not supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_OverloadFctn() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_OverloadedFunction.ts"; + string PrimaryErrorMessage = "Error: Unable to publish function 'fnOverload'"; + string SecondaryErrorMessage = "as a post method (reason: The @ambrosia tag must appear on the implementation of an overloaded function"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_PublishClass() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_PublishClass.ts"; + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a class"; + string SecondaryErrorMessage = "valid targets are: function, static method, type alias, and enum"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_PublishMethodRef() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_PublishMethodBeforeRef.ts"; + string PrimaryErrorMessage = "Error: Unable to publish function 'fn'"; + string SecondaryErrorMessage = "as a post method (reason: The following types must be published before any method can be published: 'Name' found in published type 'MyType')"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_QuoteAttribVal() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_QuoteAttributeValue.ts"; + string PrimaryErrorMessage = "Error: The value ('\"true\"') supplied for @ambrosia attribute 'publish' is not a boolean"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_RunTimeBool() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_RunTimeBool.ts"; + string PrimaryErrorMessage = "Error: The value ('Hello') supplied for @ambrosia attribute 'doRuntimeTypeChecking' is not a boolean "; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_StaticMethod1() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StaticMethod1.ts"; // he parent class of a published static method must be exported. + string PrimaryErrorMessage = "Warning: Skipping static method 'hello'"; + string SecondaryErrorMessage = "Error: The input source file (TS_StaticMethod1.ts) does not publish any entities (exported functions, static methods, type aliases and enums annotated with an @ambrosia JSDoc tag)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_StaticMethod2() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StaticMethod2.ts"; // A method must have the 'static' modifier to be published. + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a non-static method"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_StaticMethod3() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StaticMethod3.ts"; // Cannot publish a static method from a class expression + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a static method of a class expression"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_StaticMethod4() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StaticMethod4.ts"; // Can't publish a private static method + string PrimaryErrorMessage = "Error: The @ambrosia tag is not valid on a private static method"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_StringEnum() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StringEnum.ts"; + + // Consumer and Publisher error msg the same ... since part of message has path (which can differ from machine to machine) - verify first part of message in conumser string and second part in Publisher + string PrimaryErrorMessage = "Error: Unable to publish enum 'PrintMediaString'"; + string SecondaryErrorMessage = "TS_StringEnum.ts:6:5) as a type (reason: Unable to parse enum value 'NewspaperStringEnum' (\"NEWSPAPER\"); only integers are supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_TagInterface() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_TagInterface.ts"; + string PrimaryErrorMessage = "Error: The input source file (TS_TagInterface.ts) does not publish any entities (exported functions, static methods, type aliases and enums annotated with an @ambrosia JSDoc tag)"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_TagMethod() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_TagMethod.ts"; + string PrimaryErrorMessage = "Error: The input source file (TS_TagMethod.ts) does not publish any entities (exported functions, static methods, type aliases and enums annotated with an @ambrosia JSDoc tag)"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_TupleType() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_TupleType.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'MyTupleType'"; + string SecondaryErrorMessage = "as a type (reason: The published type 'MyTupleType' has an invalid type ('[string, number]'); tuple types are not supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_TwoAmbrTag() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_TwoAmbrTags.ts"; + string PrimaryErrorMessage = "Error: The @ambrosia tag is defined more than once"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_UnionType() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_UnionType.ts"; + string PrimaryErrorMessage = "Error: Unable to publish type alias 'MyUnionType'"; + string SecondaryErrorMessage = "as a type (reason: The published type 'MyUnionType' has an invalid type ('string | number'); union types are not supported)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_UnionTypeCommented() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_UnionTypeCommented.ts"; + string PrimaryErrorMessage = "Error: Unable to publish function 'myComplexReturnFunction'"; + string SecondaryErrorMessage = "as a post method (reason: The return type of method 'myComplexReturnFunction' [property 'r2'] has an invalid type ('number | string'); union types are not supported) "; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_UnknownAtt_Method() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_UnknownAtt_Method.ts"; + string PrimaryErrorMessage = "Error: Unknown @ambrosia attribute 'published'"; + string SecondaryErrorMessage = "valid attributes are: publish, version, methodID, doRuntimeTypeChecking"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_UnknownAtt_Type() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_UnknownAtt_Type.ts"; + string PrimaryErrorMessage = "Error: Unknown @ambrosia attribute 'published'"; + string SecondaryErrorMessage = "valid attributes are: publish"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + [TestMethod] + public void JS_CG_Neg_VersionInt() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_VersionInt.ts"; + string PrimaryErrorMessage = "Error: The value ('Hello') supplied for @ambrosia attribute 'version' is not an integer"; + string SecondaryErrorMessage = ""; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + [TestMethod] + public void JS_CG_Neg_SingleUInt8Array() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_SingleUInt8Array.ts"; + string PrimaryErrorMessage = "Unable to publish function 'takesCustomSerializedParams'"; + string SecondaryErrorMessage = "Uint8Array parameter; Post methods do NOT support custom (raw byte) parameter serialization - all parameters are always serialized to JSON)"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, true, PrimaryErrorMessage, SecondaryErrorMessage); + } + + + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Tests.cs b/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Tests.cs new file mode 100644 index 00000000..d5cd55a6 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/JS_CodeGen_Tests.cs @@ -0,0 +1,205 @@ +using System; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Configuration; + + +namespace AmbrosiaTest +{ + [TestClass] + public class JS_CodeGen_Tests + { + + //************* Init Code ***************** + // NOTE: Build the javascript test app once at beginning of the class. + [ClassInitialize()] + public static void Class_Initialize(TestContext tc) + { + // Build the JS app first from a JS file + JS_Utilities JSUtils = new JS_Utilities(); +//*#*#*# COMMENT OUT FOR NOW - EASIER WITH TEST WRITING ETC .. JSUtils.BuildJSTestApp(); + } + + // NOTE: Make sure all names be "Azure Safe". No capital letters and no underscore. + [TestInitialize()] + public void Initialize() + { + Utilities MyUtils = new Utilities(); + MyUtils.TestInitialize(); + } + //************* Init Code ***************** + + [TestCleanup()] + public void Cleanup() + { + // Kill all exes associated with tests + JS_Utilities JSUtils = new JS_Utilities(); + JSUtils.JS_TestCleanup(); + } + + + [TestMethod] + public void JS_CG_Misc_AST_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "ASTTest.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + + [TestMethod] + public void JS_CG_Types_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_Types.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_AmbrosiaTag_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_AmbrosiaTag.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_EventHandler_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_EventHandlers.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_CustomSerialParam_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_CustomSerialParam.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_CustomSerialParamNoRaw_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_CustomSerialParamNoRawParam.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + + [TestMethod] + public void JS_CG_EventHandlerWarnings_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_EventHandlerWarnings.ts"; + + // Warning message in Event Handlers - not really consumer vs publisher so overloading use here + string ConsumerWarning = "Warning: Skipping Ambrosia AppEvent handler function 'onRecoveryComplete'"; + string PublisherWarning = "Warning: Skipping Ambrosia AppEvent handler function 'onBecomingPrimary'"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName, false, ConsumerWarning, PublisherWarning); + } + + [TestMethod] + public void JS_CG_GenTypeConcrete_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_GenType1.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_GenTypeConcrete2_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_GenType2.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_JSDocComment_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_JSDocComment.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_JSDocComment2_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_JSDocComment2.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_LiteralObjArray_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_LitObjArray.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + [TestMethod] + public void JS_CG_StaticMethod_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_StaticMethod.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + + //**** Misc valid tests that are just a "catch all" if don't know where to put test + [TestMethod] + public void JS_CG_Misc_Test() + { + JS_Utilities JSUtils = new JS_Utilities(); + + string testfileName = "TS_MiscTests.ts"; + + // Generate the consumer and publisher files and verify output and the generated files to cmp files + JSUtils.Test_CodeGen_TSFile(testfileName); + } + + } +} \ No newline at end of file diff --git a/AmbrosiaTest/AmbrosiaTest/JS_Tests.cs b/AmbrosiaTest/AmbrosiaTest/JS_Tests.cs new file mode 100644 index 00000000..42824715 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/JS_Tests.cs @@ -0,0 +1,62 @@ +using System; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Configuration; + + +namespace AmbrosiaTest +{ + [TestClass] + public class JS_Tests + { + //************* Init Code ***************** + // NOTE: Build the javascript test app once at beginning of the class. + [ClassInitialize()] + public static void Class_Initialize(TestContext tc) + { + // Build the JS PTI first from a JS file + JS_Utilities JSUtils = new JS_Utilities(); + //JSUtils.BuildJSTestApp(); // at some point this will be the JS PTI + } + + // NOTE: Make sure all names be "Azure Safe". No capital letters and no underscore. + [TestInitialize()] + public void Initialize() + { + Utilities MyUtils = new Utilities(); + MyUtils.TestInitialize(); + } + //************* Init Code ***************** + + + [TestCleanup()] + public void Cleanup() + { + // Kill all exes associated with tests + JS_Utilities JSUtils = new JS_Utilities(); + JSUtils.JS_TestCleanup(); + } + + [TestMethod] + public void JS_NodeUnitTests() + { + + Utilities MyUtils = new Utilities(); + JS_Utilities JSUtils = new JS_Utilities(); + + string testName = "jsnodeunittest"; + string finishedString = "UNIT TESTS COMPLETE"; + string successString = "SUMMARY: 83 passed (100%), 0 failed (0%)"; + string logOutputFileName_TestApp = testName + "_TestApp.log"; + + // Launched all the unit tests for JS Node (npm run unittests) + int JSTestAppID = JSUtils.StartJSNodeUnitTests(logOutputFileName_TestApp); + + // Wait until summary at the end and if not there, then know not finished + bool pass = MyUtils.WaitForProcessToFinish(logOutputFileName_TestApp, finishedString, 2, false, testName, true,false); + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_TestApp, successString, 1, false, testName, true,false); + + } + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/JS_Utilities.cs b/AmbrosiaTest/AmbrosiaTest/JS_Utilities.cs new file mode 100644 index 00000000..74f54503 --- /dev/null +++ b/AmbrosiaTest/AmbrosiaTest/JS_Utilities.cs @@ -0,0 +1,244 @@ +using System; +using System.Diagnostics; +using System.Configuration; +using System.IO; +using Microsoft.VisualStudio.TestTools.UnitTesting; +using System.Threading; +using System.Windows.Forms; // need this to handle threading issue on sleeps +using System.Collections.Generic; +using System.Linq; + +namespace AmbrosiaTest +{ + + public class JS_Utilities + { + // Message at the bottom of the output file to show everything passed + public string CodeGenSuccessMessage = "Code file generation SUCCEEDED: 2 of 2 files generated; 0 TypeScript errors, 0 merge conflicts"; + public string CodeGenFailMessage = "Code file generation FAILED: 0 of 2 files generated"; + public string CodeGenNoTypeScriptErrorsMessage = "Success: No TypeScript errors found in generated file "; + + // Runs a TS file through the JS LB and verifies code gen works correctly + // Handles valid tests one way, Negative tests from a different directory and Source Files as negative tests + public void Test_CodeGen_TSFile(string TestFile, bool NegTest = false, string PrimaryErrorMessage = "", string SecondaryErrorMessage = "", bool UsingSrcTestFile = false) + { + try + { + + Utilities MyUtils = new Utilities(); + + // Test Name is just the file without the extension + string TestName = TestFile.Substring(0, TestFile.Length - 3); + + // Launch the client job process with these values + string testfileDir = @"../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/"; + if (NegTest) + { + testfileDir = @"../../AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/"; + } + if (UsingSrcTestFile) + { + testfileDir = @"../../AmbrosiaTest/JSCodeGen/node_modules/ambrosia-node/src/"; + TestName = "SRC_" + TestName; + } + + + string ConSuccessString = CodeGenNoTypeScriptErrorsMessage + TestName + "_GeneratedConsumerInterface.g.ts"; + string PubSuccessString = CodeGenNoTypeScriptErrorsMessage + TestName + "_GeneratedPublisherFramework.g.ts"; + bool pass = true; // not actually used in this test but it is a generic utility fctn return + + + string testappdir = ConfigurationManager.AppSettings["AmbrosiaJSCodeGenDirectory"]; + string sourcefile = testfileDir + TestFile; + string generatedfile = TestName + "_Generated"; + string fileNameExe = "node.exe"; + string argString = "out\\TestCodeGen.js sourceFile=" + sourcefile + " mergeType=None generatedFileName=" + generatedfile; + string testOutputLogFile = TestName + "_CodeGen_Out.log"; + + + int processID = MyUtils.LaunchProcess(testappdir, fileNameExe, argString, false, testOutputLogFile); + if (processID <= 0) + { + MyUtils.FailureSupport(""); + Assert.Fail(" JS TestApp was not started. ProcessID <=0 "); + } + + // Verify things differently if it is a negative test + if (NegTest) + { + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, CodeGenFailMessage, 1, false, TestFile, true,false); + + // Verify the log file only has the one error (one that is related to not being annotated) + if (UsingSrcTestFile) + { + + string TestLogDir = ConfigurationManager.AppSettings["TestLogOutputDirectory"]; + string outputFile = TestLogDir + "\\" + testOutputLogFile; + + var total = 0; + using (StreamReader sr = new StreamReader(outputFile)) + { + + while (!sr.EndOfStream) + { + var counts = sr + .ReadLine() + .Split(' ') + .GroupBy(s => s) + .Select(g => new { Word = g.Key, Count = g.Count() }); + var wc = counts.SingleOrDefault(c => c.Word == "Error:"); + total += (wc == null) ? 0 : wc.Count; + } + } + + // Look for "Error:" in the log file + if (total > 1) + { + Assert.Fail(" Failure! Found more than 1 error in output file:"+ testOutputLogFile); + } + } + } + else + { + // Wait to see if success comes shows up in log file for total and for consumer and publisher + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, CodeGenSuccessMessage, 1, false, TestFile, true,false); + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, ConSuccessString, 1, false, TestFile, true,false); + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, PubSuccessString, 1, false, TestFile, true,false); + + // Verify the generated files with cmp files + string GenConsumerFile = TestName + "_GeneratedConsumerInterface.g.ts"; + string GenPublisherFile = TestName + "_GeneratedPublisherFramework.g.ts"; + MyUtils.VerifyTestOutputFileToCmpFile(GenConsumerFile, true); + MyUtils.VerifyTestOutputFileToCmpFile(GenPublisherFile, true); + } + + // Can use these to verify extra messages in the log file + if (PrimaryErrorMessage != "") + { + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, PrimaryErrorMessage, 1, false, TestFile, true,false); + } + if (SecondaryErrorMessage != "") + { + pass = MyUtils.WaitForProcessToFinish(testOutputLogFile, SecondaryErrorMessage, 1, false, TestFile, true,false); + } + + + } + catch (Exception e) + { + Assert.Fail(" Failure! Exception:" + e.Message); + } + } + + + // Run JS Node Unit Tests + public int StartJSNodeUnitTests(string testOutputLogFile) + { + + Utilities MyUtils = new Utilities(); + + // Launch the client job process with these values + string workingDir = ConfigurationManager.AppSettings["AmbrosiaJSDirectory"] + "\\Ambrosia-Node"; + string fileNameExe = "pwsh.exe"; + string argString = "-c npm run unittests"; + + int processID = MyUtils.LaunchProcess(workingDir, fileNameExe, argString, false, testOutputLogFile); + if (processID <= 0) + { + MyUtils.FailureSupport(""); + Assert.Fail(" npm unittests were not started. ProcessID <=0 "); + } + + // Give it a few seconds to start + Thread.Sleep(2000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + return processID; + } + + + // *### These will be for the JS PTI calls + // Build JS Test App - easiest to call external powershell script. + // ** TO DO - maybe make this a generic "build .TS file" or something like that + // ** For now - this is only .ts that is required to be built + public void BuildJSTestApp() + { + try + { + + Utilities MyUtils = new Utilities(); + + // For some reason, the powershell script does NOT work if called from bin/x64/debug directory. Setting working directory to origin fixes it + string scriptWorkingDir = @"..\..\..\..\..\AmbrosiaTest"; + string scriptDir = ConfigurationManager.AppSettings["AmbrosiaJSCodeGenDirectory"]; + string fileName = "pwsh.exe"; + string parameters = "-file BuildJSTestApp.ps1 " + scriptDir; + bool waitForExit = true; + string testOutputLogFile = "BuildJSTestApp.log"; + + int powerShell_PID = MyUtils.LaunchProcess(scriptWorkingDir, fileName, parameters, waitForExit, testOutputLogFile); + + // Give it a few seconds to be sure + Thread.Sleep(2000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + // Verify .js file exists + string expectedjsfile = scriptDir + "\\out\\TestApp.js"; + if (File.Exists(expectedjsfile) == false) + { + MyUtils.FailureSupport(""); + Assert.Fail(" " + expectedjsfile + " was not built"); + } + } + catch (Exception e) + { + Assert.Fail(" Failure! " + e.Message); + } + } + + + // Start Javascript Test App + public int StartJSTestApp(string testOutputLogFile) + { + + Utilities MyUtils = new Utilities(); + + // Launch the client job process with these values + string workingDir = ConfigurationManager.AppSettings["AmbrosiaJSCodeGenDirectory"]; + string fileNameExe = "node.exe"; + string argString = "out\\TestApp.js"; + + int processID = MyUtils.LaunchProcess(workingDir, fileNameExe, argString, false, testOutputLogFile); + if (processID <= 0) + { + MyUtils.FailureSupport(""); + Assert.Fail(" JS TestApp was not started. ProcessID <=0 "); + } + + // Give it a few seconds to start + Thread.Sleep(6000); + Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. + + return processID; + } + + //** Clean up all the left overs from JS tests. + public void JS_TestCleanup() + { + Utilities MyUtils = new Utilities(); + + // If failures in queue then do not want to do anything (init, run test, clean up) + if (MyUtils.CheckStopQueueFlag()) + { + return; + } + + // Stop all running processes that hung or were left behind + MyUtils.StopAllAmbrosiaProcesses(); + + Thread.Sleep(2000); + } + + + } +} diff --git a/AmbrosiaTest/AmbrosiaTest/LaunchCodeCoverage.bat b/AmbrosiaTest/AmbrosiaTest/LaunchCodeCoverage.bat index 86ce59d4..07b2d03a 100644 --- a/AmbrosiaTest/AmbrosiaTest/LaunchCodeCoverage.bat +++ b/AmbrosiaTest/AmbrosiaTest/LaunchCodeCoverage.bat @@ -1,20 +1,20 @@ -echo "****************************"" -echo "* Batch file to do to code coverage of Ambrosia and ImmCoord" -echo "* To use this .bat file you need TestAgent to be installed:" -echo "* https://www.visualstudio.com/downloads/?q=agents" -echo "* " -echo "* To run this .bat file, make sure to build the AmbrosiaTest solution (in VS) which will" -echo "* build AmbrosiaTest.dll and put it in the bin directory." -echo "* " -echo "* Need the file CodeCoverage.runsettings in the same directory as all exes and dlls" -echo "*" -echo "* After the run, import the .coverage file into Visual Studio (just open the .coverage file in VS). This file is found in TestResults in the " -echo "* directory ...\CommonExtensions\Microsoft\TestWindow\TestResults" -echo "****************************"" +rem ****************************"" +rem * Batch file to do to code coverage of Ambrosia and ImmCoord +rem * To use this .bat file you need TestAgent to be installed: +rem * https://www.visualstudio.com/downloads/?q=agents +rem * +rem * To run this .bat file, make sure to build the AmbrosiaTest solution (in VS) which will +rem * build AmbrosiaTest.dll and put it in the bin directory. +rem * +rem * Need the file CodeCoverage.runsettings in the same directory as all exes and dlls +rem * +rem * After the run, import the .coverage file into Visual Studio (just open the .coverage file in VS). This file is found in TestResults in the +rem * directory ...\CommonExtensions\Microsoft\TestWindow\TestResults +rem ***************************** set "testdir=%cd%" c: -cd\"Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" +cd\"Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" vstest.console.exe %testdir%\AmbrosiaTest.dll /EnableCodeCoverage /Settings:%testdir%\CodeCoverage.runsettings /logger:trx diff --git a/AmbrosiaTest/AmbrosiaTest/LaunchTests.bat b/AmbrosiaTest/AmbrosiaTest/LaunchTests.bat index 56cc3301..230d0688 100644 --- a/AmbrosiaTest/AmbrosiaTest/LaunchTests.bat +++ b/AmbrosiaTest/AmbrosiaTest/LaunchTests.bat @@ -1,17 +1,17 @@ -echo "****************************"" -echo "* Batch file to launch Ambrosia tests" -echo "* This takes Visual Studio out of the equation" -echo "* Keeps it simple. " -echo "* To use this .bat file you need TestAgent to be installed:" -echo "* https://www.visualstudio.com/downloads/?q=agents" -echo "* " -echo "* To run this .bat file, make sure to build the AmbrosiaTest or AmbrosiaTest_Local solution (in VS) which will" -echo "* build AmbrosiaTest.dll and put it in the bin directory." -echo "****************************"" +rem **************************** +rem * Batch file to launch Ambrosia tests +rem * This takes Visual Studio out of the equation +rem * Keeps it simple. +rem * To use this .bat file you need TestAgent to be installed: +rem * https://www.visualstudio.com/downloads/?q=agents +rem * +rem * To run this .bat file, make sure to build the AmbrosiaTest or AmbrosiaTest_Local solution (in VS) which will +rem * build AmbrosiaTest.dll and put it in the bin directory. +rem * +rem **************************** set "testdir=%cd%" c: -cd\"Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" -vstest.console.exe %testdir%\AmbrosiaTest.dll > AmbrosiaTestResults.txt -echo vstest.console.exe %testdir%\AmbrosiaTest.dll /Tests:AMB_KillServer_Test - +cd\"Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" +vstest.console.exe %testdir%\bin\x64\Release\AmbrosiaTest.dll > AmbrosiaTestResults.txt +rem vstest.console.exe %testdir%\AmbrosiaTest.dll /Tests:AMB_KillServer_Test diff --git a/AmbrosiaTest/AmbrosiaTest/LaunchUnitTests.bat b/AmbrosiaTest/AmbrosiaTest/LaunchUnitTests.bat index 50f0a16f..4dbb3f0e 100644 --- a/AmbrosiaTest/AmbrosiaTest/LaunchUnitTests.bat +++ b/AmbrosiaTest/AmbrosiaTest/LaunchUnitTests.bat @@ -1,13 +1,13 @@ -echo "****************************"" -echo "* Batch file to launch Ambrosia unit tests" -echo "* This takes Visual Studio out of the equation" -echo "* Keeps it simple. " -echo "* To use this .bat file you need TestAgent to be installed:" -echo "* https://www.visualstudio.com/downloads/?q=agents" -echo "* " -echo "****************************"" +rem **************************** +rem * Batch file to launch Ambrosia unit tests +rem * This takes Visual Studio out of the equation +rem * Keeps it simple. +rem * To use this .bat file you need TestAgent to be installed: +rem * https://www.visualstudio.com/downloads/?q=agents +rem * +rem ******************************" set "testdir=%cd%" c: -cd\"Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" +cd\"Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\Common7\IDE\CommonExtensions\Microsoft\TestWindow" vstest.console.exe %testdir%\AmbrosiaTest.dll /Tests:UnitTest_BasicEndtoEnd_Test,UnitTest_BasicActiveActive_KillPrimary_Test,UnitTest_BasicRestartEndtoEnd_Test diff --git a/AmbrosiaTest/AmbrosiaTest/MTF_Test.cs b/AmbrosiaTest/AmbrosiaTest/MTF_Test.cs index 707e2b63..d893cd4c 100644 --- a/AmbrosiaTest/AmbrosiaTest/MTF_Test.cs +++ b/AmbrosiaTest/AmbrosiaTest/MTF_Test.cs @@ -25,6 +25,8 @@ public void Initialize() // This has Persist Logs = Y for both Job and Server // Set Server \ Job to exchange random sized //**************************** + + /* Commment out MTF so don't run in normal queue. Just remove comments when want to run MTF tests locally. [TestMethod] public void AMB_MTF_KILL_PERSIST_Test() { @@ -173,8 +175,8 @@ public void AMB_MTF_KILL_PERSIST_Test() // Verify client / server have proper bytes MyUtils.VerifyBytesRecievedInTwoLogFiles(logOutputFileName_ClientJob, logOutputFileName_Server); - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, totalNumBytesReceived.ToString(), 1, false, testName, true); // Total bytes received - pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, totalNumBytesReceived.ToString(), 1, false, testName, true); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_ClientJob, totalNumBytesReceived.ToString(), 1, false, testName, true,false); // Total bytes received + pass = MyUtils.WaitForProcessToFinish(logOutputFileName_Server, totalNumBytesReceived.ToString(), 1, false, testName, true,false); // Total bytes received // Verify integrity of Ambrosia logs by replaying - do NOT check cmp files because MTF can change run to run MyUtils.VerifyAmbrosiaLogFile(testName, totalNumBytesReceived, false, false, AMB1.AMB_Version); @@ -291,8 +293,8 @@ public void AMB_MTF_NoKill_Test() string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; //****************** MTF Settings *************** - //int numRounds = 5; long totalNumBytesReceived = 5368709120; int maxMminsToWaitToFinish = 5; - int numRounds = 25; long totalNumBytesReceived = 26843545600; int maxMminsToWaitToFinish = 30; + int numRounds = 5; long totalNumBytesReceived = 5368709120; int maxMminsToWaitToFinish = 5; + //int numRounds = 25; long totalNumBytesReceived = 26843545600; int maxMminsToWaitToFinish = 30; //int numRounds = 100; long totalNumBytesReceived = 107374182400; int maxMminsToWaitToFinish = 80; // 15 mins //int numRounds = 500; long totalNumBytesReceived = 536870912000; int maxMminsToWaitToFinish = 160; // about 1.5 hrs //int numRounds = 1000; long totalNumBytesReceived = 1073741824000; int maxMminsToWaitToFinish = 320; // 3 hrs or so @@ -367,6 +369,7 @@ public void AMB_MTF_NoKill_Test() } + */ [TestCleanup()] public void Cleanup() { diff --git a/AmbrosiaTest/AmbrosiaTest/Utilities.cs b/AmbrosiaTest/AmbrosiaTest/Utilities.cs index da422572..bab00af4 100644 --- a/AmbrosiaTest/AmbrosiaTest/Utilities.cs +++ b/AmbrosiaTest/AmbrosiaTest/Utilities.cs @@ -15,7 +15,7 @@ public class AMB_Settings { public string AMB_ServiceName { get; set; } public string AMB_ImmCoordName { get; set; } // This will go away - public string AMB_PortAppReceives { get; set; } + public string AMB_PortAppReceives { get; set; } public string AMB_PortAMBSends { get; set; } public string AMB_TestingUpgrade { get; set; } public string AMB_ServiceLogPath { get; set; } @@ -47,11 +47,30 @@ public class Utilities //********* // NetFrameworkTestRun - // when = true, the test will run under the assumption that .Net Framework files in AmbrosiaTest\bin\x64\debug (or release) directory (from net46 directory) - // when = false, the test will run under the assumption that .Net Core files in AmbrosiaTest\bin\x64\debug (or release) directory (from netcoreapp2.0 directory) + // when = true, the test will run under the assumption that .Net Framework files in AmbrosiaTest\bin\x64\debug (or release) directory (from net461 directory) + // when = false, the test will run under the assumption that .Net Core files in AmbrosiaTest\bin\x64\debug (or release) directory (from netcoreapp3.1 directory) // .NET CORE only has DLLs, so no AMB exe so run by using "dotnet" + // The two strings (NetFramework and NetCoreFramework) are part of the path when calling PTI and PT - called in helper functions //********* - static bool NetFrameworkTestRun = true; + public bool NetFrameworkTestRun = true; + public string NetFramework = "net461"; + public string NetCoreFramework = "netcoreapp3.1"; + + //********* + // LogType + // This is type \ location of the logs.. "files" or "blobs" in the ImmortalCoordinator + //********* + public string logTypeFiles = "files"; + public string logTypeBlobs = "blobs"; + + //********* + // DeployMode + // This is the mode on whether IC call is part of client and server or on its own (-d paramter in PTI job.exe and server.exe) + //********* + public string deployModeSecondProc = "secondproc"; // original design where need IC in separate process + public string deployModeInProc = "inprocdeploy"; // No longer need rp and sp ports since we are using pipes instead of TCP + public string deployModeInProcManual = "inprocmanual"; // this is the TCP port call where need rp & sp but still in single proc per job or server + public string deployModeInProcTimeTravel = "inproctimetravel"; // Used by Client and Server of PTI for time travel debugging // Returns the Process ID of the process so you then can something with it // Currently output to file using ">", but using cmd.exe to do that. @@ -94,32 +113,38 @@ public int LaunchProcess(string workingDirectory, string fileName, string parame process.WaitForExit(); // Give it a second to completely start - Thread.Sleep(1000); + Thread.Sleep(2000); - //Figure out the process ID for the program ... process id from process.start is the process ID for cmd.exe - Process[] processesforapp = Process.GetProcessesByName(fileToExecute.Remove(fileToExecute.Length - 4)); - if (processesforapp.Length == 0) + int processID = 999; + + if (startInfo.Arguments.Contains("dotnet Ambrosia.dll") == false) { - FailureSupport(fileToExecute); - Assert.Fail(" Failure! Process " + fileToExecute + " failed to start."); - return 0; - } + //Figure out the process ID for the program ... process id from process.start is the process ID for cmd.exe + Process[] processesforapp = Process.GetProcessesByName(fileToExecute.Remove(fileToExecute.Length - 4)); - int processID = processesforapp[0].Id; - var processStart = processesforapp[0].StartTime; + if (processesforapp.Length == 0) + { + FailureSupport(fileToExecute); + Assert.Fail(" Failure! Process " + fileToExecute + " failed to start."); + return 0; + } - // make sure to get most recent one as that is safe to know that is one we just created - for (int i = 1; i <= processesforapp.Length - 1; i++) - { - if (processStart < processesforapp[i].StartTime) + processID = processesforapp[0].Id; + var processStart = processesforapp[0].StartTime; + + // make sure to get most recent one as that is safe to know that is one we just created + for (int i = 1; i <= processesforapp.Length - 1; i++) { - processStart = processesforapp[i].StartTime; - processID = processesforapp[i].Id; + if (processStart < processesforapp[i].StartTime) + { + processStart = processesforapp[i].StartTime; + processID = processesforapp[i].Id; + } } - } - // Kill the process id for the cmd that launched the window so it isn't lingering - KillProcess(process.Id); + // Kill the process id for the cmd that launched the window so it isn't lingering + KillProcess(process.Id); + } return processID; @@ -132,13 +157,15 @@ public int LaunchProcess(string workingDirectory, string fileName, string parame } } - // timing mechanism to see when a process finishes. It uses a trigger string ("FINISHED") and will delay until that string - // is hit or until maxDelay (mins) is hit - public bool WaitForProcessToFinish(string logFile, string doneString, int maxDelay, bool truncateAmbrosiaLogs, string testName, bool assertOnFalseReturn) + // timing mechanism to see when a process finishes. It uses a trigger string ("DONE") and will delay until that string + // is hit or until maxDelay (mins) is hit it also can determine if the extraStringToFind is part of it as well. + public bool WaitForProcessToFinish(string logFile, string extraStringToFind, int maxDelay, bool truncateAmbrosiaLogs, string testName, bool assertOnFalseReturn, bool checkForDoneString = true) { int timeCheckInterval = 10000; // 10 seconds int maxTimeLoops = (maxDelay * 60000) / timeCheckInterval; - + string doneString = "DONE"; + bool foundExtraString = false; + bool foundDoneString = false; logFile = ConfigurationManager.AppSettings["TestLogOutputDirectory"] + "\\" + logFile; for (int i = 0; i < maxTimeLoops; i++) @@ -151,11 +178,33 @@ public bool WaitForProcessToFinish(string logFile, string doneString, int maxDel while (!logFileReader.EndOfStream) { string line = logFileReader.ReadLine(); + + // Looking for "DONE" if (line.Contains(doneString)) + { + foundDoneString = true; + } + + // Looking for extra string (usually byte size or some extra message in output) + if (line.Contains(extraStringToFind)) + { + foundExtraString = true; + + // since not looking for done, can close things down here + if (checkForDoneString == false) + { + logFileReader.Close(); + logFileStream.Close(); + return true; + } + } + + // kick out because had success only if doneString is found AND the extra string is found + if ((foundDoneString) && (foundExtraString)) { logFileReader.Close(); logFileStream.Close(); - return true; // kick out because had success + return true; } } @@ -173,14 +222,21 @@ public bool WaitForProcessToFinish(string logFile, string doneString, int maxDel } } - // made it here so we know it timed out and didn't find the string it was looking for + // made it here so we know it either DONE was not found or the DONE was found but the extra string was not found // only pop assert if asked to do that if (assertOnFalseReturn == true) { FailureSupport(testName); // If times out without string hit - then pop exception - Assert.Fail(" Failure! Looking for string:" + doneString + " in log file:" + logFile + " but did not find it after waiting:" + maxDelay.ToString() + " minutes."); + if (checkForDoneString) + { + Assert.Fail(" Failure! Looking for '" + doneString + "' string AND the extra string:" + extraStringToFind + " in log file:" + logFile + " but did not find one or both after waiting:" + maxDelay.ToString() + " minutes."); + } + else + { + Assert.Fail(" Failure! Looking for string:" + extraStringToFind + " in log file:" + logFile + " but did not find it after waiting:" + maxDelay.ToString() + " minutes."); + } } return false; // made it this far, we know it is a false @@ -221,7 +277,7 @@ public void CleanupAzureTables(string nameOfObjects) // For some reason, the powershell script does NOT work if called from bin/x64/debug directory. Setting working directory to origin fixes it string scriptWorkingDir = @"..\..\..\..\..\AmbrosiaTest\AmbrosiaTest"; - string fileName = "powershell.exe"; + string fileName = "pwsh.exe"; string parameters = "-file CleanUpAzure.ps1 " + nameOfObjects + "*"; bool waitForExit = false; string testOutputLogFile = nameOfObjects + "_CleanAzureTables.log"; @@ -285,6 +341,53 @@ public void CleanupAmbrosiaLogFiles() Assert.Fail(" Unable to delete Log Dir:" + ambrosiaLogDir); } + // Clean up the InProc files now. Since InProc, they are relative to PTI + string PTIAmbrosiaLogDir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + ConfigurationManager.AppSettings["PTIAmbrosiaLogDirectory"]; + if (Directory.Exists(PTIAmbrosiaLogDir)) + { + Directory.Delete(PTIAmbrosiaLogDir, true); + } + + // Clean up the InProc IC output files from Job and Server + string InProcICOutputFile = "ICOutput*.txt"; + string CurrentFramework = NetFramework; + if (NetFrameworkTestRun == false) + { + CurrentFramework = NetCoreFramework; + } + + // job IC output file and any blob log files + string PTI_Job_Dir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"]+ CurrentFramework; + var jobdir = new DirectoryInfo(PTI_Job_Dir); + foreach (var file in jobdir.EnumerateFiles(InProcICOutputFile)) + { + file.Delete(); + } + + // Delete the folders from inproc + DeleteDirectoryUsingWildCard(PTI_Job_Dir, "job_"); + + // server IC output file and any blob log files + string PTI_Server_Dir = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"] + CurrentFramework; + var serverdir = new DirectoryInfo(PTI_Server_Dir); + foreach (var file in serverdir.EnumerateFiles(InProcICOutputFile)) + { + file.Delete(); + } + // Delete the folders from inproc + DeleteDirectoryUsingWildCard(PTI_Server_Dir, "server_"); + + + // Give it a second to make sure - had timing issues where wasn't fully deleted by time got here + Thread.Sleep(1000); + + // Double check to make sure it is deleted and not locked by something else + if (Directory.Exists(PTIAmbrosiaLogDir)) + { + FailureSupport(""); + Assert.Fail(" Unable to delete PTI Log Dir:" + PTIAmbrosiaLogDir); + } + } catch (Exception e) { @@ -293,6 +396,30 @@ public void CleanupAmbrosiaLogFiles() } } + // Helper function for cleaning up log files where don't know full name of folder to delete + public void DeleteDirectoryUsingWildCard(string rootpath, string substringtomatch) + { + try + { + List dirs = new List(Directory.EnumerateDirectories(rootpath)); + + foreach (var dir in dirs) + { + string currentDir = dir; + if (dir.Contains(substringtomatch)) + { + Directory.Delete(dir, true); + } + } + } + catch (Exception e) + { + // If log clean up fails ... probably not enough to stop the test but log it + string logInfo = " Exception:" + e.Message; + LogDebugInfo(logInfo); + } + } + // Kills a single process based on Process ID. Used to kill a ImmCoord, Server etc as those are created with a Process ID return. // If the processID isn't there, then will each exception and log a line in AmbrosiaTest_Debug.log @@ -323,6 +450,9 @@ public void KillProcess(int processID) public void VerifyTestEnvironment() { + // used in PT and PTI - set here by default and change below if need to + string current_framework = NetFramework; + // Verify logging directory ... if doesn't exist, create it string testLogDir = ConfigurationManager.AppSettings["TestLogOutputDirectory"]; if (Directory.Exists(testLogDir) == false) @@ -345,6 +475,7 @@ public void VerifyTestEnvironment() string AMBExe = "Ambrosia.exe"; if (File.Exists(AMBExe) == false) Assert.Fail(" Missing AMB exe. Expecting:" + AMBExe); + } else // .net core only has dll ... { @@ -357,6 +488,10 @@ public void VerifyTestEnvironment() string AMBExe = "Ambrosia.dll"; if (File.Exists(AMBExe) == false) Assert.Fail(" Missing AMB dll. Expecting:" + AMBExe); + + // used in PTI and PT calls + current_framework = NetCoreFramework; + } // Don't need AmbrosiaLibCS.exe as part of tests @@ -364,17 +499,27 @@ public void VerifyTestEnvironment() // if (File.Exists(AmbrosiaLibCSExe) == false) // Assert.Fail(" Missing AmbrosiaLibcs dll. Expecting:" + AmbrosiaLibCSExe); - string perfTestJobFile = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + "\\job.exe"; + string perfTestJobFile = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + current_framework + "\\job.exe"; if (File.Exists(perfTestJobFile) == false) - Assert.Fail(" Missing job.exe. Expecting:" + perfTestJobFile); + Assert.Fail(" Missing PTI job.exe. Expecting:" + perfTestJobFile); - string perfTestServerFile = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"] + "\\server.exe"; + string perfTestServerFile = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"] + current_framework + "\\server.exe"; if (File.Exists(perfTestServerFile) == false) - Assert.Fail(" Missing server.exe. Expecting:" + perfTestServerFile); + Assert.Fail(" Missing PTI server.exe. Expecting:" + perfTestServerFile); string connectionString = Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING"); if (connectionString == null) Assert.Fail(" Missing Connection String environment variable 'AZURE_STORAGE_CONN_STRING'"); + +/* ** Async feature removed so Performance Test not needed + string perfAsyncTestJobFile = ConfigurationManager.AppSettings["AsyncPerfTestJobExeWorkingDirectory"] + current_framework + "\\job.exe"; + if (File.Exists(perfAsyncTestJobFile) == false) + Assert.Fail(" Missing PerformanceTest job.exe. Expecting:" + perfAsyncTestJobFile); + + string perfAsyncTestServerFile = ConfigurationManager.AppSettings["AsyncPerfTestServerExeWorkingDirectory"] + current_framework + "\\server.exe"; + if (File.Exists(perfAsyncTestJobFile) == false) + Assert.Fail(" Missing PerformanceTest server.exe. Expecting:" + perfAsyncTestJobFile); +*/ } @@ -382,9 +527,11 @@ public void VerifyTestEnvironment() // This takes the log file and compares it to the associated .CMP file // NOTE: Has a feature if a line in cmp file has *X* then that line will not be used in comparison - useful for dates or debug messages // + // Optional parameter is for Javascript LB tests. There are different locations for Log files and CMP files for JS LB tests + // // Assumption: Test Output logs are .log and the cmp is the same file name but with .cmp extension //********************************************************************* - public void VerifyTestOutputFileToCmpFile(string testOutputLogFile) + public void VerifyTestOutputFileToCmpFile(string testOutputLogFile, bool JSTest = false, bool TTDTest = false) { // Give it a second to get all ready to be verified - helps timing issues @@ -395,6 +542,23 @@ public void VerifyTestOutputFileToCmpFile(string testOutputLogFile) string cmpLogDir = ConfigurationManager.AppSettings["TestCMPDirectory"]; string cmpDirFile = cmpLogDir + "\\" + testOutputLogFile.Replace(".log", ".cmp"); + // TTD tests have different files so need modify file to do proper match + if (TTDTest) + { + cmpDirFile = cmpDirFile.Replace("_TTD_Verify", "_Verify"); + } + + + // Javascript tests + if (JSTest) + { + // Test Log Output + testLogDir = ConfigurationManager.AppSettings["AmbrosiaJSCodeGenDirectory"]; + logOutputDirFileName = testLogDir +"\\"+ testOutputLogFile; + cmpLogDir = ConfigurationManager.AppSettings["TestCMPDirectory"] + "\\JS_CodeGen_Cmp"; + cmpDirFile = cmpLogDir + "\\" + testOutputLogFile +".cmp"; + } + // Put files into memory so can filter out ignore lines etc List logFileList = new List(); List cmpFileList = new List(); @@ -427,7 +591,7 @@ public void VerifyTestOutputFileToCmpFile(string testOutputLogFile) cmpFileStream.Close(); // Go through filtered list of strings and verify - string errorMessage = "Log file vs Cmp file failed! Log file is " + testOutputLogFile + ". Elements are in the filtered list where *X* is ignored."; + string errorMessage = "Log file vs Cmp file failed! Log file: " + testOutputLogFile + ". Elements are in the filtered list where *X* is ignored."; // put around a try catch because want to stop the queue as well try @@ -451,16 +615,37 @@ public void VerifyTestOutputFileToCmpFile(string testOutputLogFile) // // Assumption: Test Output logs are .log and the cmp is the same file name but with .cmp extension //********************************************************************* - public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpFile, bool startWithFirstFile, string CurrentVersion, string optionalMultiClientStartingPoint = "") + public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpFile, bool startWithFirstFile, string CurrentVersion, string optionalNumberOfClient = "", bool asyncTest = false, bool checkForDoneString = true) { + // Doing this for multi client situations + string optionalMultiClientStartingPoint = ""; + if (optionalNumberOfClient == "") + { + optionalNumberOfClient = "1"; + } + else + { + optionalMultiClientStartingPoint = "0"; + } + string clientJobName = testName + "clientjob" + optionalMultiClientStartingPoint; string serverName = testName + "server"; - string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\"; + string ambrosiaLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"]; // don't put + "\\" on end as mess up location .. need append in Ambrosia call though + string ambrosiaLogDirFromPTI = ConfigurationManager.AppSettings["TTDAmbrosiaLogDirectory"] + "\\"; + + // if not in standard log place, then must be in InProc log location which is relative to PTI - safe assumption + if (Directory.Exists(ambrosiaLogDir) ==false) + { + ambrosiaLogDir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + ConfigurationManager.AppSettings["PTIAmbrosiaLogDirectory"]; + ambrosiaLogDirFromPTI = "..\\..\\"+ambrosiaLogDir+"\\"; // feels like there has to be better way of determing this + } + // used to get log file - string ambrosiaClientLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\" + testName + "clientjob" + optionalMultiClientStartingPoint + "_" + CurrentVersion; - string ambrosiaServerLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\" + testName + "server_" + CurrentVersion; + string ambrosiaClientLogDir = ambrosiaLogDir + "\\" + testName + "clientjob" + optionalMultiClientStartingPoint + "_0"; // client is always 0 so don't use + CurrentVersion; + string ambrosiaServerLogDir = ambrosiaLogDir + "\\" + testName + "server_" + CurrentVersion; + string startingClientChkPtVersionNumber = "1"; string clientFirstFile = ""; @@ -508,6 +693,7 @@ public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpF // Get most recent version of SERVER log file and check point string startingServerChkPtVersionNumber = "1"; + string serverFirstFile = ""; string serverLogFile = ""; if (Directory.Exists(ambrosiaServerLogDir)) @@ -551,9 +737,9 @@ public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpF AMB_Settings AMB1 = new AMB_Settings { AMB_ServiceName = clientJobName, - AMB_ServiceLogPath = ambrosiaLogDir, + AMB_ServiceLogPath = ambrosiaLogDir + "\\", AMB_StartingCheckPointNum = startingClientChkPtVersionNumber, - AMB_Version = CurrentVersion.ToString(), + AMB_Version = "0", // always 0 CurrentVersion.ToString(), AMB_TestingUpgrade = "N", AMB_PortAppReceives = "1000", AMB_PortAMBSends = "1001" @@ -565,7 +751,7 @@ public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpF AMB_Settings AMB2 = new AMB_Settings { AMB_ServiceName = serverName, - AMB_ServiceLogPath = ambrosiaLogDir, + AMB_ServiceLogPath = ambrosiaLogDir + "\\", AMB_StartingCheckPointNum = startingServerChkPtVersionNumber, AMB_Version = CurrentVersion.ToString(), AMB_TestingUpgrade = "N", @@ -574,19 +760,37 @@ public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpF }; CallAMB(AMB2, logOutputFileName_AMB2, AMB_ModeConsts.DebugInstance); - // Job call - string logOutputFileName_ClientJob_Verify = testName + "_ClientJob_Verify.log"; - int clientJobProcessID = StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Verify); + string logOutputFileName_ClientJob_Verify; + string logOutputFileName_Server_Verify; - //Server Call - string logOutputFileName_Server_Verify = testName + "_Server_Verify.log"; - int serverProcessID = StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Verify, 1, false); + // if async, use the async job and server + if (asyncTest) + { + // Job call + logOutputFileName_ClientJob_Verify = testName + "_ClientJob_Verify.log"; + int clientJobProcessID = StartAsyncPerfClientJob("1001", "1000", clientJobName, serverName, "1", logOutputFileName_ClientJob_Verify); + + //Server Call + logOutputFileName_Server_Verify = testName + "_Server_Verify.log"; + int serverProcessID = StartAsyncPerfServer("2001", "2000", serverName, logOutputFileName_Server_Verify); + } + else + { + // Job call + logOutputFileName_ClientJob_Verify = testName + "_ClientJob_Verify.log"; + int clientJobProcessID = StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_Verify); + + //Server Call + logOutputFileName_Server_Verify = testName + "_Server_Verify.log"; + int serverProcessID = StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_Verify, Convert.ToInt32(optionalNumberOfClient), false); + } // wait until done running - bool pass = WaitForProcessToFinish(logOutputFileName_Server_Verify, numBytes.ToString(), 15, false, testName, true); - pass = WaitForProcessToFinish(logOutputFileName_ClientJob_Verify, numBytes.ToString(), 15, false, testName, true); + bool pass = WaitForProcessToFinish(logOutputFileName_ClientJob_Verify, numBytes.ToString(), 15, false, testName, true, checkForDoneString); + pass = WaitForProcessToFinish(logOutputFileName_Server_Verify, numBytes.ToString(), 15, false, testName, true, checkForDoneString); + - // MTFs don't check cmp files because they change from run to run + // MTFs don't check cmp files because they change from run to run if (checkCmpFile) { // verify new log files to cmp files @@ -594,9 +798,46 @@ public void VerifyAmbrosiaLogFile(string testName, long numBytes, bool checkCmpF VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_Verify); } + // Test Time Travel Debugging on the Log Files from PTI job and PTI server - don't do for MTF as not needed for TTD handled by other tests also cmp files change too much + VerifyTimeTravelDebugging(testName, numBytes, clientJobName, serverName, ambrosiaLogDirFromPTI, startingClientChkPtVersionNumber, startingServerChkPtVersionNumber, optionalNumberOfClient, CurrentVersion, checkCmpFile, checkForDoneString); + } - public int StartImmCoord(string ImmCoordName, int portImmCoordListensAMB, string testOutputLogFile, bool ActiveActive=false, int replicaNum = 9999) + //** Basically same as VerifyAmbrosiaLogFile but instead of using Ambrosia.exe to verify log, this uses + //** job.exe and server.exe to verify it. Porbably easiest to call from VerifyAmbrosiaLogFile since that does + //** all the work to get the log files and checkpoint numbers + //** Assumption that this is called at the end of a test where Ambrosia.exe was already called to register for this test + public void VerifyTimeTravelDebugging(string testName, long numBytes, string clientJobName, string serverName, string ambrosiaLogDir, string startingClientChkPtVersionNumber, string startingServerChkPtVersionNumber, string optionalNumberOfClient = "", string currentVersion = "", bool checkCmpFile = true, bool checkForDoneString = true) + { + + // Basically doing this for multi client stuff + if (optionalNumberOfClient == "") + { + optionalNumberOfClient = "1"; + } + + // Job call + string logOutputFileName_ClientJob_TTD_Verify = testName + "_ClientJob_TTD_Verify.log"; + int clientJobProcessID = StartPerfClientJob("1001", "1000", clientJobName, serverName, "65536", "13", logOutputFileName_ClientJob_TTD_Verify, deployModeInProcTimeTravel,"", ambrosiaLogDir, startingClientChkPtVersionNumber); + + //Server Call + string logOutputFileName_Server_TTD_Verify = testName + "_Server_TTD_Verify.log"; + int serverProcessID = StartPerfServer("2001", "2000", clientJobName, serverName, logOutputFileName_Server_TTD_Verify, Convert.ToInt32(optionalNumberOfClient), false,0, deployModeInProcTimeTravel,"", ambrosiaLogDir, startingServerChkPtVersionNumber,currentVersion); + + // wait until done running + bool pass = WaitForProcessToFinish(logOutputFileName_Server_TTD_Verify, numBytes.ToString(), 20, false, testName, true, checkForDoneString); + pass = WaitForProcessToFinish(logOutputFileName_ClientJob_TTD_Verify, numBytes.ToString(), 15, false, testName, true, checkForDoneString); + + // With Meantime to Failure tests don't check cmp files because they change from run to run + if (checkCmpFile) + { + // verify TTD files to cmp files + VerifyTestOutputFileToCmpFile(logOutputFileName_Server_TTD_Verify, false, true); + VerifyTestOutputFileToCmpFile(logOutputFileName_ClientJob_TTD_Verify, false, true); + } + } + + public int StartImmCoord(string ImmCoordName, int portImmCoordListensAMB, string testOutputLogFile, bool ActiveActive = false, int replicaNum = 9999, int overRideReceivePort = 0, int overRideSendPort = 0, string overRideLogLoc = "", string overRideIPAddr = "", string logToType = "") { // Launch the AMB process with these values @@ -619,7 +860,29 @@ public int StartImmCoord(string ImmCoordName, int portImmCoordListensAMB, string FailureSupport(ImmCoordName); Assert.Fail(" Replica Number is required when doing active active "); } - argString = argString + " -aa -r="+ replicaNum.ToString(); + argString = argString + " -aa -r=" + replicaNum.ToString(); + } + + // If the override values sent through, then over ride existing ports, Log loc or IP + if (overRideReceivePort != 0) + { + argString = argString + " -rp=" + overRideReceivePort.ToString(); + } + if (overRideSendPort != 0) + { + argString = argString + " -sp=" + overRideSendPort.ToString(); + } + if (overRideLogLoc != "") + { + argString = argString + " -l=" + overRideLogLoc; + } + if (overRideIPAddr != "") + { + argString = argString + " -ip=" + overRideIPAddr; + } + if (logToType != "") // could make boolean but made it string so could pass "" to test default + { + argString = argString + " -lst="+ logToType; } @@ -656,8 +919,8 @@ public void CallAMB(AMB_Settings AMBSettings, string testOutputLogFile, AMB_Mode { case AMB_ModeConsts.RegisterInstance: - argString = "RegisterInstance " + "-i=" + AMBSettings.AMB_ServiceName - + " -rp=" + AMBSettings.AMB_PortAppReceives+ " -sp=" + AMBSettings.AMB_PortAMBSends; + argString = "RegisterInstance " + "-i=" + AMBSettings.AMB_ServiceName + + " -rp=" + AMBSettings.AMB_PortAppReceives + " -sp=" + AMBSettings.AMB_PortAMBSends; // add pause at start if (AMBSettings.AMB_PauseAtStart != null && AMBSettings.AMB_PauseAtStart != "N") @@ -694,7 +957,7 @@ public void CallAMB(AMB_Settings AMBSettings, string testOutputLogFile, AMB_Mode break; case AMB_ModeConsts.AddReplica: - argString = "AddReplica " + "-r=" + AMBSettings.AMB_ReplicaNumber+ " -i=" + AMBSettings.AMB_ServiceName + argString = "AddReplica " + "-r=" + AMBSettings.AMB_ReplicaNumber + " -i=" + AMBSettings.AMB_ServiceName + " -rp=" + AMBSettings.AMB_PortAppReceives + " -sp=" + AMBSettings.AMB_PortAMBSends; // add Service log path @@ -732,7 +995,7 @@ public void CallAMB(AMB_Settings AMBSettings, string testOutputLogFile, AMB_Mode break; case AMB_ModeConsts.DebugInstance: - argString = "DebugInstance " + "-i=" + AMBSettings.AMB_ServiceName + " -rp=" + AMBSettings.AMB_PortAppReceives + argString = "DebugInstance " + "-i=" + AMBSettings.AMB_ServiceName + " -rp=" + AMBSettings.AMB_PortAppReceives + " -sp=" + AMBSettings.AMB_PortAMBSends; // add Service log path @@ -766,9 +1029,9 @@ public void CallAMB(AMB_Settings AMBSettings, string testOutputLogFile, AMB_Mode Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. } - + // Starts the server.exe from PerformanceTestUninterruptible. - public int StartPerfServer(string receivePort, string sendPort, string perfJobName, string perfServerName, string testOutputLogFile, int NumClients, bool upgrade, long optionalMemoryAllocat = 0) + public int StartPerfServer(string receivePort, string sendPort, string perfJobName, string perfServerName, string testOutputLogFile, int NumClients, bool upgrade, long optionalMemoryAllocat = 0, string deployMode = "", string ICPort = "", string TTDLog = "", string TTDCheckpointNum = "", string currentVersion = "") { // Configure upgrade properly @@ -778,13 +1041,62 @@ public int StartPerfServer(string receivePort, string sendPort, string perfJobNa upgradeString = "Y"; } - // Launch the server process with these values - string workingDir = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"]; + // Set path by using proper framework + string current_framework = NetCoreFramework; + if (NetFrameworkTestRun) + current_framework = NetFramework; + + // Launch the server process with these values based on deploy mode + string workingDir = ConfigurationManager.AppSettings["PerfTestServerExeWorkingDirectory"] + current_framework; string fileNameExe = "Server.exe"; - string argString = "-j="+perfJobName + " -s=" + perfServerName +" -rp="+receivePort + " -sp=" + sendPort - + " -n="+ NumClients.ToString() +" -m="+ optionalMemoryAllocat.ToString() + " -c"; + string argString = ""; - // add upgrade switch if upgradeing + // Determine the arg based on deployMode + // Original & default method where need separate ImmCoord call + if ((deployMode == "") || (deployMode == deployModeSecondProc)) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -rp=" + receivePort + " -sp=" + sendPort + + " -n=" + NumClients.ToString() + " -m=" + optionalMemoryAllocat.ToString() + " -c"; + + if (deployMode != "") + { + argString = argString + " -d=" + deployModeSecondProc; + } + } + + // In proc using Pipe - No longer need rp and sp ports since we are using pipes instead of TCP. ImmCoord port is used - more commonly used in proc scenario + if (deployMode == deployModeInProc) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + + " -n=" + NumClients.ToString() + " -m=" + optionalMemoryAllocat.ToString() + " -c" + + " -d=" + deployModeInProc + " -icp=" + ICPort; + } + + // In proc using TCP - this is the TCP port call where need rp & sp but still in single proc per job or server + if (deployMode == deployModeInProcManual) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -rp=" + receivePort + " -sp=" + sendPort + + " -n=" + NumClients.ToString() + " -m=" + optionalMemoryAllocat.ToString() + " -c" + + " -d=" + deployModeInProcManual + " -icp=" + ICPort; + } + + // If starting in Time Travel debugger mode, then add the TTD parameters + if (deployMode == deployModeInProcTimeTravel) + { + // removed " -icp=" + ICPort + argString = "-j=" + perfJobName + " -s=" + perfServerName + + " -n=" + NumClients.ToString() + " -m=" + optionalMemoryAllocat.ToString() + " -c" + + " -d=" + deployModeInProcTimeTravel + + " -l=" + TTDLog + " -ch=" + TTDCheckpointNum; + + // The version # used to time travel debug (ignored otherwise). + if (currentVersion != "") + { + argString = argString + " -cv=" + currentVersion; + } + } + + // add upgrade switch if upgrading if (upgradeString != null && upgradeString != "N") argString = argString + " -u"; @@ -795,8 +1107,12 @@ public int StartPerfServer(string receivePort, string sendPort, string perfJobNa Assert.Fail(" Perf Server was not started. ProcessID <=0 "); } - // Give it a few seconds to start - Thread.Sleep(2000); + // Give it a few seconds to start -- give extra time if starting IC as part of this too + if (ICPort != "") + { + Thread.Sleep(6000); + } + Thread.Sleep(3000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. return processID; @@ -806,10 +1122,15 @@ public int StartPerfServer(string receivePort, string sendPort, string perfJobNa public int StartAsyncPerfServer(string receivePort, string sendPort, string perfServerName, string testOutputLogFile) { + // Set path by using proper framework + string current_framework = NetCoreFramework; + if (NetFrameworkTestRun) + current_framework = NetFramework; + // Launch the server process with these values - string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestServerExeWorkingDirectory"]; + string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestServerExeWorkingDirectory"] + current_framework; string fileNameExe = "Server.exe"; - string argString = "-rp="+receivePort + " -sp=" + sendPort + " -s=" + perfServerName + " -c "; + string argString = "-rp=" + receivePort + " -sp=" + sendPort + " -s=" + perfServerName + " -c "; int processID = LaunchProcess(workingDir, fileNameExe, argString, false, testOutputLogFile); if (processID <= 0) @@ -819,22 +1140,62 @@ public int StartAsyncPerfServer(string receivePort, string sendPort, string perf } // Give it a few seconds to start - Thread.Sleep(4000); + Thread.Sleep(6000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. return processID; } - // Perf Client from PerformanceTestInterruptible --- runs in Async - public int StartPerfClientJob(string receivePort, string sendPort, string perfJobName, string perfServerName, string perfMessageSize, string perfNumberRounds, string testOutputLogFile) + // Perf Client from PerformanceTestInterruptible + public int StartPerfClientJob(string receivePort, string sendPort, string perfJobName, string perfServerName, string perfMessageSize, string perfNumberRounds, string testOutputLogFile, string deployMode="", string ICPort="", string TTDLog="", string TTDCheckpointNum="") { - // Launch the client job process with these values - string workingDir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"]; + // Set path by using proper framework + string current_framework = NetCoreFramework; + if (NetFrameworkTestRun) + current_framework = NetFramework; + + // Set defaults here and can modify based on deploy mode + string workingDir = ConfigurationManager.AppSettings["PerfTestJobExeWorkingDirectory"] + current_framework; string fileNameExe = "Job.exe"; - string argString = "-j="+perfJobName + " -s=" + perfServerName +" -rp="+ receivePort + " -sp=" + sendPort - + " -mms=" + perfMessageSize + " -n=" + perfNumberRounds + " -c"; + string argString = ""; + + // Determine the arg based on deployMode + // Original & default method where need separate ImmCoord call + if ((deployMode=="") || (deployMode== deployModeSecondProc)) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -rp=" + receivePort + " -sp=" + sendPort + + " -mms=" + perfMessageSize + " -n=" + perfNumberRounds + " -c"; + + if (deployMode!="") + { + argString = argString + " -d=" + deployModeSecondProc; + } + } + + // In proc using Pipe - No longer need rp and sp ports since we are using pipes instead of TCP. ImmCoord port is used - more commonly used in proc scenario + if (deployMode == deployModeInProc) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -mms=" + perfMessageSize + " -n=" + perfNumberRounds + " -c" + + " -d=" + deployModeInProc + " -icp=" + ICPort; + } + + // In proc using TCP - this is the TCP port call where need rp & sp but still in single proc per job or server + if (deployMode == deployModeInProcManual) + { + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -rp=" + receivePort + " -sp=" + sendPort + + " -mms=" + perfMessageSize + " -n=" + perfNumberRounds + " -c" + " -d=" + deployModeInProcManual + " -icp=" + ICPort; + } + + // If starting in Time Travel debugger mode, then add the TTD parameters + if (deployMode == deployModeInProcTimeTravel) + { + // removed " -icp=" + ICPort + argString = "-j=" + perfJobName + " -s=" + perfServerName + " -rp=" + receivePort + " -sp=" + sendPort + + " -mms=" + perfMessageSize + " -n=" + perfNumberRounds + " -c" + " -d=" + deployModeInProcTimeTravel + + " -l=" + TTDLog + " -ch=" + TTDCheckpointNum; + } // Start process int processID = LaunchProcess(workingDir, fileNameExe, argString, false, testOutputLogFile); @@ -844,7 +1205,11 @@ public int StartPerfClientJob(string receivePort, string sendPort, string perfJo Assert.Fail(" Perf Client was not started. ProcessID <=0 "); } - // Give it a few seconds to start + // Give it a few seconds to start -- give extra time if starting IC as part of this too + if (ICPort != "") + { + Thread.Sleep(6000); + } Thread.Sleep(2000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. @@ -852,12 +1217,18 @@ public int StartPerfClientJob(string receivePort, string sendPort, string perfJo } // Perf Client from PerformanceTest --- runs in Async - public int StartAsyncPerfClientJob(string receivePort, string sendPort, string perfJobName, string perfServerName, string testOutputLogFile) + public int StartAsyncPerfClientJob(string receivePort, string sendPort, string perfJobName, string perfServerName, string perfNumberRounds, string testOutputLogFile) { + + // Set path by using proper framework + string current_framework = NetCoreFramework; + if (NetFrameworkTestRun) + current_framework = NetFramework; + // Launch the client job process with these values - string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestJobExeWorkingDirectory"]; + string workingDir = ConfigurationManager.AppSettings["AsyncPerfTestJobExeWorkingDirectory"] + current_framework; string fileNameExe = "Job.exe"; - string argString = "-rp="+receivePort + " -sp=" + sendPort + " -j=" + perfJobName + " -s=" + perfServerName +" -c "; + string argString = "-rp=" + receivePort + " -sp=" + sendPort + " -j=" + perfJobName + " -s=" + perfServerName + " -n=" + perfNumberRounds + " -c "; int processID = LaunchProcess(workingDir, fileNameExe, argString, false, testOutputLogFile); if (processID <= 0) @@ -867,13 +1238,14 @@ public int StartAsyncPerfClientJob(string receivePort, string sendPort, string p } // Give it a few seconds to start - Thread.Sleep(4000); + Thread.Sleep(6000); Application.DoEvents(); // if don't do this ... system sees thread as blocked thread and throws message. return processID; } + public void LogDebugInfo(string logEntry) { string timeStamp = DateTime.Now.ToString(); @@ -888,7 +1260,7 @@ public void LogDebugInfo(string logEntry) File.AppendAllText(logDir + @"\AmbrosiaTest_Debug.log", logEntry); } } - catch + catch { // If debug logging fails ... no biggie, don't want it to stop test } @@ -903,7 +1275,7 @@ public void LogDebugInfo(string logEntry) public void TruncateAmbrosiaLogDir(string testName) { // Assuming _0 for directory files ... this might be bad assumption - string ambrosiaClientLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"]+"\\"+testName+"clientjob_0"; + string ambrosiaClientLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\" + testName + "clientjob_0"; string ambrosiaServerLogDir = ConfigurationManager.AppSettings["AmbrosiaLogDirectory"] + "\\" + testName + "server_0"; int numberOfFilesToKeep = 8; @@ -917,7 +1289,7 @@ public void TruncateAmbrosiaLogDir(string testName) int i = 0; foreach (FileInfo file in files) { - + string currentFile = file.Name; i++; @@ -973,8 +1345,8 @@ public void VerifyBytesRecievedInTwoLogFiles(string logFile1, string logFile2) try { // set default to something different so if not existent, then know it fails - string bytesReceivedFile1="0"; - string bytesReceivedFile2="1"; + string bytesReceivedFile1 = "0"; + string bytesReceivedFile2 = "1"; using (var streamReader = File.OpenText(firstLogFile)) { @@ -1002,7 +1374,7 @@ public void VerifyBytesRecievedInTwoLogFiles(string logFile1, string logFile2) } // Make sure has bytes recieved in it - if (bytesReceivedFile1=="0") + if (bytesReceivedFile1 == "0") { FailureSupport(""); Assert.Fail("Could not find 'Bytes received' in log file:" + logFile1); @@ -1017,172 +1389,251 @@ public void VerifyBytesRecievedInTwoLogFiles(string logFile1, string logFile2) if (Convert.ToInt64(bytesReceivedFile1) != Convert.ToInt64(bytesReceivedFile2)) { FailureSupport(""); - Assert.Fail("'Bytes received' did not match up. Log:"+logFile1+" had:"+ bytesReceivedFile1+" and Log:"+logFile2+" had:"+bytesReceivedFile2); + Assert.Fail("'Bytes received' did not match up. Log:" + logFile1 + " had:" + bytesReceivedFile1 + " and Log:" + logFile2 + " had:" + bytesReceivedFile2); } } catch (Exception e) { FailureSupport(""); - Assert.Fail(" Exception happened:"+e.Message); + Assert.Fail(" Exception happened:" + e.Message); } } //** Separate from TestCleanup as want it to be as quick as possible public void UnitTestCleanup() { - Utilities MyUtils = new Utilities(); - // If failures in queue then do not want to do anything (init, run test, clean up) - if (MyUtils.CheckStopQueueFlag()) + if (CheckStopQueueFlag()) { return; } - // Kill all ImmortalCoordinators, Job and Server exes - MyUtils.KillProcessByName("ImmortalCoordinator"); - MyUtils.KillProcessByName("Job"); - MyUtils.KillProcessByName("Server"); - MyUtils.KillProcessByName("Ambrosia"); - MyUtils.KillProcessByName("MSBuild"); - //MyUtils.KillProcessByName("cmd"); // sometimes processes hang - - // Give it a few second to clean things up a bit more - Thread.Sleep(2000); + // Stop all running processes that hung or were left behind + StopAllAmbrosiaProcesses(); // Clean up Azure - this is called after each test so put all test names in for azure tables - MyUtils.CleanupAzureTables("unitendtoendtest"); + CleanupAzureTables("unitendtoendtest"); + Thread.Sleep(2000); + CleanupAzureTables("unitendtoendrestarttest"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("unitendtoendrestarttest"); + CleanupAzureTables("unittestactiveactivekillprimary"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("unittestactiveactivekillprimary"); + CleanupAzureTables("unittestinproctcp"); + Thread.Sleep(2000); + CleanupAzureTables("unittestinprocpipe"); Thread.Sleep(2000); } public void TestCleanup() { - Utilities MyUtils = new Utilities(); // If failures in queue then do not want to do anything (init, run test, clean up) - if (MyUtils.CheckStopQueueFlag()) + if (CheckStopQueueFlag()) { return; } - // Kill all ImmortalCoordinators, Job and Server exes - MyUtils.KillProcessByName("ImmortalCoordinator"); - MyUtils.KillProcessByName("Job"); - MyUtils.KillProcessByName("Server"); - MyUtils.KillProcessByName("Ambrosia"); - MyUtils.KillProcessByName("MSBuild"); - MyUtils.KillProcessByName("dotnet"); - //MyUtils.KillProcessByName("cmd"); // sometimes processes hang - - // Give it a few second to clean things up a bit more - Thread.Sleep(5000); + // Stop all running processes that hung or were left behind + StopAllAmbrosiaProcesses(); // Clean up Azure - this is called after each test so put all test names in for azure tables - MyUtils.CleanupAzureTables("killjobtest"); + CleanupAzureTables("killjobtest"); + Thread.Sleep(2000); + CleanupAzureTables("basictest"); + Thread.Sleep(2000); + CleanupAzureTables("killservertest"); + Thread.Sleep(2000); + CleanupAzureTables("giantmessagetest"); + Thread.Sleep(2000); + CleanupAzureTables("doublekilljob"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("basictest"); + CleanupAzureTables("doublekillserver"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("killservertest"); + CleanupAzureTables("mtfnokill"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("giantmessagetest"); + CleanupAzureTables("mtfnokillpersist"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("doublekilljob"); + CleanupAzureTables("mtfkillpersist"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("doublekillserver"); + CleanupAzureTables("activeactiveaddnotekillprimary"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("mtfnokill"); + CleanupAzureTables("activeactivekillprimary"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("mtfnokillpersist"); + CleanupAzureTables("activeactivekillcheckpoint"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("mtfkillpersist"); + CleanupAzureTables("activeactivekillsecondary"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactiveaddnotekillprimary"); + CleanupAzureTables("activeactivekillsecondaryandcheckpoint"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillprimary"); + CleanupAzureTables("activeactivekillclientandserver"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillcheckpoint"); + CleanupAzureTables("activeactivekillall"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillsecondary"); + CleanupAzureTables("startimmcoordlasttest"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillsecondaryandcheckpoint"); + CleanupAzureTables("actactaddnotekillprimary"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillclientandserver"); + CleanupAzureTables("upgradeserverafterserverdone"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("activeactivekillall"); + CleanupAzureTables("upgradeserverbeforeserverdone"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("startimmcoordlasttest"); + CleanupAzureTables("upgradeserverbeforestarts"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("actactaddnotekillprimary"); + CleanupAzureTables("upgradeactiveactiveprimaryonly"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("upgradeserverbeforeserverdone"); + CleanupAzureTables("migrateclient"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("upgradeserverafterserverdone"); + CleanupAzureTables("multipleclientsperserver"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("upgradeserverbeforestarts"); + CleanupAzureTables("giantcheckpointtest"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("multipleclientsperserver"); + CleanupAzureTables("overrideoptions"); Thread.Sleep(2000); - MyUtils.CleanupAzureTables("giantcheckpointtest"); + CleanupAzureTables("savelogtoblob"); + Thread.Sleep(2000); + CleanupAzureTables("savelogtofileandblob"); + // Give it a few second to clean things up a bit more Thread.Sleep(5000); } - public void AsyncTestCleanup() + public void InProcPipeTestCleanup() { - Utilities MyUtils = new Utilities(); // If failures in queue then do not want to do anything (init, run test, clean up) - if (MyUtils.CheckStopQueueFlag()) + if (CheckStopQueueFlag()) { return; } - // Kill all ImmortalCoordinators, Job and Server exes - MyUtils.KillProcessByName("ImmortalCoordinator"); - MyUtils.KillProcessByName("Job"); - MyUtils.KillProcessByName("Server"); - MyUtils.KillProcessByName("Ambrosia"); - MyUtils.KillProcessByName("MSBuild"); - MyUtils.KillProcessByName("dotnet"); - //MyUtils.KillProcessByName("cmd"); // sometimes processes hang + // Stop all running processes that hung or were left behind + StopAllAmbrosiaProcesses(); + + // Clean up Azure - this is called after each test so put all test names in for azure tables + CleanupAzureTables("inprocpipeclientonly"); + Thread.Sleep(2000); + CleanupAzureTables("inprocpipeclientonly"); + Thread.Sleep(2000); + CleanupAzureTables("inprocbasictest"); + Thread.Sleep(2000); + CleanupAzureTables("inprocgiantcheckpointtest"); + Thread.Sleep(2000); + CleanupAzureTables("inprocgiantmessagetest"); + Thread.Sleep(2000); + CleanupAzureTables("inprocdoublekilljob"); + Thread.Sleep(2000); + CleanupAzureTables("inprocdoublekillserver"); + Thread.Sleep(2000); + CleanupAzureTables("inprockilljobtest"); + Thread.Sleep(2000); + CleanupAzureTables("inprockillservertest"); + Thread.Sleep(2000); + CleanupAzureTables("inprocmultipleclientsperserver"); + Thread.Sleep(2000); + CleanupAzureTables("inprocblob"); + Thread.Sleep(2000); + CleanupAzureTables("inprocfileblob"); + Thread.Sleep(2000); + CleanupAzureTables("inprocmigrateclient"); + Thread.Sleep(2000); + CleanupAzureTables("inprocupgradeafterserverdone"); + Thread.Sleep(2000); + CleanupAzureTables("inprocupgradebeforeserverdone"); + Thread.Sleep(2000); + CleanupAzureTables("inprocpipeserveronly"); + Thread.Sleep(2000); // Give it a few second to clean things up a bit more Thread.Sleep(5000); + } + + + public void InProcTCPTestCleanup() + { + + // If failures in queue then do not want to do anything (init, run test, clean up) + if (CheckStopQueueFlag()) + { + return; + } + + // Stop all running processes that hung or were left behind + StopAllAmbrosiaProcesses(); // Clean up Azure - this is called after each test so put all test names in for azure tables - MyUtils.CleanupAzureTables("asyncbasic"); + CleanupAzureTables("inproctcpclientonly"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpserveronly"); + Thread.Sleep(2000); + CleanupAzureTables("inprocclienttcpserverpipe"); + Thread.Sleep(2000); + CleanupAzureTables("inprocclientpipeservertcp"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpkilljobtest"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpkillservertest"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpfileblob"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpblob"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpupgradeserver"); + Thread.Sleep(2000); + CleanupAzureTables("inproctcpmigrateclient"); Thread.Sleep(2000); // Give it a few second to clean things up a bit more Thread.Sleep(5000); } + public void StopAllAmbrosiaProcesses() + { + + // If failures in queue then do not want to do anything (init, run test, clean up) + if (CheckStopQueueFlag()) + { + return; + } + + // Kill all ImmortalCoordinators, Job and Server exes + KillProcessByName("Job"); + KillProcessByName("Server"); + KillProcessByName("ImmortalCoordinator"); + KillProcessByName("Ambrosia"); + KillProcessByName("MSBuild"); + KillProcessByName("dotnet"); + //KillProcessByName("cmd"); // sometimes processes hang + KillProcessByName("node"); + + + // Give it a few second to clean things up a bit more + Thread.Sleep(5000); + } + public void TestInitialize() { - Utilities MyUtils = new Utilities(); - // If failures in queue then do not want to do anything (init, run test, clean up) - if (MyUtils.CheckStopQueueFlag()) + if (CheckStopQueueFlag()) { Assert.Fail("Queue Stopped due to previous test failure. This test not run."); return; } // Verify environment - MyUtils.VerifyTestEnvironment(); + VerifyTestEnvironment(); // Make sure azure tables etc are cleaned up - there is a lag when cleaning up Azure so could cause issues with test // Cleanup(); + // Make sure nothing running from previous test + StopAllAmbrosiaProcesses(); + // make sure log files cleaned up - MyUtils.CleanupAmbrosiaLogFiles(); + CleanupAmbrosiaLogFiles(); // Give it a few seconds to truly init everything - on 8 min test - 3 seconds is no biggie Thread.Sleep(3000); diff --git a/AmbrosiaTest/AmbrosiaTest/app.config b/AmbrosiaTest/AmbrosiaTest/app.config index 845d06bd..91cb2fd3 100644 --- a/AmbrosiaTest/AmbrosiaTest/app.config +++ b/AmbrosiaTest/AmbrosiaTest/app.config @@ -1,131 +1,142 @@  - -
- - - - - - - - - - + +
+ + + + + + + + + + - - + + + - - + + + + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JSCodeGen.njsproj b/AmbrosiaTest/JSCodeGen/JSCodeGen.njsproj new file mode 100644 index 00000000..5453789c --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JSCodeGen.njsproj @@ -0,0 +1,89 @@ + + + 14.0 + $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion) + JSCodeGen + JSCodeGen + + + + Debug + 2.0 + 61917a12-2be6-4465-bb76-b467295b972d + . + + + False + + + . + . + v4.0 + {3AF33F2E-1136-4D97-BBB7-1795711AC8B8};{9092AA53-FB77-4645-B42D-1CCCA6BD08BD} + false + + + true + + + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/ASTTest.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/ASTTest.ts new file mode 100644 index 00000000..711e96e3 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/ASTTest.ts @@ -0,0 +1,81 @@ +export namespace Test +{ + /** + * Testing 1) a mix of ',' and ';' member separators, 2) A complex-type array + * @ambrosia publish = true */ + export type MixedTest = + { + p1: string[]; + p2: string[][], + p3: { p4: number; p5: string }[]; + }; + + /** + * Example of a complex type. + * @ambrosia publish=true + */ + export type Name = + { + // Test 1 + first: string, // Test 2 + /** Test 3 */ + last: string /* Test 4 */ + } + + /** + * Example of a type that references another type. + * @ambrosia publish=true + */ + export type Names = Name[]; + + /** + * Example of a nested complex type. + * @ambrosia publish=true + */ + export type Nested = + { + abc: + { + a: Uint8Array, + b: + { + c: Names + } + } + } + + /** + * Example of an enum. + * @ambrosia publish=true + */ + export enum Letters + { + // The A + A, + B = /** The B */ 3, + /* The C */ + C, // The C + /** The D */ D = 9 + } + + /** + * Example of a [post] method that uses custom types. + * @ambrosia publish=true, version=1 + */ + export function makeName(firstName: string = "John", lastName: string /** Foo */ = "Doe"): Names + { + let names: Names; + let name: Name = { first: firstName, last: lastName }; + names.push(name); + return (names); + } + + /** + * Example of a [non-post] method + * @ambrosia publish=true, methodID=123 + */ + export function DoIt(p1: Name[][]): void + { + console.log("Done!"); + } +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AmbrosiaTagNewline.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AmbrosiaTagNewline.ts new file mode 100644 index 00000000..07913050 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AmbrosiaTagNewline.ts @@ -0,0 +1,18 @@ +/** + Invalid test case - a new line after tag is not valid scenario +*/ + +export namespace Test +{ + + + /** @ambrosia publish=true + * Comment on next line. + */ + export function NewLineCommentAfterTag() + { + console.log("New Line after tag"); + } + +} + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AsyncFctn.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AsyncFctn.ts new file mode 100644 index 00000000..247f7c46 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_AsyncFctn.ts @@ -0,0 +1,34 @@ +/** + Invalid test case - Async is not supported +*/ + +export namespace Test +{ + + /** + * Parameter type for the 'ComputePI' method. + * @ambrosia publish = true + */ + export type Digits = { count: number }; + + /** + * Returns pi computed to the specified number of digits. + * @ambrosia publish=true, version=1, doRuntimeTypeChecking=true + */ + export async function ComputePI(/** Foo */ + digits /* Bar */ : + /** Baz */ Digits = + { + count: 12 /** a Dozen! */ + }): Promise + { + function localfn(): void + { + console.log("foo!"); + } + let pi: number = Number.parseFloat(Math.PI.toFixed(digits?.count ?? 10)); + return (pi); + } + +} + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CircReference.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CircReference.ts new file mode 100644 index 00000000..bbe7d5fa --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CircReference.ts @@ -0,0 +1,12 @@ +/** @ambrosia publish=true */ + export type CName = + { + first: string, + last: string, + priorNames: CNames[] + } + /** + * Cannot publish a type that has a circular reference + * @ambrosia publish=true + */ + export type CNames = CName[]; diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CommasBetweenAttrib.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CommasBetweenAttrib.ts new file mode 100644 index 00000000..d1bf3c8d --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_CommasBetweenAttrib.ts @@ -0,0 +1,10 @@ +export module Test +{ + /** + * There must be commas between attributes + * @ambrosia publish=true version=1 doRuntimeTypeChecking=true + */ + export function MyFn5(): void { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_GenericType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_GenericType.ts new file mode 100644 index 00000000..27deac43 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_GenericType.ts @@ -0,0 +1,14 @@ +export module Test +{ + /** + * Invalid test - generic function not supported as published function + * @ambrosia publish=true + * + */ + export function generic(p1: T): T + { + return (p1); + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDInt.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDInt.ts new file mode 100644 index 00000000..694f85d2 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDInt.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * methodID attribute must be an integer + * @ambrosia publish=true, methodID=Hello + */ + export function MyFn2(): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDNeg.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDNeg.ts new file mode 100644 index 00000000..398da522 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDNeg.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Can't have a methodID less than -1 + * @ambrosia publish=true, methodID=-2 + */ + export function MyFn(): void { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDOnType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDOnType.ts new file mode 100644 index 00000000..694f85d2 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_MethodIDOnType.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * methodID attribute must be an integer + * @ambrosia publish=true, methodID=Hello + */ + export function MyFn2(): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NamespaceModule.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NamespaceModule.ts new file mode 100644 index 00000000..78186b75 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NamespaceModule.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Negative Test ************* + + + /** + * Can't publish a namespace (module) + * @ambrosia publish=true + */ + namespace MyNS { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction.ts new file mode 100644 index 00000000..c32106b1 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction.ts @@ -0,0 +1,16 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Cannot publish a local (nested) function + * @ambrosia publish=true + */ + export function parentFn(): void { + /** @ambrosia publish=true */ + function localFn(): void { + } + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction2.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction2.ts new file mode 100644 index 00000000..40d739ae --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NestedFunction2.ts @@ -0,0 +1,10 @@ + export class SomeClass { + // Cannot publish a local (nested) function in a static method + static someStaticMethod(): void + { + /** @ambrosia publish=true */ + function localFn(): void + { + } + } +} diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionComplexType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionComplexType.ts new file mode 100644 index 00000000..ae32d779 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionComplexType.ts @@ -0,0 +1,20 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Unsupported type (FunctionType) in complex type property + * @ambrosia publish=true + */ + export type myComplexType = + { + p1: + { + fn: /* Test 1*/ () => /* Test 2*/ void, + p3: number + }, + p2: string + }; + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionType.ts new file mode 100644 index 00000000..9d0bf24c --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoFunctionType.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Function types are not supported + * @ambrosia publish=true + */ + export type fnType = (p1: number) => string; + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoIntersectionType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoIntersectionType.ts new file mode 100644 index 00000000..94389b3c --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoIntersectionType.ts @@ -0,0 +1,15 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Intersection types are not supported + * @ambrosia publish=true + */ + export type IntersectionType = FullName[] & ShortName[]; + export type ShortName = { first: string }; + export type FullName = { first: string, last: string}; + + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoTaggedItems.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoTaggedItems.ts new file mode 100644 index 00000000..d584f6e4 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_NoTaggedItems.ts @@ -0,0 +1,12 @@ +export module Test +{ + /** + * Invalid test - no objects are tagged to be generated + */ + export function NothingIsTagged() + { + console.log(`A function exists but nothing in file is tagged so pops an error.`); + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OptionalProperties.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OptionalProperties.ts new file mode 100644 index 00000000..0ffbfd3e --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OptionalProperties.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Types with optional properties are not supported + * @ambrosia publish=true + */ + export type MyTypeWithOptionalMembers = { foo: string, bar?: number }; + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OverloadedFunction.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OverloadedFunction.ts new file mode 100644 index 00000000..51679652 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_OverloadedFunction.ts @@ -0,0 +1,14 @@ +export module Test +{ + //** Negative test + + /** + * The ambrosia tag must be on the implementation of an overloaded function + * @ambrosia publish=true + */ + export function fnOverload(): void; + export function fnOverload(name?: string): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishClass.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishClass.ts new file mode 100644 index 00000000..6630dcf5 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishClass.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Tagging interfaces are not valid scenarios ************* + + + /** + * Can't publish a class + * @ambrosia publish=true + */ + class MyClass { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishMethodBeforeRef.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishMethodBeforeRef.ts new file mode 100644 index 00000000..31157722 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_PublishMethodBeforeRef.ts @@ -0,0 +1,17 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Can't publish any method while references to unpublished types exist + * @ambrosia publish=true + */ + export type MyType = Name[]; + export type Name = { first: string, last: string }; + /** @ambrosia publish=true */ + export function fn(): void { + } + + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_QuoteAttributeValue.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_QuoteAttributeValue.ts new file mode 100644 index 00000000..aaa6e6bd --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_QuoteAttributeValue.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Cannot use quotes around attribute values + * @ambrosia publish="true" + */ + export function MyFn8(): void { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_RunTimeBool.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_RunTimeBool.ts new file mode 100644 index 00000000..7fdd638d --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_RunTimeBool.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * doRuntimeTypeChecking attribute must be a boolean + * @ambrosia publish=true, doRuntimeTypeChecking=Hello + */ + export function MyFn4(): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_SingleUInt8Array.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_SingleUInt8Array.ts new file mode 100644 index 00000000..1f29a58a --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_SingleUInt8Array.ts @@ -0,0 +1,13 @@ +export module Test +{ + /** + * Method with single 'rawParams: Uint8Array' parameter cannot be a Post method (ie. missing the 'methodID=' attribute) + * @param rawParams Description of the format of the custom serialized byte array. + * @ambrosia publish=true + */ + export function takesCustomSerializedParams(rawParams: Uint8Array): void { + } + + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod1.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod1.ts new file mode 100644 index 00000000..4d3c59c2 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod1.ts @@ -0,0 +1,10 @@ +class StaticStuff { + /** + * The parent class of a published static method must be exported. + * @ambrosia publish=true + */ + static hello(name: string): void { + console.log(`Hello ${name}!`); + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod2.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod2.ts new file mode 100644 index 00000000..437fed09 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod2.ts @@ -0,0 +1,11 @@ +export class StaticStuff { + /** + * A method must have the 'static' modifier to be published. + * @ambrosia publish=true + */ + hello(name: string): void { + console.log(`Hello ${name}!`); + } +} + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod3.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod3.ts new file mode 100644 index 00000000..1bd9b2da --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod3.ts @@ -0,0 +1,13 @@ +// Cannot publish a static method from a class expression +export class MoreStaticStuff { + public utilities = new class Foo { + constructor() { + } + + /** @ambrosia publish=true */ + static helloAgain(name: string) { + console.log(`Hello ${name}!`); + } + }(); +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod4.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod4.ts new file mode 100644 index 00000000..36769d3e --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StaticMethod4.ts @@ -0,0 +1,11 @@ +class MyClassWithPrivateMember +{ + /** + * Can't publish a private static method + * @ambrosia publish=true + */ + private static privateMethod(): void + { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StringEnum.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StringEnum.ts new file mode 100644 index 00000000..3dd39fe7 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_StringEnum.ts @@ -0,0 +1,17 @@ +export module Test +{ + /********** Enum type (string enum - initialize strings with strings and not as number) ************* + * @ambrosia publish=true + */ + export enum PrintMediaString { + NewspaperStringEnum = "NEWSPAPER", + NewsletterStringEnum = "NEWSLETTER", + MagazineStringEnum = "MAGAZINE", + BookStringEnum = "BOOK" + } + + PrintMediaString.NewspaperStringEnum; //returns NEWSPAPER + PrintMediaString['Magazine'];//returns MAGAZINE + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagInterface.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagInterface.ts new file mode 100644 index 00000000..fc0ba9f9 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagInterface.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Tagging interfaces are not valid scenarios ************* + + /** @ambrosia publish=true */ + interface IFoo + { + foo: number; + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagMethod.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagMethod.ts new file mode 100644 index 00000000..0c511260 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TagMethod.ts @@ -0,0 +1,13 @@ +export class Time +{ + /********** Tagging methods are not valid scenarios ************* + + /** @ambrosia publish=true */ + currentYear(): number + { + return (2020); + } + +} + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TupleType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TupleType.ts new file mode 100644 index 00000000..7d8451d5 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TupleType.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Tuple types are not supported + * @ambrosia publish=true + */ + export type MyTupleType = [string, number]; + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TwoAmbrTags.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TwoAmbrTags.ts new file mode 100644 index 00000000..60780ddc --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_TwoAmbrTags.ts @@ -0,0 +1,15 @@ +export module Test +{ + /********** Negative Test ************* + + + /** + * Ambrosia tag can only appear once + * @ambrosia publish=false + * @ambrosia publish=true + */ + export function MyFn7(): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionType.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionType.ts new file mode 100644 index 00000000..4e30b2bd --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionType.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Union types are not supported + * @ambrosia publish=true + */ + export type MyUnionType = string | number; + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionTypeCommented.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionTypeCommented.ts new file mode 100644 index 00000000..6a72ba0f --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnionTypeCommented.ts @@ -0,0 +1,26 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * Correctly handle line-breaks and comments in an unsupported return type + * @ambrosia publish=true + */ + export function myComplexReturnFunction(): + { + // TEST0 + r1: string, + r2: number | + // TEST1 + /* + TEST2 + */ + string + } + { + return (null); + } + + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Method.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Method.ts new file mode 100644 index 00000000..d9c8b9f2 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Method.ts @@ -0,0 +1,12 @@ +export module Test +{ + /********** Negative Test ************* + /** + * Unknown attribute name [on a method] + * @ambrosia published=true + */ + export function MyFn6(): void + { + } +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Type.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Type.ts new file mode 100644 index 00000000..b73b05dd --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_UnknownAtt_Type.ts @@ -0,0 +1,10 @@ +export module Test +{ + /********** Negative Test ************* + /** + * Unknown attribute name [on a type] + * @ambrosia published=true + */ + export type NewType = number[]; +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_VersionInt.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_VersionInt.ts new file mode 100644 index 00000000..7b4a6806 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/NegativeTests/TS_VersionInt.ts @@ -0,0 +1,13 @@ +export module Test +{ + /********** Negative Test ************* + + /** + * version attribute must be an integer + * @ambrosia publish=true, version=Hello + */ + export function MyFn3(): void { + } + +} + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts new file mode 100644 index 00000000..6ada35ac --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_AmbrosiaTag.ts @@ -0,0 +1,78 @@ +/** + Test File to test all the the ways that the ambrosia tag can be set and still work +*/ + +export namespace Test { + + + /** @ambrosia publish=true */ + export function OneLineNoComment() { + console.log("One Line with no extra comment"); + } + + /** Multi Line with Comment before Tag + * but still before tag + * @ambrosia publish=true + */ + export function MultiLineCommentBeforeTag() { + console.log("Multi Line before tag"); + } + + /** Multi Line with Comment before Tag */ + /** but still before tag -- since separate comment, these will not show in .g.ts*/ + /** @ambrosia publish=true + */ + export function MultiSeparateLinesCommentBeforeTag() { + console.log("Multi Separate Comment Line before tag"); + } + + /** Multi Line with Comment after Tag */ + /** @ambrosia publish=true + */ + /** Separate Comment after tag -- causes a warning that Skipping Function*/ + export function SeparateLinesCommentAfterTag() { + console.log("Separate Comment Line after tag"); + } + + + /************** Have a space after the tag before function declaration + * @ambrosia publish=true + */ + + export function EmptyLineBetweenTagAndFctn() { + console.log("Empty line between tag and fctn"); + } + + /****** Spacing around the tag + * @ambrosia publish=true + */ + export function SpacingAroundTag() { + console.log("Spacing in front and behind tag"); + } + + /** JS Doc + * @ambrosia publish=true + */ + export function JSDOcTag() { + console.log("JSDOcTag"); + } + + + /* This will NOT generate code - causes a warning that Skipping Function */ + /******** @ambrosia publish=true */ + export function NotJSDOcTag() { + console.log("NotJSDOcTag"); + } + + /* Proper way to tag Overloaded functions */ + export function fnOverload(): void; + export function fnOverload(name: string): void; + /** + * The ambrosia tag must be on the implementation of an overloaded function + * @ambrosia publish=true + */ + export function fnOverload(name?: string): void { + } + + +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParam.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParam.ts new file mode 100644 index 00000000..1b1f800e --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParam.ts @@ -0,0 +1,15 @@ + +export namespace Test +{ + + /** + * Method to test custom serialized parameters. + * @ambrosia publish=true, methodID=2 + * @param rawParams Description of the format of the custom serialized byte array. + */ + export function takesCustomSerializedParams(rawParams: Uint8Array): void { + } + + +} + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts new file mode 100644 index 00000000..465adbe8 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_CustomSerialParamNoRawParam.ts @@ -0,0 +1,17 @@ +/** + Test when missing @param rawParams +*/ + +export namespace Test +{ + + /** + * Method to test custom serialized parameters. + * @ambrosia publish=true, methodID=2 + */ + export function takesCustomSerializedParams(rawParams: Uint8Array): void { + } + + +} + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts new file mode 100644 index 00000000..744c8a5a --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlerWarnings.ts @@ -0,0 +1,26 @@ +// Tests to handle Event Handlers warnings +// Can't add to negative because not failing +// Can't add to TS_EventHandlers.ts because using functions already defined + + +// This is a bit of a rare case ... a warning will show success. Will want to verify warning though. +export namespace HandlerNegativeTests { + // Handler with incorrect parameters + // Note: This only produces a warning, not an error + export function onRecoveryComplete(name: string): void { + } + + // Handler with incorrect return type + // Note: This only produces a warning, not an error + export function onBecomingPrimary(): number { + return (123); + } +} + +/** @ambrosia publish=true */ +export function unused(): void { +} + + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlers.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlers.ts new file mode 100644 index 00000000..47da17b9 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_EventHandlers.ts @@ -0,0 +1,68 @@ +// Tests to handle Event Handlers. +// Event handler function in the input source file for any AppEvent will automatically get wired-up at code gen (publisher side) +// Even if have the @ambrosia tag, it should NOT code gen to consumer. + +// have couple inside namespace +export namespace Test +{ + + /** Fake Event Handler due to case in the name so this will be generated + * @ambrosia publish=true + */ + export function onbecomingprimary(): void + { + console.log(`Fake Event Handler due to name case so just seen as typical function`); + } + + export function onRecoveryComplete(/** Bar! */): /** Foo! */ void + { + console.log(`On Recovery`); + } + + ///** @ambrosia publish=true */ Putting an Ambrosia tag on Event Handler will cause error + export function onBecomingPrimary(): void + { + console.log(`Becoming primary`); + } +} + +// have some outside namespace + + export function onICStopped(exitCode: number): void + { + console.log(`The IC stopped with exit code ${exitCode}`); + } + + export function onICStarted(): void + { + console.log(`The IC Started`); + } + + export function onICStarting(): void + { + console.log(`The IC is starting`); + } + + export function onICReadyForSelfCallRpc(): void + { + console.log(`The IC Ready`); + } + + export function onUpgradeStateAndCode(): void + { + console.log(`The onUpGrade`); + } + + export function onIncomingCheckpointStreamSize(): void + { + console.log(`The incoming checkpoint`); + } + + //** This is valid EventHandler but do not add code for event to make sure publisher handles an event that isn't defined */ + //** Should put a "TODO" comment in publisher generated code */ + //export function onFirstStart(): void + //{ + //console.log(`on First Start`); + //} + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType1.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType1.ts new file mode 100644 index 00000000..2c861536 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType1.ts @@ -0,0 +1,9 @@ +/** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #1 + * @ambrosia publish = true + */ +export type NameToNumberDictionary = Map; + + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType2.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType2.ts new file mode 100644 index 00000000..d6a58259 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_GenType2.ts @@ -0,0 +1,15 @@ +/** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #2 + * @ambrosia publish = true + */ +export type EmployeeWithGenerics = { firstNames: Set<{ name: string, nickNames: NickNames }>, lastName: string, birthYear: number }; + +/** + * Test for a literal-object array type; this should generate a 'NickNames_Element' class and then redefine the type of NickNames as Nicknames_Element[]. + * This is done to makes it easier for the consumer to create a NickNames instance. + * @ambrosia publish = true + */ +export type NickNames = { name: string }[]; + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment.ts new file mode 100644 index 00000000..645fc4d0 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment.ts @@ -0,0 +1,12 @@ +/** Some static methods. */ +export class StaticStuff { + /** @ambrosia publish=true */ + static hello(name: string): void { + console.log("Hello ${name}!"); + } +} + + + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment2.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment2.ts new file mode 100644 index 00000000..c8eb6fb6 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_JSDocComment2.ts @@ -0,0 +1,29 @@ +/** The Fooiest Foo ever! This comment not generated because no direct published entities - Baz will though */ +export namespace Foo { + export namespace Bar { + /** + * The Baziest Baz... + * ...ever! + */ + export namespace Baz { + /** + * Generic built-in types can be used, but only with concrete types (not type placeholders, eg. "T"): Example #1 + * @ambrosia publish = true + */ + export type NameToNumberDictionary = Map; + } + } + export namespace Woo { + /** */ + export namespace Hoo { + /** @ambrosia publish = true */ + export type NumberToNameDictionary = Map; + } + } +} + + + + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_LitObjArray.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_LitObjArray.ts new file mode 100644 index 00000000..007a1083 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_LitObjArray.ts @@ -0,0 +1,10 @@ + /** + * Test for a literal-object array type; this should generate a 'NickNames_Element' class and then redefine the type of NickNames as Nicknames_Element[]. + * This is done to makes it easier for the consumer to create a NickNames instance. + * @ambrosia publish = true + */ + export type NickNames = { name: string }[]; + + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_MiscTests.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_MiscTests.ts new file mode 100644 index 00000000..29c52eaa --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_MiscTests.ts @@ -0,0 +1,29 @@ +/** + Test File of misc tests. If find a theme or grouping then move out of this file into separate file +*/ + +export namespace Test { + + + /** + * Correctly handle line-breaks and comments + * @ambrosia publish=true + */ + export function myComplexReturnFunction(): + { + // TEST0 + r1: string, + r2: + // TEST1 + /* + TEST2 + */ + string + } + { + return (null); + } + + + +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_StaticMethod.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_StaticMethod.ts new file mode 100644 index 00000000..ba70b7ee --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_StaticMethod.ts @@ -0,0 +1,11 @@ +export class StaticStuff +{ + /** @ambrosia publish=true */ + static hello(name: string): void + { + console.log(`Hello ${name}!`); + } +} + + + diff --git a/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_Types.ts b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_Types.ts new file mode 100644 index 00000000..9b1db6c0 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/JS_CodeGen_TestFiles/TS_Types.ts @@ -0,0 +1,165 @@ +/** + Test File to test all the Types for typescripts + Has the basic types +*/ + +export namespace Test +{ + + /************* Primitives - bool, string, number, array ******** + * @ambrosia publish=true + * + */ + export function BasicTypes(isFalse: boolean, height: number,mystring: string = "doublequote",mystring2: string = 'singlequote',my_array:number[] = [1, 2, 3],notSure: any = 4) + { + console.log(isFalse); + console.log(height); + console.log(mystring); + console.log(mystring2); + console.log(my_array); + console.log(notSure); + } + + //**** String Enums are not supported scenario */ + + /*********** Enum type (numeric enum - strings as number) as return ************* + * @ambrosia publish=true + */ + export enum PrintMedia { + Newspaper = 1, + Newsletter, + Magazine, + Book + } + + /********* Function using / returning Numeric Enum **** + * @ambrosia publish=true + */ + export function getMedia(mediaName: string): PrintMedia { + if ( mediaName === 'Forbes' || mediaName === 'Outlook') { + return PrintMedia.Magazine; + } + } + + /********** Enum type (Reverse Mapped enum - can access the value of a member and also a member name from its value) ************* + * @ambrosia publish=true + */ + export enum PrintMediaReverse { + NewspaperReverse = 1, + NewsletterReverse, + MagazineReverse, + BookReverse + } + + PrintMediaReverse.MagazineReverse; // returns 3 + PrintMediaReverse["MagazineReverse"];// returns 3 + PrintMediaReverse[3]; // returns MagazineReverse + + + /** @ambrosia publish=true */ + export enum MyEnumAA { + aa = -1, + bb = -123, + cc = 123, + dd = 0 + } + + /** @ambrosia publish=true */ + export enum MyEnumBBB { + aaa = -1, + bbb + } + + + + /************* Void type ************* + * @ambrosia publish=true + */ + export function warnUser(): void + { + alert("This is my warning message"); + } + + + /*************** Complex Type ************* + * @ambrosia publish=true + */ + export type Name = + { + // Test 1 + first: string, // Test 2 + /** Test 3 */ + last: string /* Test 4 */ + } + + /************** Example of a type that references another type *************. + * @ambrosia publish=true + */ + export type Names = Name[]; + + + /************** Example of a nested complex type.************* + * @ambrosia publish=true + */ + export type Nested = + { + abc: + { + a: Uint8Array, + b: + { + c: Names + } + } + } + + /************** Example of a [post] method that uses custom types. ************* + * @ambrosia publish=true, version=1 + */ + export function makeName(firstName: string = "John", lastName: string /** Foo */ = "Doe"): Names + { + let names: Names; + let name: Name = { first: firstName, last: lastName }; + names.push(name); + return (names); + } + + + /********* Function returning number **** + * @ambrosia publish=true + */ + export function return_number(strvalue: string): number + { + if (strvalue == "99") + { + return 99; + } + } + + /********* Function returning string **** + * @ambrosia publish=true + */ + export function returnstring(numvalue: number): string + { + if (numvalue == 9999) + { + return '99'; + } + } + + /********* Function with missing types **** + * Function with missing type information + * @ambrosia publish=true + */ + export function fnWithMissingType(p1, p2: number): void { + } + + /** + * Type with missing type information + * @ambrosia publish=true + */ + export type typeWithMissingType = { p1, p2: number }; + + +} + \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/TestCodeGen.ts b/AmbrosiaTest/JSCodeGen/TestCodeGen.ts new file mode 100644 index 00000000..034a5ffd --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/TestCodeGen.ts @@ -0,0 +1,49 @@ +// Note: Build the ambrosia-node*.tgz in \AmbrosiaJS\Ambrosia-Node\build.ps1 +// The "ambrosia-node" package was installed using "npm install ..\Ambrosia-Node\ambrosia-node-0.0.73.tgz", +// which also installed all the required [production] package dependencies (eg. azure-storage). +import Ambrosia = require("ambrosia-node"); +import Utils = Ambrosia.Utils; +import Meta = Ambrosia.Meta; + + +main(); + + +/***** TO DO +* Code gen options: file type, merge type, other flags (basically, all the parameter of Meta.emitTypeScriptFileFromSource()) +* TS namespaces: nested, co-mingled with non-namespace scoped entities, faithfully carried over to the generated ConsumerInterface.g.ts. +* While emitTypeScriptFileFromSource() should be the subject of the majority of testing [because I expected it will be the most used technique], it would also be good to test emitTypeScriptFile() too. This can be accomplished by calling Meta.publishFromSource() beforehand, which will enable you to leverage your earlier investment in input .ts files +* +* Another possible TO DO: want to run publisher side if the consumer side fails? Maybe not ... since this is ran for neg tests too +*/ + +// A "bootstrap" program that code-gen's the publisher/consumer TypeScript files. +async function main() +{ + try + { + await Ambrosia.initializeAsync(Ambrosia.LBInitMode.CodeGen); + let sourceFile: string = Utils.getCommandLineArg("sourceFile"); + let generatedFileName: string = Utils.getCommandLineArg("generatedFileName", "TestOutput") ?? "TestOutput"; + + // If want to run as separate generation steps for consumer and publisher + //Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.GeneratedFileKind.Consumer, mergeType: Meta.FileMergeType.None, emitGeneratedTime: false, generatedFileName: generatedFileName+"_Consumer" }); + //Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.GeneratedFileKind.Publisher, mergeType: Meta.FileMergeType.None, emitGeneratedTime: false, generatedFileName: generatedFileName+"_Publisher" }); + + // Use this for single call to generate both consumer and publisher + Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.GeneratedFileKind.All, mergeType: Meta.FileMergeType.None, emitGeneratedTime: false, generatedFilePrefix: generatedFileName }); + + + // Something like this instead of just running them both +// if (Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.GeneratedFileKind.Consumer, mergeType: Meta.FileMergeType.None, emitGeneratedTime: false, generatedFileName: generatedFileName+"_Consumer" }) > 0) + // { + // Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.emitTypeScriptFileFromSource(sourceFile, { fileKind: Meta.GeneratedFileKind.Publisher, mergeType: Meta.FileMergeType.None, emitGeneratedTime: false, generatedFileName: generatedFileName+"_Publisher" }); + // } + + + } + catch (error) + { + Utils.tryLog(error); + } +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/ambrosiaConfig-schema.json b/AmbrosiaTest/JSCodeGen/ambrosiaConfig-schema.json new file mode 100644 index 00000000..841145ec --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/ambrosiaConfig-schema.json @@ -0,0 +1,181 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema", + "type": "object", + "description": "Runtime configuration settings for the 'ambrosia-node' package.", + "required": ["instanceName", "icCraPort"], + "properties": { + "$schema" : { + "type": "string", + "description": "The location of the matching JSON schema file, which is required for IntelliSense and auto-completion when editing using VSCode/VS2019+." + }, + "autoRegister": { + "type": "boolean", + "description": "Whether to automatically [re]register this Ambrosia Immortal instance at startup. When true, the following settings must also be explicitly set: icReceivePort, icSendPort, icLogFolder. If [re]registration succeeds, this setting automatically resets to false. Defaults to false.", + "default": false + }, + "instanceName": { + "type": "string", + "description": "The name this Ambrosia Immortal instance will be referred to by all instances (including itself).", + "default": "myInstance" + }, + "icCraPort": { + "type": "number", + "description": "The port number that the Common Runtime for Applications (CRA) layer uses." + }, + "icReceivePort": { + "type": "number", + "description": "The port number that the Immortal Coordinator (IC) receives on. If not provided, it will be read from the registration." + }, + "icSendPort": { + "type": "number", + "description": "The port number that the Immortal Coordinator (IC) sends on. If not provided, it will be read from the registration." + }, + "icLogFolder": { + "type": "string", + "description": "The folder where the Immortal Coordinator (IC) will write its logs (or read logs from if doing \"time-travel debugging\"). If not provided, it will be read from the registration." + }, + "icLogStorageType" : { + "type": "string", + "enum": [ "Files", "Blobs" ], + "description": "The storage type that the Immortal Coordinator (IC) logs will be persisted in. Defaults to \"Files\".", + "default": "Files" + }, + "icBinFolder": { + "type": "string", + "description": "The folder path(s) where the Immortal Coordinator (IC) binaries exist. Separate multiple paths with ';'. If not specified, the 'AMBROSIATOOLS' environment variable will be used." + }, + "icIPv4Address" : { + "type": "string", + "description": "An override IPv4 address for the Immortal Coordinator (IC) to use instead of the local IPv4 address." + }, + "icHostingMode" : { + "type": "string", + "enum": [ "Integrated", "Separated" ], + "description": "The hosting mode for the Immortal Coordinator (IC), which affects where and how the IC runs. If not explicitly set, the value will be computed based on the value provided for (or the omission of) 'icIPv4Address'.", + "default": "Integrated" + }, + "useNetCore": { + "type": "boolean", + "description": "Whether to use .NET Core (instead of .Net Framework) to run the Immortal Coordinator (IC) [this is Windows-only option]. Defaults to false.", + "default": false + }, + "debugStartCheckpoint": { + "type": "number", + "description": "The checkpoint number to start \"time-travel debugging\" from. Defaults to 0 (which means don't debug)." + }, + "debugTestUpgrade": { + "type": "boolean", + "description": "Whether to perform a test upgrade (for debugging/testing purposes). If set to true, a non-zero 'debugStartCheckpoint' must also be specified. Defaults to false.", + "default": false + }, + "logTriggerSizeInMB": { + "type": "number", + "description": "The size (in MB) the log must reach before the IC will take a checkpoint and start a new log." + }, + "isActiveActive": { + "type": "boolean", + "description": "Whether this [primary] instance will run in an active-active configuration. If specified, overrides the registered value." + }, + "replicaNumber": { + "type": "number", + "description": "The replica (secondary) ID this instance will use in an active-active configuration. MUST match the value used when registering the replica with 'AddReplica'." + }, + "appVersion": { + "type": "number", + "description": "The nominal version of this Immortal instance. Used to identify the log sub-folder name (ie. <icInstanceName>_<appVersion>) that will be logged to (or read from if debugStartCheckpoint is specified)." + }, + "upgradeVersion": { + "type": "number", + "description": "The nominal version this Immortal instance should upgrade (migrate) to at startup. Must be greater than 'appVersion' to trigger an upgrade. Test the upgrade first by setting 'debugTestUpgrade' to true." + }, + "activeCode": { + "type": "string", + "enum": [ "VCurrent", "VNext" ], + "default": "VCurrent", + "description": "Which version of application code is currently active (before an upgrade: \"VCurrent\"; immediately after an upgrade: \"VNext\"). This setting is changed automatically during an upgrade, but must be manually changed back to 'VCurrent' (along with updating the code) when preparing for a subsequent upgrade. Defaults to \"VCurrent\"." + }, + "secureNetworkAssemblyName" : { + "type": "string", + "description": "The name of the .NET assembly used to establish a secure network channel between ICs." + }, + "secureNetworkClassName" : { + "type": "string", + "description": "The name of the .NET class (that implements ISecureStreamConnectionDescriptor) in 'secureNetworkAssemblyName'." + }, + "lbOptions": { + "type": "object", + "description": "Options used to control the behavior of the language-binding.", + "properties": { + "deleteLogs": { + "type": "boolean", + "description": "[Debug] Set this to true to clear the IC logs (all prior checkpoints and logged state changes will be PERMANENTLY LOST, and recovery will not run). Defaults to false.", + "default": false + }, + "deleteRemoteCRAConnections": { + "type": "boolean", + "description": "[Debug] Set this to true to delete any previously created non-local CRA connections [from this instance] at startup. Defaults to false.", + "default": false + }, + "allowCustomJSONSerialization": { + "type": "boolean", + "description": "Set this to false to disable the specialized JSON serialization of BigInt and typed-arrays (eg. Uint8Array). Defaults to true.", + "default": true + }, + "typeCheckIncomingPostMethodParameters": { + "type": "boolean", + "description": "Set this to false to skip type-checking the parameters of incoming post methods for correctness against published methods/types. Defaults to true.", + "default": true + }, + "outputLoggingLevel": { + "type": "string", + "enum": [ "Minimal", "Normal", "Verbose" ], + "default": "Normal", + "description": "The level of detail to include in the language-binding output log. Defaults to \"Normal\"." + }, + "outputLogDestination": { + "type": "string", + "enum": [ "Console", "File", "ConsoleAndFile" ], + "description": "Destination(s) where the language-binding will log output. Defaults to 'Console'. While logging to the console is useful during development/debugging, for production set it to 'File' (for performance).", + "default": "Console" + }, + "outputLogFolder": { + "type": "string", + "description": "The folder where the language-binding will write output log files (when outputLogDestination is 'File' or 'ConsoleAndFile'). Defaults to './outputLogs'.", + "default": "./outputLogs" + }, + "allowDisplayOfRpcParams": { + "type": "boolean", + "description": "Set this to true to allow incoming RPC parameters [which can contain privacy/security related content] to be displayed/logged. Defaults to false.", + "default": false + }, + "allowPostMethodTimeouts": { + "type": "boolean", + "description": "Set this to false to disable the timeout feature of post methods. Defaults to true.", + "default": true + }, + "allowPostMethodErrorStacks": { + "type": "boolean", + "description": "Set this to true to enable sending a full stack trace in a post method error result. Defaults to false.", + "default": false + }, + "enableTypeScriptStackTraces": { + "type": "boolean", + "description": "Enables an Error stack trace to refer to TypeScript files/locations (when available) instead of JavaScript files/locations. Defaults to true.", + "default": true + }, + "maxInFlightPostMethods": + { + "type": "number", + "description": "Set this to a positive integer to generate a warning whenever the number of in-flight post methods reaches this threshold. Defaults to -1 (no limit).", + "default": -1 + }, + "messageBytePoolSizeInMB": + { + "type": "number", + "description": "The size (in MB) of the message byte pool used for optimizing message construction. Defaults to 2MB.", + "default": 2 + } + } + } + } +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json b/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json new file mode 100644 index 00000000..ba8b9a21 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json @@ -0,0 +1,28 @@ +{ + "$schema": "./ambrosiaConfig-schema.json", + "autoRegister": false, + "instanceName": "server", + "icCraPort": 2500, + "icReceivePort": 2000, + "icSendPort": 2001, + "icLogFolder": "C:/logs/", + "icBinFolder": "C:/src/Git/PostSledgehammer/AMBROSIA/ImmortalCoordinator/bin/x64/Release;C:/src/Git/PostSledgehammer/AMBROSIA/Ambrosia/Ambrosia/bin/x64/Release", + "useNetCore": false, + "logTriggerSizeInMB": 1024, + "debugStartCheckpoint": 0, + "debugTestUpgrade": false, + "appVersion": 0, + "upgradeVersion": 0, + "activeCode": "VCurrent", + "lbOptions": + { + "deleteLogs": true, + "deleteRemoteCRAConnections": false, + "outputLogDestination": "ConsoleAndFile", + "outputLogFolder": "./outputLogs", + "outputLoggingLevel": "Normal", + "allowDisplayOfRpcParams": true, + "allowPostMethodTimeouts": true, + "enableTypeScriptStackTraces": true + } +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json.old b/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json.old new file mode 100644 index 00000000..ba8b9a21 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/ambrosiaConfig.json.old @@ -0,0 +1,28 @@ +{ + "$schema": "./ambrosiaConfig-schema.json", + "autoRegister": false, + "instanceName": "server", + "icCraPort": 2500, + "icReceivePort": 2000, + "icSendPort": 2001, + "icLogFolder": "C:/logs/", + "icBinFolder": "C:/src/Git/PostSledgehammer/AMBROSIA/ImmortalCoordinator/bin/x64/Release;C:/src/Git/PostSledgehammer/AMBROSIA/Ambrosia/Ambrosia/bin/x64/Release", + "useNetCore": false, + "logTriggerSizeInMB": 1024, + "debugStartCheckpoint": 0, + "debugTestUpgrade": false, + "appVersion": 0, + "upgradeVersion": 0, + "activeCode": "VCurrent", + "lbOptions": + { + "deleteLogs": true, + "deleteRemoteCRAConnections": false, + "outputLogDestination": "ConsoleAndFile", + "outputLogFolder": "./outputLogs", + "outputLoggingLevel": "Normal", + "allowDisplayOfRpcParams": true, + "allowPostMethodTimeouts": true, + "enableTypeScriptStackTraces": true + } +} \ No newline at end of file diff --git a/AmbrosiaTest/JSCodeGen/package-lock.json b/AmbrosiaTest/JSCodeGen/package-lock.json new file mode 100644 index 00000000..f7c05d72 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/package-lock.json @@ -0,0 +1,520 @@ +{ + "name": "jscode-gen", + "version": "0.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@types/node": { + "version": "14.14.43", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.14.43.tgz", + "integrity": "sha512-3pwDJjp1PWacPTpH0LcfhgjvurQvrZFBrC6xxjaUEZ7ifUtT32jtjPxEMMblpqd2Mvx+k8haqQJLQxolyGN/cQ==" + }, + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + }, + "ambrosia-node": { + "version": "file:ambrosia-node-0.0.79.tgz", + "integrity": "sha512-XWOZhGMhc822WQ/11O0yrf7slWRuHuvOWmlSrs5XDJGvVW+mhDBQkoqvNu9OiKOxguijAYe1wBSq2Hi/ha0NKw==", + "requires": { + "@types/node": "^14.14.37", + "azure-storage": "^2.10.3", + "source-map-support": "^0.5.19" + } + }, + "asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" + }, + "aws4": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", + "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" + }, + "azure-storage": { + "version": "2.10.3", + "resolved": "https://registry.npmjs.org/azure-storage/-/azure-storage-2.10.3.tgz", + "integrity": "sha512-IGLs5Xj6kO8Ii90KerQrrwuJKexLgSwYC4oLWmc11mzKe7Jt2E5IVg+ZQ8K53YWZACtVTMBNO3iGuA+4ipjJxQ==", + "requires": { + "browserify-mime": "~1.2.9", + "extend": "^3.0.2", + "json-edm-parser": "0.1.2", + "md5.js": "1.3.4", + "readable-stream": "~2.0.0", + "request": "^2.86.0", + "underscore": "~1.8.3", + "uuid": "^3.0.0", + "validator": "~9.4.1", + "xml2js": "0.2.8", + "xmlbuilder": "^9.0.7" + } + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "browserify-mime": { + "version": "1.2.9", + "resolved": "https://registry.npmjs.org/browserify-mime/-/browserify-mime-1.2.9.tgz", + "integrity": "sha1-rrGvKN5sDXpqLOQK22j/GEIq8x8=" + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" + }, + "combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" + }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" + }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" + }, + "form-data": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", + "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + } + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" + }, + "har-validator": { + "version": "5.1.5", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", + "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", + "requires": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + } + }, + "hash-base": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/hash-base/-/hash-base-3.1.0.tgz", + "integrity": "sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==", + "requires": { + "inherits": "^2.0.4", + "readable-stream": "^3.6.0", + "safe-buffer": "^5.2.0" + }, + "dependencies": { + "readable-stream": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", + "requires": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + } + } + } + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" + }, + "json-edm-parser": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/json-edm-parser/-/json-edm-parser-0.1.2.tgz", + "integrity": "sha1-HmCw/vG8CvZ7wNFG393lSGzWFbQ=", + "requires": { + "jsonparse": "~1.2.0" + } + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + }, + "jsonparse": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.2.0.tgz", + "integrity": "sha1-XAxWhRBxYOcv50ib3eoLRMK8Z70=" + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "md5.js": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/md5.js/-/md5.js-1.3.4.tgz", + "integrity": "sha1-6b296UogpawYsENA/Fdk1bCdkB0=", + "requires": { + "hash-base": "^3.0.0", + "inherits": "^2.0.1" + } + }, + "mime-db": { + "version": "1.47.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz", + "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==" + }, + "mime-types": { + "version": "2.1.30", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz", + "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==", + "requires": { + "mime-db": "1.47.0" + } + }, + "oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + }, + "process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=" + }, + "psl": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", + "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + }, + "readable-stream": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz", + "integrity": "sha1-j5A0HmilPMySh4jaz80Rs265t44=", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.1", + "isarray": "~1.0.0", + "process-nextick-args": "~1.0.6", + "string_decoder": "~0.10.x", + "util-deprecate": "~1.0.1" + }, + "dependencies": { + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=" + } + } + }, + "request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + } + }, + "safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "sax": { + "version": "0.5.8", + "resolved": "https://registry.npmjs.org/sax/-/sax-0.5.8.tgz", + "integrity": "sha1-1HLbIo6zMcJQaw6MFVJK25OdEsE=" + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "source-map-support": { + "version": "0.5.19", + "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.19.tgz", + "integrity": "sha512-Wonm7zOCIJzBGQdB+thsPar0kYuCIzYvxZwlBa87yi/Mdjv7Tip2cyVbLj5o0cFPN4EVkuTwb3GDDyUx2DGnGw==", + "requires": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "sshpk": { + "version": "1.16.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", + "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "requires": { + "safe-buffer": "~5.2.0" + } + }, + "tough-cookie": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", + "requires": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + } + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" + }, + "typescript": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.2.4.tgz", + "integrity": "sha512-V+evlYHZnQkaz8TRBuxTA92yZBPotr5H+WhQ7bD3hZUndx5tGOa1fuCgeSjxAzM1RiN5IzvadIXTVefuuwZCRg==" + }, + "underscore": { + "version": "1.8.3", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.8.3.tgz", + "integrity": "sha1-Tz+1OxBuYJf8+ctBCfKl6b36UCI=" + }, + "uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "requires": { + "punycode": "^2.1.0" + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + }, + "validator": { + "version": "9.4.1", + "resolved": "https://registry.npmjs.org/validator/-/validator-9.4.1.tgz", + "integrity": "sha512-YV5KjzvRmSyJ1ee/Dm5UED0G+1L4GZnLN3w6/T+zZm8scVua4sOhYKWTUrKa0H/tMiJyO9QLHMPN+9mB/aMunA==" + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "xml2js": { + "version": "0.2.8", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.2.8.tgz", + "integrity": "sha1-m4FpCTFjH/CdGVdUn69U9PmAs8I=", + "requires": { + "sax": "0.5.x" + } + }, + "xmlbuilder": { + "version": "9.0.7", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", + "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=" + } + } +} diff --git a/AmbrosiaTest/JSCodeGen/package.json b/AmbrosiaTest/JSCodeGen/package.json new file mode 100644 index 00000000..0165540d --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/package.json @@ -0,0 +1,13 @@ +{ + "name": "jscode-gen", + "version": "0.0.0", + "description": "JSCodeGen", + "main": "app.js", + "author": { + "name": "" + }, + "dependencies": { + "ambrosia-node": "file:ambrosia-node-0.0.79.tgz", + "typescript": "^4.2.4" + } +} diff --git a/AmbrosiaTest/JSCodeGen/tsconfig.json b/AmbrosiaTest/JSCodeGen/tsconfig.json new file mode 100644 index 00000000..f2d49088 --- /dev/null +++ b/AmbrosiaTest/JSCodeGen/tsconfig.json @@ -0,0 +1,13 @@ +{ + "compilerOptions": { + "target": "es6", + "module": "CommonJS", + "sourceMap": true, + "declaration": true, + "outDir": "./out", // There will be a generated .js and .js.map for each compiled .ts file, so we keep them separate from the source + "listEmittedFiles": true + }, + "files": [ + "TestCodeGen.ts" // Note that all imported files will be automatically included/compiled, so we don't need to list them all explicitly + ] +} \ No newline at end of file diff --git a/Architecture.svg b/Architecture.svg new file mode 100644 index 00000000..3c0f5a5e --- /dev/null +++ b/Architecture.svg @@ -0,0 +1,284 @@ + + + + + + + + + + + + + + + + + + + + + + + + + Page-1 + + + + + Sheet.1 + + Disk storage + + + + + + + + + + + + + + + Disk storage.6 + + + + + + + + + + + + + + + Disk storage.7 + + + + + + + + + + + + + + + + Rectangle.42 + Immortal Coordinator + + + + + + + Immortal Coordinator + + Sheet.6 + + Disk storage + + + + + + + + + + + + + + + Disk storage.6 + + + + + + + + + + + + + + + Disk storage.7 + + + + + + + + + + + + + + + + Rectangle.50 + Immortal Coordinator + + + + + + + Immortal Coordinator + + Sheet.11 + + Sheet.12 + + Rectangle.48 + Application + + + + + + + Application + + Rectangle.51 + AMBROSIA Binding + + + + + + + AMBROSIA Binding + + + Rectangle.54 + + + + + + + + Sheet.16 + + Sheet.17 + + Rectangle.48 + Application + + + + + + + Application + + Rectangle.51 + AMBROSIA Binding + + + + + + + AMBROSIA Binding + + + Rectangle.54 + + + + + + + + Dynamic connector.61 + + + + Dynamic connector.63 + + + + Dynamic connector.64 + + + + Dynamic connector.66 + + + + Dynamic connector.68 + + + + Rectangle.71 + + + + + + + Rectangle.72 + + + + + + + Sheet.28 + Immortal 1 + + + + Immortal 1 + + Sheet.29 + Immortal 2 + + + + Immortal 2 + + diff --git a/AzureBlobsLogPicker/AzureBlobsLogPicker.cs b/AzureBlobsLogPicker/AzureBlobsLogPicker.cs new file mode 100644 index 00000000..8c5a60fc --- /dev/null +++ b/AzureBlobsLogPicker/AzureBlobsLogPicker.cs @@ -0,0 +1,414 @@ +using System; +using Azure.Storage.Blobs; +using Azure.Storage.Blobs.Specialized; +using Azure; +using Azure.Storage.Blobs.Models; +using System.Threading.Tasks; +using System.Threading; +using CRA.ClientLibrary; +using System.IO; +using System.Collections.Generic; +using System.Diagnostics; +using System.ComponentModel; + +namespace Ambrosia +{ + internal class AzureBlobsLogWriter : IDisposable, ILogWriter + { + static Dictionary _previousOpenAttempts = new Dictionary(); + BlobContainerClient _blobsContainerClient; + AppendBlobClient _logClient; + MemoryStream _bytesToSend; + BlobLeaseClient _leaseClient; + BlobLease _curLease; + AppendBlobRequestConditions _leaseCondition; + Thread _leaseRenewThread; + IDictionary _blobMetadata; + volatile bool _stopRelockThread; + volatile bool _relockThreadStopped; + + public AzureBlobsLogWriter(BlobContainerClient blobsContainerClient, + string fileName, + bool appendOpen = false) + { + fileName = AzureBlobsLogsInterface.PathFixer(fileName); + _blobsContainerClient = blobsContainerClient; + _logClient = _blobsContainerClient.GetAppendBlobClient(fileName); + ETag currentETag; + if (_previousOpenAttempts.ContainsKey(fileName) && appendOpen) + { + // We've opened this blob before and want to be non-destructive. We don't need to CreateIfNotExists, which could be VERY slow. + currentETag = _logClient.GetProperties().Value.ETag; + } + else + { + try + { + // Create the file non-destructively if needed, guaranteeing write continuity on creation by grabbing the etag of the create, if needed + if (appendOpen) + { + var response = _logClient.CreateIfNotExists(); + if (response != null) + { + currentETag = response.Value.ETag; + } + else + { + currentETag = _logClient.GetProperties().Value.ETag; + } + } + else + { + currentETag = _logClient.Create().Value.ETag; + } + } + catch { currentETag = _logClient.GetProperties().Value.ETag; } + } + // Try to grab the blob lease + _leaseClient = _logClient.GetBlobLeaseClient(); + // The blob hasn't be touched since the last time. This is a candidate for breaking the lease. + if (_previousOpenAttempts.ContainsKey(fileName) && (_previousOpenAttempts[fileName].ToString().Equals(currentETag.ToString()))) + { + _previousOpenAttempts[fileName] = currentETag; + // The blob hasn't been updated. Try to break the lease and reacquire + var requestConditions = new BlobRequestConditions(); + requestConditions = new BlobRequestConditions(); + requestConditions.IfMatch = currentETag; + // If the condition fails in the break, it's because someone else managed to touch the file, so give up + ETag newETag; + try + { + newETag = _leaseClient.Break(null, requestConditions).Value.ETag; + } + catch (Exception e) { newETag = currentETag; } + var etagCondition = new RequestConditions(); + etagCondition.IfMatch = newETag; + // If the condition fails, someone snuck in and grabbed the lock before we could. Give up. + _curLease = _leaseClient.Acquire(TimeSpan.FromSeconds(-1), etagCondition).Value; + } + else + { + // Not a candidate for breaking the lease. Just try to acquire. + _previousOpenAttempts[fileName] = currentETag; + _curLease = _leaseClient.Acquire(TimeSpan.FromSeconds(-1)).Value; + } + + _leaseCondition = new AppendBlobRequestConditions(); + _leaseCondition.LeaseId = _curLease.LeaseId; + // We got the lease! Set up thread to periodically touch the blob to prevent others from breaking the lease. + _blobMetadata = _logClient.GetProperties().Value.Metadata; + _stopRelockThread = false; + _relockThreadStopped = false; + _leaseRenewThread = new Thread(() => + { + while (!_stopRelockThread) + { + Thread.Sleep(100); + var response = _logClient.SetMetadata(_blobMetadata, _leaseCondition); + } + _relockThreadStopped = true; + }) { IsBackground = true }; + _leaseRenewThread.Start(); + _bytesToSend = new MemoryStream(); + Debug.Assert(_logClient.Exists()); + } + + public ulong FileSize { + get + { + BlobProperties blobProps = _logClient.GetProperties(); + return (ulong) blobProps.ContentLength; + } + } + + public void Dispose() + { + _stopRelockThread = true; + while (!_relockThreadStopped) { Thread.Sleep(100); } + while(_leaseRenewThread.IsAlive); + _leaseClient.Release(); + } + + public void Flush() + { + var numSendBytes = _bytesToSend.Length; + var OrigSendBytes = numSendBytes; + var buffer = _bytesToSend.GetBuffer(); + int bufferPosition = 0; + while (numSendBytes > 0) + { + int numAppendBytes = (int) Math.Min(numSendBytes, 1024*1024); + var sendStream = new MemoryStream(buffer, bufferPosition, numAppendBytes); + _logClient.AppendBlock(sendStream, null, _leaseCondition); + bufferPosition += numAppendBytes; + numSendBytes -= numAppendBytes; + } + Debug.Assert(OrigSendBytes == _bytesToSend.Length); + _bytesToSend.Position = 0; + _bytesToSend.SetLength(0); + } + + public async Task FlushAsync() + { + var numSendBytes = _bytesToSend.Length; + var OrigSendBytes = numSendBytes; + var buffer = _bytesToSend.GetBuffer(); + int bufferPosition = 0; + while (numSendBytes > 0) + { + int numAppendBytes = (int)Math.Min(numSendBytes, 256 * 1024); + var sendStream = new MemoryStream(buffer, bufferPosition, numAppendBytes); + await _logClient.AppendBlockAsync(sendStream, null, _leaseCondition); + bufferPosition += numAppendBytes; + numSendBytes -= numAppendBytes; + } + Debug.Assert(OrigSendBytes == _bytesToSend.Length); + _bytesToSend.Position = 0; + _bytesToSend.SetLength(0); + } + + public void WriteInt(int value) + { + _bytesToSend.WriteInt(value); + } + public void WriteIntFixed(int value) + { + _bytesToSend.WriteIntFixed(value); + } + + public void WriteLongFixed(long value) + { + _bytesToSend.WriteLongFixed(value); + } + public void Write(byte[] buffer, + int offset, + int length) + { + _bytesToSend.Write(buffer, offset, length); + } + + public async Task WriteAsync(byte[] buffer, + int offset, + int length) + { + await _bytesToSend.WriteAsync(buffer, offset, length); + } + } + + internal class AzureBlobsLogWriterStatics : ILogWriterStatic + { + BlobContainerClient _blobsContainerClient; + + public AzureBlobsLogWriterStatics(BlobContainerClient blobsContainerClient) + { + _blobsContainerClient = blobsContainerClient; + } + + public void CreateDirectoryIfNotExists(string path) + { + path = AzureBlobsLogsInterface.PathFixer(path); + var logClient = _blobsContainerClient.GetAppendBlobClient(path); + if (!logClient.Exists()) + { + logClient.Create(); + } + } + + public bool DirectoryExists(string path) + { + path = AzureBlobsLogsInterface.PathFixer(path); + return FileExists(path); + } + + public bool FileExists(string path) + { + path = AzureBlobsLogsInterface.PathFixer(path); + var logClient = _blobsContainerClient.GetAppendBlobClient(path); + return logClient.Exists(); + } + + public void DeleteFile(string path) + { + // This operation hangs mysteriously with Azure blobs sometimes, so I just won't do it. This will leave the kill file around, but it causes no harm +/* path = AzureBlobsLogsInterface.PathFixer(path); + Console.WriteLine("Deleting " + path); + var logClient = _blobsContainerClient.GetAppendBlobClient(path); + logClient.DeleteIfExists();*/ + } + + public ILogWriter Generate(string fileName, + uint chunkSize, + uint maxChunksPerWrite, + bool appendOpen = false) + { + return new AzureBlobsLogWriter(_blobsContainerClient, fileName, appendOpen); + } + } + + public class AzureBlobsLogReader : ILogReader + { + BlobDownloadInfo _download; + BlobClient _logClient; + long _streamOffset; + + public long Position + { + get { return _download.Content.Position + _streamOffset; } + set + { + _download.Content.Dispose(); + if (value > 0) + { + _streamOffset = value - 1; + var downloadRange = new HttpRange(value - 1); + _download = _logClient.Download(downloadRange); + _download.Content.ReadByte(); + } + else + { + _streamOffset = 0; + _download = _logClient.Download(); + } + } + } + + public AzureBlobsLogReader(BlobContainerClient blobsContainerClient, string fileName) + { + fileName = AzureBlobsLogsInterface.PathFixer(fileName); + _logClient = blobsContainerClient.GetBlobClient(fileName); + var downloadRange = new HttpRange(0); + _download = _logClient.Download(downloadRange); + } + + public async Task> ReadIntAsync(byte[] buffer) + { + return await _download.Content.ReadIntAsync(buffer); + } + + public async Task> ReadIntAsync(byte[] buffer, CancellationToken ct) + { + return await _download.Content.ReadIntAsync(buffer, ct); + } + + public Tuple ReadInt(byte[] buffer) + { + return _download.Content.ReadInt(buffer); + } + + public int ReadInt() + { + return _download.Content.ReadInt(); + } + + public async Task ReadAllRequiredBytesAsync(byte[] buffer, + int offset, + int count, + CancellationToken ct) + { + return await _download.Content.ReadAllRequiredBytesAsync(buffer, offset, count, ct); + } + + public async Task ReadAllRequiredBytesAsync(byte[] buffer, + int offset, + int count) + { + return await _download.Content.ReadAllRequiredBytesAsync(buffer, offset, count); + } + + public int ReadAllRequiredBytes(byte[] buffer, + int offset, + int count) + { + return _download.Content.ReadAllRequiredBytes(buffer, offset, count); + } + + public long ReadLongFixed() + { + return _download.Content.ReadLongFixed(); + } + + public int ReadIntFixed() + { + return _download.Content.ReadIntFixed(); + } + + public byte[] ReadByteArray() + { + return _download.Content.ReadByteArray(); + } + + public int ReadByte() + { + return _download.Content.ReadByte(); + } + + public int Read(byte[] buffer, int offset, int count) + { + return _download.Content.Read(buffer, offset, count); + } + + public void Dispose() + { + _download.Dispose(); + } + } + + internal class AzureBlobsLogReaderStatics : ILogReaderStatic + { + BlobContainerClient _blobsContainerClient; + + public AzureBlobsLogReaderStatics(BlobContainerClient blobsContainerClient) + { + _blobsContainerClient = blobsContainerClient; + } + + public ILogReader Generate(string fileName) + { + return new AzureBlobsLogReader(_blobsContainerClient, fileName); + } + } + + + public static class AzureBlobsLogsInterface + { + static BlobServiceClient _blobsClient; + static BlobContainerClient _blobsContainerClient; + + internal static string PathFixer(string fileName) + { + var substrings = fileName.Split('/'); + string fixedFileName = ""; + bool emptyFileName = true; + foreach (var substring in substrings) + { + var subdirCands = substring.Split('\\'); + foreach (var subdir in subdirCands) + { + if (subdir.CompareTo("") != 0) + { + if (emptyFileName) + { + fixedFileName = subdir; + emptyFileName = false; + } + else + { + fixedFileName += "/" + subdir; + } + } + } + } + return fixedFileName; + } + + public static void SetToAzureBlobsLogs() + { + var storageConnectionString = Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING"); + _blobsClient = new BlobServiceClient(storageConnectionString); + _blobsContainerClient = _blobsClient.GetBlobContainerClient("ambrosialogs"); + _blobsContainerClient.CreateIfNotExists(); + LogReaderStaticPicker.curStatic = new AzureBlobsLogReaderStatics(_blobsContainerClient); + LogWriterStaticPicker.curStatic = new AzureBlobsLogWriterStatics(_blobsContainerClient); + } + } +} diff --git a/AzureBlobsLogPicker/AzureBlobsLogPicker.csproj b/AzureBlobsLogPicker/AzureBlobsLogPicker.csproj new file mode 100644 index 00000000..ff3047e9 --- /dev/null +++ b/AzureBlobsLogPicker/AzureBlobsLogPicker.csproj @@ -0,0 +1,29 @@ + + + + netstandard2.0 + true + ../Ambrosia/Ambrosia.snk + + + + + + + + + + + + + + + + + + + + + + + diff --git a/BuildAmbrosiaAfterNugetChange.ps1 b/BuildAmbrosiaAfterNugetChange.ps1 new file mode 100644 index 00000000..f8e6eae7 --- /dev/null +++ b/BuildAmbrosiaAfterNugetChange.ps1 @@ -0,0 +1,99 @@ +########################################### +# +# Script to build Ambrosia projects locally that are related to Nuget changes +# Handles the code generation and builds that get checked in so all done in a script +# +# Call: +# .\BuildAmbrosiaAfterNugetChange.ps1 +# +# Note: Run this script AFTER running UpdateAmbrosiaForNugetRelease.ps1 +# This will generate all the necessary files and rebuild everything locally with the new nuget references +# +# Note: The msbuild.exe for VS 2017 needs to be in the path. Most likely it is here (C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\MSBuild\15.0\Bin) +# or run from Command Prompt for VS 2017 - then need to: powershell.exe -noexit -file BuildAmbrosiaAfterNugetChange.ps1 +# +# +########################################### + + +########################################################################## +# +# Build projects which also includes generating files +# +########################################################################## + +$CurrentDir = $(get-location); +$BuildPlatform = "X64"; +$BuildConfiguration = "Release"; +$BuildVisualStudioVersion = "15.0"; + +Write-output "------------- Clean Everything first -------------" +msbuild.exe $CurrentDir'\Clients\CSharp\AmbrosiaCS\AmbrosiaCS.sln' /t:"Clean" /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTest\PerformanceTest.sln' /t:"Clean" /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTestInterruptible\PerformanceTest.sln' /t:"Clean" /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\HelloWorld.sln' /t:"Clean" /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\StreamingDemo\StreamingDemo.sln' /t:"Clean" /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +Write-output "------------- Finish Cleaning everything -------------" + +Write-output "------------- Build AmbrosiaCS -------------" +msbuild.exe $CurrentDir'\Clients\CSharp\AmbrosiaCS\AmbrosiaCS.sln' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + +Write-output "------------- Build PerformanceTest -------------" +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTest\API\ServerAPI.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTest\ClientAPI\ClientAPI.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + +# Generate assemblies from PerformanceTest Dir +cd InternalImmortals\PerformanceTest +.\Generate-Assemblies.ps1 +cd .. +cd .. +# Build entire solution -- TO DO - NOT WORKING -- Works if run in VS though +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTest\PerformanceTest.sln' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + + +Write-output "------------- Build PerformanceTestInterruptible -------------" +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTestInterruptible\API\ServerAPI.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTestInterruptible\IJob\IJob.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +# Generate assemblies from PerformanceTest Dir +cd InternalImmortals\PerformanceTestInterruptible +.\Generate-Assemblies.ps1 +cd .. +cd .. +# Build entire solution -- TO DO - NOT WORKING -- Works if run in VS though +msbuild.exe $CurrentDir'\InternalImmortals\PerformanceTestInterruptible\PerformanceTest.sln' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + + +Write-output "------------- Build HelloWorld -------------" +# Build interfaces - 3 client / 1 server +msbuild.exe $CurrentDir'\Samples\HelloWorld\GeneratedSourceFiles\Client1Interfaces\latest\Client1Interfaces.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\GeneratedSourceFiles\Client2Interfaces\latest\Client2Interfaces.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\GeneratedSourceFiles\Client3Interfaces\latest\Client3Interfaces.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\GeneratedSourceFiles\ServerInterfaces\latest\ServerInterfaces.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +# Build I* projects - 3 client / 1 server +msbuild.exe $CurrentDir'\Samples\HelloWorld\IClient1\IClient1.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\IClient2\IClient2.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\IClient3\IClient3.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\HelloWorld\ServerAPI\IServer.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +# Generate assemblies +cd Samples\HelloWorld +.\Generate-Assemblies.ps1 +cd .. +cd .. +# Build entire solution -- TO DO - NOT WORKING -- Works if run in VS though +msbuild.exe $CurrentDir'\Samples\HelloWorld\HelloWorld.sln' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + + +Write-output "------------- Build StreamingDemo -------------" +msbuild.exe $CurrentDir'\Samples\StreamingDemo\AnalyticsAPI\AnalyticsAPI.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +msbuild.exe $CurrentDir'\Samples\StreamingDemo\DashboardAPI\DashboardAPI.csproj' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion +# Generate assemblies +cd Samples\StreamingDemo +.\Generate-Assemblies.ps1 +cd .. +cd .. +msbuild.exe $CurrentDir'\Samples\StreamingDemo\StreamingDemo.sln' /nologo /nr:false /p:platform=$BuildPlatform /p:configuration=$BuildConfiguration /p:VisualStudioVersion=$BuildVisualStudioVersion + +Write-output "--------------------------------------------" +Write-output "------------- DONE!!! -------------" +Write-output "--------------------------------------------" + diff --git a/BuildCore.cmd b/BuildCore.cmd index 06c416ba..c4886388 100644 --- a/BuildCore.cmd +++ b/BuildCore.cmd @@ -1,4 +1,9 @@ -dotnet publish -o ./bin -c Release -f netcoreapp2.0 -r win10-x64 Ambrosia/Ambrosia/Ambrosia.csproj -dotnet publish -o ./bin -c Release -f netcoreapp2.0 -r win10-x64 ImmortalCoordinator/ImmortalCoordinator.csproj -dotnet publish -o ./bin -c Release -f netcoreapp2.0 -r win10-x64 Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj -dotnet publish -o ./bin -c Release -f netcoreapp2.0 -r win10-x64 DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj +@echo off +set BuildConfig=Release +if "%1" == "debug" set BuildConfig=Debug + +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/netcoreapp3.1 -c %BuildConfig% -f netcoreapp3.1 -r win10-x64 Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/netcoreapp3.1 -c %BuildConfig% -f netstandard2.0 Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/netcoreapp3.1 -c %BuildConfig% -f netcoreapp3.1 -r win10-x64 ImmortalCoordinator/ImmortalCoordinator.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/netcoreapp3.1 -c %BuildConfig% -f netcoreapp3.1 -r win10-x64 Ambrosia/Ambrosia/Ambrosia.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/netcoreapp3.1 -c %BuildConfig% -f netcoreapp3.1 -r win10-x64 DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj diff --git a/Buildnet461.cmd b/Buildnet461.cmd new file mode 100644 index 00000000..640581b1 --- /dev/null +++ b/Buildnet461.cmd @@ -0,0 +1,9 @@ +@echo off +set BuildConfig=Release +if "%1" == "debug" set BuildConfig=Debug + +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/net461 -c %BuildConfig% -f net461 -r win10-x64 Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/net461 -c %BuildConfig% -f netstandard2.0 -r win10-x64 Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/net461 -c %BuildConfig% -f net461 -r win10-x64 ImmortalCoordinator/ImmortalCoordinator.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/net461 -c %BuildConfig% -f net461 -r win10-x64 Ambrosia/Ambrosia/Ambrosia.csproj +dotnet publish -o /ambrosia/ambrosia/bin/x64/Release/net461 -c %BuildConfig% -f net461 -r win10-x64 DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..99b5462f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,8 @@ +# Contributing + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +See the [CONTRIBUTING](./CONTRIBUTING) folder for information and +documentation for new contributors to the project, or those adding +AMBROSIA language bindings for additional languages. + diff --git a/CONTRIBUTING/AMBROSIA_client_network_protocol.md b/CONTRIBUTING/AMBROSIA_client_network_protocol.md new file mode 100644 index 00000000..2b3b8bf2 --- /dev/null +++ b/CONTRIBUTING/AMBROSIA_client_network_protocol.md @@ -0,0 +1,299 @@ + +Client Protocol for AMBROSIA network participants +================================================= + +Each application has an AMBROSIA reliability coordinator assigned to it. +The coordinator is located within the same physical machine/container, and +must survive or fail with the application process. This process separation +is designed to minimize assumptions about the application and maximize +language-agnosticity. The combination of an application and its associated IC forms an "immortal". +The coordinator (also known as an Immortal Coordinator) communicates +via TCP/IP over 2 local sockets with the application through a language-specific +binding. This document covers how a language binding should communicate with +it's Immortal Coordinator, providing a high-level spec for a language binding author. + +Overview and Terminology +------------------------ + +In AMBROSIA a set of application processes (services) serve as communication +endpoints, communicating *exclusively* through the network of Immortal +Coordinators, which collectively serve as the message bus. The individual +processes (or objects contained therein) are the *Immortals* which survive the +failure of individual machines. + +Below we use the following terminology: + + * Committer ID - an arbitrary (32 bit) identifier for a communication endpoint + (an app or service) in the network of running "immortals". This is typically + generated automatically the first time each application process starts. + It is distinct from the destination *name*. + + * Destination name - the string identifying a communication endpoint (typically the service/app name), often + human readable. + + * Sequence ID - the (monotonically increasing) number of a log entry. Note that + each logical immortal has its own log. + + * "Async/await" RPCs - are *futures*; they return a value back to the + caller. Because AMBROSIA ensures reliability, they are semantically + identical to function calls, without introducing new failure modes such as + timeouts or disconnections. + + * "Fire and Forget" RPCs - launch a remote computation, but provide no + information back to the caller. Note that even an async/await RPC with a + "void" return value indicates more to the caller (namely, that the remote + computation has completed). + + * "Language Binding" (LB) - the language-specific AMBROSIA binding that + exposes the programming interfaces and handles all communication with + the associated Immortal Coordinator (IC). + +Required Helper Functions +------------------------- + +In order to build the binary message formats described below, we assume that the +new client software can access TCP sockets and additionally implements the +following serialized datatypes. + + * ZigZagInt - a zig-zag encoded 32-bit signed integer + * ZigZagLong - a zig-zag encoded 64-bit signed integer + * IntFixed - a 32-bit little endian number + * LongFixed - a 64-bit little endian number + +The variable-length integers are in the same format used by, for example, +[Protobufs](https://developers.google.com/protocol-buffers/docs/encoding). + + +Message Formats +--------------- + + * LogRecords - *log header* followed by zero or more messages. + * Message - all regular AMBROSIA messages + +All information received from the reliability coordinator is in the form of a sequence of log records. +Each log record has a 24 byte header, followed by the actual record contents. The header is as follows: + + * Bytes [0-3]: The committer ID for the service, this should be constant for all records for the lifetime of the service. The format is IntFixed. + * Bytes [4-7]: The size of the whole log record, in bytes, including the header. The format is IntFixed. + * Bytes [8-15]: The check bytes to check the integrity of the log record. The format is LongFixed. + * Bytes [16-23]: The log record sequence ID. Excluding records labeled with sequence ID -1, these should be in order. The format is LongFixed. + +The rest of the record is a sequence of messages, packed tightly, each with the following format: + + * Size : Number of bytes taken by Type and Data; 1 to 5 bytes, depending on value (format ZigZagInt). + * Type : A byte which indicates the type of message. + * Data : A variable length sequence of bytes which depends on the message type. + + +All information sent to the reliability coordinator is in the form of a sequence of messages with the format specified above. +Message types and associated data which may be sent to or received by services/apps: + + * 15 - `BecomingPrimary` (Received) : No data + + * 14 - `TrimTo`: Only used in IC to IC communication. The IC will never send this message type to the LB. + + * 13 - `CountReplayableRPCBatchByte` (Recieved): Similar to `RPCBatch`, but the data also includes a count (ZigZagInt) + of non-Impulse (replayable) messages after the count of RPC messages. + + * 12 `UpgradeService` (Received): No data + + * 11 `TakeBecomingPrimaryCheckpoint` (Received): No data + + * 10 `UpgradeTakeCheckpoint` (Received): No data + + * 9 `InitialMessage` (Sent/Received): Data can be any arbitrary bytes. The `InitialMessage` message will simply be echoed back + to the service which can use it to bootstrap service start behavior. In the C# language binding, the data is a complete incoming RPC + message that will be the very first RPC message it receives. + + * 8 `Checkpoint` (Sent/Received): The data is a single 64 bit number (ZigZagLong). + This message is immediately followed (no additional header) by the checkpoint itself, + which is a binary blob. + The reason that checkpoints are not sent in the message payload directly is + so that they can have a 64-bit instead of 32-bit length, in order to support + large checkpoints. + + * 5 `RPCBatch` (Sent/Received): Data is a count (ZigZagInt) of the number of RPC messages in the batch, followed by the corresponding RPC messages. + When sent by the LB, this message is essentially a performance hint to the IC that enables optimized processing of the RPCs, even for as few as 2 RPCs. + + * 2 `TakeCheckpoint` (Sent/Received): No data. + When sent by the LB, this message requests the IC to take a checkpoint immediately rather than waiting until the log reaches the IC's `--logTriggerSize` (which defaults to 1024 MB). + + * 1 `AttachTo` (Sent): Data is the destination instance name in UTF-8. The name must match the name used when the instance was logically created (registered). + The `AttachTo` message must be sent (once) for each outgoing RPC destination, excluding the local instance, prior to sending an RPC. + + * 0 - Incoming `RPC` (Received): + + - Byte 0 of data is reserved (RPC or return value). + - Next is a variable length int (ZigZagInt) which is a method ID. Negative method ID's are reserved for system use. + - The next byte is the RPC type: 0 = Async/Await, 1 = Fire-and-Forget (aka. Fork), 2 = Impulse. + - The remaining bytes are the serialized arguments packed tightly. + + * 0 - Outgoing `RPC` (Sent): + + - First is a variable length int (ZigZagInt) which is the length of the destination service/app name. For a self call, this should be set to 0 and the following field omitted. + - Next are the actual bytes (in UTF-8) for the name of the destination service/app. + - Next follow all four fields listed above under "Incoming RPC". + That is, an Outgoing RPC is just an incoming RPC with two extra fields on the front. + + +Communication Protocols +----------------------- + +### Starting up: + +If starting up for the first time: + + * Receive a `TakeBecomingPrimaryCheckpoint` message + * Send an `InitialMessage` + * Send a `Checkpoint` message + * Normal processing + +If recovering, but not upgrading, a standalone (non-active/active) immortal: + + * Receive a `Checkpoint` message + * Receive logged replay messages + * Receive `TakeBecomingPrimaryCheckpoint` message + * Send a `Checkpoint` message + * Normal processing + +If recovering, but not upgrading, in active/active: + + * Receive a `Checkpoint` message + * Receive logged replay messages + * Receive `BecomingPrimary` message + * Normal processing + +If recovering and upgrading a standalone immortal, or starting as an upgrading secondary in active/active: + + * Receive a `Checkpoint` message + * Receive logged replay messages + > Note: Replayed messages MUST be processed by the old (pre-upgrade) code to prevent changing the generated sequence + of messages that will be sent to the IC as a consequence of replay.
Further, this requires that your + service (application) is capable of dynamically switching (at runtime) from the old to the new version of its code. + See '[App Upgrade](#app-upgrade)' below. + * Receive `UpgradeTakeCheckpoint` message + * Upgrade state and code + * Send a `Checkpoint` message for upgraded state + * Receive `TakeCheckpoint` message. This is usually the next message received, but other messages can come before it. + * Send a `Checkpoint` message [the upgrade is complete once the IC receives this checkpoint] + > Note: The second checkpoint is necessary for the successful handoff of the new version in the active/active case, + which is the scenario which will use the upgrade feature the most. But the additional `TakeCheckpoint` will also be + received when running standalone. + * Normal processing + +If performing a repro test: + + * Receive a `Checkpoint` message + * Receive logged replay messages + +> Repro testing, also known as "Time-Travel Debugging", allows a given existing log to be replayed, for example to re-create +the sequence of messages (and resulting state changes) that led to a bug. See '[App Upgrade](#app-upgrade)' below. + +If performing an upgrade test: + + * Receive a `Checkpoint` message + * Receive `UpgradeService` message + * Upgrade state and code + * Receive logged replay messages + +> Upgrade testing, in addition to testing the upgrade code path, allows messages to be replayed against an upgraded +service to verify if the changes cause bugs. This helps catch regressions before actually upgrading the live service. +See '[App Upgrade](#app-upgrade)' below. + +### Normal processing: + + * Receive an arbitrary mix of `RPC`, `RPCBatch`, and `TakeCheckpoint` messages. + * Persisted application state (the content of a checkpoint) should only ever be changed + as a consequence of processing `RPC` and `RPCBatch` messages. This ensures that the + application state can always be deterministically re-created during replay (recovery). + * The LB must never process messages [that modify application state] while it's in the process + of either loading (receiving) or taking (sending) a checkpoint. This ensures the integrity of + the checkpoint as a point-in-time snapshot of application state. + +### Receive logged replay messages: + + * During recovery, it is a violation of the recovery protocol for the application to send an Impulse RPC. So while a replayed Impulse RPC can send + Fork RPCs, it cannot send Impulse RPCs. If it does, the language binding should throw an error. + +### Attach-before-send protocol: + +* Before an RPC is sent to an Immortal instance (other than to the local Immortal), the `AttachTo` message must be sent (once). + This instructs the local IC to make the necessary TCP connections to the destination IC. + +### Active/Active: + +This is a high-availability configuration (used for server-side services only) involving at least +3 immortal (service/LB + IC pair) instances: A **primary**, a **checkpointing secondary**, and one or more +**standby secondaries**, which are continuously recovering until they become primary. A secondary is also +sometimes referred to as a replica. Despite typically running on separate machines (and in separate racks +and/or datacenters), all instances "share" the log and checkpoint files. Failover happens when the primary +loses its lock on the log file. The primary is the non-redundant instance. If it fails, one of the standby +secondaries will become the primary, after completing recovery. The checkpointing secondary never becomes +the primary, and if it fails, the next started replica becomes the checkpointing secondary, even if it's the +first started replica after all replicas fail. + +The primary never takes checkpoints, except when it first starts (ie. before there are any logs). +Thereafter, all checkpointing is handled by the checkpointing secondary. This arrangement allows +the primary to never have to "pause" to take a checkpoint, increasing availability. A deep dive +into the theory behind active/active can be found in the [Shrink](https://www.vldb.org/pvldb/vol10/p505-goldstein.pdf) +paper, and how to configure an active/active setup is explained [here](https://github.com/microsoft/AMBROSIA/blob/3d86a6c140c823f594bf6e8daa9de14ed5ed6d80/Samples/HelloWorld/ActiveActive-Windows.md). + +The language binding is oblivious as to whether it's in an active/active configuration or not. However, it +must be aware of whether it's a primary or not primarily so that it can generate an error if an attempt is +made to send an Impulse before the instance has become the primary (it's a violation of the Ambrosia protocol to send an Impulse during recovery). +The LB must also notify the host service (app) when it has become the primary for example, so that the service +doesn't try to send the aforementioned Impulse before it's valid to do so. + +There are 3 different messages that tell the LB it is becoming the primary, with each occurring under different circumstances: +* `TakeBecomingPrimaryCheckpoint` The instance is becoming the primary and **should** take a checkpoint (ie. this is the first start of the primary). +* `BecomingPrimary` The instance is becoming the primary but **should not** take a checkpoint (ie. this is a non-first start of the primary). +* `UpgradeTakeCheckpoint` The instance is a primary that is being upgraded and **should** take a checkpoint. Note that only a newly registered secondary + can be upgraded, and it will cause all other secondaries along with the existing primary to die (see '[App Upgrade](#app-upgrade)' below). + +Finally, "non-active/active" (or "standalone") refers to a single immortal instance running by itself without any secondaries. + +### App Upgrade: + +Upgrade is the process of migrating an instance from one version of code and state to another version of code and +state ("state" in this context means the application state data). From the LB's perspective there are no version +numbers involved: it simply has code/state for VCurrent and code/state for VNext. Both versions must be present so +that the app can recover using VCurrent, but then proceed using VNext. When the LB receives `UpgradeTakeCheckpoint` +(or `UpgradeService` when doing an upgrade test) it switches over the state and code from VCurrent to VNext. +Note that the lack of version numbering from the LB's perspective is in contrast to the parameters supplied to +`Ambrosia.exe RegisterInstance` (see below) which are specific integer version numbers. These numbers refer to "the migration +version of the instance", not "the version of the running state/code". This loose relationship is by design to offer maximum +flexibility to the deployment configuration of the service. For example, to perform a "downgrade", the downgraded code would be +included in the app as the VNext code, while the `upgradeVersion` number used in `RegisterInstance` to prepare for the +downgrade (see below) would still be increased. This illustrates how the term 'upgrade' is more accurately thought of as referring +to the _migration_ of code (and state). + +Performing an upgrade of a standalone instance always involves stopping the app (or service), so it always involves downtime. The steps are: +* Stop the current instance. +* Run `Ambrosia.exe RegisterInstance --instanceName=xxxxx --currentVersion=n --upgradeVersion=m` where n and m are the integer version numbers with m > n. + Note that this is an abridged version of the actual command; running `Ambrosia.exe RegisterInstance` requires that you re-specify **all** previously provided parameters, otherwise they will revert to their default values. +* Start the new instance (that contains the VCurrent and VNext app code, and the VCurrent-to-VNext state conversion code). +* The upgrade is complete after the IC receives the checkpoint taken in response to the next `TakeCheckpoint` received after `UpgradeTakeCheckpoint` (see '[Communication Protocols](#communication-protocols)' above). +* Once the upgrade is complete, the instance must be re-registered with the new `--currentVersion` before the next restart (but only while the instance is stopped):\ + `Ambrosia.exe RegisterInstance --instanceName=xxxxx --currentVersion=m` +* Further, before the next restart the application must be swapped for one that uses the VNext code (or the existing application should be configured to only use the VNext code). + +To upgrade an active/active instance a new replica (secondary) is registered and started, which upgrades the current version, similar to +the previous example, but for a new replica. When the replica finishes recovering, it stops the primary, and holds a +lock on the log file which prevents other secondaries from becoming primary. Upon completion of state and code upgrades, +including taking the first checkpoint for the new version, execution continues and the suspended secondaries die. +If the upgrade fails, the upgrading secondary releases the lock on the log, and one of the suspended secondaries becomes +primary and continues with the old version of state/code. + +Upgrade is intended mainly for use in active/active (ie. high availability scenarios). Standalone immortal upgrades are typically expected to involve simply deleting the logs during the installation of the upgraded app. + +Before doing a real (live) upgrade you can test the upgrade with this [abridged] example command: + +`Ambrosia.exe DebugInstance --checkpoint=3 --currentVersion=0 --testingUpgrade` + +> Note: Performing an upgrade test leads to a `UpgradeService` message being received as opposed to a `UpgradeTakeCheckpoint` message being +received when doing a real (live) upgrade. + +Doing a repro test (aka. "Time-Travel Debugging") is similar, just with `--testingUpgrade` ommitted): + +`Ambrosia.exe DebugInstance --checkpoint=1 --currentVersion=0` + diff --git a/CONTRIBUTING/README.md b/CONTRIBUTING/README.md new file mode 100644 index 00000000..812a4a51 --- /dev/null +++ b/CONTRIBUTING/README.md @@ -0,0 +1,90 @@ + + +CONTRIBUTING GUIDE +================== + +For developers interested in adding to AMBROSIA, or developing new +[language-level or RPC-framework bindings to AMBROSIA](#new-client-bindings), +this document provides a few pointers. + +We invite developers wishing to build on or contribute to AMBROSIA to join our [gitter community](https://gitter.im/AMBROSIA-resilient-systems/Lobby?utm_source=share-link&utm_medium=link&utm_campaign=share-link). + +Overview of repository +---------------------- + +AMBROSIA is implemented in C# and built with Visual Studio 2019 or dotnet +CLI tooling. Within the top level of this source repository, you will +find. + +(1) Core libraries and tools: + + * `./Ambrosia`: the core reliable messaging and runtime coordination engine. + + * `./ImmortalCoordinator`: the wrapper program around the core library that + must be run as a daemon alongside each AMBROSIA application process. + + * `./DevTools`: additional console tools for interacting with the + Azure metadata that supports an Ambrosia service. + + * `./Scripts`: scripts used when running automated tests (CI) as well + as the runAmbrosiaService.sh script which provides an example means + of executing an app+coordinator. + + * `./Samples/AKS-scripts`: scripts to get a user started with + AMBROSIA on Kubernetes on Azure. + +(2) Client libraries: + + * `./Clients`: these provide idiomatic bindings into different + programming languages. + +(3) Sample programs and tests: + + * `./Samples`: starting point examples for AMBROSIA users. + + * `./InternalImmortals`: internal test AMBROSIA programs, demos, and + benchmarks. + + * `./AmbrosiaTest`: testing code + + +New Client Bindings +=================== + +AMBROSIA is designed to keep its runtime components in a separate +process (ImmortalCoordinator) than the running application process. +The coordinator and the application communicate over a pair of TCP +connections. + +This separation makes the runtime component of AMBROSIA completely +language-agnostic. All that is needed is for the application +processes to speak the low-level messaging protocol with the +coordinator. + +For a new language or RPC framewrok, there are two ways to accomplish +this: (1) do the work yourself to implement the wire protocol, (2) +wrap the provided standalone native code library (which is small with +zero dependencies), to create a higher-level language binding. + + +Implement the low-level wire protocol +------------------------------------- + +Refer to +[AMBROSIA_client_network_protocol.md](AMBROSIA_client_network_protocol.md) +for details on the specification applications must meet to communicate +with ImmortalCoordinator at runtime over TCP sockets. + + +Wrap the Native Client +---------------------- + +`Clients/C` contains a small library that handles the wire protocol. +That is it deals with decoding headers, variable width integer +encodings, and so on. It provides a primitive messaging abstraction +for sending payloads of bytes with method IDs attached, but nothing more. + +This native code client library is written in vanilla C code, free of +runtime dependencies. Thus, it can be wrapped in any high-level +language that supports C calling conventions in its foreign function +interface. diff --git a/Clients/C/Makefile b/Clients/C/Makefile index 78e87fda..ac69ce8e 100644 --- a/Clients/C/Makefile +++ b/Clients/C/Makefile @@ -1,10 +1,15 @@ -# Put your -D variables here, e.g. -DDEBUG -DEFINES= +# Put your -D variables here: +DEFINES ?= + +EXTRA_DEFINES = -DIPV4 +# ^ TODO build everything twice for IPV6 vs IPV4. +# TODOTODO fix it so that one compile can work for both. + +ALL_DEFINES = $(DEFINES) $(EXTRA_DEFINES) GNULIBS= -lpthread -GNUOPTS= -pthread -O3 -# -std=c11 +GNUOPTS= -pthread -O0 -g HEADERS= include/ambrosia/internal/spsc_rring.h include/ambrosia/client.h include/ambrosia/internal/bits.h @@ -13,12 +18,19 @@ OBJS1= $(patsubst src/%.c,bin/static/%.o, $(SRCS) ) OBJS2= $(patsubst src/%.c,bin/shared/%.o, $(SRCS) ) -COMP= gcc $(DEFINES) -I include/ $(GNUOPTS) -LINK= gcc +COMP= gcc $(ALL_DEFINES) -I include/ $(GNUOPTS) +LINK= gcc LIBNAME=libambrosia -all: bin/$(LIBNAME).a bin/$(LIBNAME).so +all: bin/$(LIBNAME).a bin/$(LIBNAME).so bin/native_hello.exe + +debug: + $(MAKE) DEFINES="-DAMBCLIENT_DEBUG" clean publish + +bin/native_hello.exe: native_hello.c $(OBJS1) $(HEADERS) + $(COMP) -c $< -o bin/static/hello.o + $(LINK) $(OBJS1) bin/static/hello.o $(GNULIBS) -o $@ bin/$(LIBNAME).a: $(OBJS1) ar rcs $@ $(OBJS1) @@ -40,7 +52,14 @@ bin/shared: objclean: rm -rf bin +# Copy to the head of the working copy. +publish: all + rm -rf ../../bin/include ../../bin/libambrosia.* + mkdir -p ../../bin/include + cp -a bin/libambrosia.* ../../bin/ + cp -a include ../../bin/ + clean: objclean rm -f \#* .\#* *~ -.PHONY: lin clean objclean +.PHONY: lin clean objclean publish diff --git a/Clients/C/include/ambrosia/client.h b/Clients/C/include/ambrosia/client.h index ab9f442e..eb1efeaa 100644 --- a/Clients/C/include/ambrosia/client.h +++ b/Clients/C/include/ambrosia/client.h @@ -11,11 +11,17 @@ #include // va_list #endif +// #include "ambrosia/internal/bits.h" + +// ------------------------------------------------- // Data formats used by the AMBROSIA "wire protocol" // ------------------------------------------------- +// The fixed header size used by the protocol: #define AMBROSIA_HEADERSIZE 24 +// A C struct which matches the format of the header. +// // The compiler shouldn't insert any padding for this one, but we use // the pragma to make absolutely sure: // #pragma pack(1) @@ -26,7 +32,9 @@ struct log_hdr { int64_t seqID; }; -enum MsgType { RPC=0, // +// This enum is established by the wire protocol, which fixes this +// assignment of (8 bit) integers to message types. +enum MsgType { RPC=0, // AttachTo=1, // dest str TakeCheckpoint=2, // no data RPCBatch=5, // count, msg seq @@ -38,68 +46,30 @@ enum MsgType { RPC=0, // }; - -// The soft limit after which we should send on the socket. -// TEMP: this will be replaced by a ringbuffer and a concurrent -// network progress thread. -// #define AMBCLIENT_DEFAULT_BUFSIZE 4096 -#define AMBCLIENT_DEFAULT_BUFSIZE (20*1024*1024) - // Print extremely verbose debug output to stdout: -// #define AMBCLIENT_DEBUG #define amb_dbg_fd stderr // ^ Non-constant initializer... -#ifdef AMBCLIENT_DEBUG -static inline void amb_sleep_seconds(double n) { -#ifdef _WIN32 - Sleep((int)(n * 1000)); -#else - int64_t nanos = (int64_t)(10e9 * n); - const struct timespec ts = {0, nanos}; - nanosleep(&ts, NULL); -#endif -} - -extern volatile int64_t debug_lock; - -static inline void amb_debug_log(const char *format, ...) -{ - va_list args; - va_start(args, format); - amb_sleep_seconds((double)(rand()%1000) * 0.00001); // .01 - 10 ms -#ifdef _WIN32 - while ( 1 == InterlockedCompareExchange64(&debug_lock, 1, 0) ) { } -#else - while ( 1 == __sync_val_compare_and_swap(&debug_lock, 1, 0) ) { } -#endif - fprintf(amb_dbg_fd," [AMBCLIENT] "); - vfprintf(amb_dbg_fd,format, args); - fflush(amb_dbg_fd); - debug_lock = 0; - va_end(args); -} -#else -// inline void amb_debug_log(const char *format, ...) { } -#define amb_debug_log(...) {} -#endif - //------------------------------------------------------------------------------ // FIXME: these should become PRIVATE to the library: extern int g_to_immortal_coord, g_from_immortal_coord; -extern int upport, downport; - // Communicates with the server to establish normal operation. // // ARGS: two valid socket file descriptors which must have been -// received from a call to connect_sockets. -void startup_protocol(int upfd, int downfd); +// received from a call to amb_connect_sockets. +void amb_startup_protocol(int upfd, int downfd); -void connect_sockets(int* upptr, int* downptr); +// Connect to the ImmortalCoordinator. Use the provided ports. +// +// On the "up" port we connect, and on "down" the coordinator connects +// to us. This function writes the file descriptors for the opened +// connections into the pointers provided as the last two arguments. +void amb_connect_sockets(int upport, int downport, int* up_fd_ptr, int* down_fd_ptr); +// Encoding and Decoding message types //------------------------------------------------------------------------------ // PRECONDITION: sufficient space free at output pointer. @@ -124,14 +94,69 @@ void* amb_write_outgoing_rpc(void* buf, char* dest, int32_t destLen, char RPC_or void amb_send_outgoing_rpc(void* tempbuf, char* dest, int32_t destLen, char RPC_or_RetVal, int32_t methodID, char fireForget, void* args, int argsLen); + +// Read a full log header off the socket, writing it into the provided pointer. void amb_recv_log_hdr(int sockfd, struct log_hdr* hdr); -// TEMP - audit me +//------------------------------------------------------------------------------ + +// USER DEFINED: FIXME: REPLACE W CALLBACK +extern void send_dummy_checkpoint(int upfd); + +// USER-DEFINED: FIXME: turn into a callback (currently defined by application): +extern void amb_dispatch_method(int32_t methodID, void* args, int argsLen); + + +// TEMP - audit me - need to add a hash table to track attached destinations: void attach_if_needed(char* dest, int destLen); -// Remove this? -// void send_message(char* buf, int len); +//------------------------------------------------------------------------------ + +// PHASE 1/3 +// +// This performs the full setup process: attaching to the Immortal +// Coordinator on the specified ports, creating a network progress +// thread in the background, and executing the first phases of the +// App/Coordinator communication protocol. +// +// ARG: upport: the port on which we will reach out and connect to the +// coordinator on localhost (127.0.0.1 or ::1). This is used to +// send data to the coordinator. +// +// ARG: downport: (after upport is connected) the port on which we +// will listen for the coordinator to connect to us. This is +// used to receive data from the coordinator. +// +// ARG: bufSz: the size of the buffer used to buffer small messages on +// their way to the ImmortalCoordinator. If this is zero, or +// negative, a default is used. +// +// ARG: +// +// RETURNS: +// +// EFFECTS: +void amb_initialize_client_runtime(int upport, int downport, int bufSz); + +// PHASE 2/3 +// +// The heart of the runtime: enter the processing loop. Read log +// entries from the coordinator and make "up-calls" (callbacks) into +// the application when we receive incoming messages. These call +// backs in turn send outgoing messages, and so on. +void amb_normal_processing_loop(); + +// PHASE 3/3 +// +// This can be called by the client application at any time post +// initalization. It signals that the main event loop +// (amb_normal_processing_loop) should exit. +// +// It does NOT transfer control away from the current function +// (longjmp), rather it returns to the caller, which is expected to +// return normally to the event handler loop. +void amb_shutdown_client_runtime(); // ------------------------------------------------------------ @@ -157,31 +182,40 @@ void* read_zigzag_int(void* ptr, int32_t* ret); int zigzag_int_size(int32_t value); +// Debugging +//------------------------------------------------------------------------------ + +#ifdef AMBCLIENT_DEBUG +extern volatile int64_t amb_debug_lock; + +extern void amb_sleep_seconds(double n); + +static inline void amb_debug_log(const char *format, ...) +{ + va_list args; + va_start(args, format); + amb_sleep_seconds((double)(rand()%1000) * 0.00001); // .01 - 10 ms +#ifdef _WIN32 + while ( 1 == InterlockedCompareExchange64(&amb_debug_lock, 1, 0) ) { } +#else + while ( 1 == __sync_val_compare_and_swap(&amb_debug_lock, 1, 0) ) { } +#endif + fprintf(amb_dbg_fd," [AMBCLIENT] "); + vfprintf(amb_dbg_fd,format, args); + fflush(amb_dbg_fd); + amb_debug_lock = 0; + va_end(args); +} +#else +// inline void amb_debug_log(const char *format, ...) { } +#define amb_debug_log(...) {} +#endif + + // ------------------------------------------------------------ // A standardized, cross-platform way used by this library to acquire // the last error message from a system call. char* amb_get_error_string(); -// Internal helper: try repeatedly on a socket until all bytes are sent. -static inline void socket_send_all(int sock, const void* buf, size_t len, int flags) { - char* cur = (char*)buf; - int remaining = len; - while (remaining > 0) { - int n = send(sock, cur, remaining, flags); - if (n < 0) { - char* err = amb_get_error_string(); - fprintf(stderr,"\nERROR: failed send (%d bytes, of %d) which left errno = %s\n", - remaining, (int)len, err); - abort(); - } - cur += n; - remaining -= n; -#ifdef AMBCLIENT_DEBUG - if (remaining > 0) - amb_debug_log(" Warning: socket send didn't get all bytes across (%d of %d), retrying.\n", n, remaining); -#endif - } -} - #endif diff --git a/Clients/C/include/ambrosia/internal/bits.h b/Clients/C/include/ambrosia/internal/bits.h index e69de29b..5cb233e8 100644 --- a/Clients/C/include/ambrosia/internal/bits.h +++ b/Clients/C/include/ambrosia/internal/bits.h @@ -0,0 +1,69 @@ +// Small helpers and potentially reusable bits. + + +// Internal helper: try repeatedly on a socket until all bytes are sent. +// +// The Linux man pages are vague on when send on a (blocking) socket +// can return less than the requested number of bytes. This little +// helper simply retries. +static inline +void amb_socket_send_all(int sock, const void* buf, size_t len, int flags) { + char* cur = (char*)buf; + int remaining = len; + while (remaining > 0) { + int n = send(sock, cur, remaining, flags); + if (n < 0) { + char* err = amb_get_error_string(); + fprintf(stderr,"\nERROR: failed send (%d bytes, of %d) which left errno = %s\n", + remaining, (int)len, err); + abort(); + } + cur += n; + remaining -= n; +#ifdef AMBCLIENT_DEBUG + if (remaining > 0) + amb_debug_log(" Warning: socket send didn't get all bytes across (%d of %d), retrying.\n", n, remaining); +#endif + } +} + +static inline +void print_hex_bytes(FILE* fd, char* ptr, int len) { + const int limit = 100; // Only print this many: + fprintf(fd,"0x"); + int j; + for (j=0; j < len && j < limit; j++) { + fprintf(fd,"%02hhx", (unsigned char)ptr[j]); + if (j % 2 == 1) + fprintf(fd," "); + } + if (j +#include + +#include +#include +#include +#include + +#ifdef _WIN32 +#error "Windows not finished" +#else + // Unix variants, but really just Linux for now: + #include + #include +#endif + +// #include "ambrosia/internal/spsc_rring.h" +#include "ambrosia/client.h" + +#include "ambrosia/internal/bits.h" // amb_socket_send_all + + +// An example service sitting on top of AMBROSIA +//------------------------------------------------------------------------------ + +enum MethodTable { STARTUP_MSG_ID=32 }; + +// FIXME: add g_numRPCBytes as an argument to startup.... +// startup a ROUND. Called once per round. +void startup(int64_t n) { + printf("\nHello! Received message from self: %lld\n", n); + // TODO: send n-1 and count down... + + printf("\nSignaling shutdown to runtime...\n"); + amb_shutdown_client_runtime(); +} + +// Everything in this section should, in principle, be automatically GENERATED: +//------------------------------------------------------------------------------ + +void send_dummy_checkpoint(int upfd) { + const char* dummy_checkpoint = "dummyckpt"; + int strsize = strlen(dummy_checkpoint); + + // New protocol, the payload is just a 64 bit size: + int msgsize = 1 + 8; + char* buf = alloca(msgsize + 5 + strsize); + char* bufcur = write_zigzag_int(buf, msgsize); // Size (including type tag) + *bufcur++ = Checkpoint; // Type + *((int64_t*)bufcur) = strsize; // 8 byte size + bufcur += 8; + + assert(bufcur-buf == 9 + zigzag_int_size(msgsize)); + + // Then write the checkpoint itself AFTER the regular message: + bufcur += sprintf(bufcur, "%s", dummy_checkpoint); // Dummy checkpoint. + + amb_socket_send_all(upfd, buf, bufcur-buf, 0); + +#ifdef AMBCLIENT_DEBUG + amb_debug_log(" Trivial checkpoint message sent to coordinator (%lld bytes), checkpoint %d bytes\n", + (int64_t)(bufcur-buf), strsize); + amb_debug_log(" Message was: "); + print_hex_bytes(amb_dbg_fd,buf, bufcur-buf); fprintf(amb_dbg_fd,"\n"); +#endif +} + + +// Translate from untyped blobs to the multi-arity calling conventions +// of each RPC entrypoint. +void amb_dispatch_method(int32_t methodID, void* args, int argsLen) { + switch(methodID) { + case STARTUP_MSG_ID: + startup(10); + break; + + default: + fprintf(stderr, "ERROR: cannot dispatch unknown method ID: %d\n", methodID); + abort(); + } +} + + +// Basic example application +// ------------------------------------------------------------ + +int main(int argc, char** argv) +{ + printf("Begin Hello-World AMBROSIA + native-client\n"); + + int upport = 1000, downport = 1001; + if (argc >= 2) upport = atoi(argv[1]); + if (argc >= 3) downport = atoi(argv[2]); + + printf("Connecting to my coordinator on ports: %d (up), %d (down)\n", upport, downport); + printf("The 'up' port we connect, and the 'down' one the coordinator connects to us.\n"); + amb_initialize_client_runtime(upport, downport, 0); + // ^ Calls callbacks for reading checkpoint and sending init message. + + // Enter processing loop until a message handler calls shutdown. + amb_normal_processing_loop(); + printf("\nReturned from AMBROSIA message processing loop. All done.\n"); +} diff --git a/Clients/C/run_hello_world.sh b/Clients/C/run_hello_world.sh new file mode 100755 index 00000000..5e1881b7 --- /dev/null +++ b/Clients/C/run_hello_world.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -euo pipefail +echo +echo "--------------------------------------------------------------------------------" +echo "Run Hello World app process" +echo "--------------------------------------------------------------------------------" +echo +if ! [ ${PORTOFFSET:+defined} ]; then PORTOFFSET=0; fi +PORT1=$((6000 + PORTOFFSET)) +PORT2=$((6001 + PORTOFFSET)) +PORT3=$((7000 + PORTOFFSET)) +PORT4=$((7001 + PORTOFFSET)) + +export AMBROSIA_IMMORTALCOORDINATOR_PORT=$((6000 + PORTOFFSET)) +export AMBROSIA_INSTANCE_NAME=hello`whoami` +set -x + +time Ambrosia RegisterInstance -i $AMBROSIA_INSTANCE_NAME --rp $PORT1 --sp $PORT2 -l "./logs/" +time Ambrosia RegisterInstance -i $AMBROSIA_INSTANCE_NAME --rp $PORT3 --sp $PORT4 -l "./logs/" + +rm -rf logs # Delete logs and run fresh for this example. + +runAmbrosiaService.sh ./bin/native_hello.exe $PORT1 $PORT2 + +set +x +echo "Attempt a cleanup of our table metadata:" +time UnsafeDeregisterInstance $AMBROSIA_INSTANCE_NAME || true +echo "All done." diff --git a/Clients/C/src/ambrosia_client.c b/Clients/C/src/ambrosia_client.c index d84d8237..a78979db 100644 --- a/Clients/C/src/ambrosia_client.c +++ b/Clients/C/src/ambrosia_client.c @@ -1,4 +1,6 @@ +// See client.h header for function-level documentation. + #include #include #include @@ -11,60 +13,48 @@ #ifdef _WIN32 #define WIN32_LEAN_AND_MEAN - /* #include */ - /* #include */ - /* #include */ - /* #include */ - // for SIO_LOOPBACK_FAST_PATH: #include #pragma comment(lib,"ws2_32.lib") //Winsock Library - - /* #define int32_t INT32 */ - /* #define uint32_t UINT32 */ - /* #define int64_t INT64 */ - /* #define uint64_t UINT64 */ - #else - // *nix, but really Linux only for now: - /* #include */ - /* #include */ #include - // #include #include // inet_pton #include // gethostbyname - /* #include */ + #include // sched_yield + #include #endif #include "ambrosia/client.h" #include "ambrosia/internal/bits.h" -// Library-level global variables: +// For network progress thread only: +#include "ambrosia/internal/spsc_rring.h" + +// Library-level (private) global variables: // -------------------------------------------------- // FIXME: looks like we need a hashtable after all... int g_attached = 0; // For now, ONE destination. - -// This follows the rule that the RECV side acts as the server: -int upport = 1000; // Send. Up to the reliability-coordinator-as-server -int downport = 1001; // Recv. Down from the coordinator (we're server) - - // Global variables that should be initialized once for the library. // We can ONLY ever have ONE reliability coordinator. int g_to_immortal_coord, g_from_immortal_coord; + +// An INTERNAL global representing whether the client is terminating +// this AMBROSIA instance/network-endpoint. +int g_amb_client_terminating = 0; + #ifdef IPV4 const char* coordinator_host = "127.0.0.1"; -#else -// char* host = "0:0:0:0:0:0:0:1"; -// char* host = "1:2:3:4:5:6:7:8"; +#elif defined IPV6 const char* coordinator_host = "::1"; +#else +#error "Preprocessor: Expected IPV4 or IPV6 to be defined." #endif #ifdef AMBCLIENT_DEBUG -// volatile int64_t debug_lock = 0; +volatile int64_t amb_debug_lock = 0; #endif @@ -85,17 +75,15 @@ char* amb_get_error_string() { return strerror(errno); #endif } - -void print_hex_bytes(FILE* fd, char* ptr, int len) { - const int limit = 100; // Only print this many: - fprintf(fd,"0x"); - int j; - for (j=0; j < len && j < limit; j++) { - fprintf(fd,"%02hhx", (unsigned char)ptr[j]); - if (j % 2 == 1) - fprintf(fd," "); - } - if (j= 0) { fprintf(stderr,"\nERROR: connection interrupted. Did not receive all %d bytes of log header, only %d:\n ", - AMBROSIA_HEADERSIZE, num); + AMBROSIA_HEADERSIZE, num); print_hex_bytes(amb_dbg_fd,(char*)hdr, num); fprintf(amb_dbg_fd,"\n"); } fprintf(stderr,"\nERROR: failed recv (logheader), which left errno = %s\n", err); abort(); } amb_debug_log("Read log header: { commit %d, sz %d, checksum %lld, seqid %lld }\n", - hdr->commitID, hdr->totalSize, hdr->checksum, hdr->seqID ); + hdr->commitID, hdr->totalSize, hdr->checksum, hdr->seqID ); // printf("Hex: "); print_hex_bytes((char*)hdr,AMBROSIA_HEADERSIZE); printf("\n"); return; } -// ------------------------------------------------------------ + +// ============================================================================== +// Manage the state of the client (networking/connections) +// ============================================================================== void attach_if_needed(char* dest, int destLen) { // HACK: only working for one dest atm... @@ -299,65 +269,87 @@ void attach_if_needed(char* dest, int destLen) { print_hex_bytes(amb_dbg_fd, sendbuf, cur-sendbuf); fprintf(amb_dbg_fd,"\n"); #endif - socket_send_all(g_to_immortal_coord, sendbuf, cur-sendbuf, 0); + amb_socket_send_all(g_to_immortal_coord, sendbuf, cur-sendbuf, 0); g_attached = 1; amb_debug_log(" attach message sent (%d bytes)\n", cur-sendbuf); } } -/* -// INEFFICIENT version that makes an extra copy: -void send_message(char* buf, int len) { - attach_if_needed(destName, ??); // Hard-coded global dest name. +// Hacky busy-wait by thread-yielding for now: +// FIXME: NEED BACKOFF! +static inline +void amb_yield_thread() { +#ifdef _WIN32 + SwitchToThread(); +#else + sched_yield(); +#endif +} - // FIXME - LAME COPY to PREPEND header bytes! - char* sendbuf = (char*)malloc(1 + 5 + destLen + 1 + 5 + 1 + len); - char* newpos = amb_write_outgoing_rpc(sendbuf, destName, destLen, 0, TPUT_MSG_ID, 1, buf, len); - // FIXME: one system call per message! - socket_send_all(g_to_immortal_coord, sendbuf, newpos-sendbuf, 0); -#ifdef AMBCLIENT_DEBUG - amb_debug_log("Sent %d byte message up to coordinator, argsLen %d...\n Hex: ", newpos-sendbuf, len); - print_hex_bytes(amb_dbg_fd, sendbuf, newpos-sendbuf); - fprintf(amb_dbg_fd,"\n Decimal: "); - print_decimal_bytes(sendbuf, newpos-sendbuf); printf("\n"); +// Launch a background thread that progresses the network. +#ifdef _WIN32 +DWORD WINAPI amb_network_progress_thread( LPVOID lpParam ) +#else +void* amb_network_progress_thread( void* lpParam ) #endif - free(sendbuf); -} -*/ +{ + printf(" *** Network progress thread starting...\n"); + int hot_spin_amount = 1; // 100 + int spin_tries = hot_spin_amount; + while(1) { + int numbytes = -1; + char* ptr = peek_buffer(&numbytes); + if (numbytes > 0) { + amb_debug_log(" network thread: sending slice of %d bytes\n", numbytes); + amb_socket_send_all(g_to_immortal_coord, ptr, numbytes, 0); + pop_buffer(numbytes); // Must be at least this many. + spin_tries = hot_spin_amount; + } else if ( spin_tries == 0) { + spin_tries = hot_spin_amount; + // amb_debug_log(" network thread: yielding to wait...\n"); +#ifdef AMBCLIENT_DEBUG + amb_sleep_seconds(0.5); + amb_sleep_seconds(0.05); +#endif + amb_yield_thread(); + } else spin_tries--; + } + return 0; +} -// Begin connect_sockets: +// Begin amb_connect_sockets: // -------------------------------------------------- #ifdef _WIN32 void enable_fast_loopback(SOCKET sock) { int OptionValue = 1; DWORD NumberOfBytesReturned = 0; int status = WSAIoctl(sock, SIO_LOOPBACK_FAST_PATH, - &OptionValue, - sizeof(OptionValue), - NULL, - 0, - &NumberOfBytesReturned, - 0, - 0); + &OptionValue, + sizeof(OptionValue), + NULL, + 0, + &NumberOfBytesReturned, + 0, + 0); if (SOCKET_ERROR == status) { DWORD LastError = WSAGetLastError(); if (WSAEOPNOTSUPP == LastError) { - printf("WARNING: this platform doesn't support the fast loopback (needs Windows Server >= 2012).\n"); + printf("WARNING: this platform doesn't support the fast loopback (needs Windows Server >= 2012).\n"); } else { - fprintf(stderr, "\nERROR: Loopback Fastpath WSAIoctl failed with code: %d", - LastError); - abort(); + fprintf(stderr, "\nERROR: Loopback Fastpath WSAIoctl failed with code: %d", + LastError); + abort(); } } } -void connect_sockets(int* upptr, int* downptr) { +void amb_connect_sockets(int upport, int downport, int* upptr, int* downptr) { WSADATA wsa; SOCKET sock; @@ -403,13 +395,13 @@ void connect_sockets(int* upptr, int* downptr) { if (connect(sock, (struct sockaddr*)&addr, sizeof(addr)) < 0) { fprintf(stderr, "\nERROR: Failed to connect to-socket (ipv6): %s:%d\n Error: %s", - coordinator_host, upport, amb_get_error_string()); + coordinator_host, upport, amb_get_error_string()); abort(); } /* DWORD ipv6only = 0; if (SOCKET_ERROR == setsockopt(sock, IPPROTO_IPV6, - IPV6_V6ONLY, (char*)&ipv6only, sizeof(ipv6only) )) { + IPV6_V6ONLY, (char*)&ipv6only, sizeof(ipv6only) )) { fprintf(stderr, "\nERROR: Failed to setsockopt.\n"); closesocket(sock); abort(); @@ -422,16 +414,16 @@ void connect_sockets(int* upptr, int* downptr) { char upportstr[16]; sprintf(upportstr, "%d", upport); if (! WSAConnectByName(sock, - host, - upportstr, - &dwLocalAddr, - (SOCKADDR*)&LocalAddr, - &dwRemoteAddr, - (SOCKADDR*)&RemoteAddr, - NULL, - NULL) ) { + host, + upportstr, + &dwLocalAddr, + (SOCKADDR*)&LocalAddr, + &dwRemoteAddr, + (SOCKADDR*)&RemoteAddr, + NULL, + NULL) ) { fprintf(stderr, "\nERROR: Failed to connect (IPV6) to-socket: %s:%d\n Error: %s\n", - host, upport, amb_get_error_string()); + host, upport, amb_get_error_string()); abort(); } */ @@ -457,7 +449,7 @@ void connect_sockets(int* upptr, int* downptr) { if( bind(tempsock, (struct sockaddr *)&addr , sizeof(addr)) == SOCKET_ERROR) { fprintf(stderr,"\nERROR: bind returned error, addr:port is %s:%d\n Error was: %d\n", - coordinator_host, downport, WSAGetLastError()); + coordinator_host, downport, WSAGetLastError()); abort(); } @@ -492,7 +484,7 @@ void connect_sockets(int* upptr, int* downptr) { // if ( bind(tempsock, &addr, sizeof(sockaddr_in6)) == SOCKET_ERROR) { fprintf(stderr,"\nERROR: bind() failed with error when connecting to addr:port %s:%d: %s\n", - coordinator_host, downport, amb_get_error_string() ); + coordinator_host, downport, amb_get_error_string() ); closesocket(tempsock); WSACleanup(); abort(); @@ -516,7 +508,7 @@ void connect_sockets(int* upptr, int* downptr) { // Establish both connections with the reliability coordinator. // Takes two output parameters where it will write the resulting sockets. -void connect_sockets(int* upptr, int* downptr) { +void amb_connect_sockets(int upport, int downport, int* upptr, int* downptr) { #ifdef IPV4 struct hostent* immortalCoord; struct sockaddr_in addr; @@ -577,13 +569,13 @@ void connect_sockets(int* upptr, int* downptr) { #endif if (bind(tempfd, (struct sockaddr *) &addr, sizeof(addr)) < 0) { fprintf(stderr,"\nERROR: bind returned error, addr:port is %s:%d\n ERRNO was: %s\n", - coordinator_host, downport, strerror(errno)); + coordinator_host, downport, strerror(errno)); abort(); } if ( listen(tempfd,5) ) { fprintf(stderr,"\nERROR: listen returned error, addr:port is %s:%d\n ERRNO was: %s\n", - coordinator_host, downport, strerror(errno)); + coordinator_host, downport, strerror(errno)); abort(); } #ifdef IPV4 @@ -600,18 +592,14 @@ void connect_sockets(int* upptr, int* downptr) { return; } #endif -// End connect_sockets +// End amb_connect_sockets // (Runtime library) Startup. //------------------------------------------------------------------------------ - -// FIXME: move this to a callback argument: -extern void send_dummy_checkpoint(int upfd); - - -void startup_protocol(int upfd, int downfd) { +// Execute the startup messaging protocol. +void amb_startup_protocol(int upfd, int downfd) { struct log_hdr hdr; memset((void*) &hdr, 0, AMBROSIA_HEADERSIZE); assert(sizeof(struct log_hdr) == AMBROSIA_HEADERSIZE); @@ -623,7 +611,7 @@ void startup_protocol(int upfd, int downfd) { amb_debug_log(" Log header received, now waiting on payload (%d bytes)...\n", payloadSz); if(recv(downfd, buf, payloadSz, MSG_WAITALL) < payloadSz) { fprintf(stderr,"\nERROR: connection interrupted. Did not receive all %d bytes of payload following header.", - payloadSz); + payloadSz); abort(); } @@ -646,6 +634,7 @@ void startup_protocol(int upfd, int downfd) { case TakeBecomingPrimaryCheckpoint: amb_debug_log("Starting up for the first time (TakeBecomingPrimaryCheckpoint)\n"); break; + case Checkpoint: fprintf(stderr, "RECOVER mode ... not implemented yet.\n"); @@ -672,6 +661,7 @@ void startup_protocol(int upfd, int downfd) { int32_t msgsize; char *msgbufcur, *bufcur; + // FIXME!! Factor this out into the client application: #define STARTUP_ID 32 @@ -697,17 +687,17 @@ void startup_protocol(int upfd, int downfd) { int totalbytes = msgsize + (bufcur-buf); amb_debug_log(" Now will send InitialMessage to ImmortalCoordinator, %lld total bytes, %d in payload.\n", - (int64_t)totalbytes, msgsize); + (int64_t)totalbytes, msgsize); #ifdef AMBCLIENT_DEBUG amb_debug_log(" Message: "); print_hex_bytes(amb_dbg_fd, buf, msgsize + (bufcur-buf)); fprintf(amb_dbg_fd,"\n"); #endif - socket_send_all(upfd, buf, totalbytes, 0); + amb_socket_send_all(upfd, buf, totalbytes, 0); /* for(int i=0; i Read message, type %d, payload size %d\n", type, msgsize-1); + bufcur = amb_handle_rpc(bufcur, msgsize-1); + amb_debug_log(" --> handling that message read %d bytes off the batch\n", (int)(bufcur - lastbufcur)); + rawsize -= (bufcur - lastbufcur); + } + } + break; + + case TakeCheckpoint: + send_dummy_checkpoint(upfd); + break; + default: + fprintf(stderr, "ERROR: unexpected or unrecognized message type: %d", tag); + abort(); + break; + } + } + } + amb_debug_log("Client signaled shutdown, normal_processing_loop exiting cleanly...\n"); + return; +} + + diff --git a/Clients/C/src/spsc_rring.c b/Clients/C/src/spsc_rring.c index a1b189e8..2dc3e794 100644 --- a/Clients/C/src/spsc_rring.c +++ b/Clients/C/src/spsc_rring.c @@ -1,4 +1,6 @@ +// See the corresponding header for function-level documentation. + #include #include #include @@ -9,9 +11,12 @@ #include // sched_yield #endif +// ---------------------------------------------------------------------------- // FIXME: replace globals with a proper API for dynamically allocating buffers: +// ---------------------------------------------------------------------------- + +// TODO: FACTOR THESE INTO A STRUCT TO ALLOW MORE THAN ONE INSTANCE: -// TODO: FACTOR OUT: // Single-producer Single-consumer concurrent ring buffer: char* g_buffer = NULL; volatile int g_buffer_head = 0; // Byte offset into buffer, written by consumer. @@ -22,37 +27,65 @@ volatile int g_buffer_end = -1; // The current capacity, MODIFIED dynamically by int orig_buffer_end = -1; // Snapshot of the original buffer capacity. int g_buffer_last_reserved = -1; // The number of bytes in the last reserve call (producer-private) -// int g_buffer_total_reserved = -1; // The number of bytes in the last reserve call (producer-private) +// Debugging +//-------------------------------------------------------------------------------- + +// Fine-grained debugging. Turned off statically to avoid overhead. +#ifdef SPSC_RRING_DEBUG +volatile int64_t spsc_debug_lock = 0; +void spsc_rring_debug_log(const char *format, ...) +{ + va_list args; + va_start(args, format); + sleep_seconds((double)(rand()%1000) * 0.00001); // .01 - 10 ms + while ( 1 == InterlockedCompareExchange64(&spsc_debug_lock, 1, 0) ) { } + fprintf(dbg_fd," [AMBCLIENT] "); + vfprintf(dbg_fd,format, args); + fflush(dbg_fd); + spsc_debug_lock = 0; + va_end(args); +} +#else +// inline void spsc_rring_debug_log(const char *format, ...) { } +#define spsc_rring_debug_log(...) {} +#endif + + +// Buffer life cycle // ------------------------------------------------------------ -void new_buffer(int sz) { +void new_buffer(int sz) +{ + if (g_buffer != NULL) { + fprintf(stderr, "ERROR: tried to call new_buffer a second time\n"); + fprintf(stderr, "Only one global ring buffer permitted for now."); + abort(); + } g_buffer = malloc(sz); orig_buffer_end = sz; // Need room for the largest message. spsc_rring_debug_log("Initialized global buffer, address %p\n", g_buffer); } -void reset_buffer() { +void reset_buffer() +{ g_buffer_end = orig_buffer_end; } +void free_buffer() +{ + spsc_rring_debug_log("Freeing buffer %p\n", g_buffer); + free(g_buffer); + g_buffer = NULL; + orig_buffer_end = -1; +} + +// Buffer operations //-------------------------------------------------------------------------------- -// (Consumer) Wait until a number of (contiguous) bytes is available within the -// buffer, and write the pointer to those bytes into the pointer argument. -// -// This only reads in units of "complete messages", but it is UNKNOWN -// how many complete messages are returned into the buffer. -// -// RETURN: the pointer P to the available bytes. -// RETURN(param): set N to the (nonzero) number of bytes read. -// POSTCOND: the permission to read N bytes from P -// POSTCOND: the caller must use pop_buffer(N) to actually -// free these bytes for reuse. -// -// IDEMPOTENT! Only pop actually clears the bytes. -char* peek_buffer(int* numread) { +char* peek_buffer(int* numread) +{ while (1) { int observed_head = g_buffer_head; // We "own" the head (and _end) @@ -90,9 +123,8 @@ char* peek_buffer(int* numread) { } } -// (Consumer) Free N bytes from the ring buffer, marking them as consumed and -// allowing the storage to be reused. -void pop_buffer(int numread) { +void pop_buffer(int numread) +{ int observed_head = g_buffer_head; // We "own" the head int observed_end = g_buffer_end; // We "own" the end spsc_rring_debug_log(" pop_buffer: advancing head (%d) by %d\n", observed_head, numread); @@ -120,25 +152,10 @@ void pop_buffer(int numread) { } } -// From old flush_buffer: - /* - // ASSUMPTION: only complete RPC messages reside in the buffer! - // This sends out the RPCBatch header followed by the messages. - if (g_buffer_msgs > 1) { - spsc_rring_debug_log(" sending RPCBatch of size %d\n", g_buffer_msgs); - char tempbuf[16]; - char* cur = tempbuf; - int allbytes = g_buffer_tail + 1 + zigzag_int_size(g_buffer_msgs); - cur = write_zigzag_int(cur, allbytes); // Size - *cur++ = RPCBatch; // Type - cur = write_zigzag_int(cur, g_buffer_msgs); // RPCBatch has numMsgs 1st - send_all(g_to_immortal_coord, tempbuf, cur-tempbuf, 0); - } - */ - // Hacky busy-wait by thread-yielding for now: -static inline void wait() { +static inline void wait() +{ #ifdef _WIN32 SwitchToThread(); #else @@ -147,22 +164,14 @@ static inline void wait() { } -// (Producer) Grab a cursor for writing an (unspecified) number of bytes to the -// tail of the buffer. It's ok to RESERVE more than you ultimately USE. -char* reserve_buffer(int len) { +char* reserve_buffer(int len) +{ if (len > orig_buffer_end) { fprintf(stderr,"\nERROR: reserve_buffer request bigger than allocated buffer itself! %d", len); abort(); } - /* - while (len > g_buffer_end) { - spsc_rring_debug_log(" reserve_buffer: producer waiting until consumer un-shrinks the buffer..\n"); - wait(); - } - */ - while(1) // Retry loop. - { + { int our_tail = g_buffer_tail; int observed_head = g_buffer_head; // Only consumer changes this. int observed_end = g_buffer_end; @@ -172,87 +181,60 @@ char* reserve_buffer(int len) { else headroom = observed_end - our_tail; spsc_rring_debug_log(" reserve_buffer: headroom = %d (head/tail/end %d / %d / %d)\n", - headroom, observed_head, our_tail, observed_end); + headroom, observed_head, our_tail, observed_end); if (len < headroom) { - g_buffer_last_reserved = len; - return g_buffer+our_tail; // good to go! + g_buffer_last_reserved = len; + return g_buffer+our_tail; // good to go! } else if (our_tail < observed_head) // Torn state { - int clearpos = our_tail + len; - if ( clearpos < observed_end ) { - // Don't wait for state change, wait till we have just enough room: - // while( g_buffer_head < clearpos ) - spsc_rring_debug_log("! reserve_buffer: wait for head to advance. Head/tail/end: %d %d %d\n", - observed_head, our_tail, observed_end); - } else { - // Otherwise we have to wait for state change. In natural - // state the shrunk buffer is restored. - // while( g_buffer_head < our_tail ) - spsc_rring_debug_log("! reserve_buffer: wait to exit torn state. Head/tail/end: %d %d %d\n", - observed_head, our_tail, observed_end); - } - wait(); - continue; + int clearpos = our_tail + len; + if ( clearpos < observed_end ) { + // Don't wait for state change, wait till we have just enough room: + // while( g_buffer_head < clearpos ) + spsc_rring_debug_log("! reserve_buffer: wait for head to advance. Head/tail/end: %d %d %d\n", + observed_head, our_tail, observed_end); + } else { + // Otherwise we have to wait for state change. In natural + // state the shrunk buffer is restored. + // while( g_buffer_head < our_tail ) + spsc_rring_debug_log("! reserve_buffer: wait to exit torn state. Head/tail/end: %d %d %d\n", + observed_head, our_tail, observed_end); + } + wait(); + continue; } else // Natural state but need to switch. { - // In the natural state, we may be near the _end and need to - // shrink/wrap-early. BUT, we cannot wrap if head is squatting at - // the start -- that would make a full state appear empty. - while ( observed_head == 0 ) { - spsc_rring_debug_log("! reserve_buffer: stalling EARLY WRAP (tail %d), until head moves off the start mark\n", - our_tail); - wait(); - observed_head = g_buffer_head; - } - - spsc_rring_debug_log("! reserve_buffer: committing an EARLY WRAP, shrinking end from %d to %d\n", - observed_end, our_tail); - // We're in "natural" not "torn" state until *we* change it. - g_buffer_end = our_tail; // The state gives us "the lock" on this var. - our_tail = 0; - g_buffer_tail = 0; // State change! Torn state. - continue; + // In the natural state, we may be near the _end and need to + // shrink/wrap-early. BUT, we cannot wrap if head is squatting at + // the start -- that would make a full state appear empty. + while ( observed_head == 0 ) { + spsc_rring_debug_log("! reserve_buffer: stalling EARLY WRAP (tail %d), until head moves off the start mark\n", + our_tail); + wait(); + observed_head = g_buffer_head; + } + + spsc_rring_debug_log("! reserve_buffer: committing an EARLY WRAP, shrinking end from %d to %d\n", + observed_end, our_tail); + // We're in "natural" not "torn" state until *we* change it. + g_buffer_end = our_tail; // The state gives us "the lock" on this var. + our_tail = 0; + g_buffer_tail = 0; // State change! Torn state. + continue; } } } - -/* -// Finish the data transfer (corresponding to the previous reserve_buffer) but -// do NOT actually release the bytes to the consumer yet. That waits until -// someone calls "release_buffer" instead of this procedure. -// -// Adds "len" bytes to the tail of the buffer. This number must be -// less than or equal to the amount reserved. -static inline void finished_reserve_buffer(int len) { - if (len > g_buffer_last_reserved) { - fprintf(stderr, "ERROR: cannot finish/release %d bytes, only reserved %d\n", len, g_buffer_last_reserved); - abort(); - } - // Ammendment: - g_buffer_total_reserved -= g_buffer_last_reserved; - g_buffer_total_reserved += len; - g_buffer_last_reserved = -1; - // We don't write g_buffer_tail, because we don't want the consumer to have it yet: -} -*/ - -// (Producer) Add "len" bytes to the tail and release the buffer. -// This number must be less than or equal to the amount reserved. -// -// ASSUMPTION: only call release to COMPLETE a message: -void release_buffer(int len) { - // finished_reserve_buffer(len); - // g_buffer_tail += g_buffer_total_reserved; // Publish it! - // g_buffer_total_reserved = 0; - +void release_buffer(int len) +{ spsc_rring_debug_log(" => release_buffer of %d bytes, new tail %d\n", len, g_buffer_tail + len); if (len > g_buffer_last_reserved) { - fprintf(stderr, "ERROR: cannot finish/release %d bytes, only reserved %d\n", len, g_buffer_last_reserved); + fprintf(stderr, "ERROR: cannot finish/release %d bytes, only reserved %d\n", + len, g_buffer_last_reserved); abort(); } g_buffer_tail += len; diff --git a/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj b/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj index b38dda87..58385b5f 100644 --- a/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj +++ b/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj @@ -1,7 +1,7 @@  Exe - netcoreapp2.0;net46 + net461;netcoreapp3.1 true x64 win7-x64 @@ -9,6 +9,9 @@ AmbrosiaCS + true + false + ../../../Ambrosia/Ambrosia.snk NETFRAMEWORK @@ -17,18 +20,30 @@ NETCORE - - + + + + + + + + + + 4.3.0 + - + 4.5.0 + + 4.5.0 + @@ -101,4 +116,10 @@ Resources.Designer.cs + + + PreserveNewest + AmbrosiaCS.csproj + + diff --git a/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.sln b/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.sln index 710f5c8d..ac956594 100644 --- a/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.sln +++ b/Clients/CSharp/AmbrosiaCS/AmbrosiaCS.sln @@ -1,47 +1,112 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.27703.2000 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.29920.165 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaCS", "AmbrosiaCS.csproj", "{EC500C57-A702-4A67-9A34-E7DF27FA4C01}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Ambrosia", "..\..\..\Ambrosia\Ambrosia\Ambrosia.csproj", "{B83690A7-C017-4FEB-B61E-17E4FC1AD5D5}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLibCS", "..\AmbrosiaLibCS\AmbrosiaLibCS.csproj", "{F1931B29-C2C6-4E02-A359-939D94ADCF33}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "adv-file-ops", "..\..\..\Ambrosia\adv-file-ops\adv-file-ops.vcxproj", "{5852AC33-6B01-44F5-BAF3-2AAF796E8449}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLib", "..\..\..\AmbrosiaLib\Ambrosia\AmbrosiaLib.csproj", "{50740F6E-998B-4C05-B951-77678839323A}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLibCS", "..\AmbrosiaLibCS\AmbrosiaLibCS.csproj", "{F1931B29-C2C6-4E02-A359-939D94ADCF33}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SharedAmbrosiaTools", "..\..\..\SharedAmbrosiaTools\SharedAmbrosiaTools.csproj", "{0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ImmortalCoordinator", "..\..\..\ImmortalCoordinator\ImmortalCoordinator.csproj", "{19E0096D-50D6-431F-8749-B6F1B2E66E16}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AzureBlobsLogPicker", "..\..\..\AzureBlobsLogPicker\AzureBlobsLogPicker.csproj", "{C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GenericLogPicker", "..\..\..\GenericLogPicker\GenericLogPicker.csproj", "{7C351DDF-4370-45DC-B108-C8F0DE1833D6}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|ARM64 = Debug|ARM64 Debug|x64 = Debug|x64 + Release|Any CPU = Release|Any CPU + Release|ARM64 = Release|ARM64 Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution + {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Debug|Any CPU.ActiveCfg = Debug|x64 + {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Debug|ARM64.ActiveCfg = Debug|x64 {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Debug|x64.ActiveCfg = Debug|x64 {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Debug|x64.Build.0 = Debug|x64 + {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Release|Any CPU.ActiveCfg = Release|x64 + {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Release|ARM64.ActiveCfg = Release|x64 {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Release|x64.ActiveCfg = Release|x64 {EC500C57-A702-4A67-9A34-E7DF27FA4C01}.Release|x64.Build.0 = Release|x64 - {B83690A7-C017-4FEB-B61E-17E4FC1AD5D5}.Debug|x64.ActiveCfg = Debug|x64 - {B83690A7-C017-4FEB-B61E-17E4FC1AD5D5}.Debug|x64.Build.0 = Debug|x64 - {B83690A7-C017-4FEB-B61E-17E4FC1AD5D5}.Release|x64.ActiveCfg = Release|x64 - {B83690A7-C017-4FEB-B61E-17E4FC1AD5D5}.Release|x64.Build.0 = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.ActiveCfg = Debug|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.Build.0 = Debug|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.ActiveCfg = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.Build.0 = Release|x64 - {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|x64.ActiveCfg = Debug|x64 - {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|x64.Build.0 = Debug|x64 - {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|x64.ActiveCfg = Release|x64 - {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|x64.Build.0 = Release|x64 + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|ARM64.Build.0 = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|x64.ActiveCfg = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Debug|x64.Build.0 = Debug|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|Any CPU.Build.0 = Release|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|ARM64.ActiveCfg = Release|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|ARM64.Build.0 = Release|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|x64.ActiveCfg = Release|Any CPU + {F1931B29-C2C6-4E02-A359-939D94ADCF33}.Release|x64.Build.0 = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|Any CPU.Build.0 = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|ARM64.Build.0 = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|x64.ActiveCfg = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Debug|x64.Build.0 = Debug|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|Any CPU.ActiveCfg = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|Any CPU.Build.0 = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|ARM64.ActiveCfg = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|ARM64.Build.0 = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|x64.ActiveCfg = Release|Any CPU + {50740F6E-998B-4C05-B951-77678839323A}.Release|x64.Build.0 = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|ARM64.Build.0 = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|x64.ActiveCfg = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Debug|x64.Build.0 = Debug|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|Any CPU.Build.0 = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|ARM64.ActiveCfg = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|ARM64.Build.0 = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|x64.ActiveCfg = Release|Any CPU + {0B2EB3C8-C898-44A0-B8D4-9DD840638BA5}.Release|x64.Build.0 = Release|Any CPU + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|Any CPU.Build.0 = Debug|Any CPU + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|ARM64.ActiveCfg = Debug|ARM64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|ARM64.Build.0 = Debug|ARM64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|x64.ActiveCfg = Debug|x64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Debug|x64.Build.0 = Debug|x64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|Any CPU.ActiveCfg = Release|Any CPU + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|Any CPU.Build.0 = Release|Any CPU + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|ARM64.ActiveCfg = Release|ARM64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|ARM64.Build.0 = Release|ARM64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|x64.ActiveCfg = Release|x64 + {19E0096D-50D6-431F-8749-B6F1B2E66E16}.Release|x64.Build.0 = Release|x64 + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|ARM64.Build.0 = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|x64.ActiveCfg = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Debug|x64.Build.0 = Debug|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|Any CPU.Build.0 = Release|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|ARM64.ActiveCfg = Release|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|ARM64.Build.0 = Release|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|x64.ActiveCfg = Release|Any CPU + {C2BCD2DE-DE66-4FCD-A2B8-A9E42881D653}.Release|x64.Build.0 = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|Any CPU.Build.0 = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|ARM64.Build.0 = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|x64.ActiveCfg = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Debug|x64.Build.0 = Debug|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|Any CPU.ActiveCfg = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|Any CPU.Build.0 = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|ARM64.ActiveCfg = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|ARM64.Build.0 = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|x64.ActiveCfg = Release|Any CPU + {7C351DDF-4370-45DC-B108-C8F0DE1833D6}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/Clients/CSharp/AmbrosiaCS/CodeGeneration.md b/Clients/CSharp/AmbrosiaCS/CodeGeneration.md new file mode 100644 index 00000000..e922459e --- /dev/null +++ b/Clients/CSharp/AmbrosiaCS/CodeGeneration.md @@ -0,0 +1,102 @@ +# Code Generation + +AMBROSIA's C# client makes use of T4 template-based code generation to create an AMBROSIA wrapper for each method in the user-defined APIs. + +## Running Code Generation + +Once your C# APIs are defined, you can run code generation in the following way: + +```shell +Usage: AmbrosiaCS.exe CodeGen [OPTIONS] +Options: + -a, --assembly=VALUE An input assembly file location. [REQUIRED] + -o, --outputAssemblyName=VALUE + An output assembly name. [REQUIRED] + -f, --targetFramework=VALUE + The output assembly target framework. [> 1 + REQUIRED] + -p, --project=VALUE An input project file location for reference + resolution. + -h, --help show this message and exit +``` + +The command above would output a C# project named .csproj under + +```shell +GeneratedSourceFiles//latest/ +``` + +Any historical generated files under that directory would be moved to a directory under the parent path, creating a history directory that can be sorted by creation date. + +Now you can add a project reference to each of your API implementation projects: + +```xml + + + +``` + +Since the path to the most recent generated project is static, you would not need to change your reference each time you run code generation. + +## Code Generation Deep Dive + +### Generating the .csproj file + +AmbrosiaCS.exe parses both AmbrosiaCS.csproj and any other .csproj file given to it as a command-line parameter (-p=), if any are given. + +The code then extracts any project references or package references from these csproj files and copies them over to the new generated .csproj. + +All references would be copied over to the new .csproj file. However, if two different versions of the same package exist - a warning message would be outputted to console. + +The generated project's target frameworks are copied over from AmbrosiaCS.csproj. + +### Generating the code + +AmbrosiaCS.exe dynamically loads all assemblies given to it as command-line parameters (-a=). + +For each interface defined, 3 different .cs files would be generated using T4 template-based code generation. + +#### ProxyInterfacesGenerator.tt + +*Generates ProxyInterfaces_*.cs + +The ProxyInterfacesGenerator template creates the AMBROSIA interface that wraps the user-defined interface. + +For each method **Foo**(...) in the user-defined interface, this template would generate an interface containing the following method declarations: + +| Generated method | Purpose | Condition | +| ------------------------------------------------------------ | -------------------------------- | -------------------------------------------------------- | +| void **Foo**Fork(...) | Fire-and-forget calls to **Foo** | Generated only if **Foo**'s return type is void | +| Task\ **Foo**Async(...) (or Task **Foo**Async(...) if T is void) | Awaitable calls to **Foo** | Generated only if **Foo** is not defined as an *Impulse* | + +*Note: ProxyInterfacesGenerator template also generates a copy of the original user-defined interface. This is for convenience purposes.* + +#### ProxyGenerator.tt + +*Generates Proxy_.cs* + +The ProxyGenerator template generates an implementation for the AMBROSIA interfaces generated by ProxyInterfaceGenerator.tt. + +##### FooFork(...) Implementation + +The implementation of **Foo**Fork simply generates the RPC call that will result in the dispatching of the method **Foo** on the receiving Immortal, sends it and returns. + +##### FooAsync(...) Implementation + +The implementation of **Foo**Async generates the RPC call that will result in the dispatching of the method **Foo** on the receiving Immortal, sends it and awaits on a TaskCompletionSource that will signal the call completion. If **Foo**'s return value is different than null this TaskCompletionSource object would also return a result, which would in turn be returned by **Foo**Async. + +*Note: The implementation of **Foo**Async also contains portions related to checkpointing. This logic is explained in further detail in the Immortal documentation.* + +#### DispatcherGenerator.tt + +*Generates Dispatcher_.cs* + +The DispatcherGenerator template generates an implementation for the abstract class Immortal.Dispatcher for each user-generated interface. The overriden method, DispatchToMethod, handles the dispatching of incoming RPC requests to the appropriate methods in the user code. In case of an async method with a return type, it also handles sending the result back to the RPC sender. + +In addition to the 3 aforementioned templates which generate a .cs file for each interface in the inputted assemblies, there is one more template which is generated only once: + +#### ImmortalSerializerGenerator.tt + +*Generates ImmortalSerializer.cs* + +The ImmortalSerializerGenerator template gets the generated types of proxies and proxy interfaces (generated by ProxyGenerator.tt and ProxyInterfacesGenerator.tt) and produces a serializer for the Immortal class which has these new types defined as KnownTypes. \ No newline at end of file diff --git a/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.cs b/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.cs index 0048dd49..a561f162 100644 --- a/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.cs +++ b/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.cs @@ -1,7 +1,7 @@ // ------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version: 15.0.0.0 +// Runtime Version: 16.0.0.0 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. @@ -18,8 +18,8 @@ namespace Ambrosia /// Class to produce the template output /// - #line 1 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + #line 1 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal partial class DispatcherGenerator : DispatcherGeneratorBase { #line hidden @@ -29,10 +29,9 @@ internal partial class DispatcherGenerator : DispatcherGeneratorBase public virtual string TransformText() { this.Write("\r\nusing System;\r\nusing System.Collections.Generic;\r\nusing System.Threading.Tasks;" + - "\r\nusing Ambrosia;\r\nusing static Ambrosia.StreamCommunicator;\r\nusing LocalAmbrosi" + - "aRuntime;\r\n\r\nnamespace "); + "\r\nusing Ambrosia;\r\nusing static Ambrosia.StreamCommunicator;\r\n\r\nnamespace "); - #line 14 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 13 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Namespace)); #line default @@ -40,7 +39,7 @@ public virtual string TransformText() this.Write("\r\n{\r\n /// \r\n /// This class runs in the process of the object that" + " implements the interface "); - #line 17 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 16 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default @@ -49,21 +48,21 @@ public virtual string TransformText() "tiated in ImmortalFactory.CreateServer when a bootstrapper registers a container" + "\r\n /// that supports the interface "); - #line 20 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 19 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default #line hidden this.Write(".\r\n /// \r\n class "); - #line 22 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 21 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(className)); #line default #line hidden this.Write(" : Immortal.Dispatcher\r\n {\r\n private readonly "); - #line 24 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 23 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(interfaceType.Name)); #line default @@ -71,7 +70,7 @@ public virtual string TransformText() this.Write(" instance;\r\n\t\tprivate readonly ExceptionSerializer exceptionSerializer = new Exce" + "ptionSerializer(new List());\r\n\r\n public "); - #line 27 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 26 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(className)); #line default @@ -81,14 +80,14 @@ public virtual string TransformText() { this.instance = ("); - #line 30 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 29 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(interfaceType.Name)); #line default #line hidden this.Write(") z;\r\n }\r\n\r\n public "); - #line 33 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 32 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(className)); #line default @@ -98,7 +97,7 @@ public virtual string TransformText() { this.instance = ("); - #line 36 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 35 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(interfaceType.Name)); #line default @@ -112,11 +111,11 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType { case 0: // Entry point - this.EntryPoint(); + await this.EntryPoint(); break; "); - #line 47 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 46 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" foreach (var M in this.methods) { var idNumber = M.idNumber; @@ -128,21 +127,21 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType #line hidden this.Write(" case "); - #line 53 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 52 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(idNumber)); #line default #line hidden this.Write(":\r\n // "); - #line 54 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 53 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("Async\r\n {\r\n // deserialize arguments\r\n"); - #line 57 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 56 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" foreach (var p in M.Parameters) { var parIndex = p.Position; @@ -153,56 +152,56 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType #line hidden this.Write("\r\n // arg"); - #line 63 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 62 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write(": "); - #line 63 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 62 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(p.TypeName)); #line default #line hidden this.Write("\r\n "); - #line 64 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 63 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.DeserializeValue(p.ParameterType, parName))); #line default #line hidden this.Write("\r\n"); - #line 65 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 64 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default #line hidden this.Write(" // call the method\r\n"); - #line 67 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 66 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" if (!voidMethod) { #line default #line hidden this.Write("\t\t\t\t\t\tvar p_"); - #line 68 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 67 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write(" = default("); - #line 68 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 67 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.ReturnType.Name)); #line default #line hidden this.Write(");\r\n"); - #line 69 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 68 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default @@ -210,49 +209,49 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType this.Write("\t\t\t\t\t\tbyte[] argExBytes = null;\r\n\t\t\t\t\t\tint argExSize = 0;\r\n\t\t\t\t\t\tException currEx" + " = null;\r\n\t\t\t\t\t\tint arg"); - #line 73 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 72 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write("Size = 0;\r\n\t\t\t\t\t\tbyte[] arg"); - #line 74 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 73 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write("Bytes = null;\r\n\r\n\t\t\t\t\t\ttry \r\n\t\t\t\t\t\t{\r\n"); - #line 78 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 77 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" if (!voidMethod) { #line default #line hidden this.Write("\t\t\t\t\t\t\tp_"); - #line 79 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 78 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write(" =\r\n"); - #line 80 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 79 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default #line hidden this.Write("\t\t\t\t\t\t\t\tawait this.instance."); - #line 81 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 80 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("Async("); - #line 81 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 80 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parameterString)); #line default @@ -261,7 +260,7 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType "\n if (!rpcType.IsFireAndForget())\r\n " + " {\r\n"); - #line 90 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 89 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" if (!voidMethod) { #line default @@ -269,21 +268,21 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType this.Write(" // serialize result and send it back\r\n\t\t\t\t\t\tif (currE" + "x != null)\r\n\t\t\t\t\t\t{\r\n\t\t\t"); - #line 94 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 93 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.ComputeExceptionSize())); #line default #line hidden this.Write("\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse \r\n\t\t\t\t\t\t{\r\n\t\t\t"); - #line 98 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 97 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.ComputeArgumentSize(M.ReturnType, M.Parameters.Count()))); #line default #line hidden this.Write("\r\n\t\t\t\t\t\t}\r\n"); - #line 100 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 99 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } else { #line default @@ -291,14 +290,14 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType this.Write(" // serialize result and send it back (there isn\'t one" + ")\r\n arg"); - #line 102 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 101 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write("Size = 0;\r\n"); - #line 103 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 102 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default @@ -306,42 +305,42 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType this.Write(" var wp = this.StartRPC_ReturnValue(senderOfRPC, seque" + "nceNumber, currEx == null ? arg"); - #line 104 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 103 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write("Size : argExSize, currEx == null ? "); - #line 104 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 103 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(!voidMethod ? "ReturnValueTypes.ReturnValue" : "ReturnValueTypes.EmptyReturnValue")); #line default #line hidden this.Write(" : ReturnValueTypes.Exception);\r\n\r\n"); - #line 106 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 105 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" if (!voidMethod) { #line default #line hidden this.Write("\t\r\n\t\t\t\t\t\tif (currEx != null)\r\n\t\t\t\t\t\t{\r\n\t\t\t"); - #line 109 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 108 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.SerializeException())); #line default #line hidden this.Write("\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\telse \r\n\t\t\t\t\t\t{\r\n "); - #line 113 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 112 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.SerializeValue(M.Parameters.Count()))); #line default #line hidden this.Write("\r\n\t\t\t\t\t\t}\r\n"); - #line 115 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 114 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default @@ -349,7 +348,7 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType this.Write(" this.ReleaseBufferAndSend();\r\n " + " }\r\n }\r\n break;\r\n"); - #line 120 "C:\Git\Franklin\AmbrosiaCS\DispatcherGenerator.tt" + #line 119 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\DispatcherGenerator.tt" } #line default @@ -365,7 +364,7 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType /// /// Base class for this transformation /// - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal class DispatcherGeneratorBase { #region Fields diff --git a/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.tt b/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.tt index eee612b8..a0069b89 100644 --- a/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.tt +++ b/Clients/CSharp/AmbrosiaCS/DispatcherGenerator.tt @@ -9,7 +9,6 @@ using System.Collections.Generic; using System.Threading.Tasks; using Ambrosia; using static Ambrosia.StreamCommunicator; -using LocalAmbrosiaRuntime; namespace <#= this.interfaceType.Namespace #> { @@ -42,7 +41,7 @@ namespace <#= this.interfaceType.Namespace #> { case 0: // Entry point - this.EntryPoint(); + await this.EntryPoint(); break; <# foreach (var M in this.methods) { diff --git a/Clients/CSharp/AmbrosiaCS/Program.cs b/Clients/CSharp/AmbrosiaCS/Program.cs index 7b4afda9..9c14f8fb 100644 --- a/Clients/CSharp/AmbrosiaCS/Program.cs +++ b/Clients/CSharp/AmbrosiaCS/Program.cs @@ -1,13 +1,11 @@ -using System; +using Microsoft.CodeAnalysis.CSharp; +using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Reflection; using System.Text; using System.Xml.Linq; -using AmbrosiaCS.Properties; -using Mono.Options; -using Microsoft.CodeAnalysis.CSharp; namespace Ambrosia { @@ -15,9 +13,9 @@ class Program { private static AmbrosiaCSRuntimeModes _runtimeMode; private static List _assemblyNames; + private static List _projectFiles; private static string _outputAssemblyName; - private static string _targetFramework; - private static string _binPath; + private static List _targetFrameworks = new List(); static void Main(string[] args) { @@ -35,7 +33,7 @@ static void Main(string[] args) private static void RunCodeGen() { - var directoryName = @"latest\"; + var directoryName = @"latest"; var generatedDirectory = "GeneratedSourceFiles"; if (!Directory.Exists(generatedDirectory)) @@ -130,119 +128,123 @@ private static void RunCodeGen() var immortalSerializerSource = new ImmortalSerializerGenerator(generatedProxyNames, generatedProxyNamespaces).TransformText(); sourceFiles.Add(new SourceFile { FileName = $"ImmortalSerializer.cs", SourceCode = immortalSerializerSource, }); - var referenceLocations = new Dictionary(); - var assemblyFileNames = _assemblyNames.Select(Path.GetFileName).ToList(); - foreach (var fileName in Directory.GetFiles(_binPath, "*.dll", SearchOption.TopDirectoryOnly) - .Union(Directory.GetFiles(_binPath, "*.exe", SearchOption.TopDirectoryOnly))) + var conditionToPackageInfo = new Dictionary>>(); + var conditionToProjectReference = new Dictionary>(); + + var execAssembly = Assembly.GetExecutingAssembly(); + var projFile = Path.Combine(Path.GetDirectoryName(execAssembly.Location), $@"{execAssembly.GetName().Name}.csproj"); + _projectFiles.Add(projFile); + + var defaultConditionString = string.Empty; + foreach (var projectFile in _projectFiles) { - var assemblyPath = Path.GetFullPath(fileName); - if (assemblyFileNames.Contains(Path.GetFileName(assemblyPath))) - { - continue; - } + var doc = XDocument.Load(projectFile); - Assembly assembly; - try + foreach (var itemGroup in doc.Descendants("ItemGroup")) { - assembly = Assembly.LoadFile(assemblyPath); - } - catch (Exception) - { - continue; - } - var assemblyName = assembly.GetName().Name; - var assemblyLocation = assembly.Location; + var itemGroupCondition = itemGroup.Attributes().FirstOrDefault(a => a.Name == "Condition"); + var condition = itemGroupCondition == null ? defaultConditionString : itemGroupCondition.Value; - var assemblyLocationUri = new Uri(assemblyLocation); - var assemblyLocationRelativePath = new Uri(Path.GetFullPath(directoryPath)).MakeRelativeUri(assemblyLocationUri).ToString(); - referenceLocations.Add(assemblyName, assemblyLocationRelativePath); - } + foreach (var packageReference in itemGroup.Descendants("PackageReference")) + { + var elements = packageReference.Elements(); + var attributes = packageReference.Attributes().ToList(); + var packageIncludeAttribute = attributes.FirstOrDefault(a => a.Name == "Include"); + var packageUpdateAttribute = attributes.FirstOrDefault(a => a.Name == "Update"); + if (packageIncludeAttribute == null && packageUpdateAttribute == null) continue; - var conditionToPackageInfo = new Dictionary>>(); - var doc = XDocument.Parse(Resources.AmbrosiaCS); + var packageNameAttribute = packageIncludeAttribute ?? packageUpdateAttribute; + var packageName = packageNameAttribute.Value; - foreach (var itemGroup in doc.Descendants("ItemGroup")) - { - var itemGroupCondition = itemGroup.Attributes().FirstOrDefault(a => a.Name == "Condition"); - var condition = itemGroupCondition == null ? string.Empty : itemGroupCondition.Value; + var versionAttribute = attributes.FirstOrDefault(a => a.Name == "Version"); - foreach (var packageReference in itemGroup.Descendants("PackageReference")) - { - var elements = packageReference.Elements(); - var attributes = packageReference.Attributes().ToList(); - var packageIncludeAttribute = attributes.FirstOrDefault(a => a.Name == "Include"); - var packageUpdateAttribute = attributes.FirstOrDefault(a => a.Name == "Update"); - if (packageIncludeAttribute == null && packageUpdateAttribute == null) continue; + string packageVersion; + if (versionAttribute == null) + { + var packageVersionElement = elements.FirstOrDefault(e => e.Name == "Version"); + if (packageVersionElement == null) continue; + packageVersion = packageVersionElement.Value; + } + else + { + packageVersion = versionAttribute.Value; + } - var packageNameAttribute = packageIncludeAttribute ?? packageUpdateAttribute; - var packageName = packageNameAttribute.Value; - var packageMode = packageNameAttribute.Name.ToString(); + if (!conditionToPackageInfo.ContainsKey(condition)) + { + conditionToPackageInfo.Add(condition, new Dictionary>()); + } - var versionAttribute = attributes.FirstOrDefault(a => a.Name == "Version"); + var packageReferenceInfo = new PackageReferenceInfo(packageName, packageVersion, packageReference.ToString()); + if (!conditionToPackageInfo[condition].ContainsKey(packageName)) + { + conditionToPackageInfo[condition].Add(packageName, new HashSet()); + } - string packageVersion; - if (versionAttribute == null) - { - var packageVersionElement = elements.FirstOrDefault(e => e.Name == "Version"); - if (packageVersionElement == null) continue; - packageVersion = packageVersionElement.Value; - } - else - { - packageVersion = versionAttribute.Value; + conditionToPackageInfo[condition][packageName].Add(packageReferenceInfo); } - if (!conditionToPackageInfo.ContainsKey(condition)) + foreach (var projectReference in itemGroup.Descendants("ProjectReference")) { - conditionToPackageInfo.Add(condition, new List>()); + var attributes = projectReference.Attributes().ToList(); + var projectIncludeAttribute = attributes.FirstOrDefault(a => a.Name == "Include"); + var projectPath = projectIncludeAttribute.Value; + var formerBasePath = new Uri(new FileInfo(projectFile).Directory.FullName + Path.DirectorySeparatorChar); + var currentBasePath = new Uri(new DirectoryInfo(directoryPath).FullName + Path.DirectorySeparatorChar); + var projectPathUri = new Uri(formerBasePath, projectPath); + var newRelativePath = currentBasePath.MakeRelativeUri(projectPathUri); + + if (!conditionToProjectReference.ContainsKey(condition)) + { + conditionToProjectReference.Add(condition, new HashSet()); + } + + conditionToProjectReference[condition].Add(projectReference.ToString().Replace(projectPath.ToString(), newRelativePath.ToString())); } - conditionToPackageInfo[condition].Add(new Tuple(packageMode, packageName, packageVersion)); } } - var conditionalPackageReferences = new List(); - foreach (var cpi in conditionToPackageInfo) + var defaultConditionInfo = conditionToPackageInfo.ContainsKey(defaultConditionString) ? conditionToPackageInfo[defaultConditionString] : null; + + foreach (var cp in conditionToPackageInfo) { - var packageReferences = new List(); - foreach (var pi in cpi.Value) + foreach (var nameToInfo in cp.Value) { - packageReferences.Add( -$@" "); - } + var packageInfos = new HashSet(nameToInfo.Value.Union( + defaultConditionInfo == null || !defaultConditionInfo.ContainsKey(nameToInfo.Key) + ? new List() : defaultConditionInfo[nameToInfo.Key].ToList())); - if (cpi.Key == String.Empty || cpi.Key == _targetFramework) - { - conditionalPackageReferences.Add( -$@" -{string.Join("\n", packageReferences)} - -"); - } + if (packageInfos.Count > 1) + { + Console.WriteLine($"WARNING: Detected multiple versions of package {nameToInfo.Key} : {string.Join(",", packageInfos.Select(pi => pi.PackageVersion))}"); + } + } } - var references = new List(); - foreach (var rl in referenceLocations) + var conditionalPackageReferences = new List(); + foreach (var cpi in conditionToPackageInfo) { - references.Add( - $@" - {rl.Value} - "); + conditionalPackageReferences.Add($"{string.Join("\n", cpi.Value.SelectMany(v => v.Value).Select(pri => pri.ReferenceString))}"); } - var referencesItemGroup = - $@" -{string.Join("\n", references)} - -"; + var conditionalProjectReferences = new List(); + foreach (var cpi in conditionToProjectReference) + { + conditionalProjectReferences.Add($"{string.Join("\n", cpi.Value)}"); + } - var projectFileSource = + var projectFileSource = $@" - - {_targetFramework} - -{referencesItemGroup}{string.Join(string.Empty, conditionalPackageReferences)}"; - var projectSourceFile = - new SourceFile() { FileName = $"{_outputAssemblyName}.csproj", SourceCode = projectFileSource }; + + {string.Join(";", _targetFrameworks)} + + {string.Join(string.Empty, conditionalPackageReferences)} + {string.Join(string.Empty, conditionalProjectReferences)} + "; + + var projectFileXml = XDocument.Parse(projectFileSource); + + var projectSourceFile = new SourceFile { FileName = $"{_outputAssemblyName}.csproj", SourceCode = projectFileXml.ToString() }; sourceFiles.Add(projectSourceFile); var trees = sourceFiles @@ -274,11 +276,12 @@ private static OptionSet ParseOptions(string[] args, out bool shouldShowHelp) { var showHelp = false; var assemblyNames = new List(); + var projectFiles = new List(); var codeGenOptions = new OptionSet { - { "a|assembly=", "An input assembly name. [REQUIRED]", assemblyName => assemblyNames.Add(Path.GetFullPath(assemblyName)) }, + { "a|assembly=", "An input assembly file location. [REQUIRED]", a => assemblyNames.Add(Path.GetFullPath(a)) }, { "o|outputAssemblyName=", "An output assembly name. [REQUIRED]", outputAssemblyName => _outputAssemblyName = outputAssemblyName }, - { "f|targetFramework=", "The output assembly target framework. [REQUIRED]", f => _targetFramework = f }, - { "b|binPath=", "The bin path containing the output assembly dependencies.", b => _binPath = b }, + { "f|targetFramework=", "The output assembly target framework. [> 1 REQUIRED]", f => _targetFrameworks.Add(f) }, + { "p|project=", "An input project file location for reference resolution. ", p => projectFiles.Add(Path.GetFullPath(p)) }, { "h|help", "show this message and exit", h => showHelp = h != null }, }; @@ -309,6 +312,7 @@ private static OptionSet ParseOptions(string[] args, out bool shouldShowHelp) shouldShowHelp = showHelp; _assemblyNames = assemblyNames; + _projectFiles = projectFiles; return codeGenOptions; } @@ -318,14 +322,15 @@ private static void ValidateOptions(OptionSet options, bool shouldShowHelp) var errorMessage = string.Empty; if (_assemblyNames.Count == 0) errorMessage += "At least one input assembly is required."; if (_outputAssemblyName == null) errorMessage += "Output assembly name is required."; - if (_targetFramework == null) errorMessage += "Target framework is required."; + if (_targetFrameworks.Count == 0) errorMessage += "At least one target framework is required."; var assemblyFilesNotFound = _assemblyNames.Where(an => !File.Exists(an)).ToList(); if (assemblyFilesNotFound.Count > 0) errorMessage += $"Unable to find the following assembly files:\n{string.Join("\n", assemblyFilesNotFound)}"; - if (!Directory.Exists(_binPath)) - errorMessage += $"Unable to find the dependencies bin path: {_binPath}"; + var projectFilesNotFound = _projectFiles.Where(pf => !File.Exists(pf)).ToList(); + if (_projectFiles.Count > 0 && projectFilesNotFound.Count > 0) + errorMessage += $"Unable to find the following project files:\n{string.Join("\n", projectFilesNotFound)}"; if (errorMessage != string.Empty) { @@ -357,6 +362,42 @@ private static void ShowHelp(Dictionary modeT } } + public class PackageReferenceInfo : IEquatable + { + public string PackageName { get; } + + public string PackageVersion { get; } + + public string ReferenceString { get; } + + public PackageReferenceInfo(string packageName, string packageVersion, string referenceString) + { + this.PackageName = packageName; + this.PackageVersion = packageVersion; + this.ReferenceString = referenceString; + } + + public bool Equals(PackageReferenceInfo other) + { + if (ReferenceEquals(null, other)) return false; + if (ReferenceEquals(this, other)) return true; + return string.Equals(PackageName, other.PackageName) && string.Equals(PackageVersion, other.PackageVersion); + } + + public override bool Equals(object obj) + { + if (ReferenceEquals(null, obj)) return false; + if (ReferenceEquals(this, obj)) return true; + if (obj.GetType() != this.GetType()) return false; + return Equals((PackageReferenceInfo) obj); + } + + public override int GetHashCode() + { + return (PackageName + PackageVersion).GetHashCode(); + } + } + internal class SourceFile { public string FileName; diff --git a/Clients/CSharp/AmbrosiaCS/Properties/Resources.Designer.cs b/Clients/CSharp/AmbrosiaCS/Properties/Resources.Designer.cs deleted file mode 100644 index 71435ee8..00000000 --- a/Clients/CSharp/AmbrosiaCS/Properties/Resources.Designer.cs +++ /dev/null @@ -1,84 +0,0 @@ -//------------------------------------------------------------------------------ -// -// This code was generated by a tool. -// Runtime Version:4.0.30319.42000 -// -// Changes to this file may cause incorrect behavior and will be lost if -// the code is regenerated. -// -//------------------------------------------------------------------------------ - -namespace AmbrosiaCS.Properties { - using System; - - - /// - /// A strongly-typed resource class, for looking up localized strings, etc. - /// - // This class was auto-generated by the StronglyTypedResourceBuilder - // class via a tool like ResGen or Visual Studio. - // To add or remove a member, edit your .ResX file then rerun ResGen - // with the /str option, or rebuild your VS project. - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("System.Resources.Tools.StronglyTypedResourceBuilder", "15.0.0.0")] - [global::System.Diagnostics.DebuggerNonUserCodeAttribute()] - [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()] - internal class Resources { - - private static global::System.Resources.ResourceManager resourceMan; - - private static global::System.Globalization.CultureInfo resourceCulture; - - [global::System.Diagnostics.CodeAnalysis.SuppressMessageAttribute("Microsoft.Performance", "CA1811:AvoidUncalledPrivateCode")] - internal Resources() { - } - - /// - /// Returns the cached ResourceManager instance used by this class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Resources.ResourceManager ResourceManager { - get { - if (object.ReferenceEquals(resourceMan, null)) { - global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("AmbrosiaCS.Properties.Resources", typeof(Resources).Assembly); - resourceMan = temp; - } - return resourceMan; - } - } - - /// - /// Overrides the current thread's CurrentUICulture property for all - /// resource lookups using this strongly typed resource class. - /// - [global::System.ComponentModel.EditorBrowsableAttribute(global::System.ComponentModel.EditorBrowsableState.Advanced)] - internal static global::System.Globalization.CultureInfo Culture { - get { - return resourceCulture; - } - set { - resourceCulture = value; - } - } - - /// - /// Looks up a localized string similar to <Project Sdk="Microsoft.NET.Sdk"> - /// <PropertyGroup> - /// <OutputType>Exe</OutputType> - /// <TargetFrameworks>netcoreapp2.0;net46</TargetFrameworks> - /// <TargetLatestRuntimePatch>true</TargetLatestRuntimePatch> - /// <Platforms>x64</Platforms> - /// <RuntimeIdentifiers>win7-x64</RuntimeIdentifiers> - /// <AllowUnsafeBlocks>true</AllowUnsafeBlocks> - /// <RootNamespace>AmbrosiaCS</RootNamespace> - /// <ApplicationIcon /> - /// <StartupObject /> - /// </PropertyGroup> - /// <PropertyGroup Condition="$([System.Text.Regul [rest of string was truncated]";. - /// - internal static string AmbrosiaCS { - get { - return ResourceManager.GetString("AmbrosiaCS", resourceCulture); - } - } - } -} diff --git a/Clients/CSharp/AmbrosiaCS/Properties/Resources.resx b/Clients/CSharp/AmbrosiaCS/Properties/Resources.resx deleted file mode 100644 index e4dccf9b..00000000 --- a/Clients/CSharp/AmbrosiaCS/Properties/Resources.resx +++ /dev/null @@ -1,124 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - text/microsoft-resx - - - 2.0 - - - System.Resources.ResXResourceReader, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - - System.Resources.ResXResourceWriter, System.Windows.Forms, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089 - - - - ..\AmbrosiaCS.csproj;System.String, mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089;utf-8 - - \ No newline at end of file diff --git a/Clients/CSharp/AmbrosiaCS/ProxyGenerator.cs b/Clients/CSharp/AmbrosiaCS/ProxyGenerator.cs index c37edf46..43244447 100644 --- a/Clients/CSharp/AmbrosiaCS/ProxyGenerator.cs +++ b/Clients/CSharp/AmbrosiaCS/ProxyGenerator.cs @@ -1,7 +1,7 @@ // ------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version: 15.0.0.0 +// Runtime Version: 16.0.0.0 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. @@ -18,8 +18,8 @@ namespace Ambrosia /// Class to produce the template output /// - #line 1 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + #line 1 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal partial class ProxyGenerator : ProxyGeneratorBase { #line hidden @@ -31,7 +31,7 @@ public virtual string TransformText() this.Write("\r\nusing System;\r\nusing System.Threading.Tasks;\r\nusing System.Threading.Tasks.Data" + "flow;\r\nusing Ambrosia;\r\nusing static Ambrosia.StreamCommunicator;\r\n\r\n"); - #line 13 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 13 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" var originalInterfaceName = this.interfaceType.Name; @@ -40,7 +40,7 @@ public virtual string TransformText() #line hidden this.Write("\r\nnamespace "); - #line 17 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 17 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Namespace)); #line default @@ -52,7 +52,7 @@ public virtual string TransformText() /// It runs within the client's process, so it is generated in the language that the client is using. /// It is returned from ImmortalFactory.CreateClient when a client requests a container that supports the interface "); - #line 22 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 22 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(generatedClientInterfaceName)); #line default @@ -60,21 +60,21 @@ public virtual string TransformText() this.Write(".\r\n /// \r\n [System.Runtime.Serialization.DataContract]\r\n publi" + "c class "); - #line 25 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 25 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(className)); #line default #line hidden this.Write(" : Immortal.InstanceProxy, "); - #line 25 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 25 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(generatedClientInterfaceName)); #line default #line hidden this.Write("\r\n {\r\n\r\n public "); - #line 28 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 28 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(className)); #line default @@ -82,7 +82,7 @@ public virtual string TransformText() this.Write("(string remoteAmbrosiaRuntime, bool attachNeeded)\r\n : base(remoteAmbro" + "siaRuntime, attachNeeded)\r\n {\r\n }\r\n\r\n"); - #line 33 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 33 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" foreach (var M in this.methods) { var idNumber = M.idNumber; @@ -94,182 +94,127 @@ public virtual string TransformText() - #line default - #line hidden - - #line 43 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - if (!isImpulseHandler) { - #line default #line hidden this.Write(" async "); - #line 44 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture("Task" + (voidMethod ? "" : "<" + returnTypeName + ">"))); - - #line default - #line hidden - this.Write("\r\n "); - - #line 45 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(generatedClientInterfaceName)); - - #line default - #line hidden - this.Write("."); - - #line 45 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); - - #line default - #line hidden - this.Write("Async("); - - #line 45 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); - - #line default - #line hidden - this.Write(")\r\n {\r\n\t\t\t"); - - #line 47 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture((voidMethod ? "" : "return"))); - - #line default - #line hidden - this.Write(" await "); - - #line 47 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); - - #line default - #line hidden - this.Write("Async("); - - #line 47 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(parameterString)); - - #line default - #line hidden - this.Write(");\r\n }\r\n\r\n async "); - - #line 50 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 43 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture("Task" + (voidMethod ? "" : "<" + returnTypeName + ">"))); #line default #line hidden this.Write("\r\n "); - #line 51 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 44 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("Async("); - #line 51 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 44 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); #line default #line hidden this.Write(")\r\n {\r\n SerializableTaskCompletionSource rpcTask;\r\n " + "// Make call, wait for reply\r\n // Compute size of serialized argument" + - "s\r\n var totalArgSize = 0;\r\n\r\n"); + "s\r\n var totalArgSize = 0;\r\n"); - #line 58 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - - foreach (var p in M.Parameters) { - var parIndex = p.Position; - - - #line default - #line hidden - - #line 62 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 50 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" if (!voidMethod) { #line default #line hidden this.Write("\t\t\tvar p_"); - #line 63 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 51 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Parameters.Count())); #line default #line hidden this.Write(" = default("); - #line 63 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 51 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.ReturnType.Name)); #line default #line hidden this.Write(");\r\n"); - #line 64 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 52 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } + #line default + #line hidden + + #line 53 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" + + foreach (var p in M.Parameters) { + var parIndex = p.Position; + + #line default #line hidden this.Write("\t\t\tint arg"); - #line 65 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 57 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Size = 0;\r\n\t\t\tbyte[] arg"); - #line 66 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 58 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Bytes = null;\r\n\r\n // Argument "); - #line 68 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 60 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("\r\n "); - #line 69 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 61 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.ComputeArgumentSize(p.ParameterType, p.Position))); #line default #line hidden this.Write("\r\n totalArgSize += arg"); - #line 70 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 62 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Size;\r\n"); - #line 71 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 63 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default #line hidden this.Write("\r\n var wp = this.StartRPC<"); - #line 73 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 65 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(voidMethod ? "object" : returnTypeName)); #line default #line hidden this.Write(">(methodIdentifier: "); - #line 73 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 65 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(idNumber)); #line default #line hidden this.Write(" /* method identifier for "); - #line 73 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 65 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default @@ -278,7 +223,7 @@ public virtual string TransformText() "var asyncContext = new AsyncContext { SequenceNumber = Immortal.CurrentSequenceN" + "umber };\r\n\r\n // Serialize arguments\r\n\r\n"); - #line 78 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 70 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" foreach (var p in M.Parameters) { var parIndex = p.Position; @@ -288,107 +233,111 @@ public virtual string TransformText() #line hidden this.Write("\r\n // Serialize arg"); - #line 83 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 75 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("\r\n "); - #line 84 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 76 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.SerializeValue(parIndex))); #line default #line hidden this.Write("\r\n"); - #line 85 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 77 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default #line hidden - this.Write(@" - ReleaseBufferAndSend(); - - var taskToWaitFor = Immortal.CallCache.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync(); - var currentResult = await taskToWaitFor; - - var isSaved = await Immortal.TrySaveContextContinuationAsync(currentResult); - - if (isSaved) - { - taskToWaitFor = Immortal.CallCache.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync(); - currentResult = await taskToWaitFor; - } - - "); - - #line 100 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(!voidMethod ? "var result =" : "")); - - #line default - #line hidden - this.Write(" await Immortal.TryTakeCheckpointContinuationAsync(currentResult);\r\n\r\n"); - - #line 102 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + this.Write(" int taskId;\r\n\t\t\tlock (Immortal.DispatchTaskIdQueueLock)\r\n " + + "{\r\n while (!Immortal.DispatchTaskIdQueue.Data.TryDequeue(out task" + + "Id)) { }\r\n }\r\n\r\n ReleaseBufferAndSend();\r\n\r\n\t\t\tImmortal.St" + + "artDispatchLoop();\r\n\r\n\t\t\tvar taskToWaitFor = Immortal.CallCache.Data[asyncContex" + + "t.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync();\r\n var cu" + + "rrentResult = await taskToWaitFor;\r\n\r\n\t\t\twhile (currentResult.AdditionalInfoType" + + " != ResultAdditionalInfoTypes.SetResult)\r\n {\r\n switch " + + "(currentResult.AdditionalInfoType)\r\n {\r\n case " + + "ResultAdditionalInfoTypes.SaveContext:\r\n await Immortal.S" + + "aveTaskContextAsync();\r\n taskToWaitFor = Immortal.CallCac" + + "he.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync();\r" + + "\n break;\r\n case ResultAdditionalInfoTy" + + "pes.TakeCheckpoint:\r\n var sequenceNumber = await Immortal" + + ".TakeTaskCheckpointAsync();\r\n Immortal.StartDispatchLoop(" + + ");\r\n taskToWaitFor = Immortal.GetTaskToWaitForWithAdditio" + + "nalInfoAsync(sequenceNumber);\r\n break;\r\n }" + + "\r\n\r\n currentResult = await taskToWaitFor;\r\n }\r\n\r\n " + + " lock (Immortal.DispatchTaskIdQueueLock)\r\n {\r\n I" + + "mmortal.DispatchTaskIdQueue.Data.Enqueue(taskId);\r\n }\t\r\n"); + + #line 113 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" if (voidMethod) { #line default #line hidden this.Write("\t\t\treturn;\r\n"); - #line 104 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 115 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } else { #line default #line hidden this.Write("\t\t\treturn ("); - #line 105 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 116 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(returnTypeName)); #line default #line hidden - this.Write(") result.Result;\r\n"); - - #line 106 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" - } - - #line default - #line hidden - this.Write(" }\r\n"); + this.Write(") currentResult.Result;\r\n"); - #line 108 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 117 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default #line hidden - this.Write("\r\n void "); + this.Write(" }\r\n\r\n void "); - #line 110 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 120 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(generatedClientInterfaceName)); #line default #line hidden this.Write("."); - #line 110 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 120 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("Fork("); - #line 110 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 120 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); #line default #line hidden - this.Write(")\r\n {\r\n SerializableTaskCompletionSource rpcTask;\r\n\r\n " + - " // Compute size of serialized arguments\r\n var totalArgSize = 0;\r\n\r\n" + - ""); + this.Write(")\r\n {\r\n"); + + #line 122 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" + if (isImpulseHandler) { + + #line default + #line hidden + this.Write("\t\t\tif (!Immortal.IsPrimary)\r\n\t\t\t{\r\n throw new Exception(\"Unable to" + + " send an Impulse RPC while not being primary.\");\r\n\t\t\t}\r\n\r\n"); + + #line 128 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" + } + + #line default + #line hidden + this.Write(" SerializableTaskCompletionSource rpcTask;\r\n\r\n // Compute s" + + "ize of serialized arguments\r\n var totalArgSize = 0;\r\n\r\n"); - #line 117 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 134 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" foreach (var p in M.Parameters) { var parIndex = p.Position; @@ -398,77 +347,77 @@ public virtual string TransformText() #line hidden this.Write(" // Argument "); - #line 121 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 138 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("\r\n\t\t\tint arg"); - #line 122 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 139 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Size = 0;\r\n\t\t\tbyte[] arg"); - #line 123 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 140 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Bytes = null;\r\n\r\n "); - #line 125 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 142 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.ComputeArgumentSize(p.ParameterType, p.Position))); #line default #line hidden this.Write("\r\n totalArgSize += arg"); - #line 126 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 143 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("Size;\r\n"); - #line 127 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 144 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default #line hidden this.Write("\r\n var wp = this.StartRPC<"); - #line 129 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 146 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(voidMethod ? "object" : returnTypeName)); #line default #line hidden this.Write(">("); - #line 129 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 146 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(idNumber)); #line default #line hidden this.Write(" /* method identifier for "); - #line 129 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 146 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write(" */, totalArgSize, out rpcTask, "); - #line 129 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 146 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(isImpulseHandler ? "RpcTypes.RpcType.Impulse" : "RpcTypes.RpcType.FireAndForget")); #line default #line hidden this.Write(");\r\n\r\n // Serialize arguments\r\n\r\n"); - #line 133 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 150 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" foreach (var p in M.Parameters) { var parIndex = p.Position; @@ -478,21 +427,21 @@ public virtual string TransformText() #line hidden this.Write("\r\n // Serialize arg"); - #line 138 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 155 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parIndex)); #line default #line hidden this.Write("\r\n "); - #line 139 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 156 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.SerializeValue(p.Position))); #line default #line hidden this.Write("\r\n"); - #line 140 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 157 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default @@ -500,28 +449,28 @@ public virtual string TransformText() this.Write("\r\n this.ReleaseBufferAndSend();\r\n return;\r\n }\r\n\r\n " + " private "); - #line 146 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 163 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(voidMethod ? "object" : returnTypeName)); #line default #line hidden this.Write("\r\n "); - #line 147 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 164 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("_ReturnValue(byte[] buffer, int cursor)\r\n {\r\n"); - #line 149 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 166 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" if (voidMethod) { #line default #line hidden this.Write(" // buffer will be an empty byte array since the method "); - #line 150 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 167 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default @@ -529,28 +478,28 @@ public virtual string TransformText() this.Write(" returns void\r\n // so nothing to read, just getting called is the sign" + "al to return to the client\r\n return this;\r\n"); - #line 153 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 170 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } else { #line default #line hidden this.Write(" // deserialize return value\r\n "); - #line 155 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 172 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(Utilities.DeserializeValue(M.ReturnType, "returnValue"))); #line default #line hidden this.Write("\r\n return returnValue;\r\n"); - #line 157 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 174 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } #line default #line hidden this.Write(" }\r\n"); - #line 159 "C:\Git\Franklin\AmbrosiaCS\ProxyGenerator.tt" + #line 176 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyGenerator.tt" } @@ -568,7 +517,7 @@ public virtual string TransformText() /// /// Base class for this transformation /// - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal class ProxyGeneratorBase { #region Fields diff --git a/Clients/CSharp/AmbrosiaCS/ProxyGenerator.tt b/Clients/CSharp/AmbrosiaCS/ProxyGenerator.tt index 8713a7cc..41d30606 100644 --- a/Clients/CSharp/AmbrosiaCS/ProxyGenerator.tt +++ b/Clients/CSharp/AmbrosiaCS/ProxyGenerator.tt @@ -40,13 +40,6 @@ namespace <#= this.interfaceType.Namespace #> var voidMethod = M.voidMethod; #> -<# if (!isImpulseHandler) { #> - async <#= "Task" + (voidMethod ? "" : "<" + returnTypeName + ">") #> - <#= generatedClientInterfaceName #>.<#= M.Name #>Async(<#= parameterDeclarationString #>) - { - <#= (voidMethod ? "" : "return") #> await <#= M.Name #>Async(<#= parameterString #>); - } - async <#= "Task" + (voidMethod ? "" : "<" + returnTypeName + ">") #> <#= M.Name #>Async(<#= parameterDeclarationString #>) { @@ -54,14 +47,13 @@ namespace <#= this.interfaceType.Namespace #> // Make call, wait for reply // Compute size of serialized arguments var totalArgSize = 0; - +<# if (!voidMethod) { #> + var p_<#= M.Parameters.Count() #> = default(<#= M.ReturnType.Name #>); +<# } #> <# foreach (var p in M.Parameters) { var parIndex = p.Position; #> -<# if (!voidMethod) { #> - var p_<#= M.Parameters.Count() #> = default(<#= M.ReturnType.Name #>); -<# } #> int arg<#= parIndex #>Size = 0; byte[] arg<#= parIndex #>Bytes = null; @@ -83,32 +75,57 @@ namespace <#= this.interfaceType.Namespace #> // Serialize arg<#= parIndex #> <#= Utilities.SerializeValue(parIndex) #> <# } #> + int taskId; + lock (Immortal.DispatchTaskIdQueueLock) + { + while (!Immortal.DispatchTaskIdQueue.Data.TryDequeue(out taskId)) { } + } ReleaseBufferAndSend(); + Immortal.StartDispatchLoop(); + var taskToWaitFor = Immortal.CallCache.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync(); var currentResult = await taskToWaitFor; - var isSaved = await Immortal.TrySaveContextContinuationAsync(currentResult); - - if (isSaved) - { - taskToWaitFor = Immortal.CallCache.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync(); - currentResult = await taskToWaitFor; - } - - <#= !voidMethod ? "var result =" : ""#> await Immortal.TryTakeCheckpointContinuationAsync(currentResult); - + while (currentResult.AdditionalInfoType != ResultAdditionalInfoTypes.SetResult) + { + switch (currentResult.AdditionalInfoType) + { + case ResultAdditionalInfoTypes.SaveContext: + await Immortal.SaveTaskContextAsync(); + taskToWaitFor = Immortal.CallCache.Data[asyncContext.SequenceNumber].GetAwaitableTaskWithAdditionalInfoAsync(); + break; + case ResultAdditionalInfoTypes.TakeCheckpoint: + var sequenceNumber = await Immortal.TakeTaskCheckpointAsync(); + Immortal.StartDispatchLoop(); + taskToWaitFor = Immortal.GetTaskToWaitForWithAdditionalInfoAsync(sequenceNumber); + break; + } + + currentResult = await taskToWaitFor; + } + + lock (Immortal.DispatchTaskIdQueueLock) + { + Immortal.DispatchTaskIdQueue.Data.Enqueue(taskId); + } <# if (voidMethod) { #> return; <# } else { #> - return (<#= returnTypeName #>) result.Result; + return (<#= returnTypeName #>) currentResult.Result; <# } #> } -<# } #> void <#= generatedClientInterfaceName #>.<#= M.Name #>Fork(<#= parameterDeclarationString #>) { +<# if (isImpulseHandler) { #> + if (!Immortal.IsPrimary) + { + throw new Exception("Unable to send an Impulse RPC while not being primary."); + } + +<# } #> SerializableTaskCompletionSource rpcTask; // Compute size of serialized arguments diff --git a/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.cs b/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.cs index e6b590d3..4a1af6eb 100644 --- a/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.cs +++ b/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.cs @@ -1,7 +1,7 @@ // ------------------------------------------------------------------------------ // // This code was generated by a tool. -// Runtime Version: 15.0.0.0 +// Runtime Version: 16.0.0.0 // // Changes to this file may cause incorrect behavior and will be lost if // the code is regenerated. @@ -18,8 +18,8 @@ namespace Ambrosia /// Class to produce the template output /// - #line 1 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + #line 1 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal partial class ProxyInterfaceGenerator : ProxyInterfaceGeneratorBase { #line hidden @@ -31,14 +31,14 @@ public virtual string TransformText() this.Write("\r\nusing System;\r\nusing Ambrosia;\r\nusing System.Threading.Tasks;\r\nusing static Amb" + "rosia.StreamCommunicator;\r\n\r\nnamespace "); - #line 12 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 12 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Namespace)); #line default #line hidden this.Write("\r\n{\r\n /// \r\n // Generated from "); - #line 15 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 15 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default @@ -46,14 +46,14 @@ public virtual string TransformText() this.Write(" by the proxy generation.\r\n // This is the API that any immortal implementing " + "the interface must be a subtype of.\r\n /// \r\n public interface "); - #line 18 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 18 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default #line hidden this.Write("\r\n {\r\n"); - #line 20 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 20 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" foreach (var M in this.interfaceType.Methods) { var parameterDeclarationString = Utilities.ParameterDeclarationString(M); @@ -63,35 +63,35 @@ public virtual string TransformText() #line hidden this.Write(" "); - #line 24 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 24 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture("Task" + (M.voidMethod ? "" : "<" + M.ReturnType.Name + ">"))); #line default #line hidden this.Write(" "); - #line 24 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 24 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(M.Name)); #line default #line hidden this.Write("Async("); - #line 24 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 24 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); #line default #line hidden this.Write(");\r\n"); - #line 25 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 25 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" } #line default #line hidden this.Write(" }\r\n\r\n /// \r\n // Generated from "); - #line 29 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 29 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default @@ -99,80 +99,46 @@ public virtual string TransformText() this.Write(" by the proxy generation.\r\n // This is the API that is used to call a immortal" + " that implements\r\n /// \r\n [Ambrosia.InstanceProxy(typeof("); - #line 32 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 32 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default #line hidden this.Write("))]\r\n public interface "); - #line 33 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 33 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(this.interfaceType.Name)); #line default #line hidden this.Write("Proxy\r\n {\r\n"); - #line 35 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 35 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" foreach (var M in this.interfaceType.Methods) { var methodName = M.Name; var parameterDeclarationString = Utilities.ParameterDeclarationString(M); - #line default - #line hidden - - #line 40 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - if (!M.isImpulseHandler) { - - #line default - #line hidden - this.Write(" Task"); - - #line 41 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(M.voidMethod ? "" : "<" + M.ReturnType.Name + ">")); - - #line default - #line hidden - this.Write(" "); - - #line 41 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(methodName)); - - #line default - #line hidden - this.Write("Async("); - - #line 41 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); - - #line default - #line hidden - this.Write(");\r\n"); - - #line 42 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" - } - #line default #line hidden this.Write(" void "); - #line 43 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 40 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(methodName)); #line default #line hidden this.Write("Fork("); - #line 43 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 40 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" this.Write(this.ToStringHelper.ToStringWithCulture(parameterDeclarationString)); #line default #line hidden this.Write(");\r\n"); - #line 44 "C:\Git\Franklin\AmbrosiaCS\ProxyInterfaceGenerator.tt" + #line 41 "C:\Repos\AMBROSIA2\Clients\CSharp\AmbrosiaCS\ProxyInterfaceGenerator.tt" } #line default @@ -188,7 +154,7 @@ public virtual string TransformText() /// /// Base class for this transformation /// - [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "15.0.0.0")] + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.TextTemplating", "16.0.0.0")] internal class ProxyInterfaceGeneratorBase { #region Fields diff --git a/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.tt b/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.tt index def95186..8525e15c 100644 --- a/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.tt +++ b/Clients/CSharp/AmbrosiaCS/ProxyInterfaceGenerator.tt @@ -37,9 +37,6 @@ namespace <#= this.interfaceType.Namespace #> var methodName = M.Name; var parameterDeclarationString = Utilities.ParameterDeclarationString(M); #> -<# if (!M.isImpulseHandler) { #> - Task<#= M.voidMethod ? "" : "<" + M.ReturnType.Name + ">" #> <#= methodName #>Async(<#= parameterDeclarationString #>); -<# } #> void <#= methodName #>Fork(<#= parameterDeclarationString #>); <# } #> } diff --git a/Clients/CSharp/AmbrosiaCS/Utilities.cs b/Clients/CSharp/AmbrosiaCS/Utilities.cs index a7da95fe..da60575f 100644 --- a/Clients/CSharp/AmbrosiaCS/Utilities.cs +++ b/Clients/CSharp/AmbrosiaCS/Utilities.cs @@ -280,7 +280,8 @@ public static string GetTypeDefinitionInformationName(Type t) for (int i = 0; i < t.GetGenericArguments().Length; i++) { Type genericType = t.GetGenericArguments()[i]; - sb.Append(genericType.FullName); + var genericTypeDefinitionInfo = GetTypeDefinitionInformation(genericType); + sb.Append(genericTypeDefinitionInfo.Namespace + "." + genericTypeDefinitionInfo.Name); if (i < t.GetGenericArguments().Length - 1) { sb.Append(","); diff --git a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaFactory.cs b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaFactory.cs index fee6a7b6..beebabdc 100644 --- a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaFactory.cs +++ b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaFactory.cs @@ -7,6 +7,10 @@ using System.Threading.Tasks; using System.Runtime.Serialization; using Ambrosia; +using System.Threading; +using System.Net; +using System.Net.Sockets; +using CRA.ClientLibrary; namespace Ambrosia { @@ -30,8 +34,7 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType { case 0: // Entry point - //this.OnFirstStart(); - EntryPointAsync().Wait(); + await EntryPoint(); break; default: @@ -40,12 +43,149 @@ public override async Task DispatchToMethod(int methodId, RpcTypes.RpcType } return true; } - - private async Task EntryPointAsync() { this.EntryPoint(); } } public class AmbrosiaFactory { + static Thread _iCThread; + private static string GetLocalIPAddress() + { + var host = Dns.GetHostEntry(Dns.GetHostName()); + foreach (var ip in host.AddressList) + { + if (ip.AddressFamily == AddressFamily.InterNetwork) + { + return ip.ToString(); + } + } + throw new InvalidOperationException("Local IP Address Not Found!"); + } + + static void startIC(string _instanceName, + int port, + int replicaNumber, + string secureNetworkClassName, + string secureNetworkAssemblyName) + { + StartupParamOverrides.sendPort = 0; + StartupParamOverrides.receivePort = 0; + var replicaName = $"{_instanceName}{replicaNumber}"; + + var ipAddress = GetLocalIPAddress(); + + string storageConnectionString = null; + + if (storageConnectionString == null) + { + storageConnectionString = Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING"); + } + + if (storageConnectionString == null) + { + throw new InvalidOperationException("Cannot start the IC. Azure storage connection string not found. Use appSettings in your app.config to provide this using the key AZURE_STORAGE_CONN_STRING, or use the environment variable AZURE_STORAGE_CONN_STRING."); + } + + int connectionsPoolPerWorker; + string connectionsPoolPerWorkerString = "0"; + if (connectionsPoolPerWorkerString != null) + { + try + { + connectionsPoolPerWorker = Convert.ToInt32(connectionsPoolPerWorkerString); + } + catch + { + throw new InvalidOperationException("Maximum number of connections per CRA worker is wrong. Use appSettings in your app.config to provide this using the key CRA_WORKER_MAX_CONN_POOL."); + } + } + else + { + connectionsPoolPerWorker = 1000; + } + + ISecureStreamConnectionDescriptor descriptor = null; + if (secureNetworkClassName != null) + { + Type type; + if (secureNetworkAssemblyName != null) + { + var assembly = Assembly.Load(secureNetworkAssemblyName); + type = assembly.GetType(secureNetworkClassName); + } + else + { + type = Type.GetType(secureNetworkClassName); + } + descriptor = (ISecureStreamConnectionDescriptor)Activator.CreateInstance(type); + } + + var dataProvider = new CRA.DataProvider.Azure.AzureDataProvider(storageConnectionString); + var worker = new CRAWorker + (replicaName, ipAddress, port, + dataProvider, descriptor, connectionsPoolPerWorker); + + worker.DisableDynamicLoading(); + worker.SideloadVertex(new AmbrosiaRuntime(), "ambrosia"); + + worker.Start(); + } + + /// + /// Gesture that deploys a (non-upgradeable) service WITH its IC running in the same process, for TTD. The result is a service that implements the API defined + /// in . + /// + /// The interface that defines the API of the deployed service. + /// + /// The instance to deploy. It must implement . + /// The path used to get to the logs. + /// The number of the checkpoint to start TTD from + /// The version number used to start TTD from + /// + public static IDisposable Deploy(string serviceName, + Immortal instance, + string serviceLogPath, + long checkpointToLoad = 1, + int currentVersion = 0) + { + _iCThread = new Thread(() => { + var myRuntime = new AmbrosiaRuntime(); + myRuntime.InitializeRepro(serviceName, serviceLogPath, checkpointToLoad, currentVersion, + false); + }) { IsBackground = true }; + _iCThread.Start(); + + // Wait for the IC to finish setting up the named pipes before continuing (waiting avoids a potential deadlock) + while (!AmbrosiaRuntime._listening) ; + return Deploy(serviceName, instance, 0, 0); + } + + /// + /// Gesture that deploys a (non-upgradeable) service WITH its IC running in the same process. The result is a service that implements the API defined + /// in . + /// + /// The interface that defines the API of the deployed service. + /// + /// The instance to deploy. It must implement . + /// The IC communication port number. + /// The replica number of this instance if this is an active/active deployment. 0 otherwise + /// The name of the class used to ensure secure communication, if used. + /// The bane if the assembly used to ensure secure communication, if used. + /// + public static IDisposable Deploy(string serviceName, + Immortal instance, + int iCPort, + int replicaNumber = 0, + string secureNetworkClassName = null, + string secureNetworkAssemblyName = null) + { + _iCThread = new Thread(() => startIC(serviceName, iCPort, replicaNumber, secureNetworkClassName, secureNetworkAssemblyName)) { IsBackground = true }; + _iCThread.Start(); + + // Wait for the IC to finish setting up the named pipes before continuing (waiting avoids a potential deadlock) + while (!AmbrosiaRuntime._listening) ; + return Deploy(serviceName, instance, 0, 0); + } + /// /// Gesture that deploys a (non-upgradeable) service. The result is a service that implements the API defined /// in . @@ -68,7 +208,7 @@ public static IDisposable Deploy(string serviceName, Immortal instance, int r { throw new ArgumentException($"The instance to be deployed is of type '{immortalType.Name}' does not implement the type {typeOfT.Name}."); } - + // Generate server Ambrosia instance and cache it. Use type parameter T to tell the generation what // interface to generate a proxy for. Immortal.Dispatcher serverContainer; @@ -93,6 +233,74 @@ public static IDisposable Deploy(string serviceName, Immortal instance, int r return serverContainer; } + /// + /// Gesture that deploys an upgradeable service WITH its IC running in the same process. The result is a service that implements the API defined + /// in , but when it gets a special upgrade message, turns into a service that + /// implements the API defined by . + /// The upgrade happens by creating an instance of the type and passing to its + /// constructor the existing service instance so it can migrate over any needed state from it. + /// + /// The interface that defines the API of the deployed service. + /// The interface that defines the API of the service after it has been upgraded. + /// + /// The type of the class that implements and which has a unary + /// constructor that takes an argument of type . + /// + /// + /// The instance to deploy. It must implement . + /// The IC communication port number. + /// The replica number of this instance if this is an active/active deployment. 0 otherwise + /// The name of the class used to ensure secure communication, if used. + /// The bane if the assembly used to ensure secure communication, if used. + /// + public static IDisposable Deploy(string serviceName, + Immortal instance, + int iCPort, + int replicaNumber = 0, + string secureNetworkClassName = null, + string secureNetworkAssemblyName = null) + where T2 : T + where Z2 : Immortal, T2 // *and* Z2 has a ctor that takes a Immortal as a parameter + { + _iCThread = new Thread(() => startIC(serviceName, iCPort, replicaNumber, secureNetworkClassName, secureNetworkAssemblyName)) { IsBackground = true }; + _iCThread.Start(); + + // Wait for the IC to finish setting up the named pipes before continuing (waiting avoids a potential deadlock) + while (!AmbrosiaRuntime._listening) ; + return Deploy(serviceName, instance, 0, 0); + } + + + /// Gesture that deploys an upgradeable service WITH its IC running in the same process, for TTD. The result is a service that implements the API defined + /// in . + /// + /// The interface that defines the API of the deployed service. + /// + /// The instance to deploy. It must implement . + /// The path used to get to the logs. + /// The number of the checkpoint to start TTD from + /// The version number used to start TTD from + public static IDisposable Deploy(string serviceName, + Immortal instance, + string serviceLogPath, + long checkpointToLoad = 1, + int currentVersion = 0) + where T2 : T + where Z2 : Immortal, T2 // *and* Z2 has a ctor that takes a Immortal as a parameter + { + _iCThread = new Thread(() => { + var myRuntime = new AmbrosiaRuntime(); + myRuntime.InitializeRepro(serviceName, serviceLogPath, checkpointToLoad, currentVersion, + true); + }) + { IsBackground = true }; + _iCThread.Start(); + + // Wait for the IC to finish setting up the named pipes before continuing (waiting avoids a potential deadlock) + while (!AmbrosiaRuntime._listening) ; + return Deploy(serviceName, instance, 0, 0); + } + /// /// Gesture that deploys an upgradeable service. The result is a service that implements the API defined /// in , but when it gets a special upgrade message, turns into a service that diff --git a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj index 896ec396..fab8f82b 100644 --- a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj +++ b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.csproj @@ -1,38 +1,59 @@  Library - net46;netcoreapp2.0 + pdbonly + true + netstandard2.0 true - x64 - win7-x64 true AmbrosiaLibCS + true + true + ../../../Ambrosia/Ambrosia.snk + AnyCPU;x64 - - NETFRAMEWORK + + + portable + true - - NETCORE + + + portable + true + + + portable + true + + + + $(DefineConstants);NETSTANDARD + + + + - - 2.6.1 - + + + + + + + + - - 4.3.0 - + + - - - - 4.5.0 - + - + + diff --git a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.sln b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.sln index 7cdbd8e0..b46e4596 100644 --- a/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.sln +++ b/Clients/CSharp/AmbrosiaLibCS/AmbrosiaLibCS.sln @@ -1,7 +1,7 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio 15 -VisualStudioVersion = 15.0.27703.2026 +# Visual Studio Version 16 +VisualStudioVersion = 16.0.30011.22 MinimumVisualStudioVersion = 10.0.40219.1 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{F6B866E4-635F-44BE-8F32-DC076BAB8581}" ProjectSection(SolutionItems) = preProject @@ -9,35 +9,99 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution EndProjectSection EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLibCS", "AmbrosiaLibCS.csproj", "{246B969F-4C65-40F6-89FE-41560F7E0EAD}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Ambrosia", "..\..\..\Ambrosia\Ambrosia\Ambrosia.csproj", "{D13153B4-1C8D-49E2-93FA-560C0878F1A1}" - ProjectSection(ProjectDependencies) = postProject - {5852AC33-6B01-44F5-BAF3-2AAF796E8449} = {5852AC33-6B01-44F5-BAF3-2AAF796E8449} - EndProjectSection +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AmbrosiaLib", "..\..\..\AmbrosiaLib\Ambrosia\AmbrosiaLib.csproj", "{D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SharedAmbrosiaTools", "..\..\..\SharedAmbrosiaTools\SharedAmbrosiaTools.csproj", "{C9EEE5DD-8A33-415A-B799-6FF467F4DE28}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ImmortalCoordinator", "..\..\..\ImmortalCoordinator\ImmortalCoordinator.csproj", "{6DC6D762-9290-4C30-A9D3-8D60CBF85467}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AzureBlobsLogPicker", "..\..\..\AzureBlobsLogPicker\AzureBlobsLogPicker.csproj", "{1920E2E7-A110-4AFA-8808-A1D04FDCE841}" EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "adv-file-ops", "..\..\..\Ambrosia\adv-file-ops\adv-file-ops.vcxproj", "{5852AC33-6B01-44F5-BAF3-2AAF796E8449}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "GenericLogPicker", "..\..\..\GenericLogPicker\GenericLogPicker.csproj", "{475A559B-745C-4E92-A686-B94F05725DBE}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|ARM64 = Debug|ARM64 Debug|x64 = Debug|x64 + Release|Any CPU = Release|Any CPU + Release|ARM64 = Release|ARM64 Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|x64.ActiveCfg = Debug|x64 - {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|x64.Build.0 = Debug|x64 - {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|x64.ActiveCfg = Release|x64 - {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|x64.Build.0 = Release|x64 - {D13153B4-1C8D-49E2-93FA-560C0878F1A1}.Debug|x64.ActiveCfg = Debug|x64 - {D13153B4-1C8D-49E2-93FA-560C0878F1A1}.Debug|x64.Build.0 = Debug|x64 - {D13153B4-1C8D-49E2-93FA-560C0878F1A1}.Release|x64.ActiveCfg = Release|x64 - {D13153B4-1C8D-49E2-93FA-560C0878F1A1}.Release|x64.Build.0 = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.ActiveCfg = Debug|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Debug|x64.Build.0 = Debug|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.ActiveCfg = Release|x64 - {5852AC33-6B01-44F5-BAF3-2AAF796E8449}.Release|x64.Build.0 = Release|x64 + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|Any CPU.Build.0 = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|ARM64.Build.0 = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|x64.ActiveCfg = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Debug|x64.Build.0 = Debug|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|Any CPU.ActiveCfg = Release|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|Any CPU.Build.0 = Release|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|ARM64.ActiveCfg = Release|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|ARM64.Build.0 = Release|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|x64.ActiveCfg = Release|Any CPU + {246B969F-4C65-40F6-89FE-41560F7E0EAD}.Release|x64.Build.0 = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|ARM64.Build.0 = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|x64.ActiveCfg = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Debug|x64.Build.0 = Debug|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|Any CPU.Build.0 = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|ARM64.ActiveCfg = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|ARM64.Build.0 = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|x64.ActiveCfg = Release|Any CPU + {D3C67C37-70F5-4466-9BEE-AC5A8DCC4D9E}.Release|x64.Build.0 = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|ARM64.Build.0 = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|x64.ActiveCfg = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Debug|x64.Build.0 = Debug|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|Any CPU.Build.0 = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|ARM64.ActiveCfg = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|ARM64.Build.0 = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|x64.ActiveCfg = Release|Any CPU + {C9EEE5DD-8A33-415A-B799-6FF467F4DE28}.Release|x64.Build.0 = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|ARM64.Build.0 = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|x64.ActiveCfg = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Debug|x64.Build.0 = Debug|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|Any CPU.Build.0 = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|ARM64.ActiveCfg = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|ARM64.Build.0 = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|x64.ActiveCfg = Release|Any CPU + {6DC6D762-9290-4C30-A9D3-8D60CBF85467}.Release|x64.Build.0 = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|Any CPU.Build.0 = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|ARM64.Build.0 = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|x64.ActiveCfg = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Debug|x64.Build.0 = Debug|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|Any CPU.ActiveCfg = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|Any CPU.Build.0 = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|ARM64.ActiveCfg = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|ARM64.Build.0 = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|x64.ActiveCfg = Release|Any CPU + {1920E2E7-A110-4AFA-8808-A1D04FDCE841}.Release|x64.Build.0 = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|ARM64.ActiveCfg = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|ARM64.Build.0 = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|x64.ActiveCfg = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Debug|x64.Build.0 = Debug|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|Any CPU.Build.0 = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|ARM64.ActiveCfg = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|ARM64.Build.0 = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|x64.ActiveCfg = Release|Any CPU + {475A559B-745C-4E92-A686-B94F05725DBE}.Release|x64.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/Clients/CSharp/AmbrosiaLibCS/Checkpointing.md b/Clients/CSharp/AmbrosiaLibCS/Checkpointing.md new file mode 100644 index 00000000..9d310406 --- /dev/null +++ b/Clients/CSharp/AmbrosiaLibCS/Checkpointing.md @@ -0,0 +1,55 @@ +# Checkpointing the Immortal State + +Every time a Take Checkpoint message is received by the Immortal, the Immortal saves and serializes its current state and sends it over to the ImmortalCoordinator, which logs the checkpoint for later recovery. Since the Immortal does not process any other messages while handling the Take Checkpoint message, its state is guaranteed not to change whilst saving its state. + +The checkpointing differs between two scenarios - an Immortal which contains no async calls to other Immortals, and an Immortal which contains async calls to other Immortals. The difference stems from the fact that with async calls, the Immortal needs to keep track of their current state and continuation. For the latter scenario we are making use of a 3rd party open-source code to serialize a state of a C# Task (the original code repository can be found on GitHub here: https://github.com/ljw1004/blog/tree/master/Async/AsyncWorkflow). + +## Checkpointing - w/o async calls + +The simplest form of checkpointing is when the Immortal is only making Fork calls to other Immortals. In this scenario the Immortal does not care about the state of each call, only if it was made or not. + +In this case, the immortal instance is simply being serialized at its current state using the generated ImmortalSerializer and the serialized state is then sent over to the ImmortalCoordinator. As no other messages are being handled concurrently, this is a deterministic process. + +## Checkpointing - w/ async calls + +Checkpointing with async calls is a more elaborate process, as the current state of the Immortal instance must consider the state of each async call, and upon recovery should be able to continue handling responses to previously sent async calls. + +In order to serialize the current state machine we are utilizing external open-source code, contained in the TaskCheckpoint class (link to GitHub repository mentioned above). The Task which state is serialized is defined in OnFirstStartWrapper(), upon calling: + +```c# + await this.OnFirstStart().RunWithCheckpointing(ref this.SerializedTask); +``` + +The Task which state is serialized is the one returned by the async method *this.OnFirstStart()*, and the state is being serialized and saved into the StringBuilder object *this.SerializedTask*. + +**Note:** In order for this serialization to work, every variable on the call stack must be serializable. + +The actual state of the task will be saved by one of the tasks awaiting a response (there must always be at least one pending Task, otherwise execution wouldn't have been handed over to processing a new incoming message - the TakeCheckpoint message). + +For this purpose, the Immortal contains two main data structures: + +```c# +public SerializableCallCache CallCache = new SerializableCallCache(); +``` + +The *CallCache* is matching between RPC sequence numbers and a TaskCompletionSource object which is set to contain the result of the async call once a response arrives. This structure is serialized upon checkpointing and copied over to a recovering Immortal instance. + +and: + +```c# +public SerializableCache TaskIdToSequenceNumber = new SerializableCache(); +``` + +The *TaskIdToSequenceNumber* is matching between the TaskId associated with the current async call and the RPC sequence number. This structure is of course not copied over upon recovery. + +Once a TakeCheckpoint message gets in, we start by going over each TaskCompletionSource in the *CallCache* and set its result, signaling it to save its current sequence number and TaskId into the *TaskIdToSequenceNumber* cache. We will also pick the first **awaited** TaskCompletionSource to actually take the checkpoint. After each Task completes saving its context or taking a checkpoint, we create a new TaskCompletionSource for it to await. + +*TaskIdToSequenceNumber* is used in the process of the state serialization, when serializing Task objects returned by async calls. This allows the following scenario: + +```c# +var task1 = this._server.MAsync(buffer); +... +var result1 = await task1; +``` + +Notice that in this case, if a TakeCheckpoint message was received anywhere between the *async* call and the *await*, *task1* should be serialized as part of the call stack. Since Task is not a serializable type, we switch the Task object with the matching TaskCompletionSource (for the same sequence number) upon serialization, which is completed only if a result had been obtained prior to taking the checkpoint. \ No newline at end of file diff --git a/Clients/CSharp/AmbrosiaLibCS/EventBuffer.cs b/Clients/CSharp/AmbrosiaLibCS/EventBuffer.cs index 9418c04e..db32a890 100644 --- a/Clients/CSharp/AmbrosiaLibCS/EventBuffer.cs +++ b/Clients/CSharp/AmbrosiaLibCS/EventBuffer.cs @@ -165,7 +165,7 @@ internal async Task SendAsync(Stream outputStream, int numBytes = IntSize(bytesInBatchData + 1 + IntSize(numRPCs)) + 1 + IntSize(numRPCs); MemStreamForWritingBatchBytes.WriteInt(bytesInBatchData + 1 + IntSize(numRPCs)); - MemStreamForWritingBatchBytes.WriteByte(Ambrosia.AmbrosiaRuntime.RPCBatchByte); + MemStreamForWritingBatchBytes.WriteByte(AmbrosiaRuntimeLBConstants.RPCBatchByte); MemStreamForWritingBatchBytes.WriteInt(numRPCs); await outputStream.WriteAsync(TempArrForWritingBatchBytes, 0, numBytes); await outputStream.WriteAsync(curBuffer.PageBytes, posToStart, bytesInBatchData); diff --git a/Clients/CSharp/AmbrosiaLibCS/Immortal.cs b/Clients/CSharp/AmbrosiaLibCS/Immortal.cs index ddf394aa..2712f1d7 100644 --- a/Clients/CSharp/AmbrosiaLibCS/Immortal.cs +++ b/Clients/CSharp/AmbrosiaLibCS/Immortal.cs @@ -2,6 +2,7 @@ using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; +using System.IO.Pipes; using System.Linq; using System.Net; using System.Net.Sockets; @@ -12,7 +13,6 @@ using System.Threading.Tasks; using System.Threading.Tasks.Dataflow; using System.Xml; -using LocalAmbrosiaRuntime; using Remote.Linq.Expressions; using static Ambrosia.StreamCommunicator; @@ -27,8 +27,8 @@ namespace Ambrosia public abstract class Immortal : IDisposable { // Connection to the LocalAmbrosiaRuntime - private NetworkStream _ambrosiaReceiveFromStream; - private NetworkStream _ambrosiaSendToStream; + private Stream _ambrosiaReceiveFromStream; + private Stream _ambrosiaSendToStream; private OutputConnectionRecord _ambrosiaSendToConnectionRecord; protected string localAmbrosiaRuntime; // if at least one method in this API requires a return address @@ -38,11 +38,12 @@ public abstract class Immortal : IDisposable private readonly SerializableBufferBlock _toTaskBuffer = new SerializableBufferBlock(); private readonly SerializableBufferBlock _fromTaskBuffer = new SerializableBufferBlock(); - private bool _isFirstCheckpoint = true; private Dispatcher _dispatcher; private static FlexReadBuffer _inputFlexBuffer; private static int _cursor; + public bool IsPrimary = false; + [DataMember] [CopyFromDeserializedImmortal] public StringBuilder SerializedTask = new StringBuilder(); @@ -76,10 +77,11 @@ public abstract class Immortal : IDisposable /// Used to pass responsibility for the Dispatch loop to the most recently created /// task. /// - protected ConcurrentQueue dispatchTaskIdQueue = new ConcurrentQueue(); - - private readonly object myLock = new object(); - private readonly SameThreadTaskScheduler myTaskScheduler = new SameThreadTaskScheduler("AmbrosiaRPC"); + [DataMember] + public SerializableQueue DispatchTaskIdQueue = new SerializableQueue(); + [DataMember] + public readonly object DispatchTaskIdQueueLock = new object(); + public readonly SameThreadTaskScheduler DispatchTaskScheduler = new SameThreadTaskScheduler("AmbrosiaRPC"); /// /// If this is deployed with upgrade information, then this is the interface type that should be deployed. @@ -92,19 +94,16 @@ public abstract class Immortal : IDisposable protected abstract Task OnFirstStart(); - protected Task OnFirstStartWrapper() + protected async Task OnFirstStartWrapper() { - return new Task(async () => + try { - try - { - await this.OnFirstStart().RunWithCheckpointing(ref this.SerializedTask); - } - catch (Exception ex) - { - this.HandleExceptionWrapper(ex); - } - }); + await this.OnFirstStart().RunWithCheckpointing(ref this.SerializedTask); + } + catch (Exception ex) + { + this.HandleExceptionWrapper(ex); + } } protected void HandleExceptionWrapper(Exception ex) @@ -135,59 +134,91 @@ protected virtual void BecomingPrimary() { } // Hack for enabling fast IP6 loopback in Windows on .NET const int SIO_LOOPBACK_FAST_PATH = (-1744830448); - private void SetupConnections(int receivePort, int sendPort, out NetworkStream receiveStream, out NetworkStream sendStream, out OutputConnectionRecord connectionRecord) + private void SetupConnections(int receivePort, int sendPort, out Stream receiveStream, out Stream sendStream, out OutputConnectionRecord connectionRecord) { - Socket mySocket = null; - Byte[] optionBytes = BitConverter.GetBytes(1); + if ((sendPort == 0) && (receivePort == 0)) + { +#if DEBUG + Console.WriteLine("*X* Running tightly bound"); +#endif + // Note that we must wait for the IC to set up the anonymous pipes before getting the streams + while (StartupParamOverrides.ICSendPipeName == null) ; + receiveStream = new AnonymousPipeClientStream(PipeDirection.In, StartupParamOverrides.ICSendPipeName); + while (StartupParamOverrides.ICReceivePipeName == null) ; + sendStream = new AnonymousPipeClientStream(PipeDirection.Out, StartupParamOverrides.ICReceivePipeName); + } + else + { + Socket mySocket = null; + Byte[] optionBytes = BitConverter.GetBytes(1); #if _WINDOWS - mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); - mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); + mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); + mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); #else - mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); + mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); #endif - while (true) - { - try + while (true) { + Console.WriteLine("*X* Trying to connect IC and Language Binding"); + try + { #if _WINDOWS - mySocket.Connect(IPAddress.IPv6Loopback, sendPort); + mySocket.Connect(IPAddress.IPv6Loopback, sendPort); #else - mySocket.Connect(IPAddress.Loopback, sendPort); + mySocket.Connect(IPAddress.Loopback, sendPort); #endif - break; + break; + } + catch + { + Thread.Sleep(1000); + } } - catch { } - } - TcpClient tcpSendToClient = new TcpClient(); - tcpSendToClient.Client = mySocket; - sendStream = tcpSendToClient.GetStream(); - connectionRecord = new OutputConnectionRecord(); - connectionRecord.ConnectionStream = sendStream; - connectionRecord.placeInOutput = new EventBuffer.BuffersCursor(null, -1, 0); + TcpClient tcpClient = new TcpClient(); + tcpClient.Client = mySocket; + sendStream = tcpClient.GetStream(); + #if _WINDOWS - var ipAddress = IPAddress.IPv6Loopback; - mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); - mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); + mySocket = new Socket(AddressFamily.InterNetworkV6, SocketType.Stream, ProtocolType.Tcp); + mySocket.IOControl(SIO_LOOPBACK_FAST_PATH, optionBytes, null); #else - var ipAddress = IPAddress.Loopback; - mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); + mySocket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp); #endif - var myReceiveFromEP = new IPEndPoint(ipAddress, receivePort); - mySocket.Bind(myReceiveFromEP); - mySocket.Listen(1); - var socket = mySocket.Accept(); - receiveStream = new NetworkStream(socket); + while (true) + { + Console.WriteLine("*X* Trying to do second connection between IC and Language Binding"); + try + { +#if _WINDOWS + mySocket.Connect(IPAddress.IPv6Loopback, receivePort); +#else + mySocket.Connect(IPAddress.Loopback, receivePort); +#endif + break; + } + catch + { + Thread.Sleep(1000); + } + } + tcpClient = new TcpClient(); + tcpClient.Client = mySocket; + receiveStream = tcpClient.GetStream(); + } - var processOutputTask = processOutputRequests(); + connectionRecord = new OutputConnectionRecord(); + connectionRecord.ConnectionStream = sendStream; + connectionRecord.placeInOutput = new EventBuffer.BuffersCursor(null, -1, 0); + var processOutputTask = processOutputRequestsAsync(); } - private async Task processOutputRequests() + private async Task processOutputRequestsAsync() { while (true) { var nextEntry = await _ambrosiaSendToConnectionRecord.WorkQ.DequeueAsync(); - AcquireOutputLock(1); + _outputLock.Acquire(1); if (nextEntry == -1) { // This is a send output @@ -195,7 +226,7 @@ private async Task processOutputRequests() _ambrosiaSendToConnectionRecord.placeInOutput = await _ambrosiaSendToConnectionRecord.BufferedOutput.SendAsync(_ambrosiaSendToStream, _ambrosiaSendToConnectionRecord.placeInOutput); } - ReleaseOutputLock(); + _outputLock.Release(); } } @@ -212,7 +243,23 @@ protected T GetProxy(string serviceName, bool attachNeeded = true) // Generate client container proxy and cache it. var typeOfT = typeof(T); - var proxyClass = typeOfT.Assembly.GetType(typeOfT.FullName + "_Implementation"); + + Type proxyClass; + + try + { + proxyClass = typeOfT.Assembly.GetType(typeOfT.FullName + "_Implementation"); + } + catch (Exception e) + { + throw new Exception($"Failed while trying to get the types for proxy type {typeOfT.FullName}", e); + } + + if (proxyClass == null) + { + throw new InvalidOperationException($"Couldn't find {typeOfT.FullName}_Implementation in {typeOfT.Assembly.FullName}"); + } + InstanceProxy.Immortal = this; var instance = Activator.CreateInstance(proxyClass, serviceName, attachNeeded); @@ -232,7 +279,7 @@ protected Task DispatchWrapper(int bytesToRead = 0) { this.HandleException(ex); } - }); + }, cancelTokenSource.Token); } #pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously @@ -275,29 +322,29 @@ protected async Task Dispatch(int bytesToRead = 0) { //Console.WriteLine("Waiting for next batch of messages from the LAR"); - lock (myLock) + lock (DispatchTaskIdQueueLock) { - if (this.dispatchTaskIdQueue.Count > 1) + if (this.DispatchTaskIdQueue.Data.Count > 1) { int x; - while (!this.dispatchTaskIdQueue.TryDequeue(out x)) { } + while (!this.DispatchTaskIdQueue.Data.TryDequeue(out x)) { } break; // some other dispatch loop will take over, so just die. } } if (bytesToRead <= 24) { - int commitID = this._ambrosiaReceiveFromStream.ReadIntFixed(); - bytesToRead = this._ambrosiaReceiveFromStream.ReadIntFixed(); - long checkBytes = this._ambrosiaReceiveFromStream.ReadLongFixed(); - long writeSeqID = this._ambrosiaReceiveFromStream.ReadLongFixed(); + int commitID = await this._ambrosiaReceiveFromStream.ReadIntFixedAsync(cancelTokenSource.Token); + bytesToRead = await this._ambrosiaReceiveFromStream.ReadIntFixedAsync(cancelTokenSource.Token); + long checkBytes = await this._ambrosiaReceiveFromStream.ReadLongFixedAsync(cancelTokenSource.Token); + long writeSeqID = await this._ambrosiaReceiveFromStream.ReadLongFixedAsync(cancelTokenSource.Token); } while (bytesToRead > 24) { //Console.WriteLine("Waiting for the deserialization of a message from the LAR"); - await FlexReadBuffer.DeserializeAsync(this._ambrosiaReceiveFromStream, _inputFlexBuffer); + await FlexReadBuffer.DeserializeAsync(this._ambrosiaReceiveFromStream, _inputFlexBuffer, cancelTokenSource.Token); bytesToRead -= _inputFlexBuffer.Length; @@ -307,10 +354,10 @@ protected async Task Dispatch(int bytesToRead = 0) switch (firstByte) { - case AmbrosiaRuntime.InitalMessageByte: + case AmbrosiaRuntimeLBConstants.InitalMessageByte: { #if DEBUG - Console.WriteLine("Received an initial message"); + Console.WriteLine("*X* Received an initial message"); #endif _cursor++; @@ -323,16 +370,17 @@ protected async Task Dispatch(int bytesToRead = 0) //Task.Factory.StartNew( // () => this.OnFirstStart() // , CancellationToken.None, TaskCreationOptions.DenyChildAttach - // , myTaskScheduler + // , DispatchTaskScheduler // ); - this.OnFirstStartWrapper().Start(); + + await this.OnFirstStartWrapper(); break; } - case AmbrosiaRuntime.checkpointByte: + case AmbrosiaRuntimeLBConstants.checkpointByte: { #if DEBUG - Console.WriteLine("Received a checkpoint message"); + Console.WriteLine("*X* Received a checkpoint message"); #endif // TODO: this message should contain a (serialized - doh!) checkpoint. Restore the state. _cursor++; @@ -346,10 +394,10 @@ protected async Task Dispatch(int bytesToRead = 0) break; } - case AmbrosiaRuntime.takeCheckpointByte: + case AmbrosiaRuntimeLBConstants.takeCheckpointByte: { #if DEBUG - Console.WriteLine("Received a take checkpoint message"); + Console.WriteLine("*X* Received a take checkpoint message"); #endif _cursor++; @@ -358,32 +406,45 @@ protected async Task Dispatch(int bytesToRead = 0) break; } - case AmbrosiaRuntime.takeBecomingPrimaryCheckpointByte: + case AmbrosiaRuntimeLBConstants.becomingPrimaryByte: + { +#if DEBUG + Console.WriteLine("*X* Received a becoming primary message"); +#endif + _cursor++; + this.IsPrimary = true; + this.BecomingPrimary(); + break; + } + + + case AmbrosiaRuntimeLBConstants.takeBecomingPrimaryCheckpointByte: { #if DEBUG - Console.WriteLine("Received a take checkpoint message"); + Console.WriteLine("*X* Received a take checkpoint message"); #endif _cursor++; await this.TakeCheckpointAsync(); + this.IsPrimary = true; this.BecomingPrimary(); break; } - case AmbrosiaRuntime.upgradeTakeCheckpointByte: - case AmbrosiaRuntime.upgradeServiceByte: + case AmbrosiaRuntimeLBConstants.upgradeTakeCheckpointByte: + case AmbrosiaRuntimeLBConstants.upgradeServiceByte: { - if (firstByte == AmbrosiaRuntime.upgradeTakeCheckpointByte) + if (firstByte == AmbrosiaRuntimeLBConstants.upgradeTakeCheckpointByte) { #if DEBUG - Console.WriteLine("Received a upgrade and take checkpoint message"); + Console.WriteLine("*X* Received a upgrade and take checkpoint message"); #endif } else { #if DEBUG - Console.WriteLine("Received a upgrade service message"); + Console.WriteLine("*X* Received a upgrade service message"); #endif } _cursor++; @@ -412,9 +473,10 @@ protected async Task Dispatch(int bytesToRead = 0) // IMPORTANT: But the value for the back pointer to the server proxy should be the newly generated proxy newImmortal._dispatcher = typedProxy; - if (firstByte == AmbrosiaRuntime.upgradeTakeCheckpointByte) + if (firstByte == AmbrosiaRuntimeLBConstants.upgradeTakeCheckpointByte) { await newImmortal.TakeCheckpointAsync(); + newImmortal.IsPrimary = true; newImmortal.BecomingPrimary(); } @@ -427,26 +489,26 @@ protected async Task Dispatch(int bytesToRead = 0) this._dispatcher.upgradedProxy = typedProxy; // Need to die now, so do that by exiting loop - t.Start(); + t.Start(newImmortal.DispatchTaskScheduler); return; } - case AmbrosiaRuntime.RPCByte: - case AmbrosiaRuntime.RPCBatchByte: - case AmbrosiaRuntime.CountReplayableRPCBatchByte: + case AmbrosiaRuntimeLBConstants.RPCByte: + case AmbrosiaRuntimeLBConstants.RPCBatchByte: + case AmbrosiaRuntimeLBConstants.CountReplayableRPCBatchByte: { RPCsReceived++; var numberOfRPCs = 1; var lengthOfCurrentRPC = 0; int endIndexOfCurrentRPC = 0; - if (firstByte == AmbrosiaRuntime.RPCBatchByte || firstByte == AmbrosiaRuntime.CountReplayableRPCBatchByte) + if (firstByte == AmbrosiaRuntimeLBConstants.RPCBatchByte || firstByte == AmbrosiaRuntimeLBConstants.CountReplayableRPCBatchByte) { _cursor++; numberOfRPCs = _inputFlexBuffer.Buffer.ReadBufferedInt(_cursor); _cursor += IntSize(numberOfRPCs); - if (firstByte == AmbrosiaRuntime.CountReplayableRPCBatchByte) + if (firstByte == AmbrosiaRuntimeLBConstants.CountReplayableRPCBatchByte) { var numReplayableRPCs = _inputFlexBuffer.Buffer.ReadBufferedInt(_cursor); _cursor += IntSize(numReplayableRPCs); @@ -470,7 +532,7 @@ protected async Task Dispatch(int bytesToRead = 0) } var shouldBeRPCByte = _inputFlexBuffer.Buffer[_cursor]; - if (shouldBeRPCByte != AmbrosiaRuntime.RPCByte) + if (shouldBeRPCByte != AmbrosiaRuntimeLBConstants.RPCByte) { Console.WriteLine("UNKNOWN BYTE: {0}!!", shouldBeRPCByte); throw new Exception("Illegal leading byte in message"); @@ -492,7 +554,7 @@ protected async Task Dispatch(int bytesToRead = 0) //Console.WriteLine("Received RPC call to method with id: {0} and seq no.: {1}", methodId, CurrentSequenceNumber); #if DEBUG - Console.WriteLine($"Got response for {sequenceNumber} from {senderOfRPC}"); + Console.WriteLine($"*X* Got response for {sequenceNumber} from {senderOfRPC}"); #endif if (this.CallCache.Data.TryRemove(sequenceNumber, out var taskCompletionSource)) @@ -526,6 +588,8 @@ protected async Task Dispatch(int bytesToRead = 0) var errorMessage = $"Can't find sequence number {sequenceNumber} in cache"; throw new InvalidOperationException(errorMessage); } + + await Task.Yield(); } else // receiving an RPC { @@ -559,7 +623,7 @@ protected async Task Dispatch(int bytesToRead = 0) Buffer.BlockCopy(_inputFlexBuffer.Buffer, _cursor, localBuffer, 0, lengthOfSerializedArguments); //// BUGBUG: This works only if we are single-threaded and doing only fire-and-forget messages! - //while (myTaskScheduler.NumberOfScheduledTasks() == myTaskScheduler.MaximumConcurrencyLevel) + //while (DispatchTaskScheduler.NumberOfScheduledTasks() == DispatchTaskScheduler.MaximumConcurrencyLevel) //{ // // just busy wait until there is a free thread in the scheduler // // to handle this task. @@ -568,7 +632,7 @@ protected async Task Dispatch(int bytesToRead = 0) //Task.Factory.StartNew( // () => _dispatcher.DispatchToMethod(methodId, fireAndForget, senderOfRPC, CurrentSequenceNumber, localBuffer, 0) // , CancellationToken.None, TaskCreationOptions.DenyChildAttach - // , myTaskScheduler + // , DispatchTaskScheduler // ); try { @@ -721,22 +785,18 @@ public async Task SaveTaskAsync() private async Task TakeCheckpointAsync() { // wait for quiesence - AcquireOutputLock(2); + _outputLock.Acquire(2); _ambrosiaSendToConnectionRecord.BufferedOutput.LockOutputBuffer(); - // Save current task state unless just resumed from a serialized task - if (!this._isFirstCheckpoint || string.IsNullOrEmpty(this.SerializedTask.ToString())) - { - await this.SaveTaskAsync(); - } - this._isFirstCheckpoint = false; + // Save current task state + await this.SaveTaskAsync(); // Second, serialize state and send checkpoint // Need to directly write checkpoint to the stream so it comes *before* var checkpointSize = _immortalSerializer.SerializeSize(this); var sizeOfMessage = 1 + LongSize(checkpointSize); _ambrosiaSendToStream.WriteInt(sizeOfMessage); - _ambrosiaSendToStream.WriteByte(AmbrosiaRuntime.checkpointByte); + _ambrosiaSendToStream.WriteByte(AmbrosiaRuntimeLBConstants.checkpointByte); _ambrosiaSendToStream.WriteLong(checkpointSize); using (var passThruStream = new PassThruWriteStream(_ambrosiaSendToStream)) { @@ -744,21 +804,26 @@ private async Task TakeCheckpointAsync() } _ambrosiaSendToStream.Flush(); #if DEBUG - Console.WriteLine("Sent checkpoint back to LAR"); + Console.WriteLine("*X* Sent checkpoint back to LAR"); #endif _ambrosiaSendToConnectionRecord.BufferedOutput.UnlockOutputBuffer(); - ReleaseOutputLock(); + _outputLock.Release(); } - public async Task TryTakeCheckpointContinuationAsync(ResultAdditionalInfo currentResultAdditionalInfo) + public async Task TryTakeCheckpointContinuationAsync(ResultAdditionalInfo currentResultAdditionalInfo, int taskId) { - var result = (byte[])currentResultAdditionalInfo.Result; + var result = currentResultAdditionalInfo.Result; if (currentResultAdditionalInfo.AdditionalInfoType == ResultAdditionalInfoTypes.TakeCheckpoint) { var sequenceNumber = await this.TakeTaskCheckpointAsync(); this.StartDispatchLoop(); var resultAdditionalInfo = await this.GetTaskToWaitForWithAdditionalInfoAsync(sequenceNumber); // Re-await original task - result = (byte[])resultAdditionalInfo.Result; + result = resultAdditionalInfo.Result; + } + + lock (this.DispatchTaskIdQueueLock) + { + this.DispatchTaskIdQueue.Data.Enqueue(taskId); } return new ResultAdditionalInfo(result, currentResultAdditionalInfo.ResultType.Type); @@ -798,7 +863,7 @@ public async Task TakeTaskCheckpointAsync() public async Task SaveTaskContextAsync() { - var sequenceNumber = -1l; + var sequenceNumber = -1L; var message = await this._toTaskBuffer.Buffer.ReceiveAsync(); if (message is SaveContextMessageContainer saveContextMessage) @@ -815,9 +880,12 @@ public async Task SaveTaskContextAsync() public void StartDispatchLoop() { - var t = this.DispatchWrapper(); - this.dispatchTaskIdQueue.Enqueue(t.Id); - t.Start(); + lock (DispatchTaskIdQueueLock) + { + var t = this.DispatchWrapper(); + this.DispatchTaskIdQueue.Data.Enqueue(t.Id); + t.Start(this.DispatchTaskScheduler); + } } public Task GetTaskToWaitForAsync(long sequenceNumber) @@ -840,7 +908,7 @@ public Task GetTaskToWaitForWithAdditionalInfoAsync(long s return stcs.GetAwaitableTaskWithAdditionalInfoAsync(); } - [AttributeUsage(AttributeTargets.Field)] + [AttributeUsage(AttributeTargets.Field|AttributeTargets.Property)] public class CopyFromDeserializedImmortalAttribute : Attribute { @@ -861,12 +929,18 @@ private void CopyFromDeserializedImmortal(Stream dataStream) { continue; } - if (memberInfo.MemberType == MemberTypes.Field || memberInfo.MemberType == MemberTypes.Property) + if (memberInfo.MemberType == MemberTypes.Field) { var fi = (FieldInfo)memberInfo; var v = fi.GetValue(otherImmortal); fi.SetValue(this, v); } + else if (memberInfo.MemberType == MemberTypes.Property) + { + var pi = (PropertyInfo)memberInfo; + var v = pi.GetValue(otherImmortal); + pi.SetValue(this, v); + } else { throw new InvalidOperationException("Should never get here."); @@ -946,7 +1020,7 @@ private EventBuffer.BufferPage StartRPC( writablePage.curLength += localBuffer.WriteInt(writablePage.curLength, bytesPerMessage); // Write byte signalling that this is a RPC call - localBuffer[writablePage.curLength++] = AmbrosiaRuntime.RPCByte; + localBuffer[writablePage.curLength++] = AmbrosiaRuntimeLBConstants.RPCByte; // Write destination length, followed by the destination writablePage.curLength += localBuffer.WriteInt(writablePage.curLength, encodedDestinationLFRLength); @@ -973,7 +1047,7 @@ private EventBuffer.BufferPage StartRPC( taskToWaitFor = new SerializableTaskCompletionSource(typeof(T), newSequenceNumber); this.CallCache.Data.TryAdd(newSequenceNumber, taskToWaitFor); #if DEBUG - Console.WriteLine("Sent request for {0}", newSequenceNumber); + Console.WriteLine("*X* Sent request for {0}", newSequenceNumber); #endif } @@ -981,15 +1055,10 @@ private EventBuffer.BufferPage StartRPC( { // Sending this RPC call might trigger an incoming call to this container. // But the dispatch loop for this container might be blocked, so start up a new one. - var t = this.DispatchWrapper(); - this.dispatchTaskIdQueue.Enqueue(t.Id); - //Console.WriteLine("Starting a new dispatch loop from StartRPC (task {0})", t.Id); - //t.Start(myTaskScheduler); - t.Start(); + this.StartDispatchLoop(); } return writablePage; - } [AttributeUsage(AttributeTargets.Method)] @@ -1083,28 +1152,38 @@ public override Immortal Deserialize(Type runtimeType, Stream stream) } #region IDisposable Support - private bool disposedValue = false; // To detect redundant calls - volatile private int _quiesce; - private long _outputLock; - internal void AcquireOutputLock(long lockVal = 1) + public class CustomLock { - while (true) + public long Value; + + public CustomLock(long value) { - var origVal = Interlocked.CompareExchange(ref _outputLock, lockVal, 0); - if (origVal == 0) + this.Value = value; + } + + internal void Acquire(long lockVal = 1) + { + while (true) { - // We have the lock - break; + var origVal = Interlocked.CompareExchange(ref this.Value, lockVal, 0); + if (origVal == 0) + { + // We have the lock + break; + } } } - } - internal void ReleaseOutputLock() - { - Interlocked.Exchange(ref _outputLock, 0); + internal void Release() + { + Interlocked.Exchange(ref this.Value, 0); + } } + private bool disposedValue = false; // To detect redundant calls +// volatile private int _quiesce; + private CustomLock _outputLock = new CustomLock(0); protected virtual void Dispose(bool disposing) { @@ -1113,7 +1192,7 @@ protected virtual void Dispose(bool disposing) if (disposing) { #if DEBUG - Console.WriteLine("Dispatcher disposing"); + Console.WriteLine("*X* Dispatcher disposing"); #endif // TODO: dispose managed state (managed objects). this.cancelTokenSource.Cancel(true); @@ -1199,7 +1278,7 @@ public class InstanceProxy public InstanceProxy(string remoteAmbrosiaRuntime, bool attachNeeded) { #if DEBUG - Console.WriteLine($"InstanceProxy created to communicate with {remoteAmbrosiaRuntime}. (Attach: {attachNeeded})"); + Console.WriteLine($"*X* InstanceProxy created to communicate with {remoteAmbrosiaRuntime}. (Attach: {attachNeeded})"); #endif this.remoteAmbrosiaRuntime = remoteAmbrosiaRuntime; this.remoteAmbrosiaBytes = Encoding.UTF8.GetBytes(this.remoteAmbrosiaRuntime); @@ -1207,19 +1286,19 @@ public InstanceProxy(string remoteAmbrosiaRuntime, bool attachNeeded) if (attachNeeded) { - Immortal.AcquireOutputLock(3); + Immortal._outputLock.Acquire(3); #if DEBUG - Console.WriteLine("Sending attach message to: " + this.remoteAmbrosiaRuntime); + Console.WriteLine("*X* Sending attach message to: " + this.remoteAmbrosiaRuntime); #endif // Send attach message to the remote Ambrosia Runtime var destinationBytes = Encoding.UTF8.GetBytes(this.remoteAmbrosiaRuntime); // Write message size Immortal._ambrosiaSendToStream.WriteInt(1 + destinationBytes.Length); // Write message type - Immortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntime.attachToByte); + Immortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntimeLBConstants.attachToByte); // Write Destination Immortal._ambrosiaSendToStream.Write(destinationBytes, 0, destinationBytes.Length); - Immortal.ReleaseOutputLock(); + Immortal._outputLock.Release(); } } @@ -1265,7 +1344,19 @@ public Dispatcher(Immortal myImmortal, ImmortalSerializerBase myImmortalSerializ // need to set the self-proxy field var getProxyMethodDef = typeof(Immortal).GetMethod("GetProxy", BindingFlags.NonPublic | BindingFlags.Instance); var genericProxyMethod = getProxyMethodDef.MakeGenericMethod(baseType.GetGenericArguments().First()); - var selfProxy = genericProxyMethod.Invoke(myImmortal, new object[] { "", false, }); + + object selfProxy; + try + { + selfProxy = genericProxyMethod.Invoke(myImmortal, new object[] { "", false, }); + } + catch (TargetInvocationException e) + { + throw new InvalidOperationException( + "Failed to create the Dispatcher. Ensure that the type of the immortal inherits from Immortal where \"instance proxy type\" is the name of the type marked with the Ambrosia.InstanceProxy attribute.", + e); + } + var selfProxyField = baseType.GetField("thisProxy", BindingFlags.NonPublic | BindingFlags.Instance); selfProxyField.SetValue(myImmortal, selfProxy); } @@ -1282,7 +1373,7 @@ public Dispatcher(Immortal myImmortal, ImmortalSerializerBase myImmortalSerializ public void Start() { #if DEBUG - Console.WriteLine("Start Start()"); + Console.WriteLine("*X* Start Start()"); #endif var inputFlexBuffer = new FlexReadBuffer(); int commitID = MyImmortal._ambrosiaReceiveFromStream.ReadIntFixed(); @@ -1297,11 +1388,11 @@ public void Start() var firstByte = inputFlexBuffer.Buffer[cursor++]; - if (firstByte == AmbrosiaRuntime.checkpointByte) + if (firstByte == AmbrosiaRuntimeLBConstants.checkpointByte) { // Then this container is recovering #if DEBUG - Console.WriteLine("Received a checkpoint message"); + Console.WriteLine("*X* Received a checkpoint message"); #endif // TODO: this message should contain a (serialized - doh!) checkpoint. Restore the state. var sizeOfCheckpoint = inputFlexBuffer.Buffer.ReadBufferedLong(cursor); @@ -1312,36 +1403,32 @@ public void Start() } MyImmortal._immortalSerializer = this.MyImmortalSerializer; #if DEBUG - Console.WriteLine($"Deserialized: {this.MyImmortal.ToString()}"); + Console.WriteLine($"*X* Deserialized: {this.MyImmortal.ToString()}"); #endif if (!string.IsNullOrEmpty(this.MyImmortal.SerializedTask.ToString())) { var resumeMainTask = new Task(async () => await this.MyImmortal.Resume()); - resumeMainTask.Start(); + resumeMainTask.Start(MyImmortal.DispatchTaskScheduler); } else { // Now that the state is restored, start listening for incoming messages - var t = Dispatch(); - this.MyImmortal.dispatchTaskIdQueue.Enqueue(t.Id); - //Console.WriteLine("Starting a new dispatch loop from StartRPC (task {0})", t.Id); - //t.Start(myTaskScheduler); - t.Start(); + this.MyImmortal.StartDispatchLoop(); } } - else if (firstByte == AmbrosiaRuntime.takeCheckpointByte || firstByte == AmbrosiaRuntime.takeBecomingPrimaryCheckpointByte) + else if (firstByte == AmbrosiaRuntimeLBConstants.takeCheckpointByte || firstByte == AmbrosiaRuntimeLBConstants.takeBecomingPrimaryCheckpointByte) { // Then this container is starting for the first time - if (firstByte == AmbrosiaRuntime.takeCheckpointByte) + if (firstByte == AmbrosiaRuntimeLBConstants.takeCheckpointByte) { #if DEBUG - Console.WriteLine("Received a take checkpoint message"); + Console.WriteLine("*X* Received a take checkpoint message"); #endif } else { #if DEBUG - Console.WriteLine("Received a take becoming primary checkpoint message"); + Console.WriteLine("*X* Received a take becoming primary checkpoint message"); #endif } int sizeOfMessage; @@ -1350,20 +1437,20 @@ public void Start() // That way, if recovery happens after the first checkpoint (i.e., before a second checkpoint) // this message will get sent back to this container so it will restart properly { - MyImmortal.AcquireOutputLock(2); + MyImmortal._outputLock.Acquire(2); var initialMessageBytes = Encoding.UTF8.GetBytes("hello"); var initialMessageSize = initialMessageBytes.Length; sizeOfMessage = 1 + IntSize(initialMessageSize) + initialMessageSize; MyImmortal._ambrosiaSendToStream.WriteInt(sizeOfMessage); - MyImmortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntime.InitalMessageByte); + MyImmortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntimeLBConstants.InitalMessageByte); MyImmortal._ambrosiaSendToStream.WriteInt(initialMessageSize); MyImmortal._ambrosiaSendToStream.Write(initialMessageBytes, 0, initialMessageSize); MyImmortal._ambrosiaSendToStream.Flush(); - MyImmortal.ReleaseOutputLock(); + MyImmortal._outputLock.Release(); } #if DEBUG - Console.WriteLine("Sent initial message to LAR"); + Console.WriteLine("*X* Sent initial message to LAR"); #endif //// Side effect of calling StartRPC is to kick off the Dispatch loop in a different thread @@ -1376,13 +1463,13 @@ public void Start() { // wait for quiesence - MyImmortal.AcquireOutputLock(2); + MyImmortal._outputLock.Acquire(2); MyImmortal._ambrosiaSendToConnectionRecord.BufferedOutput.LockOutputBuffer(); var checkpointSize = this.MyImmortal._immortalSerializer.SerializeSize(this.MyImmortal); sizeOfMessage = 1 + LongSize(checkpointSize); MyImmortal._ambrosiaSendToStream.WriteInt(sizeOfMessage); - MyImmortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntime.checkpointByte); + MyImmortal._ambrosiaSendToStream.WriteByte(AmbrosiaRuntimeLBConstants.checkpointByte); MyImmortal._ambrosiaSendToStream.WriteLong(checkpointSize); using (var passThruStream = new PassThruWriteStream(MyImmortal._ambrosiaSendToStream)) { @@ -1391,16 +1478,17 @@ public void Start() MyImmortal._ambrosiaSendToStream.Flush(); MyImmortal._ambrosiaSendToConnectionRecord.BufferedOutput.UnlockOutputBuffer(); - MyImmortal.ReleaseOutputLock(); + MyImmortal._outputLock.Release(); } #if DEBUG - Console.WriteLine("Sent checkpoint back to LAR"); + Console.WriteLine("*X* Sent checkpoint back to LAR"); #endif - if (firstByte == AmbrosiaRuntime.takeBecomingPrimaryCheckpointByte) + if (firstByte == AmbrosiaRuntimeLBConstants.takeBecomingPrimaryCheckpointByte) { + this.MyImmortal.IsPrimary = true; this.MyImmortal.BecomingPrimary(); } @@ -1408,10 +1496,7 @@ public void Start() // The first step above means that the Dispatch loop will take care of calling the OnFirstStart method. // Third, start the dispatch loop as a new task - //Task.Factory.StartNew(() => Immortal.Dispatch()); - var t = Dispatch(); - this.MyImmortal.dispatchTaskIdQueue.Enqueue(t.Id); - t.Start(); + this.MyImmortal.StartDispatchLoop(); } else { @@ -1422,16 +1507,11 @@ public void Start() throw new Exception(s); } #if DEBUG - Console.WriteLine("End Start()"); + Console.WriteLine("*X* End Start()"); #endif } - public Task Dispatch() - { - return this.MyImmortal.DispatchWrapper(); - } - - public void EntryPoint() { this.MyImmortal.OnFirstStartWrapper(); } + public async Task EntryPoint() { await this.MyImmortal.OnFirstStartWrapper(); } protected void ReleaseBufferAndSend(bool doTheSend = true) { this.MyImmortal.ReleaseBufferAndSend(doTheSend: doTheSend); } public abstract Task DispatchToMethod(int methodId, RpcTypes.RpcType rpcType, string senderOfRPC, long sequenceNumber, byte[] buffer, int cursor); @@ -1480,7 +1560,7 @@ protected EventBuffer.BufferPage StartRPC_ReturnValue(string destination, long s writablePage.curLength += localBuffer.WriteInt(writablePage.curLength, bytesPerMessage); // Write byte signalling that this is a RPC call - localBuffer[writablePage.curLength++] = AmbrosiaRuntime.RPCByte; + localBuffer[writablePage.curLength++] = AmbrosiaRuntimeLBConstants.RPCByte; // Write destination length, followed by the destination writablePage.curLength += localBuffer.WriteInt(writablePage.curLength, destinationBytes.Length); @@ -1548,4 +1628,4 @@ public class AsyncContext [DataMember] public long SequenceNumber { get; set; } } -} \ No newline at end of file +} diff --git a/Clients/CSharp/AmbrosiaLibCS/Immortal.md b/Clients/CSharp/AmbrosiaLibCS/Immortal.md new file mode 100644 index 00000000..b3fd5660 --- /dev/null +++ b/Clients/CSharp/AmbrosiaLibCS/Immortal.md @@ -0,0 +1,190 @@ +# Immortal + +In this document we will deep-dive into the main logic of the Immortal, the building block of AMBROSIA. + +## The Dispatch Loop + +The Dispatch method is the main method in the Immortal class - it receives and executes incoming RPCs. This method consists of one big loop which continues processing incoming messages until either another Dispatch loop takes over or the current loop is awaiting an asynchronous operation to finish. + +Only one Dispatch loop is allowed to run at a time (with the exception of one loop finishing its handling of an awaited RPC call - resulting in one of the running loops committing suicide at the next iteration). This logic is controlled by the DispatchTaskIdQueue which keeps track of all active Dispatch loops running: + +```c# +lock (DispatchTaskIdQueueLock) +{ + if (this.DispatchTaskIdQueue.Data.Count > 1) + { + int x; + while (!this.DispatchTaskIdQueue.Data.TryDequeue(out x)) { } + break; // some other dispatch loop will take over, so just die. + } +} +``` + +Since we don't allow real concurrency when handling RPC calls, and there is still the chance for 2 Dispatch loops to be active before one dies (in the case mentioned above), we use a single-thread scheduler when starting a new Dispatch loop: + +```c# +public void StartDispatchLoop() +{ + lock (DispatchTaskIdQueueLock) + { + var t = this.DispatchWrapper(); + this.DispatchTaskIdQueue.Data.Enqueue(t.Id); + t.Start(this.DispatchTaskScheduler); + } +} +``` +After assuring that only one thread is running the Dispatch loop, we are ready to start reading incoming bytes from the wire. + +## Handling RPC Calls + +Handling each RPC call depends on its type, which is always defined in the first byte of the incoming RPC message. The RPC type constants are defined in the AmbrosiaRuntime class. + +### InitialMessage + +This type of message arrives only once at the beginning of the first run of the Immortal, and it triggers the Immortal to start running the OnFirstStart() method. + +#### Message format: + +| Field Name | R | messageLength | messageBuffer | +| ---------- | ---- | ------------- | ------------- | +| Field Type | byte | int | byte[] | + +* **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.InitalMessageByte) +* **messageLength** - Determines the length of the message contained in messageBuffer +* **messageBuffer** - Contains an encoded message string (currently ignored in the code) + +### Checkpoint + +This type of message contains a checkpoint for the Immortal to recover from. The checkpoint consists of a serialized Immortal (serialized with the generated ImmortalSerializer class), which the current Immortal then copies all fields and properties from using the CopyFromDeserializedImmortal method. + +**Note:** Fields and properties which are not defined in a subclass of Immortal (e.g Immortal itself) should be decorated with the [CopyFromDeserializedImmortal] attribute if they should be copied during recovery. + +#### Message format: + +| Field Name | R | checkpointSize | checkpoint | +| ---------- | ---- | -------------- | ---------- | +| Field Type | byte | long | - | + +- **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.checkpointByte) +- **checkpointSize** - Determines the size of the checkpoint contained in checkpoint +- **checkpoint** - Contains the serialized checkpoint. Since checkpointSize is a long, checkpoint is being passed to deserialization as a stream rather than a byte[] (in order to support large-sized checkpoints) + +### TakeCheckpoint + +This type of message signals the immortal to take a checkpoint at its current state. We will delve deeper into this logic later in this document. + +#### Message format: + +| Field Name | R | +| ---------- | ---- | +| Field Type | byte | + +- **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.takeCheckpointByte) + +### TakeBecomingPrimaryCheckpoint + +This type of message signals the Immortal to take a checkpoint at its current state, as with TakeCheckpoint above. This message also signals the Immortal to call BecomingPrimary(). + +#### Message format: + +| Field Name | R | +| ---------- | ---- | +| Field Type | byte | + +- **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.takeBecomingPrimaryCheckpointByte) + +### UpgradeService + +This type of message signals the Immortal to upgrade. The current Immortal creates an instance of the upgraded Immortal type. All the fields from the current Immortal instance are copied into the new Immortal instance. The new Immortal instance is then being started by starting a new Dispatch loop with the same single-threaded task scheduler and the same Task Id queue. The current Immortal's Dispatch loop would then commit suicide once the new Dispatch loop takes over. The new Dispatch loop will have the current Immortal's remaining number of bytes to read handed over to it, in order to continue processing messages from the same point. + +**Note:** The upgraded Immortal type is defined upon deploying the service when using the following AmbrosiaFactory.Deploy Method: + +```c# +public static IDisposable Deploy(string serviceName, Immortal instance, int receivePort, int sendPort) +``` + +#### Message format: + +| Field Name | R | +| ---------- | ---- | +| Field Type | byte | + +- **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.upgradeServiceByte) + +### UpgradeServiceTakeCheckpoint + +This type of message signals the Immortal to upgrade, similarly to UpgradeService. It also signals the upgraded immortal to take a checkpoint at the time of creation (after the pervious Immortal's state has been copied over). + +#### Message format: + +| Field Name | R | +| ---------- | ---- | +| Field Type | byte | + +- **R** - Determines the type of the RPC call ( = AmbrosiaRuntime.upgradeServiceTakeCheckpointByte) + +### RPC + +RPCs are divided into Request RPCs, which trigger the Immortal to run a method defined in the user-defined interface (and returning a Response RPC if the method is not a Fire-and-Forget method) and Response RPCs which return a return value for a previously sent Request RPC. Method Ids are assigned upon code-generation and are used to encode the method for the Immortal to run. The matching between requests and responses is done by matching sequence numbers. + +* #### Request RPCs: + + In the case of a request RPC, the Immortal will call its dispatcher instance (of the auto-generated Dispatcher implementation) which will handle the method call (and return a response, if one is required). + + ##### Message format: + + | Field Name | R | ret | m | b | lFR | n | args | + | -------------- | ---- | ---- | ---- | ---- | ------ | ---- | ------ | + | **Field Type** | byte | byte | int | int | byte[] | long | byte[] | + + * **R** - Determines the type of the RPC call (= RPCByte) + * **ret** - Determines the type of the return value (None = 0, in the case of a request RPC) + * **m** - The method ID for the method to call + * **b** - Size of the sender name (Required only if RPC is not fire and forget - defined in RpcType.IsFireAndForget()) + * **lFR** - Encoded name of sender (Only if RPC is not fire and forget - defined in RpcType.IsFireAndForget()) + * **n** - Contains the sequence number of the request matching the response + * **args** - Contains serialized arguments, number and size baked into the generated code + +* #### Response RPCs: + + In the case of an RPC containing a response, the Immortal will "wake up" the task awaiting this response and perform a context switch to this task (the context switch would always result in switching to the woken-up task, as it is the only task running in parallel to the current task). + + In the case of an RPC containing an exception as its return value, the exception would be set in the awaiting task, and the same context switch will be made to that task. + + ##### Message format: + + | Field Name | R | ret | n | returnValue | + | -------------- | :--- | :--- | :--- | :---------- | + | **Field Type** | byte | byte | long | T | + + * **R** - Determines the type of the RPC call (= RPCByte) + * **ret** - Determines the type of the return value (values defined by enum ReturnValueTypes) + * **n** - Contains the sequence number of the request matching the response + * **returnValue** - Contains a return value of type T (defined in the signature of the method called in the RPC request) + +### RPCBatch + +In this case, we are receiving a batch of RPC messages. + +#### Message format: + +| Field Name | R | nRPC | RPC | RPC | ... | +| ---------- | ---- | ---- | ---- | ---- | ---- | +| Field Type | byte | int | | | | + +* **R** - Determines the type of the RPC call (= RPCBatchByte) +* **nRPC** - Number of RPCs in the batch +* **RPCs** - Batch of RPC messages in the format described above + +### CountReplayableRPCBatch + +#### Message format: + +| Field Name | R | nRPC | nRRPC | RPC | RPC | ... | +| ---------- | ---- | ---- | ----- | ---- | ---- | ---- | +| Field Type | byte | int | int | | | | + +- **R** - Determines the type of the RPC call (= RPCBatchByte) +- **nRPC** - Number of RPCs in the batch +- **nRRPC** - Number of replayable RPCs in the batch +- **RPCs** - Batch of RPC messages in the format described above \ No newline at end of file diff --git a/Clients/CSharp/AmbrosiaLibCS/SerializableQueue.cs b/Clients/CSharp/AmbrosiaLibCS/SerializableQueue.cs new file mode 100644 index 00000000..17b575de --- /dev/null +++ b/Clients/CSharp/AmbrosiaLibCS/SerializableQueue.cs @@ -0,0 +1,53 @@ +using System; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.Serialization; + +namespace Ambrosia +{ + [DataContract] + public class SerializableQueue + { + public ConcurrentQueue Data { get; private set; } + + [DataMember] + public List SerializedData { get; set; } + + public SerializableQueue() + { + this.Data = new ConcurrentQueue(); + } + + [OnSerializing] + public void SetSerializedQueue(StreamingContext context) + { + this.SerializedData = new List(); + while (!this.Data.IsEmpty) + { + if (this.Data.TryDequeue(out var next)) + { + this.SerializedData.Add(next); + } + } + + foreach (var next in this.SerializedData) + { + this.Data.Enqueue(next); + } + } + + [OnDeserialized] + public void SetQueue(StreamingContext context) + { + if (this.SerializedData != null) + { + this.Data = new ConcurrentQueue(); + foreach (var next in this.SerializedData) + { + this.Data.Enqueue(next); + } + } + } + } +} \ No newline at end of file diff --git a/Clients/CSharp/AmbrosiaLibCS/TaskCheckpoint.cs b/Clients/CSharp/AmbrosiaLibCS/TaskCheckpoint.cs index 50764bec..c6f157a7 100644 --- a/Clients/CSharp/AmbrosiaLibCS/TaskCheckpoint.cs +++ b/Clients/CSharp/AmbrosiaLibCS/TaskCheckpoint.cs @@ -221,18 +221,19 @@ public void OnCompleted(Action continuation) } public void OnCompletedRunner() { - var deleteFile = true; +// var deleteFile = true; try { awaiter.GetResult(); } - catch (DeferRemainderException) +/* catch (DeferRemainderException) { deleteFile = false; - } + }*/ catch (Exception) { } + //if (deleteFile /*&& File.Exists(fn)*/) fn.Clear(); continuation(); diff --git a/Clients/CSharp/AmbrosiaLibCS/TaskScheduler.cs b/Clients/CSharp/AmbrosiaLibCS/TaskScheduler.cs index b3b1a554..81b945a0 100644 --- a/Clients/CSharp/AmbrosiaLibCS/TaskScheduler.cs +++ b/Clients/CSharp/AmbrosiaLibCS/TaskScheduler.cs @@ -66,7 +66,7 @@ protected override bool TryExecuteTaskInline(Task task, bool task_was_previously private Thread StartThread(string name) { - var t = new Thread(MyThread) { Name = name }; + var t = new Thread(MyThread) { Name = name, IsBackground = true}; using (var start = new Barrier(2)) { t.Start(start); diff --git a/Clients/TypeScript/lib/Ambrosia.ts b/Clients/TypeScript/lib/Ambrosia.ts new file mode 100644 index 00000000..46519660 --- /dev/null +++ b/Clients/TypeScript/lib/Ambrosia.ts @@ -0,0 +1,164 @@ +import { parseLogRecords, byteArrayToBuffer, byteArrayFromBuffer, serializeMessage, serializeLogRecord, parseLogRecord, wholeSlice } from './AmbrosiaFormat' +import { ByteArray, Message, uint64 } from './AmbrosiaFormat' +import { MsgCountReplayableRPCBatch, MsgUpgradeService, MsgTakeBecomingPrimaryCheckpoint, MsgUpgradeTakeCheckpoint } from './AmbrosiaFormat' +import { MsgInitialMessage, MsgCheckpoint, MsgRPCBatch, MsgTakeCheckpoint, MsgAttachTo, MsgRPC } from './AmbrosiaFormat' + +import net = require("net"); + +type AmbrosiaLogEntry = Buffer +type AmbrosiaCheckpoint = Buffer + +class Ambrosia { + public buf: ByteArray = new ByteArray([]) + public replaying: boolean = true + public sendSocket: any + + constructor( + public onLogEntry: (logEntry: AmbrosiaLogEntry) => void, + public onBecomePrimary: () => void, + public onCheckpoint: () => AmbrosiaCheckpoint, + public onRestoreCheckpoint: (checkpoint: AmbrosiaCheckpoint) => void, + public host: string = "127.0.0.1", + public sendPort: number = 1000, + public receivePort: number = 1001 + ) { + // Open client for sending messages to the Immortal Coordinator. + this.sendSocket = new net.Socket(); + + this.sendSocket.connect(sendPort, host, () => { + this.trace("Connected to: " + host + ":" + sendPort) + }) + + // Wait for incoming connection from the Immortal Coordinator. + net.createServer((sock: any) => { + this.trace("Connected to: " + sock.remoteAddress + ":" + sock.remotePort) + + // Handle received data. + sock.on('data', (data: any) => { + this.trace("Received data from IC: " + data.toString('hex')) + + // Transfer bytes into shared buffer until we have parseable message. + var tmpBuffer : number[] = [] + + for(const d of data.values()) { + tmpBuffer.push(d) + } + + this.buf.concat(new ByteArray(tmpBuffer)) + + let accumulated : Message[] = [] + try { + let logRecords = parseLogRecords(wholeSlice(this.buf)) + for(var logRecord of logRecords) { + for(var msg of logRecord.msgs) { + accumulated.push(msg) + } + } + + this.buf = new ByteArray([]) + } catch (err) { + this.trace("Parsing error: " + err) + } + for(var msg of accumulated) + { + this.handle(msg) + } + }); + }).listen(receivePort, host) + } + + handle(message: Message) { + this.trace("Handle invoked for message type: " + message.typ) + + if(message instanceof MsgCountReplayableRPCBatch) { + throw new Error("Not implemented!") + } + else if(message instanceof MsgUpgradeService) { + throw new Error("Not implemented!") + } + else if(message instanceof MsgTakeBecomingPrimaryCheckpoint) { + // Disable replaying. + this.replaying = false + + // Create initial RPC message. + let rpcMessage = new MsgRPC() + rpcMessage.methodId = 0 + rpcMessage.isOutgoing = false + + // Create initial message containing the initial RPC message. + let initialMessage = new MsgInitialMessage() + initialMessage.rpc = rpcMessage + + // Send initial message. + this.sendBuffer(initialMessage) + + // Send checkpoint. + let checkpointMessage = new MsgCheckpoint() + checkpointMessage.checkpoint = byteArrayFromBuffer(this.onCheckpoint()) + this.sendBuffer(checkpointMessage) + } + else if(message instanceof MsgUpgradeTakeCheckpoint) { + throw new Error("Not implemented!") + } + else if(message instanceof MsgInitialMessage) { + // Nothing. + } + else if(message instanceof MsgCheckpoint) { + let buffer = byteArrayToBuffer(message.checkpoint) + this.onRestoreCheckpoint(buffer) + } + else if(message instanceof MsgRPCBatch) { + throw new Error("Not implemented!") + } + else if(message instanceof MsgTakeCheckpoint) { + // Send checkpoint. + let checkpointMessage = new MsgCheckpoint() + checkpointMessage.checkpoint = byteArrayFromBuffer(new Buffer(this.onCheckpoint())) + this.sendBuffer(checkpointMessage) + } + else if(message instanceof MsgAttachTo) { + throw new Error("Not implemented!") + } + else if(message instanceof MsgRPC) { + // console.log("**** method: " + message.methodId) + let logEntry = byteArrayToBuffer(message.serializedArgs) + this.onLogEntry(logEntry) + } + else { + throw new Error("Unknown message type!") + } + } + + isReplaying() { + return this.replaying + } + + selfRPC(logEntry: AmbrosiaLogEntry) { + this.trace("selfRPC invoked") + let message = new MsgRPC() + message.methodId = 0 + message.isOutgoing = true + message.isSelfCall = true + message.serializedArgs = byteArrayFromBuffer(logEntry) + this.sendBuffer(message) + } + + sendBuffer(message: Message) { + let serialized = serializeMessage(message) + + serialized.iterate((x: Buffer) => { + this.trace("About to send buffer:") + console.log(x) + + this.sendSocket.write(x, () => { + this.trace("Transmitted buffer with type: " + message.typ + " and size: " + x.byteLength) + }) + }) + } + + trace(message: string) { + console.log("AMBROSIA LOG: " + message) + } +} + +export default Ambrosia \ No newline at end of file diff --git a/Clients/TypeScript/lib/AmbrosiaFormat.ts b/Clients/TypeScript/lib/AmbrosiaFormat.ts new file mode 100644 index 00000000..a3c0129f --- /dev/null +++ b/Clients/TypeScript/lib/AmbrosiaFormat.ts @@ -0,0 +1,960 @@ +type uint8 = number; /* uint8 */ + +type uint32 = number; /* uint32 */ + +let maxUint32 = 4294967295 + +class uint64 { /* uint64 */ + high: uint32; + low: uint32; + constructor(high: uint32, low: uint32) { + this.high = high; + this.low = low; + } + add(y: uint64): void { + let low = this.low + y.low; + let high = this.high + y.high + Math.trunc(low / (maxUint32 + 1)) + low &= maxUint32; + this.low = low; + this.high = high; + } + lt(y: uint64): boolean { + return (this.high < y.high || (this.high == y.high && this.low < y.low)); + } + sub(y: uint64) : void { + if (this.lt(y)) { + throw new Error("cannot subtract greater integer"); + } + let low = this.low - y.low; + let high = this.high - y.high; + if (low < 0) { + low - 1; + high += maxUint32; + } + this.low = low; + this.high = high; + } +} + +function add64(x: uint64, y: uint64): uint64 { + let res = new uint64(x.high, x.low); + res.add(y); + return res; +} + +function sub64(x: uint64, y: uint64): uint64 { + let res = new uint64(x.high, x.low); + res.sub(y); + return res; +} + +interface ByteArrayIndex { /* 2D coordinates, MUST NOT be considered as a uint64 because not all subarrays of a byte array are full */ + first: uint32; + second: uint32; +} + +function copyIndex(x: ByteArrayIndex) : ByteArrayIndex { + return {first: x.first, second: x.second}; +} + +let hexits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'] + +class ByteArray { + public bytes: uint8[][]; + constructor(x: uint8[]) { + this.bytes = [x]; + } + byteAt (index: ByteArrayIndex) : uint8 { + let tindex : ByteArrayIndex = copyIndex(index); + while (tindex.first < this.bytes.length && tindex.second == this.bytes[tindex.first].length) { + tindex.first++; + tindex.second = 0; + } + if (tindex.first >= this.bytes.length) { + throw new Error("byteAt: high out of bounds"); + } + if (tindex.second >= this.bytes[tindex.first].length) { + throw new Error("byteAt: low out of bounds"); + } + return this.bytes[tindex.first][tindex.second]; + } + length () : uint64 { + let res : uint64 = new uint64(0, 0); + let i : uint32 = 0; + for (; i < this.bytes.length; ++i) { + // console.log(new uint64(0, this.bytes[i].length)); + res.add(new uint64(0, this.bytes[i].length)); + } + // console.log("!!") + // console.log(res) + return res; + } + concat (y: ByteArray) : void { + if (this.bytes.length > 0 && y.bytes.length == 1 && this.bytes[this.bytes.length - 1].length + y.bytes[0].length <= maxUint32) { + this.bytes[this.bytes.length - 1] = this.bytes[this.bytes.length - 1].concat(y.bytes[0]); + } else { + /* TODO: pack this.bytes and y.bytes first? */ + this.bytes = this.bytes.concat(y.bytes); + } + } + validIndex (x: ByteArrayIndex) : boolean { + return (x.first < this.bytes.length && x.second < this.bytes[x.first].length); + } + dist (x: ByteArrayIndex, y: ByteArrayIndex) : uint64 { + // console.log("starting slicelength") + /* assume x <= y */ + let temp : ByteArrayIndex = {first: x.first, second: x.second}; + let res : uint64 = new uint64(0, 0); + while (temp.first <= y.first) { + // console.log("going from " + x.first + " " + x.second + " to " + y.first + " " + y.second) + + if(x.first == y.first) { + let diff = y.second - x.second + // console.log("adding diff 0 : " + diff) + res.add(new uint64(0, diff)) + } + else if(temp.first == x.first) { + if (temp.second <= this.bytes[temp.first].length) { + let diff = this.bytes[temp.first].length - x.second + // console.log("length " + this.bytes[temp.first].length) + // console.log("adding diff 1 : " + diff) + res.add(new uint64(0, diff)) + } + } + else if(temp.first < y.first) { + let diff = this.bytes[temp.first].length + // console.log("adding diff 2: " + diff) + res.add(new uint64(0, diff)) + } + else if(temp.first == y.first) { + if (temp.second <= this.bytes[temp.first].length) { + let diff = this.bytes[temp.first].length - y.second + // console.log("adding diff 3: " + diff) + res.add(new uint64(0, diff)) + } + } + else { + throw new Error("slice length failure, unknown case") + } + + temp.first++; + temp.second = 0; + } + // console.log("slice length result: " + res.low) + return res; + } + shiftIndex (x: ByteArrayIndex, y: uint32) { + let tx : ByteArrayIndex = {first: x.first, second: x.second}; + let ty : uint32 = y; + while (ty > 0 && tx.first < this.bytes.length) { + if (tx.second <= this.bytes[tx.first].length) { + if (ty > this.bytes[tx.first].length - tx.second) { + tx.first++; + ty -= this.bytes[tx.first].length - tx.second; + tx.second = 0; + } else { + tx.second += ty; + ty = 0; + } + } + } + return tx; + } + shiftIndex64 (x: ByteArrayIndex, y: uint64) { + let tx : ByteArrayIndex = {first: x.first, second: x.second}; + let ty : uint64 = new uint64(y.high, y.low); + let tzero : uint64 = new uint64(0, 0); + while (tzero.lt(ty) && tx.first < this.bytes.length) { + if (tx.second <= this.bytes[tx.first].length) { + let len = new uint64(0, this.bytes[tx.first].length - tx.second); + if (len.lt(ty)) { + tx.first++; + ty.sub(len); + tx.second = 0; + } else { + tx.second += ty.low; /* here ty.high == 0 */ + ty = tzero; + } + } + } + return tx; + } + subarray(from: ByteArrayIndex, to: ByteArrayIndex) { + let res = new ByteArray([]); + let tfrom = {first: from.first, second: from.second}; + let tto = {first: to.first, second: to.second}; + while (tfrom.first <= tto.first) { + if (tfrom.first == tto.first && tfrom.second <= tto.second && (tfrom.second !== 0 || tto.second < this.bytes[tfrom.first].length)) { + // console.log("concat for res") + res.concat(new ByteArray(this.bytes[tfrom.first].slice(tfrom.second, tto.second))); + // console.log(res.toHexString()) + } + else if (tfrom.second !== 0) { + if (this.bytes[tfrom.first].length < tfrom.second) { + throw new Error("Bad tfrom index"); + } + res.concat(new ByteArray(this.bytes[tfrom.first].slice(tfrom.second))); + } else { + res.concat(new ByteArray(this.bytes[tfrom.first])); + } + tfrom.first++; + tfrom.second = 0; + } + // console.log("subarray returning ") + // console.log(res.toHexString()) + return res; + } + endIndex() { + if (this.bytes.length > 0) { + return {first: this.bytes.length - 1, second: this.bytes[this.bytes.length - 1].length}; + } else { + return {first: 0, second: 0}; + } + } + toHexString() { + let res = ""; + let i = 0; + for (; i < this.bytes.length; ++i) { + let b = this.bytes[i]; + let j = 0; + for (; j < b.length; ++j) { + let v = b[j]; + res = res.concat(hexits[Math.trunc(v/16)]); + res = res.concat(hexits[v%16]); + res = res.concat(" "); + } + } + return res; + } + + iterate(f: (x: Buffer) => void) { + let i = 0; + for (; i < this.bytes.length; ++i) { + let buf = Buffer.from(this.bytes[i]) + f(buf) + } + } +} + +function byteArrayFromBuffer(b: Buffer) : ByteArray { + // console.log("** serializing") + // console.log(b) + let res = new ByteArray([]) + for (var byte of b.values()) { + res.concat(new ByteArray([byte])) + } + return res +} + +function byteArrayToBuffer(byteArray: ByteArray) : Buffer { + // console.log("** byte array") + // console.log(byteArray.toHexString()) + let buffers : Buffer[] = [] + let i = 0 + for (; i < byteArray.bytes.length; ++i) { + buffers.push(new Buffer(byteArray.bytes[i])) + } + let concatd = Buffer.concat(buffers) + // console.log("** about to deserialize") + // console.log(concatd) + return concatd +} + +interface slice { + bytes: ByteArray; + from: ByteArrayIndex; + to: ByteArrayIndex; +} + +function sliceLength(input: slice) : uint64 { + return input.bytes.dist(input.from, input.to); +} + +function checkEnd(input: slice) { + if ((new uint64(0, 0)).lt(sliceLength(input))) { + throw new Error("Data present, none expected, slice length: " + sliceLength(input).low); + } +} + +function sliceCrop(input: slice) : ByteArray { + return input.bytes.subarray(input.from, input.to); +} + +function wholeSlice(b: ByteArray) : slice { + return { + bytes: b, + from: {first: 0, second: 0}, + to: b.endIndex() + } +} + +function unsafeParseByte(input: slice) : uint8 { + let res = input.bytes.byteAt(input.from); + input.from = input.bytes.shiftIndex(input.from, 1); + return res; +} + +function parseByte(input: slice) : uint8 { + if (sliceLength(input).lt(new uint64(0, 1))) { + throw new Error("parseIntFixed: not enough bytes"); + } + return unsafeParseByte(input); +} + +function serializeByte(x: uint8) : ByteArray { + return new ByteArray([x]); +} + +function parseByteArray(input: slice, size: uint64) : ByteArray { + if (sliceLength(input).lt(size)) { + throw new Error("parseByteArray: not enough bytes"); + } + let newFrom = input.bytes.shiftIndex64(input.from, size); + let res = input.bytes.subarray(input.from, newFrom); + input.from = newFrom; + return res; +} + +function serializeByteArray(array: ByteArray) : ByteArray { + let res = new ByteArray([]); + /* TODO: we should do a deep copy, not just a shallow copy */ + res.concat(array); + return res; +} + +function parseIntFixed(input: slice) : uint32 /* intFixed */ { + if (sliceLength(input).lt(new uint64(0, 4))) { + throw new Error("parseIntFixed: not enough bytes"); + } + let b0 = unsafeParseByte(input); + let b1 = unsafeParseByte(input); + let b2 = unsafeParseByte(input); + let b3 = unsafeParseByte(input); + /* little-endian */ + return (b0 + 256 * (b1 + 256 * (b2 + 256 * b3))); +} + +function serializeIntFixed(x: uint32): ByteArray { + let t = x; + let b0 = t % 256; + t = Math.trunc(t / 256); + let b1 = t % 256; + t = Math.trunc(t / 256); + let b2 = t % 256; + t = Math.trunc(t / 256); + let b3 = t % 256; + /* little-endian */ + return new ByteArray([b0, b1, b2, b3]); +} + +function parseLongFixed(input: slice) : uint64 { + /* little-endian */ + let low = parseIntFixed(input); + let high = parseIntFixed(input); + return new uint64(high, low); +} + +function serializeLongFixed(x: uint64 /* longFixed */): ByteArray { + /* little-endian */ + let res : ByteArray = serializeIntFixed(x.low); + res.concat(serializeIntFixed(x.high)); + return res; +} + +type zigzagInt = uint32; // uint64; // /* unsigned variable-length int, */ + + +function zigzagIntSize(value : uint32) : uint32 { + var sz : uint32 = 0; + var zigZagEncoded : uint32 = maxUint32 & ((value << 1) ^ (value >> 31)); + while ((zigZagEncoded & ~0x7F) !== 0) { + sz++; + zigZagEncoded >>= 7; + } + return sz + 1; +} + +function zigzagInt64Size(value : uint64) : uint32 { + // The size shall never use the high bits + return serializeZigzagInt64(value).length().low; +} + +function parseZigzagInt(input: slice) : zigzagInt { + var shift : uint32 = 7; + var currentByte : uint8 = parseByte(input); + var read : uint8 = 1; + var result : uint32 = currentByte & 0x7F; + while ((currentByte & 0x80) !== 0) { + currentByte = parseByte(input); + read++; + result |= (currentByte & 0x7F) << shift; + shift += 7; + if (read > 5) { + throw new Error("parseZigzagInt: number is too long"); + } + } + result = ((-(result & 1) ^ ((result >> 1) & 0x7FFFFFFF))); + result &= maxUint32; + return result; +} + +function parseZigzagInt64(input: slice) : uint64 { + var shift : uint32 = 7; + var currentByte : uint8 = parseByte(input); + var read : uint8 = 1; + var hi : uint32 = 0; + var lo : uint32 = currentByte & 0x7F; + while ((currentByte & 0x80) !== 0) { + currentByte = parseByte(input); + read++; + if (shift + 7 <= 32) { + lo |= (currentByte & 0x7F) << shift; + } else if (shift < 32) { + var currentBytel = currentByte & ((1 << (32 - shift)) - 1); + var currentByteh = currentByte >> (32 - shift); + lo |= (currentBytel & 0x7F) << shift; + hi |= (currentByteh & 0x7F) << 0; + } else { + hi |= (currentByte & 0x7F) << (shift - 32); + } + shift += 7; + if (read > 9) { + throw new Error("parseZigzagInt: number is too long"); + } + } + var hilsb : uint32 = hi & 1; + var lolsb : uint32 = lo & 1; + lo = ((lo >> 1) | (hilsb << 31)) ^ (-lolsb); + hi = (hi >> 1) ^ (-lolsb); + return new uint64(hi, lo); +} + +function serializeZigzagInt(value: zigzagInt): ByteArray { + var zigZagEncoded : uint32 = maxUint32 & ((value << 1) ^ (value >> 31)); + var bytes : uint8[] = []; + while ((zigZagEncoded & ~0x7F) !== 0) { + bytes = bytes.concat (0xFF & (zigZagEncoded | 0x80)); + zigZagEncoded >>= 7; + } + bytes = bytes.concat(0xFF & zigZagEncoded); + + return new ByteArray(bytes); +} + +function serializeZigzagInt64(value: uint64): ByteArray { + var hi : uint32 = value.high; + var lo : uint32 = value.low; + var himsb = hi >> 31; + var lomsb = lo >> 31; + var nhi = (hi << 1) ^ lomsb; + var nlo = (lo << 1) ^ himsb; + var bytes : uint8[] = []; + while (nhi !== 0 || ((nlo & ~0x7F) !== 0)) { + bytes = bytes.concat (0xFF & (nlo | 0x80)); + var hilsb7 = nhi & 0x7F; + nhi >>= 7; + nlo = (nlo >>= 7) ^ (hilsb7 << (32 - 7)); + } + bytes = bytes.concat(0xFF & nlo); + + return new ByteArray(bytes); +} + +interface header { + committerID: uint32 /* intFixed */; + size: uint32 /* intFixed */; + check: uint64 /* longFixed */; + logRecordSequenceID: uint64 /* longFixed */; +} + +function parseHeader(input: slice) : header { + let committerID = parseIntFixed(input); + let size = parseIntFixed(input); + let check = parseLongFixed(input); + let logRecordSequenceID = parseLongFixed(input); + return { + committerID: committerID, + size: size, + check: check, + logRecordSequenceID: logRecordSequenceID + }; +} + +function serializeHeader(x: header): ByteArray { + let res : ByteArray = serializeIntFixed(x.committerID); + res.concat(serializeIntFixed(x.size)); + res.concat(serializeLongFixed(x.check)); + res.concat(serializeLongFixed(x.logRecordSequenceID)); + return res; +} + +enum MessageType { + TrimTo = 14, + CountReplayableRPCBatch = 13, + UpgradeService = 12, + TakeBecomingPrimaryCheckpoint = 11, + UpgradeTakeCheckpoint = 10, + InitialMessage = 9, + Checkpoint = 8, + RPCBatch = 5, + TakeCheckpoint = 2, + AttachTo = 1, + RPC = 0 +} + +function parseMessageType (input: slice) : MessageType { + let v : uint8 = input.bytes.byteAt(input.from); + input.from = input.bytes.shiftIndex(input.from, 1); + if (MessageType[v]) { + let res : MessageType = v; + return res; + } + throw new Error("parseMessageType: invalid messageType"); +} + +function serializeMessageType (x: MessageType) : ByteArray { + return new ByteArray([x]); +} + +class Message { + typ: MessageType; + protected constructor(typ: MessageType) { + this.typ = typ; + } + serializePayload() : ByteArray { + throw new Error("Unimplemented: message.serializePayload"); + } +} + +class EmptyMessage extends Message { + constructor(typ: MessageType) { + super(typ); + } + serializePayload() : ByteArray { + return new ByteArray([]); + } +} + +class MsgUpgradeService extends EmptyMessage { + constructor() { + super(MessageType.UpgradeService); + } +} + +class MsgTakeBecomingPrimaryCheckpoint extends EmptyMessage { + constructor() { + super(MessageType.TakeBecomingPrimaryCheckpoint); + } +} + +class MsgUpgradeTakeCheckpoint extends EmptyMessage { + constructor() { + super(MessageType.UpgradeTakeCheckpoint); + } +} + +/* MsgInitialMessage depends on MsgRPC */ + +class MsgCheckpoint extends Message { + checkpoint: ByteArray; + expectedCheckpointSize: uint64; /* used only for parsing */ + /* constructor: only parse the payload, i.e. fills in + expectedCheckpointSize; then checkpoint must be filled in + later by the caller, not the constructor */ + constructor(input?: slice) { + super(MessageType.Checkpoint); + if (input) { + let sz = parseZigzagInt64(input); + this.expectedCheckpointSize = sz; + this.checkpoint = new ByteArray([]); + } else { + this.expectedCheckpointSize = new uint64(0, 0); + this.checkpoint = new ByteArray([]); + } + } + /* serializePayload should only serialize the payload excluding the actual checkpoint contents */ + serializePayload () : ByteArray { + return serializeZigzagInt64(this.checkpoint.length()); + } +} + +/* RPCBatch depends on RPC */ + +class MsgTakeCheckpoint extends EmptyMessage { + constructor() { + super(MessageType.TakeCheckpoint); + } +} + +class MsgAttachTo extends Message { + destinationBytes: ByteArray; + constructor(input?: slice) { + super(MessageType.AttachTo); + if (input) { + this.destinationBytes = sliceCrop(input); + input.from = copyIndex(input.to); + } else { + this.destinationBytes = new ByteArray([]); + } + } + serializePayload () : ByteArray { + let res : ByteArray = new ByteArray([]); + res.concat(this.destinationBytes); + return res; + } +} + +class MsgRPC extends Message { + destinationServiceName: Buffer; + methodId: uint32 /* zigzagInt */ ; + serializedArgs: ByteArray; + isOutgoing: boolean; + isSelfCall: boolean; + constructor(input?: slice) { + super(MessageType.RPC); + // console.log("we are here in msg rpc") + /* parsing ignores the destination service name */ + this.destinationServiceName = new Buffer([]) + if (input) { + this.isOutgoing = false; + this.isSelfCall = false; + // Omitted in the incoming RPC. + let reservedRPCOrReturn = parseByte(input); // value should be 0, but do we care at parsing? + this.methodId = parseZigzagInt(input); + let reservedFireAndForgetOrAsyncAwait = parseByte(input); // value should be 1, but do we care at parsing? + this.serializedArgs = sliceCrop(input); + input.from = copyIndex(input.to); + } else { + this.isOutgoing = true; + this.isSelfCall = false; + this.methodId = 0; + this.serializedArgs = new ByteArray([]); + } + } + serializePayload () : ByteArray { + let res : ByteArray = new ByteArray([]); + if (this.isOutgoing) { + /* serialization needs destination service name */ + if(this.isSelfCall) { + res.concat(serializeZigzagInt(0)); + } else { + res.concat(serializeZigzagInt(this.destinationServiceName.length)); + res.concat(byteArrayFromBuffer(this.destinationServiceName)); + } + } + res.concat(serializeByte(0)); /* reserved: RPC or Return */ + res.concat(serializeZigzagInt(this.methodId)); + res.concat(serializeByte(2)); /* reserved: Fire and Forget (1) or Async/Await (0) or Impulse (2) */ + res.concat(this.serializedArgs); + return res; + } +} + +function serializeMessage (msg: Message) : ByteArray { + let payload : ByteArray = msg.serializePayload (); + let typ : ByteArray = serializeMessageType(msg.typ); + let sz = add64(payload.length(), typ.length()); + // console.log("payload: ") + // console.log(payload) + // console.log("typ: ") + // console.log(typ) + // console.log("sz: ") + // console.log(sz) + if (sz.high > 0) { + throw new Error("message is too large, its size should fit in 32 bits"); + } + let res : ByteArray = serializeZigzagInt(sz.low); + res.concat(typ); + res.concat(payload); + /* Special case: concat the checkpoint */ + if (msg.typ == MessageType.Checkpoint) { + res.concat((msg as MsgCheckpoint).checkpoint); + } + return res; +} + +class MsgCountReplayableRPCBatch extends Message { + batch: MsgRPC[]; + replayable: uint32; + constructor(input?: slice, parseMessage?: ((input: slice) => MsgRPC)) { + super(MessageType.RPCBatch); + if (input) { + if (parseMessage) { + let res : MsgRPC[] = []; + let count = parseZigzagInt(input); + this.replayable = parseZigzagInt(input); + let i = 0; + for (; i < count; ++i) { + res = res.concat([parseMessage(input)]); + } + this.batch = res; + } else { + throw new Error("MsgRPCBatch: requires parseMessage"); + } + } else { + this.batch = []; + this.replayable = 0; + } + } + serializePayload() : ByteArray { + let res : ByteArray = serializeZigzagInt(this.batch.length); + res.concat(serializeZigzagInt(this.replayable)); + let i = 0; + for (; i < this.batch.length; ++i) { + res.concat(serializeMessage(this.batch[i])); + } + return res; + } +} + +class MsgInitialMessage extends Message { + rpc: MsgRPC; + constructor(input?: slice, parseMessage?: ((input: slice) => MsgRPC)) { + super(MessageType.InitialMessage); + if (input) { + if (parseMessage) { + this.rpc = parseMessage(input); + } else { + throw new Error("MsgInitialMessage: message parser required"); + } + } else { + this.rpc = new MsgRPC(); + } + } + serializePayload() : ByteArray { + if (this.rpc.isOutgoing) { + throw new Error("Initial message RPC should be an Incoming one"); + } + return serializeMessage(this.rpc); + } +} + +class MsgRPCBatch extends Message { + batch: MsgRPC[]; + constructor(input?: slice, parseMessage?: ((input: slice) => MsgRPC)) { + super(MessageType.RPCBatch); + if (input) { + if (parseMessage) { + let res : MsgRPC[] = []; + let count = parseZigzagInt(input); + let i = 0; + for (; i < count; ++i) { + res = res.concat([parseMessage(input)]); + } + this.batch = res; + } else { + throw new Error("MsgRPCBatch: requires parseMessage"); + } + } else { + this.batch = []; + } + } + serializePayload() : ByteArray { + let res : ByteArray = serializeZigzagInt(this.batch.length); + let i = 0; + for (; i < this.batch.length; ++i) { + res.concat(serializeMessage(this.batch[i])); + } + return res; + } +} + +function parseMessage (input: slice, expectedTyp?: MessageType) : Message { + function parseRPCMessage (input: slice) : MsgRPC { + let res = parseMessage(input, MessageType.RPC); + return res as MsgRPC; + } + let totalSize : uint64 = new uint64(0, parseZigzagInt(input)); + // console.log("totalSize: " + totalSize.low) + if (sliceLength(input).lt(totalSize)) { + throw new Error("parseMessage: not enough bytes for payload"); + } + let innerTo = input.bytes.shiftIndex64(input.from, totalSize); + let inner : slice = { + bytes: input.bytes, + from: copyIndex(input.from), + to: innerTo + }; + let typ : MessageType = parseMessageType(inner); + // console.log("type: " + typ) + if (expectedTyp) { + if (typ !== expectedTyp) { + throw new Error("Found a message of a different type than the one expected"); + } + } + var res : Message; + switch (typ) { + case MessageType.CountReplayableRPCBatch: + res = new MsgCountReplayableRPCBatch(inner, parseRPCMessage); + break; + case MessageType.UpgradeService: + res = new MsgUpgradeService(); + break; + case MessageType.TakeBecomingPrimaryCheckpoint: + res = new MsgTakeBecomingPrimaryCheckpoint(); + break; + case MessageType.UpgradeTakeCheckpoint: + res = new MsgUpgradeTakeCheckpoint(); + break; + case MessageType.InitialMessage: + res = new MsgInitialMessage(inner, parseRPCMessage); + // console.log("after new initial message") + break; + case MessageType.Checkpoint: + res = new MsgCheckpoint(inner); + break; + case MessageType.RPCBatch: + res = new MsgRPCBatch(inner, parseRPCMessage); + break; + case MessageType.TakeCheckpoint: + res = new MsgTakeCheckpoint(); + break; + case MessageType.AttachTo: + res = new MsgAttachTo(inner); + break; + case MessageType.RPC: + res = new MsgRPC(inner); + // console.log("after new RPC message") + break; + default: + throw new Error("Unsupported message type"); + } + + /* Check that we consumed every byte of the payload */ + checkEnd(inner); + + /* Notify the input that we consumed the payload bytes */ + input.from = innerTo; + + return res; +} + +interface logRecord { + header: header; + msgs: Message[]; +} + +let headerSize = 24; +let headerSize64 = new uint64(0, headerSize); + +function parseLogRecords (input: slice) : logRecord[] { + console.log("LOG RECORDS input size " + input.bytes.length().low) + + let logRecords : logRecord[] = []; + let zero = new uint64(0, 0); + while (zero.lt(sliceLength(input))) { + // console.log("slice length is now: " + sliceLength(input).low) + + /* Figure out how big the next log record is */ + let tmpSlice: slice = { + bytes: input.bytes, + from: copyIndex(input.from), + to: copyIndex(input.to) + }; + + let header = parseHeader(tmpSlice); + // console.log("slice size is " + header.size) + + /* Create a slice for that log record */ + let logRecord = parseLogRecord(input) + logRecords.push(logRecord) + } + + return logRecords +} + +function parseLogRecord (input: slice) : logRecord { + // console.log("LOG RECORD input size " + input.bytes.length().low) + + let header = parseHeader(input); + // console.log("headr size " + header.size) + if (header.size < headerSize) { + throw new Error("Size must include header size"); + } + let msgsSize = header.size - headerSize; + let msgsSize64 = new uint64(0, msgsSize); + // console.log("msgs size " + msgsSize64.low) + + if (sliceLength(input).lt(msgsSize64)) { + throw new Error("Not enough message bytes remaining"); + } + let innerTo = input.bytes.shiftIndex(input.from, msgsSize); + // console.log("input.from: " + input.from.second) + // console.log("innerTo: " + innerTo.second) + let inner: slice = { + bytes: input.bytes, + from: copyIndex(input.from), + to: innerTo + }; + let mySliceLength = sliceLength(inner).low + // console.log("slice length: " + mySliceLength) + let msgs : Message[] = []; + let zero = new uint64(0, 0); + let maybeCheckpointEndPosition : ByteArrayIndex = innerTo; + + while (zero.lt(sliceLength(inner))) { + // console.log("parsing message") + let msg = parseMessage(inner) + msgs = msgs.concat([msg]); + + if(msg instanceof MsgCheckpoint) { + /* Use slice to grab the checkpoint off the stream if we just saw a checkpoint message */ + let expectedCheckpointSize = (msg as MsgCheckpoint).expectedCheckpointSize + let innerCheckpointTo = input.bytes.shiftIndex64(innerTo, expectedCheckpointSize) + let innerCheckpoint: slice = { + bytes: input.bytes, + from: copyIndex(innerTo), + to: innerCheckpointTo + }; + + // console.log("checkpoint message!") + + /* Verify we have enough bytes to process the checkpoint, else wait */ + if (sliceLength(innerCheckpoint).lt(expectedCheckpointSize)) { + console.log("sliceLength: " + sliceLength(innerCheckpoint).low) + throw new Error("parseMessage: not enough bytes for checkpoint yet"); + } + + /* Take the checkpoint off of the stream */ + (msg as MsgCheckpoint).checkpoint = parseByteArray(innerCheckpoint, (msg as MsgCheckpoint).expectedCheckpointSize); + maybeCheckpointEndPosition = innerCheckpointTo + + // console.log("done capturing checkpoint") + } + + // console.log("done parsing message") + } + + input.from = maybeCheckpointEndPosition; + + return { + header: header, + msgs: msgs + } +} + +/* this serializer also adjusts the size field of the header */ + +function serializeLogRecord(header: header, msgs: Message[]) { + var i: number; + let n = msgs.length; + let lowMsgs : ByteArray = new ByteArray([]); + for (i = 0; i < n; ++i) { + lowMsgs.concat(serializeMessage(msgs[i])); + } + let size = add64(new uint64(0, headerSize), lowMsgs.length()); + if (size.high > 0) { + throw new Error("log record too long"); + } + header.size = size.low; + let res : ByteArray = serializeHeader(header); + res.concat(lowMsgs); + return res; +} + +export { serializeMessage, serializeLogRecord, parseLogRecord, ByteArray, wholeSlice } +export { Message, MessageType } +export { MsgCountReplayableRPCBatch, MsgUpgradeService, MsgTakeBecomingPrimaryCheckpoint, MsgUpgradeTakeCheckpoint } +export { MsgInitialMessage, MsgCheckpoint, MsgRPCBatch, MsgTakeCheckpoint, MsgAttachTo, MsgRPC } +export { uint64, uint32 } +export { byteArrayToBuffer, byteArrayFromBuffer, parseLogRecords } \ No newline at end of file diff --git a/Clients/TypeScript/package-lock.json b/Clients/TypeScript/package-lock.json new file mode 100644 index 00000000..cfad60ac --- /dev/null +++ b/Clients/TypeScript/package-lock.json @@ -0,0 +1,19 @@ +{ + "name": "microsoft-ambrosia", + "version": "0.0.1", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@types/node": { + "version": "12.7.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-12.7.2.tgz", + "integrity": "sha512-dyYO+f6ihZEtNPDcWNR1fkoTDf3zAK3lAABDze3mz6POyIercH0lEUawUFXlG8xaQZmm1yEBON/4TsYv/laDYg==" + }, + "typescript": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.5.3.tgz", + "integrity": "sha512-ACzBtm/PhXBDId6a6sDJfroT2pOWt/oOnk4/dElG5G33ZL776N3Y6/6bKZJBFpd+b05F3Ct9qDjMeJmRWtE2/g==", + "dev": true + } + } +} diff --git a/Clients/TypeScript/package.json b/Clients/TypeScript/package.json new file mode 100644 index 00000000..2eadcaa7 --- /dev/null +++ b/Clients/TypeScript/package.json @@ -0,0 +1,20 @@ +{ + "name": "microsoft-ambrosia", + "version": "0.0.1", + "description": "TypeScript Ambrosia Binding", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "ISC", + "devDependencies": { + "typescript": "^3.5.3" + }, + "dependencies": { + "@types/node": "^12.7.2" + }, + "bundleDependencies": [ + "@types/node" + ] +} diff --git a/Clients/TypeScript/tsconfig.json b/Clients/TypeScript/tsconfig.json new file mode 100644 index 00000000..b90db974 --- /dev/null +++ b/Clients/TypeScript/tsconfig.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "target": "es5", + "module": "commonjs", + "declaration": true, + "outDir": "./dist", + "strict": true, + "downlevelIteration": true + } +} \ No newline at end of file diff --git a/DevTools/UnsafeDeregisterInstance/App.config b/DevTools/UnsafeDeregisterInstance/App.config index 9c002c9d..da21b5b6 100644 --- a/DevTools/UnsafeDeregisterInstance/App.config +++ b/DevTools/UnsafeDeregisterInstance/App.config @@ -4,11 +4,5 @@ - - - - - - \ No newline at end of file diff --git a/DevTools/UnsafeDeregisterInstance/Program.cs b/DevTools/UnsafeDeregisterInstance/Program.cs index da147fdf..641c817e 100644 --- a/DevTools/UnsafeDeregisterInstance/Program.cs +++ b/DevTools/UnsafeDeregisterInstance/Program.cs @@ -1,6 +1,7 @@ using CRA.ClientLibrary; using System; using System.Collections.Generic; +using System.Diagnostics; using System.Linq; using System.Text; using System.Threading.Tasks; @@ -16,29 +17,32 @@ static void Main(string[] args) Console.WriteLine("UnsafeDeregisterInstance InstanceName"); Console.WriteLine("WARNING: This is a metadata hacking tool that should NEVER be used on a real deployment"); Console.WriteLine("This tool is a convenience for developers who want to more easily test certain application modfications"); + Console.WriteLine("Usage: UnsafeDeregisterInstance InstanceName"); return; } - var client = new CRAClientLibrary(Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING")); + Trace.Listeners.Add(new TextWriterTraceListener(Console.Out)); + var dataProvider = new CRA.DataProvider.Azure.AzureDataProvider(Environment.GetEnvironmentVariable("AZURE_STORAGE_CONN_STRING")); + var client = new CRAClientLibrary(dataProvider); var serviceName = args[0]; - foreach (var endpt in client.GetInputEndpoints(serviceName)) + foreach (var endpt in client.GetInputEndpointsAsync(serviceName).GetAwaiter().GetResult()) { client.DeleteEndpoint(serviceName, endpt); } - foreach (var endpt in client.GetOutputEndpoints(serviceName)) + foreach (var endpt in client.GetOutputEndpointsAsync(serviceName).GetAwaiter().GetResult()) { client.DeleteEndpoint(serviceName, endpt); } - foreach (var conn in client.GetConnectionsFromVertex(serviceName)) + foreach (var conn in client.GetConnectionsFromVertexAsync(serviceName).GetAwaiter().GetResult()) { - client.DeleteConnectionInfo(conn); + client.DeleteConnectionInfoAsync(conn).GetAwaiter().GetResult(); } - foreach (var conn in client.GetConnectionsToVertex(serviceName)) + foreach (var conn in client.GetConnectionsToVertexAsync(serviceName).GetAwaiter().GetResult()) { - client.DeleteConnectionInfo(conn); + client.DeleteConnectionInfoAsync(conn).GetAwaiter().GetResult(); } try { - client.DeleteVertex(serviceName); + client.DeleteVertexAsync(serviceName).GetAwaiter().GetResult(); } catch { } } diff --git a/DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj b/DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj index 7f77f3a7..4150a1ab 100644 --- a/DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj +++ b/DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj @@ -1,12 +1,14 @@  Exe - netcoreapp2.0;net46 + net461;netcoreapp3.1 true x64 win7-x64 true UnsafeDeregisterInstance + true + ../../Ambrosia/Ambrosia.snk NETFRAMEWORK @@ -18,10 +20,25 @@ NETCORE - - 9.3.2 + + + + + + + + + + + + + + + + 4.5.0 + + + 4.5.0 - - \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 757ab58d..b17b4890 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,41 +1,97 @@ # FROM microsoft/dotnet:2.1-sdk # FROM microsoft/dotnet:2.0.9-sdk-2.1.202 -FROM microsoft/dotnet:2.0-sdk +# FROM microsoft/dotnet:2.0-sdk +# FROM microsoft/dotnet:2.2-sdk-2.2.108 -- want this version +# FROM microsoft/dotnet:2.2-sdk +# FROM microsoft/dotnet:3.1-sdk -- no longer proper string format +FROM mcr.microsoft.com/dotnet/core/sdk:3.1 RUN apt-get update -y && \ apt-get install -y libunwind-dev apache2-utils make gcc -# netcat telnet net-tools lsof -ENV BLDFLAGS " -c Release -f netcoreapp2.0 -r linux-x64 " - -# NOTE: use the following for a debug build of AMBROSIA: -# ENV BLDFLAGS " -c Debug -f netcoreapp2.0 -r linux-x64 -p:DefineConstants=DEBUG " - -# Fine-grained version: +# Add only what we need, and add late to minimize rebuilds during development: +ADD AzureBlobsLogPicker /ambrosia/AzureBlobsLogPicker +ADD GenericLogPicker /ambrosia/GenericLogPicker +ADD SharedAmbrosiaTools /ambrosia/SharedAmbrosiaTools ADD ImmortalCoordinator /ambrosia/ImmortalCoordinator +ADD AmbrosiaLib /ambrosia/AmbrosiaLib ADD Ambrosia /ambrosia/Ambrosia ADD DevTools /ambrosia/DevTools WORKDIR /ambrosia +ENV AMBROSIA_DOTNET_FRAMEWORK=netcoreapp3.1 \ + AMBROSIA_DOTNET_FRAMEWORK_20=netstandard2.0 \ + AMBROSIA_DOTNET_CONF=Release \ + AMBROSIA_DOTNET_PLATFORM=linux-x64 + # This is the command we use to build each of the individual C# projects: -ENV BUILDIT "dotnet publish -o /ambrosia/bin $BLDFLAGS" +ENV BLDFLAGS " -c Release -f $AMBROSIA_DOTNET_FRAMEWORK -r $AMBROSIA_DOTNET_PLATFORM " +ENV BUILDIT "dotnet publish $BLDFLAGS" -RUN $BUILDIT Ambrosia/Ambrosia/Ambrosia.csproj -RUN $BUILDIT ImmortalCoordinator/ImmortalCoordinator.csproj -RUN $BUILDIT DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj +ENV BLDFLAGS_20 " -c Release -f $AMBROSIA_DOTNET_FRAMEWORK_20 -r $AMBROSIA_DOTNET_PLATFORM " +ENV BUILDIT_20 "dotnet publish $BLDFLAGS_20" -# Language binding: CSharp (depends on AmbrosiaLibCS on nuget) +# NOTE: use the following for a debug build of AMBROSIA: +# ENV BLDFLAGS " -c Debug -f netcoreapp3.1 -r linux-x64 -p:DefineConstants=DEBUG " + +RUN echo "****************************************** 1 Starting ********************************" +# (1) Language binding: CSharp (depends on AmbrosiaLibCS on nuget) +# ---------------------------------------------------------------- ADD Clients/CSharp /ambrosia/Clients/CSharp -RUN $BUILDIT Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj +RUN $BUILDIT -o /ambrosia/bin/codegen Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj && \ + cd bin && ln -s codegen/AmbrosiaCS + +RUN echo "****************************************** 2 Starting ********************************" + +# (2) Build the core executables and libraries: +# --------------------------------------------- +RUN $BUILDIT_20 -o /ambrosia/bin/AzureBlobsLogPicker AzureBlobsLogPicker/AzureBlobsLogPicker.csproj +RUN $BUILDIT_20 -o /ambrosia/bin/GenericLogPicker GenericLogPicker/GenericLogPicker.csproj +RUN $BUILDIT_20 -o /ambrosia/bin/SharedAmbrosiaTools SharedAmbrosiaTools/SharedAmbrosiaTools.csproj +RUN $BUILDIT -o /ambrosia/bin/runtime Ambrosia/Ambrosia/Ambrosia.csproj +RUN $BUILDIT_20 -o /ambrosia/bin/AmbrosiaLib AmbrosiaLib/Ambrosia/AmbrosiaLib.csproj +RUN $BUILDIT -o /ambrosia/bin/coord ImmortalCoordinator/ImmortalCoordinator.csproj +RUN $BUILDIT -o /ambrosia/bin/unsafedereg DevTools/UnsafeDeregisterInstance/UnsafeDeregisterInstance.csproj -# Low-level Native-code network client: +RUN cd bin && \ + ln -s runtime/Ambrosia Ambrosia && \ + #ln -s AzureBlobsLogPicker/AzureBlobsLogPicker && \ + #ln -s GenericLogPicker/GenericLogPicker && \ + #ln -s SharedAmbrosiaTools/SharedAmbrosiaTools && \ + ln -s coord/ImmortalCoordinator && \ + ln -s unsafedereg/UnsafeDeregisterInstance + +RUN echo "****************************************** 2B Starting ********************************" + +# (2B) Reduce the size of our dotnet binary distribution: +ADD ./Scripts/dedup_bindist.sh Scripts/ +RUN du -sch ./bin && \ + ./Scripts/dedup_bindist.sh && \ + du -sch ./bin + + +RUN echo "****************************************** 3 Starting ********************************" +# (3) Low-level Native-code network client: +# ----------------------------------------- ADD Clients/C /ambrosia/Clients/C -RUN cd Clients/C && make && \ - cp -a bin/libambrosia.* /ambrosia/bin/ && \ - cp -a include /ambrosia/bin/include +# This publishes to the build directory: bin/lib*.* and bin/include +RUN cd Clients/C && make debug # publish + + +RUN echo "****************************************** 4 Starting ********************************" +# (4) A script used by apps to start the ImmortalCoordinator: +# ----------------------------------------------------------- +ADD ./Scripts/runAmbrosiaService.sh bin/ + +RUN echo "****************************************** 4a Starting ********************************" +# We currently use this as a baseline source of dependencies for generated code: +ADD ./Clients/CSharp/AmbrosiaCS/AmbrosiaCS.csproj bin/AmbrosiaCS.csproj -ADD ./AKS-scripts/ScriptBits/runAmbrosiaService.sh bin/ -RUN cd bin && ln -s Ambrosia ambrosia +# Remove unnecessary execute permissions: +# RUN cd bin && (chmod -x *.dll *.so *.dylib *.a 2>/dev/null || echo ok) -ENV AMBROSIA_BINDIR="/ambrosia/bin" -ENV PATH="${PATH}:/ambrosia/bin" +RUN echo "****************************************** 4b Starting ********************************" +# Make "ambrosia", "AmbrosiaCS", and "ImmortalCoordinator" available on PATH: +ENV AMBROSIA_BINDIR="/ambrosia/bin" \ + PATH="${PATH}:/ambrosia/bin" +RUN echo "****************************************** Docker Done ********************************" \ No newline at end of file diff --git a/Dockerfile.release b/Dockerfile.release index fa1402ba..b0330c76 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,9 +1,23 @@ -FROM ambrosia-dev as dev +FROM ambrosia/ambrosia-dev as dev -# The releasedoes not require dotnet SDK to run Ambrosia binaries: +# The release does not require dotnet SDK to run Ambrosia binaries. +# So we start with a generic Ubuntu image: FROM ubuntu:18.04 + +# Also, apache2-utils provides rotatelogs, used by runAmbrosiaService.sh RUN apt-get update -y && \ - apt-get install -y libunwind-dev libicu60 apache2-utils + apt-get install -y apache2-utils + +# These dependencies are listed as .NET core runtime native dependencies: +# https://docs.microsoft.com/en-us/dotnet/core/linux-prerequisites?tabs=netcore2x +RUN apt-get install -y liblttng-ust0 libcurl3 libssl1.0.0 libkrb5-3 zlib1g libicu60 +# libicu52 (for 14.x) +# libicu55 (for 16.x) +# libicu57 (for 17.x) +# libicu60 (for 18.x) + +# These are additional .NET core dependencies BEFORE version 2.1: +RUN apt-get install -y libunwind-dev libuuid1 COPY --from=dev /ambrosia/bin /ambrosia/bin diff --git a/Samples/KubernetesLocalDemo/local-kube-ambrosia-demo.yml b/DustBin/KubernetesLocalDemo/local-kube-ambrosia-demo.yml similarity index 96% rename from Samples/KubernetesLocalDemo/local-kube-ambrosia-demo.yml rename to DustBin/KubernetesLocalDemo/local-kube-ambrosia-demo.yml index 119091fa..b1da8be3 100644 --- a/Samples/KubernetesLocalDemo/local-kube-ambrosia-demo.yml +++ b/DustBin/KubernetesLocalDemo/local-kube-ambrosia-demo.yml @@ -19,7 +19,7 @@ spec : # hostNetwork: true containers: - name : perftest-server - image : ambrosia-perftest + image : ambrosia/ambrosia-perftest imagePullPolicy : "Never" ports: - containerPort: 1500 @@ -58,7 +58,7 @@ spec : dnsPolicy: Default containers: - name : perftest-client - image : ambrosia-perftest + image : ambrosia/ambrosia-perftest imagePullPolicy : "Never" ports: - containerPort: 1500 diff --git a/Samples/KubernetesLocalDemo/run_local_demo.sh b/DustBin/KubernetesLocalDemo/run_local_demo.sh old mode 100644 new mode 100755 similarity index 78% rename from Samples/KubernetesLocalDemo/run_local_demo.sh rename to DustBin/KubernetesLocalDemo/run_local_demo.sh index 2ee09a1c..b28caeda --- a/Samples/KubernetesLocalDemo/run_local_demo.sh +++ b/DustBin/KubernetesLocalDemo/run_local_demo.sh @@ -20,11 +20,11 @@ else fi # Go and build the base images only if they are not found: -if [ "$($DOCKER images -q ambrosia-dev)" == "" ]; then - echo "Could not find 'ambrosia-dev' image, attempting to build it." +if [ "$($DOCKER images -q ambrosia/ambrosia-dev)" == "" ]; then + echo "Could not find 'ambrosia/ambrosia-dev' image, attempting to build it." # Top of Ambrosia source working dir: cd `dirname $0`/../../ - BUILD_DEV_IMAGE_ONLY=1 ./build_docker_images.sh + DONT_BUILD_RELEASE_IMAGE=1 ./build_docker_images.sh cd `dirname $0` fi diff --git a/DustBin/README.md b/DustBin/README.md new file mode 100644 index 00000000..f0adad30 --- /dev/null +++ b/DustBin/README.md @@ -0,0 +1,10 @@ +# DustBin + +This directory is for work-in-progress that is either unfinished or +currently broken. + +These can also be moved to branches/forks while they are completed. + +## Summary of status + + * KubernetesLocalDemo: needs network-related debugging [2018.12.09] diff --git a/Ambrosia/Ambrosia/LogWriter.cs b/GenericLogPicker/GenericLogPicker.cs similarity index 75% rename from Ambrosia/Ambrosia/LogWriter.cs rename to GenericLogPicker/GenericLogPicker.cs index 587932da..914d4e33 100644 --- a/Ambrosia/Ambrosia/LogWriter.cs +++ b/GenericLogPicker/GenericLogPicker.cs @@ -1,62 +1,26 @@ -using Microsoft.VisualStudio.Threading; +using System; +using Microsoft.VisualStudio.Threading; using Microsoft.Win32.SafeHandles; -#if NETFRAMEWORK -using mtcollections.persistent; -#endif -using System; -using System.Collections.Generic; +using FASTER.core; using System.IO; -using System.Linq; -using System.Net.Sockets; using System.Runtime.InteropServices; -using System.Text; using System.Threading; using System.Threading.Tasks; -#if WINDOWS_UWP -using Windows.Storage; -using Windows.Storage.FileProperties; -using Windows.Storage.Streams; -#endif +using CRA.ClientLibrary; namespace Ambrosia { - internal static class LogWriterUtils - { - internal static void Write(this LogWriter writer, - NetworkStream readStream, - long checkpointSize) - { - var blockSize = 1024 * 1024; - var buffer = new byte[blockSize]; - while (checkpointSize > 0) - { - int bytesRead; - if (checkpointSize >= blockSize) - { - bytesRead = readStream.Read(buffer, 0, blockSize); - } - else - { - bytesRead = readStream.Read(buffer, 0, (int)checkpointSize); - } - writer.Write(buffer, 0, bytesRead); - checkpointSize -= bytesRead; - } - } - } - -#if NETFRAMEWORK /// /// Internal class, wraps Overlapped structure, completion port callback and IAsyncResult /// sealed class AsyncJob : IAsyncResult, IDisposable { - #region privates + #region privates private readonly object _eventHandle = new object(); private bool _completedSynchronously = false; private bool _completed = false; private uint _errorCode = 0; - #endregion + #endregion public void SetEventHandle() { @@ -71,7 +35,7 @@ public AsyncJob() { } - #region IDisposable + #region IDisposable bool _disposed = false; public void Dispose() @@ -83,8 +47,7 @@ public void Dispose() GC.SuppressFinalize(this); } - #endregion - + #endregion public void CompleteSynchronously() { @@ -102,7 +65,7 @@ public void WaitForCompletion() public uint ErrorCode { get { return _errorCode; } } - #region IAsyncResult Members + #region IAsyncResult Members public object AsyncState { @@ -123,10 +86,9 @@ public bool IsCompleted { get { return _completed; } } - #endregion + #endregion } - internal class LocalStorageDevice : IDisposable { /// @@ -186,7 +148,7 @@ public LocalStorageDevice(string filename, bool enablePrivileges = false, if (enablePrivileges) { - Native32.EnableVolumePrivileges(ref filename, logHandle); + Native32.EnableVolumePrivileges(filename, logHandle); } if (useIoCompletionPort) @@ -202,7 +164,7 @@ public LocalStorageDevice(string filename, bool enablePrivileges = false, public void Dispose() { - Native32.CloseHandle(logHandle); + logHandle.Close(); } /// @@ -229,7 +191,7 @@ public uint GetSectorSize() return lpBytesPerSector; } - public unsafe void ReadAsync(ulong sourceAddress, + public unsafe void AsyncRead(ulong sourceAddress, IntPtr destinationAddress, uint readLength, IAsyncResult asyncResult) @@ -274,7 +236,7 @@ public unsafe void ReadAsync(ulong sourceAddress, } } - public unsafe void ReadAsync(ulong sourceAddress, + public unsafe void AsyncRead(ulong sourceAddress, IntPtr destinationAddress, uint readLength, IOCompletionCallback callback, @@ -311,7 +273,7 @@ public unsafe void ReadAsync(ulong sourceAddress, } } - public unsafe void WriteAsync(IntPtr sourceAddress, + public unsafe void AsyncWrite(IntPtr sourceAddress, ulong destinationAddress, uint numBytesToWrite, IOCompletionCallback callback, @@ -354,7 +316,8 @@ public unsafe void WriteAsync(IntPtr sourceAddress, } } } - internal class LogWriter : IDisposable + + internal class LogWriterWindows : IDisposable, ILogWriter { unsafe struct BytePtrWrapper { @@ -386,10 +349,10 @@ unsafe struct IOThreadState uint _allocations; uint _lastError; - public unsafe LogWriter(string fileName, - uint chunkSize, - uint maxChunksPerWrite, - bool appendOpen = false) + public unsafe LogWriterWindows(string fileName, + uint chunkSize, + uint maxChunksPerWrite, + bool appendOpen = false) { //Console.WriteLine("64-bitness: " + Environment.Is64BitProcess); _lastError = 0; @@ -514,15 +477,15 @@ private unsafe void DoWrite(ulong writePosInBuffer, ulong chunkNum, uint length) { - _IOThreadInfo[chunkNum].filePointer.WriteAsync((IntPtr)(_buf._ptr + writePosInBuffer), filePos, length, FlushCallback, _IOThreadInfo[chunkNum].ov_native); + _IOThreadInfo[chunkNum].filePointer.AsyncWrite((IntPtr)(_buf._ptr + writePosInBuffer), filePos, length, FlushCallback, _IOThreadInfo[chunkNum].ov_native); } - private unsafe void DoWriteForAsync(ulong writePosInBuffer, - ulong filePos, - ulong chunkNum, - uint length) + private unsafe void DoWriteForAsyncWrites(ulong writePosInBuffer, + ulong filePos, + ulong chunkNum, + uint length) { - _IOThreadInfoAsync[chunkNum].filePointer.WriteAsync((IntPtr)(_buf._ptr + writePosInBuffer), filePos, length, FlushAsyncCallBack, _IOThreadInfoAsync[chunkNum].ov_native); + _IOThreadInfoAsync[chunkNum].filePointer.AsyncWrite((IntPtr)(_buf._ptr + writePosInBuffer), filePos, length, FlushAsyncCallBack, _IOThreadInfoAsync[chunkNum].ov_native); } public void Flush() @@ -580,7 +543,6 @@ public void Flush() } } - public async Task FlushAsync() { long newAllocations = ((long)_fileSize - 1) / _allocationUnit + 1; @@ -607,14 +569,14 @@ public async Task FlushAsync() } for (ulong i = 0; i < numFullChunksToWrite; i++) { - DoWriteForAsync(curWritePos, _filePos + curWritePos, i, _chunkSize); + DoWriteForAsyncWrites(curWritePos, _filePos + curWritePos, i, _chunkSize); curWritePos += _chunkSize; } _bufBytesOccupied = finalChunkSize % sectorSize; if (_bufBytesOccupied != 0) { uint finalWriteSize = (uint)((finalChunkSize - 1) / sectorSize + 1) * sectorSize; - DoWriteForAsync(curWritePos, _filePos + curWritePos, numFullChunksToWrite, finalWriteSize); + DoWriteForAsyncWrites(curWritePos, _filePos + curWritePos, numFullChunksToWrite, finalWriteSize); curWritePos += finalWriteSize; await _writesFinishedQ.DequeueAsync(); _filePos = _filePos + curWritePos - sectorSize; @@ -624,7 +586,7 @@ public async Task FlushAsync() { if (finalChunkSize != 0) { - DoWriteForAsync(curWritePos, _filePos + curWritePos, numFullChunksToWrite, finalChunkSize); + DoWriteForAsyncWrites(curWritePos, _filePos + curWritePos, numFullChunksToWrite, finalChunkSize); await _writesFinishedQ.DequeueAsync(); } else @@ -635,7 +597,7 @@ public async Task FlushAsync() } if (_lastError > 0) { - throw new Exception("Error " +_lastError.ToString() + " writing data to log"); + throw new Exception("Error " + _lastError.ToString() + " writing data to log"); } } @@ -676,6 +638,25 @@ public void Write(byte[] buffer, _bufBytesOccupied += length; } + public void Write(byte[] buffer, + int offset, + int ilength) + { + ulong length = (ulong)ilength; + _fileSize += length; + while (length + _bufBytesOccupied > _bufSize) + { + ulong bufferToWrite = _bufSize - _bufBytesOccupied; + CopyBufferIntoUnmanaged(buffer, (ulong)offset, _bufBytesOccupied, bufferToWrite); + length -= bufferToWrite; + offset += (int)bufferToWrite; + _bufBytesOccupied = _bufSize; + Flush(); + } + CopyBufferIntoUnmanaged(buffer, (ulong)offset, _bufBytesOccupied, length); + _bufBytesOccupied += length; + } + public async Task WriteAsync(byte[] buffer, ulong offset, ulong length) @@ -712,6 +693,25 @@ public async Task WriteAsync(byte[] buffer, _bufBytesOccupied += length; } + public async Task WriteAsync(byte[] buffer, + int offset, + int iLength) + { + ulong length = (ulong)iLength; + _fileSize += length; + while (length + _bufBytesOccupied > _bufSize) + { + ulong bufferToWrite = _bufSize - _bufBytesOccupied; + CopyBufferIntoUnmanaged(buffer, (ulong)offset, _bufBytesOccupied, bufferToWrite); + length -= bufferToWrite; + offset += (int)bufferToWrite; + _bufBytesOccupied = _bufSize; + await FlushAsync(); + } + CopyBufferIntoUnmanaged(buffer, (ulong)offset, _bufBytesOccupied, length); + _bufBytesOccupied += length; + } + public unsafe void WriteByte(byte val) { _fileSize++; @@ -732,6 +732,7 @@ public unsafe void WriteInt(int value) } WriteByte((byte)zigZagEncoded); } + public void WriteIntFixed(int value) { WriteByte((byte)(value & 0xFF)); @@ -751,8 +752,11 @@ public void WriteLongFixed(long value) WriteByte((byte)((value >> 0x30) & 0xFF)); WriteByte((byte)((value >> 0x38) & 0xFF)); } + } - public static void CreateDirectoryIfNotExists(string path) + internal class LogWriterStaticsWindows : ILogWriterStatic + { + public void CreateDirectoryIfNotExists(string path) { if (!Directory.Exists(path)) { @@ -760,31 +764,35 @@ public static void CreateDirectoryIfNotExists(string path) } } - public static bool DirectoryExists(string path) + public bool DirectoryExists(string path) { return Directory.Exists(path); } - public static bool FileExists(string path) + public bool FileExists(string path) { return File.Exists(path); } - public static void DeleteFile(string path) + public void DeleteFile(string path) { File.Delete(path); } + + public ILogWriter Generate(string fileName, + uint chunkSize, + uint maxChunksPerWrite, + bool appendOpen = false) + { + return new LogWriterWindows(fileName, chunkSize, maxChunksPerWrite, appendOpen); + } } -#endif -#if NETCORE - internal class LogWriter : IDisposable + internal class LogWriterGeneric : IDisposable, ILogWriter { FileStream _logStream; - public unsafe LogWriter(string fileName, - uint chunkSize, - uint maxChunksPerWrite, - bool appendOpen = false) + public unsafe LogWriterGeneric(string fileName, + bool appendOpen = false) { _logStream = new FileStream(fileName, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read & ~FileShare.Inheritable); if (appendOpen) @@ -834,8 +842,11 @@ public async Task WriteAsync(byte[] buffer, { await _logStream.WriteAsync(buffer, offset, length); } + } - public static void CreateDirectoryIfNotExists(string path) + internal class LogWriterStaticsGeneric : ILogWriterStatic + { + public void CreateDirectoryIfNotExists(string path) { if (!Directory.Exists(path)) { @@ -843,198 +854,140 @@ public static void CreateDirectoryIfNotExists(string path) } } - public static bool DirectoryExists(string path) + public bool DirectoryExists(string path) { return Directory.Exists(path); } - public static bool FileExists(string path) + public bool FileExists(string path) { return File.Exists(path); } - public static void DeleteFile(string path) + public void DeleteFile(string path) { File.Delete(path); } + + public ILogWriter Generate(string fileName, + uint chunkSize, + uint maxChunksPerWrite, + bool appendOpen = false) + { + return new LogWriterGeneric(fileName, appendOpen); + } } -#endif - -#if WINDOWS_UWP - // I wrote this version of LogWriter using the documentation here: - // https://docs.microsoft.com/en-us/windows/uwp/files/quickstart-reading-and-writing-files - // - // TODO: figure out proper way to handle synchronous LogWriter methods when underlying UWP - // class only provides an async implementation - // - // TODO: figure out proper way to handle async LogWriter methods when underlying UWP class only - // provides a synchronous implementation - internal class LogWriter : IDisposable - { - StorageFile _file; - IRandomAccessStream _stream; - IOutputStream _outputStream; - DataWriter _dataWriter; - private ulong _fileSize = 0; + public class GenericFileLogReader : ILogReader + { + Stream stream; - public LogWriter(string fileName, - uint chunkSize, - uint maxChunksPerWrite, - bool appendOpen = false) + public long Position { - InitializeAsync(fileName, appendOpen).Wait(); + get { return stream.Position; } + set { stream.Position = value; } } - public async Task InitializeAsync(string fileName, bool appendOpen = false) + public GenericFileLogReader(string fileName) { - DirectoryInfo pathInfo = new DirectoryInfo(fileName); - string parentPath = pathInfo.Parent.FullName; - StorageFolder folder = await StorageFolder.GetFolderFromPathAsync(parentPath); - _file = await folder.CreateFileAsync(pathInfo.Name, CreationCollisionOption.OpenIfExists); - - _stream = await _file.OpenAsync(FileAccessMode.ReadWrite); - ulong position = 0; - if (appendOpen) - { - BasicProperties properties = await _file.GetBasicPropertiesAsync(); - position = properties.Size; - } - _outputStream = _stream.GetOutputStreamAt(position); - _dataWriter = new DataWriter(_outputStream); + stream = new FileStream(fileName, FileMode.Open, FileAccess.Read, FileShare.ReadWrite); } - public ulong FileSize { get { return _fileSize; } } + public async Task> ReadIntAsync(byte[] buffer) + { + return await stream.ReadIntAsync(buffer); + } - public void Dispose() + public async Task> ReadIntAsync(byte[] buffer, CancellationToken ct) { - _dataWriter.Dispose(); - _outputStream.Dispose(); - _stream.Dispose(); + return await stream.ReadIntAsync(buffer, ct); } - public void Flush() + + public Tuple ReadInt(byte[] buffer) { - _dataWriter.StoreAsync().AsTask().Wait(); - _outputStream.FlushAsync().AsTask().Wait(); + return stream.ReadInt(buffer); } - public async Task FlushAsync() + public int ReadInt() { - await _dataWriter.StoreAsync(); - await _outputStream.FlushAsync(); + return stream.ReadInt(); } - public void WriteByte(byte value) + public async Task ReadAllRequiredBytesAsync(byte[] buffer, + int offset, + int count, + CancellationToken ct) { - _fileSize++; - _dataWriter.WriteByte(value); + return await stream.ReadAllRequiredBytesAsync(buffer, offset, count, ct); } - // These three methods are all copied from the .NET Framework version of LogWriter - public unsafe void WriteInt(int value) + public async Task ReadAllRequiredBytesAsync(byte[] buffer, + int offset, + int count) { - var zigZagEncoded = unchecked((uint)((value << 1) ^ (value >> 31))); - while ((zigZagEncoded & ~0x7F) != 0) - { - WriteByte((byte)((zigZagEncoded | 0x80) & 0xFF)); - zigZagEncoded >>= 7; - } - WriteByte((byte)zigZagEncoded); + return await stream.ReadAllRequiredBytesAsync(buffer, offset, count); } - public void WriteIntFixed(int value) + + public int ReadAllRequiredBytes(byte[] buffer, + int offset, + int count) { - WriteByte((byte)(value & 0xFF)); - WriteByte((byte)((value >> 0x8) & 0xFF)); - WriteByte((byte)((value >> 0x10) & 0xFF)); - WriteByte((byte)((value >> 0x18) & 0xFF)); + return stream.ReadAllRequiredBytes(buffer, offset, count); } - public void WriteLongFixed(long value) + public long ReadLongFixed() { - WriteByte((byte)(value & 0xFF)); - WriteByte((byte)((value >> 0x8) & 0xFF)); - WriteByte((byte)((value >> 0x10) & 0xFF)); - WriteByte((byte)((value >> 0x18) & 0xFF)); - WriteByte((byte)((value >> 0x20) & 0xFF)); - WriteByte((byte)((value >> 0x28) & 0xFF)); - WriteByte((byte)((value >> 0x30) & 0xFF)); - WriteByte((byte)((value >> 0x38) & 0xFF)); + return stream.ReadLongFixed(); } - public void Write(byte[] buffer, - int offset, - int length) + public int ReadIntFixed() { - _fileSize += (ulong)length; + return stream.ReadIntFixed(); + } - // Hopefully there is a more performant way to do this - byte[] subBuffer = new byte[length]; - Array.Copy(buffer, offset, subBuffer, 0, length); - _dataWriter.WriteBytes(subBuffer); + public byte[] ReadByteArray() + { + return stream.ReadByteArray(); } - // Copied from Write() implementation above - public async Task WriteAsync(byte[] buffer, - int offset, - int length) + public int ReadByte() { - _fileSize += (ulong)length; + return stream.ReadByte(); + } - // Hopefully there is a more performant way to do this - byte[] subBuffer = new byte[length]; - Array.Copy(buffer, offset, subBuffer, 0, length); - _dataWriter.WriteBytes(subBuffer); + public int Read(byte[] buffer, int offset, int count) + { + return stream.Read(buffer, offset, count); } - public static void CreateDirectoryIfNotExists(string path) + public void Dispose() { - DirectoryInfo pathInfo = new DirectoryInfo(path); - string parentPath = pathInfo.Parent.FullName; - StorageFolder folder = StorageFolder.GetFolderFromPathAsync(parentPath).AsTask().Result; - folder.CreateFolderAsync(pathInfo.Name, CreationCollisionOption.OpenIfExists).AsTask().Wait(); + stream.Dispose(); } + } - public static bool DirectoryExists(string path) + internal class GenericFileLogReaderStatics : ILogReaderStatic + { + public ILogReader Generate(string fileName) { - DirectoryInfo pathInfo = new DirectoryInfo(path); - string parentPath = pathInfo.Parent.FullName; - StorageFolder parentFolder = StorageFolder.GetFolderFromPathAsync(parentPath).AsTask().Result; - bool result; - try - { - StorageFolder queriedFolder = parentFolder.GetFolderAsync(pathInfo.Name).AsTask().Result; - result = true; - } - catch (System.AggregateException) - { - result = false; - } - return result; + return new GenericFileLogReader(fileName); } + } + - public static bool FileExists(string path) + public static class GenericLogsInterface + { + public static void SetToGenericLogs() { - FileInfo pathInfo = new FileInfo(path); - string parentPath = pathInfo.Directory.FullName; - StorageFolder parentFolder = StorageFolder.GetFolderFromPathAsync(parentPath).AsTask().Result; - bool result; - try + LogReaderStaticPicker.curStatic = new GenericFileLogReaderStatics(); + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { - StorageFile queriedFile = parentFolder.GetFileAsync(pathInfo.Name).AsTask().Result; - result = true; + LogWriterStaticPicker.curStatic = new LogWriterStaticsWindows(); } - catch (System.AggregateException) + else { - result = false; + LogWriterStaticPicker.curStatic = new LogWriterStaticsGeneric(); } - return result; - } - - public static void DeleteFile(string path) - { - StorageFile file = StorageFile.GetFileFromPathAsync(path).AsTask().Result; - file.DeleteAsync().AsTask().Wait(); } } -#endif } diff --git a/GenericLogPicker/GenericLogPicker.csproj b/GenericLogPicker/GenericLogPicker.csproj new file mode 100644 index 00000000..50f49098 --- /dev/null +++ b/GenericLogPicker/GenericLogPicker.csproj @@ -0,0 +1,30 @@ + + + + netstandard2.0 + true + true + ../Ambrosia/Ambrosia.snk + + + $(DefineConstants);NETSTANDARD + + + + + + + + + + + + + + + + + + + + diff --git a/GenericLogPicker/Native32.cs b/GenericLogPicker/Native32.cs new file mode 100644 index 00000000..d61401ad --- /dev/null +++ b/GenericLogPicker/Native32.cs @@ -0,0 +1,336 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +namespace FASTER.core +{ + using System; + using System.Runtime.InteropServices; + using System.Security; + using Microsoft.Win32.SafeHandles; + using System.Threading; + using System.IO; + + /// + /// Interop with WINAPI for file I/O, threading, and NUMA functions. + /// + public static unsafe class Native32 + { + #region Native structs + [StructLayout(LayoutKind.Sequential)] + private struct LUID + { + public uint lp; + public int hp; + } + + [StructLayout(LayoutKind.Sequential)] + private struct LUID_AND_ATTRIBUTES + { + public LUID Luid; + public uint Attributes; + } + + [StructLayout(LayoutKind.Sequential)] + private struct TOKEN_PRIVILEGES + { + public uint PrivilegeCount; + public LUID_AND_ATTRIBUTES Privileges; + } + + [StructLayout(LayoutKind.Sequential)] + private struct MARK_HANDLE_INFO + { + public uint UsnSourceInfo; + public IntPtr VolumeHandle; + public uint HandleInfo; + } + #endregion + + #region io constants and flags + internal const int ERROR_IO_PENDING = 997; + internal const uint GENERIC_READ = 0x80000000; + internal const uint GENERIC_WRITE = 0x40000000; + internal const uint FILE_FLAG_DELETE_ON_CLOSE = 0x04000000; + internal const uint FILE_FLAG_NO_BUFFERING = 0x20000000; + internal const uint FILE_FLAG_OVERLAPPED = 0x40000000; + + internal const uint FILE_SHARE_DELETE = 0x00000004; + #endregion + + #region io functions + + [DllImport("Kernel32.dll", CharSet = CharSet.Unicode, SetLastError = true)] + internal static extern SafeFileHandle CreateFileW( + [In] string lpFileName, + [In] UInt32 dwDesiredAccess, + [In] UInt32 dwShareMode, + [In] IntPtr lpSecurityAttributes, + [In] UInt32 dwCreationDisposition, + [In] UInt32 dwFlagsAndAttributes, + [In] IntPtr hTemplateFile); + + [DllImport("Kernel32.dll", SetLastError = true)] + internal static extern bool ReadFile( + [In] SafeFileHandle hFile, + [Out] IntPtr lpBuffer, + [In] UInt32 nNumberOfBytesToRead, + [Out] out UInt32 lpNumberOfBytesRead, + [In] NativeOverlapped* lpOverlapped); + + [DllImport("Kernel32.dll", SetLastError = true)] + internal static extern bool WriteFile( + [In] SafeFileHandle hFile, + [In] IntPtr lpBuffer, + [In] UInt32 nNumberOfBytesToWrite, + [Out] out UInt32 lpNumberOfBytesWritten, + [In] NativeOverlapped* lpOverlapped); + + + internal enum EMoveMethod : uint + { + Begin = 0, + Current = 1, + End = 2 + } + + [DllImport("kernel32.dll", SetLastError = true)] + internal static extern uint SetFilePointer( + [In] SafeFileHandle hFile, + [In] int lDistanceToMove, + [In, Out] ref int lpDistanceToMoveHigh, + [In] EMoveMethod dwMoveMethod); + + + [DllImport("kernel32.dll", SetLastError = true)] + public static extern IntPtr CreateIoCompletionPort( + [In] SafeFileHandle fileHandle, + [In] IntPtr existingCompletionPort, + [In] UInt32 completionKey, + [In] UInt32 numberOfConcurrentThreads); + + [DllImport("kernel32.dll", SetLastError = true)] + internal static extern bool SetEndOfFile( + [In] SafeFileHandle hFile); + + + [DllImport("kernel32.dll", SetLastError = true, CharSet = CharSet.Auto)] + internal static extern bool GetDiskFreeSpace(string lpRootPathName, + out uint lpSectorsPerCluster, + out uint lpBytesPerSector, + out uint lpNumberOfFreeClusters, + out uint lpTotalNumberOfClusters); + + [DllImport("kernel32.dll", SetLastError = true)] + internal static extern bool DeleteFileW([MarshalAs(UnmanagedType.LPWStr)]string lpFileName); +#endregion + + #region Thread and NUMA functions + [DllImport("kernel32.dll")] + private static extern IntPtr GetCurrentThread(); + [DllImport("kernel32")] + internal static extern uint GetCurrentThreadId(); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern uint GetCurrentProcessorNumber(); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern uint GetActiveProcessorCount(uint count); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern ushort GetActiveProcessorGroupCount(); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern int SetThreadGroupAffinity(IntPtr hThread, ref GROUP_AFFINITY GroupAffinity, ref GROUP_AFFINITY PreviousGroupAffinity); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern int GetThreadGroupAffinity(IntPtr hThread, ref GROUP_AFFINITY PreviousGroupAffinity); + + private static readonly uint ALL_PROCESSOR_GROUPS = 0xffff; + + [System.Runtime.InteropServices.StructLayoutAttribute(System.Runtime.InteropServices.LayoutKind.Sequential)] + private struct GROUP_AFFINITY + { + public ulong Mask; + public uint Group; + public uint Reserved1; + public uint Reserved2; + public uint Reserved3; + } + + /// + /// Accepts thread id = 0, 1, 2, ... and sprays them round-robin + /// across all cores (viewed as a flat space). On NUMA machines, + /// this gives us [socket, core] ordering of affinitization. That is, + /// if there are N cores per socket, then thread indices of 0 to N-1 map + /// to the range [socket 0, core 0] to [socket 0, core N-1]. + /// + /// Index of thread (from 0 onwards) + public static void AffinitizeThreadRoundRobin(uint threadIdx) + { + uint nrOfProcessors = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); + ushort nrOfProcessorGroups = GetActiveProcessorGroupCount(); + uint nrOfProcsPerGroup = nrOfProcessors / nrOfProcessorGroups; + + GROUP_AFFINITY groupAffinityThread = default(GROUP_AFFINITY); + GROUP_AFFINITY oldAffinityThread = default(GROUP_AFFINITY); + + IntPtr thread = GetCurrentThread(); + GetThreadGroupAffinity(thread, ref groupAffinityThread); + + threadIdx = threadIdx % nrOfProcessors; + + groupAffinityThread.Mask = (ulong)1L << ((int)(threadIdx % (int)nrOfProcsPerGroup)); + groupAffinityThread.Group = (uint)(threadIdx / nrOfProcsPerGroup); + + if (SetThreadGroupAffinity(thread, ref groupAffinityThread, ref oldAffinityThread) == 0) + { + throw new Exception("Unable to affinitize thread"); + } + } + + /// + /// Accepts thread id = 0, 1, 2, ... and sprays them round-robin + /// across all cores (viewed as a flat space). On NUMA machines, + /// this gives us [core, socket] ordering of affinitization. That is, + /// if there are N cores per socket, then thread indices of 0 to N-1 map + /// to the range [socket 0, core 0] to [socket N-1, core 0]. + /// + /// Index of thread (from 0 onwards) + /// Number of NUMA sockets + public static void AffinitizeThreadShardedNuma(uint threadIdx, ushort nrOfProcessorGroups) + { + uint nrOfProcessors = GetActiveProcessorCount(ALL_PROCESSOR_GROUPS); + uint nrOfProcsPerGroup = nrOfProcessors / nrOfProcessorGroups; + + threadIdx = nrOfProcsPerGroup * (threadIdx % nrOfProcessorGroups) + (threadIdx / nrOfProcessorGroups); + AffinitizeThreadRoundRobin(threadIdx); + return; + } + #endregion + + #region Advanced file ops + [DllImport("advapi32.dll", SetLastError = true)] + private static extern bool LookupPrivilegeValue(string lpSystemName, string lpName, ref LUID lpLuid); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern IntPtr GetCurrentProcess(); + + [DllImport("advapi32", SetLastError = true)] + private static extern bool OpenProcessToken(IntPtr ProcessHandle, uint DesiredAccess, out SafeFileHandle TokenHandle); + + [DllImport("advapi32.dll", SetLastError = true)] + private static extern bool AdjustTokenPrivileges(SafeFileHandle tokenhandle, int disableprivs, ref TOKEN_PRIVILEGES Newstate, int BufferLengthInBytes, int PreviousState, int ReturnLengthInBytes); + + [DllImport("Kernel32.dll", SetLastError = true)] + private static extern bool DeviceIoControl(SafeFileHandle hDevice, uint IoControlCode, void* InBuffer, int nInBufferSize, IntPtr OutBuffer, int nOutBufferSize, ref uint pBytesReturned, IntPtr Overlapped); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool SetFilePointerEx(SafeFileHandle hFile, long liDistanceToMove, out long lpNewFilePointer, uint dwMoveMethod); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool SetFileValidData(SafeFileHandle hFile, long ValidDataLength); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern SafeFileHandle CreateFile(string filename, uint access, uint share, IntPtr securityAttributes, uint creationDisposition, uint flagsAndAttributes, IntPtr templateFile); + + /// + /// Enable privilege for process + /// + /// + public static bool EnableProcessPrivileges() + { + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + return false; + + TOKEN_PRIVILEGES token_privileges = default(TOKEN_PRIVILEGES); + token_privileges.PrivilegeCount = 1; + token_privileges.Privileges.Attributes = 0x2; + + if (!LookupPrivilegeValue(null, "SeManageVolumePrivilege", + ref token_privileges.Privileges.Luid)) return false; + + SafeFileHandle token; + if (!OpenProcessToken(GetCurrentProcess(), 0x20, out token)) + return false; + + if (!AdjustTokenPrivileges(token, 0, ref token_privileges, 0, 0, 0)) + { + token.Close(); + return false; + } + if (Marshal.GetLastWin32Error() != 0) + { + token.Close(); + return false; + } + token.Close(); + return true; + } + + private static uint CTL_CODE(uint DeviceType, uint Function, uint Method, uint Access) + { + return (((DeviceType) << 16) | ((Access) << 14) | ((Function) << 2) | (Method)); + } + + internal static bool EnableVolumePrivileges(string filename, SafeFileHandle handle) + { + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + return false; + + string volume_string = "\\\\.\\" + filename.Substring(0, 2); + + uint fileCreation = unchecked((uint)FileMode.Open); + + SafeFileHandle volume_handle = CreateFile(volume_string, 0, 0, IntPtr.Zero, fileCreation, + 0x80, IntPtr.Zero); + if (volume_handle == null) + { + return false; + } + + MARK_HANDLE_INFO mhi; + mhi.UsnSourceInfo = 0x1; + mhi.VolumeHandle = volume_handle.DangerousGetHandle(); + mhi.HandleInfo = 0x1; + + uint bytes_returned = 0; + bool result = DeviceIoControl(handle, CTL_CODE(0x9, 63, 0, 0), + (void*)&mhi, sizeof(MARK_HANDLE_INFO), IntPtr.Zero, + 0, ref bytes_returned, IntPtr.Zero); + + volume_handle.Close(); + return result; + } + + /// + /// Set file size + /// + /// + /// + /// + public static bool SetFileSize(SafeFileHandle file_handle, long file_size) + { + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + return false; + + if (!SetFilePointerEx(file_handle, file_size, out long newFilePtr, 0)) + { + return false; + } + + // Set a fixed file length + if (!SetEndOfFile(file_handle)) + { + return false; + } + + if (!SetFileValidData(file_handle, file_size)) + { + return false; + } + + return true; + } + + internal static int MakeHRFromErrorCode(int errorCode) + { + return unchecked(((int)0x80070000) | errorCode); + } + #endregion + } +} diff --git a/ICGUI/ICGUI.Android/Assets/AboutAssets.txt b/ICGUI/ICGUI.Android/Assets/AboutAssets.txt new file mode 100644 index 00000000..072563f8 --- /dev/null +++ b/ICGUI/ICGUI.Android/Assets/AboutAssets.txt @@ -0,0 +1,19 @@ +Any raw assets you want to be deployed with your application can be placed in +this directory (and child directories) and given a Build Action of "AndroidAsset". + +These files will be deployed with your package and will be accessible using Android's +AssetManager, like this: + +public class ReadAsset : Activity +{ + protected override void OnCreate (Bundle bundle) + { + base.OnCreate (bundle); + + InputStream input = Assets.Open ("my_asset.txt"); + } +} + +Additionally, some Android functions will automatically load asset files: + +Typeface tf = Typeface.CreateFromAsset (Context.Assets, "fonts/samplefont.ttf"); diff --git a/ICGUI/ICGUI.Android/ICGUI.Android.csproj b/ICGUI/ICGUI.Android/ICGUI.Android.csproj new file mode 100644 index 00000000..2b357d0c --- /dev/null +++ b/ICGUI/ICGUI.Android/ICGUI.Android.csproj @@ -0,0 +1,102 @@ + + + + Debug + AnyCPU + {34548405-D006-42FC-AEF9-1B4B8594EAE3} + {EFBA0AD7-5A72-4C68-AF49-83D382785DCF};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC} + {c9e5eea5-ca05-42a1-839b-61506e0a37df} + Library + ICGUI.Droid + ICGUI.Android + True + True + Resources\Resource.designer.cs + Resource + Properties\AndroidManifest.xml + Resources + Assets + false + v9.0 + true + true + Xamarin.Android.Net.AndroidClientHandler + + + + + true + portable + false + bin\Debug + DEBUG; + prompt + 4 + None + + + true + portable + true + bin\Release + prompt + 4 + true + false + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + {d3885c87-d8f7-4523-857c-7a5b36b5bd10} + GenericLogPicker + + + {51B9AB64-D654-45D2-8F28-A193D3E6D6BA} + ICGUI + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.Android/MainActivity.cs b/ICGUI/ICGUI.Android/MainActivity.cs new file mode 100644 index 00000000..71770e7d --- /dev/null +++ b/ICGUI/ICGUI.Android/MainActivity.cs @@ -0,0 +1,35 @@ +using System; + +using Android.App; +using Android.Content.PM; +using Android.Runtime; +using Android.Views; +using Android.Widget; +using Android.OS; +using Ambrosia; + +namespace ICGUI.Droid +{ + [Activity(Label = "ICGUI", Icon = "@mipmap/icon", Theme = "@style/MainTheme", MainLauncher = true, ConfigurationChanges = ConfigChanges.ScreenSize | ConfigChanges.Orientation)] + public class MainActivity : global::Xamarin.Forms.Platform.Android.FormsAppCompatActivity + { + protected override void OnCreate(Bundle savedInstanceState) + { + GenericLogsInterface.SetToGenericLogs(); + TabLayoutResource = Resource.Layout.Tabbar; + ToolbarResource = Resource.Layout.Toolbar; + + base.OnCreate(savedInstanceState); + + Xamarin.Essentials.Platform.Init(this, savedInstanceState); + global::Xamarin.Forms.Forms.Init(this, savedInstanceState); + LoadApplication(new App()); + } + public override void OnRequestPermissionsResult(int requestCode, string[] permissions, [GeneratedEnum] Android.Content.PM.Permission[] grantResults) + { + Xamarin.Essentials.Platform.OnRequestPermissionsResult(requestCode, permissions, grantResults); + + base.OnRequestPermissionsResult(requestCode, permissions, grantResults); + } + } +} \ No newline at end of file diff --git a/ICGUI/ICGUI.Android/Properties/AndroidManifest.xml b/ICGUI/ICGUI.Android/Properties/AndroidManifest.xml new file mode 100644 index 00000000..bd321939 --- /dev/null +++ b/ICGUI/ICGUI.Android/Properties/AndroidManifest.xml @@ -0,0 +1,6 @@ + + + + + + diff --git a/ICGUI/ICGUI.Android/Properties/AssemblyInfo.cs b/ICGUI/ICGUI.Android/Properties/AssemblyInfo.cs new file mode 100644 index 00000000..f7e44ff9 --- /dev/null +++ b/ICGUI/ICGUI.Android/Properties/AssemblyInfo.cs @@ -0,0 +1,30 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using Android.App; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("ICGUI.Android")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("ICGUI.Android")] +[assembly: AssemblyCopyright("Copyright © 2014")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] +[assembly: ComVisible(false)] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] + +// Add some common permissions, these can be removed if not needed +[assembly: UsesPermission(Android.Manifest.Permission.Internet)] +[assembly: UsesPermission(Android.Manifest.Permission.WriteExternalStorage)] diff --git a/ICGUI/ICGUI.Android/Resources/AboutResources.txt b/ICGUI/ICGUI.Android/Resources/AboutResources.txt new file mode 100644 index 00000000..cb30f20b --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/AboutResources.txt @@ -0,0 +1,50 @@ +Images, layout descriptions, binary blobs and string dictionaries can be included +in your application as resource files. Various Android APIs are designed to +operate on the resource IDs instead of dealing with images, strings or binary blobs +directly. + +For example, a sample Android app that contains a user interface layout (main.xml), +an internationalization string table (strings.xml) and some icons (drawable-XXX/icon.png) +would keep its resources in the "Resources" directory of the application: + +Resources/ + drawable-hdpi/ + icon.png + + drawable-ldpi/ + icon.png + + drawable-mdpi/ + icon.png + + layout/ + main.xml + + values/ + strings.xml + +In order to get the build system to recognize Android resources, set the build action to +"AndroidResource". The native Android APIs do not operate directly with filenames, but +instead operate on resource IDs. When you compile an Android application that uses resources, +the build system will package the resources for distribution and generate a class called +"Resource" that contains the tokens for each one of the resources included. For example, +for the above Resources layout, this is what the Resource class would expose: + +public class Resource { + public class drawable { + public const int icon = 0x123; + } + + public class layout { + public const int main = 0x456; + } + + public class strings { + public const int first_string = 0xabc; + public const int second_string = 0xbcd; + } +} + +You would then use R.drawable.icon to reference the drawable/icon.png file, or Resource.layout.main +to reference the layout/main.xml file, or Resource.strings.first_string to reference the first +string in the dictionary file values/strings.xml. diff --git a/ICGUI/ICGUI.Android/Resources/Resource.designer.cs b/ICGUI/ICGUI.Android/Resources/Resource.designer.cs new file mode 100644 index 00000000..2ed61738 --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/Resource.designer.cs @@ -0,0 +1,151 @@ +#pragma warning disable 1591 +//------------------------------------------------------------------------------ +// +// This code was generated by a tool. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// +//------------------------------------------------------------------------------ + +[assembly: global::Android.Runtime.ResourceDesignerAttribute("ICGUI.Droid.Resource", IsApplication=true)] + +namespace ICGUI.Droid +{ + + + [global::System.CodeDom.Compiler.GeneratedCodeAttribute("Xamarin.Android.Build.Tasks", "1.0.0.0")] + public partial class Resource + { + + static Resource() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + public static void UpdateIdValues() + { + } + + public partial class Attribute + { + + static Attribute() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Attribute() + { + } + } + + public partial class Color + { + + // aapt resource value: 0x7F010000 + public const int colorAccent = 2130771968; + + // aapt resource value: 0x7F010001 + public const int colorPrimary = 2130771969; + + // aapt resource value: 0x7F010002 + public const int colorPrimaryDark = 2130771970; + + // aapt resource value: 0x7F010003 + public const int launcher_background = 2130771971; + + static Color() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Color() + { + } + } + + public partial class Id + { + + // aapt resource value: 0x7F020000 + public const int sliding_tabs = 2130837504; + + // aapt resource value: 0x7F020001 + public const int toolbar = 2130837505; + + static Id() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Id() + { + } + } + + public partial class Layout + { + + // aapt resource value: 0x7F030000 + public const int Tabbar = 2130903040; + + // aapt resource value: 0x7F030001 + public const int Toolbar = 2130903041; + + static Layout() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Layout() + { + } + } + + public partial class Mipmap + { + + // aapt resource value: 0x7F040000 + public const int icon = 2130968576; + + // aapt resource value: 0x7F040001 + public const int icon_round = 2130968577; + + // aapt resource value: 0x7F040002 + public const int launcher_foreground = 2130968578; + + static Mipmap() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Mipmap() + { + } + } + + public partial class Style + { + + // aapt resource value: 0x7F050000 + public const int AppCompatDialogStyle = 2131034112; + + // aapt resource value: 0x7F050001 + public const int MainTheme = 2131034113; + + // aapt resource value: 0x7F050002 + public const int MainTheme_Base = 2131034114; + + static Style() + { + global::Android.Runtime.ResourceIdManager.UpdateIdValues(); + } + + private Style() + { + } + } + } +} +#pragma warning restore 1591 diff --git a/ICGUI/ICGUI.Android/Resources/layout/Tabbar.xml b/ICGUI/ICGUI.Android/Resources/layout/Tabbar.xml new file mode 100644 index 00000000..ad1f87d8 --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/layout/Tabbar.xml @@ -0,0 +1,11 @@ + + diff --git a/ICGUI/ICGUI.Android/Resources/layout/Toolbar.xml b/ICGUI/ICGUI.Android/Resources/layout/Toolbar.xml new file mode 100644 index 00000000..aabd0a3b --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/layout/Toolbar.xml @@ -0,0 +1,9 @@ + + diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon.xml b/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon.xml new file mode 100644 index 00000000..88d1d0a1 --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon_round.xml b/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon_round.xml new file mode 100644 index 00000000..88d1d0a1 --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/mipmap-anydpi-v26/icon_round.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/icon.png b/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/icon.png new file mode 100644 index 00000000..4623ca2c Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/icon.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/launcher_foreground.png b/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/launcher_foreground.png new file mode 100644 index 00000000..a89e5bbc Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-hdpi/launcher_foreground.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/icon.png b/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/icon.png new file mode 100644 index 00000000..9b1d25e2 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/icon.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/launcher_foreground.png b/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/launcher_foreground.png new file mode 100644 index 00000000..431a8a05 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-mdpi/launcher_foreground.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/icon.png b/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/icon.png new file mode 100644 index 00000000..844dfe54 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/icon.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/launcher_foreground.png b/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/launcher_foreground.png new file mode 100644 index 00000000..9e9e4f8e Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xhdpi/launcher_foreground.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/icon.png b/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/icon.png new file mode 100644 index 00000000..e20ec9ae Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/icon.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/launcher_foreground.png b/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/launcher_foreground.png new file mode 100644 index 00000000..5f1e1356 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xxhdpi/launcher_foreground.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/icon.png b/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/icon.png new file mode 100644 index 00000000..8a08bf75 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/icon.png differ diff --git a/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/launcher_foreground.png b/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/launcher_foreground.png new file mode 100644 index 00000000..aca9f8d1 Binary files /dev/null and b/ICGUI/ICGUI.Android/Resources/mipmap-xxxhdpi/launcher_foreground.png differ diff --git a/ICGUI/ICGUI.Android/Resources/values/colors.xml b/ICGUI/ICGUI.Android/Resources/values/colors.xml new file mode 100644 index 00000000..d9f6e0ba --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/values/colors.xml @@ -0,0 +1,7 @@ + + + #FFFFFF + #3F51B5 + #303F9F + #FF4081 + diff --git a/ICGUI/ICGUI.Android/Resources/values/styles.xml b/ICGUI/ICGUI.Android/Resources/values/styles.xml new file mode 100644 index 00000000..17a2eb0e --- /dev/null +++ b/ICGUI/ICGUI.Android/Resources/values/styles.xml @@ -0,0 +1,30 @@ + + + + + + + + + diff --git a/ICGUI/ICGUI.UWP/App.xaml b/ICGUI/ICGUI.UWP/App.xaml new file mode 100644 index 00000000..e51887d1 --- /dev/null +++ b/ICGUI/ICGUI.UWP/App.xaml @@ -0,0 +1,8 @@ + + + diff --git a/ICGUI/ICGUI.UWP/App.xaml.cs b/ICGUI/ICGUI.UWP/App.xaml.cs new file mode 100644 index 00000000..9e917b40 --- /dev/null +++ b/ICGUI/ICGUI.UWP/App.xaml.cs @@ -0,0 +1,106 @@ +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices.WindowsRuntime; +using Windows.ApplicationModel; +using Windows.ApplicationModel.Activation; +using Windows.Foundation; +using Windows.Foundation.Collections; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Controls.Primitives; +using Windows.UI.Xaml.Data; +using Windows.UI.Xaml.Input; +using Windows.UI.Xaml.Media; +using Windows.UI.Xaml.Navigation; + +namespace ICGUI.UWP +{ + /// + /// Provides application-specific behavior to supplement the default Application class. + /// + sealed partial class App : Application + { + /// + /// Initializes the singleton application object. This is the first line of authored code + /// executed, and as such is the logical equivalent of main() or WinMain(). + /// + public App() + { + this.InitializeComponent(); + this.Suspending += OnSuspending; + } + + /// + /// Invoked when the application is launched normally by the end user. Other entry points + /// will be used such as when the application is launched to open a specific file. + /// + /// Details about the launch request and process. + protected override void OnLaunched(LaunchActivatedEventArgs e) + { +#if DEBUG + if (System.Diagnostics.Debugger.IsAttached) + { + this.DebugSettings.EnableFrameRateCounter = true; + } +#endif + + Frame rootFrame = Window.Current.Content as Frame; + + // Do not repeat app initialization when the Window already has content, + // just ensure that the window is active + if (rootFrame == null) + { + // Create a Frame to act as the navigation context and navigate to the first page + rootFrame = new Frame(); + + rootFrame.NavigationFailed += OnNavigationFailed; + + Xamarin.Forms.Forms.Init(e); + + if (e.PreviousExecutionState == ApplicationExecutionState.Terminated) + { + //TODO: Load state from previously suspended application + } + + // Place the frame in the current Window + Window.Current.Content = rootFrame; + } + + if (rootFrame.Content == null) + { + // When the navigation stack isn't restored navigate to the first page, + // configuring the new page by passing required information as a navigation + // parameter + rootFrame.Navigate(typeof(MainPage), e.Arguments); + } + // Ensure the current window is active + Window.Current.Activate(); + } + + /// + /// Invoked when Navigation to a certain page fails + /// + /// The Frame which failed navigation + /// Details about the navigation failure + void OnNavigationFailed(object sender, NavigationFailedEventArgs e) + { + throw new Exception("Failed to load Page " + e.SourcePageType.FullName); + } + + /// + /// Invoked when application execution is being suspended. Application state is saved + /// without knowing whether the application will be terminated or resumed with the contents + /// of memory still intact. + /// + /// The source of the suspend request. + /// Details about the suspend request. + private void OnSuspending(object sender, SuspendingEventArgs e) + { + var deferral = e.SuspendingOperation.GetDeferral(); + //TODO: Save application state and stop any background activity + deferral.Complete(); + } + } +} diff --git a/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-100.png b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-100.png new file mode 100644 index 00000000..c3e93b09 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-200.png b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-200.png new file mode 100644 index 00000000..2709372a Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-400.png b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-400.png new file mode 100644 index 00000000..ff031995 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/LargeTile.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-100.png b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-100.png new file mode 100644 index 00000000..41108536 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-200.png b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-200.png new file mode 100644 index 00000000..c6350958 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-400.png b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-400.png new file mode 100644 index 00000000..e28c0528 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SmallTile.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-100.png b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-100.png new file mode 100644 index 00000000..eacdf203 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-200.png b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-200.png new file mode 100644 index 00000000..caa5fc9c Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-400.png b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-400.png new file mode 100644 index 00000000..16d97844 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/SplashScreen.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-100.png b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-100.png new file mode 100644 index 00000000..c1709f2b Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-200.png b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-200.png new file mode 100644 index 00000000..48732ffe Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-400.png b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-400.png new file mode 100644 index 00000000..95456beb Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square150x150Logo.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-16.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-16.png new file mode 100644 index 00000000..0c6fd15f Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-16.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-256.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-256.png new file mode 100644 index 00000000..6635c80f Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-256.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-48.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-48.png new file mode 100644 index 00000000..ee58ea6d Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.altform-unplated_targetsize-48.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-100.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-100.png new file mode 100644 index 00000000..06fc87c1 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-200.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-200.png new file mode 100644 index 00000000..eaf27573 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-400.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-400.png new file mode 100644 index 00000000..8a4ee54f Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-16.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-16.png new file mode 100644 index 00000000..0c6fd15f Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-16.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-256.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-256.png new file mode 100644 index 00000000..6635c80f Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-256.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-48.png b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-48.png new file mode 100644 index 00000000..ee58ea6d Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Square44x44Logo.targetsize-48.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/StoreLogo.backup.png b/ICGUI/ICGUI.UWP/Assets/StoreLogo.backup.png new file mode 100644 index 00000000..a197aaf5 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/StoreLogo.backup.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-100.png b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-100.png new file mode 100644 index 00000000..4fbbc70d Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-200.png b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-200.png new file mode 100644 index 00000000..29db5019 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-400.png b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-400.png new file mode 100644 index 00000000..383ad6e4 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/StoreLogo.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-100.png b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-100.png new file mode 100644 index 00000000..476954a0 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-100.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-200.png b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-200.png new file mode 100644 index 00000000..eacdf203 Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-200.png differ diff --git a/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-400.png b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-400.png new file mode 100644 index 00000000..caa5fc9c Binary files /dev/null and b/ICGUI/ICGUI.UWP/Assets/Wide310x150Logo.scale-400.png differ diff --git a/ICGUI/ICGUI.UWP/ICGUI.UWP.csproj b/ICGUI/ICGUI.UWP/ICGUI.UWP.csproj new file mode 100644 index 00000000..5d299a44 --- /dev/null +++ b/ICGUI/ICGUI.UWP/ICGUI.UWP.csproj @@ -0,0 +1,164 @@ + + + + + Debug + x86 + {2F5C9EEE-5B7B-4C17-BE64-267C350BC871} + AppContainerExe + Properties + ICGUI.UWP + ICGUI.UWP + en-US + UAP + 10.0.17134.0 + 10.0.16299.0 + 14 + true + 512 + {A5A43C5B-DE2A-4C0C-9213-0A381AF9435A};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC} + false + + + true + bin\ARM\Debug\ + DEBUG;TRACE;NETFX_CORE;WINDOWS_UWP + ;2008 + full + ARM + false + prompt + true + + + bin\ARM\Release\ + TRACE;NETFX_CORE;WINDOWS_UWP + true + ;2008 + pdbonly + ARM + false + prompt + true + true + + + true + bin\x64\Debug\ + DEBUG;TRACE;NETFX_CORE;WINDOWS_UWP + ;2008 + full + x64 + false + prompt + true + + + bin\x64\Release\ + TRACE;NETFX_CORE;WINDOWS_UWP + true + ;2008 + pdbonly + x64 + false + prompt + true + true + + + true + bin\x86\Debug\ + DEBUG;TRACE;NETFX_CORE;WINDOWS_UWP + ;2008 + full + x86 + false + prompt + true + + + bin\x86\Release\ + TRACE;NETFX_CORE;WINDOWS_UWP + true + ;2008 + pdbonly + x86 + false + prompt + true + true + + + + App.xaml + + + MainPage.xaml + + + + + + Designer + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + MSBuild:Compile + Designer + + + MSBuild:Compile + Designer + + + + + + + + + + {fe687d17-1499-49b4-99fe-9c30d48afce1} + AzureBlobsLogPicker + + + {51B9AB64-D654-45D2-8F28-A193D3E6D6BA} + ICGUI + + + + 14.0 + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.UWP/MainPage.xaml b/ICGUI/ICGUI.UWP/MainPage.xaml new file mode 100644 index 00000000..cf5c933f --- /dev/null +++ b/ICGUI/ICGUI.UWP/MainPage.xaml @@ -0,0 +1,15 @@ + + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.UWP/MainPage.xaml.cs b/ICGUI/ICGUI.UWP/MainPage.xaml.cs new file mode 100644 index 00000000..9ab9103e --- /dev/null +++ b/ICGUI/ICGUI.UWP/MainPage.xaml.cs @@ -0,0 +1,29 @@ +using Ambrosia; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Runtime.InteropServices.WindowsRuntime; +using Windows.Foundation; +using Windows.Foundation.Collections; +using Windows.UI.Xaml; +using Windows.UI.Xaml.Controls; +using Windows.UI.Xaml.Controls.Primitives; +using Windows.UI.Xaml.Data; +using Windows.UI.Xaml.Input; +using Windows.UI.Xaml.Media; +using Windows.UI.Xaml.Navigation; + +namespace ICGUI.UWP +{ + public sealed partial class MainPage + { + public MainPage() + { + AzureBlobsLogsInterface.SetToAzureBlobsLogs(); + this.InitializeComponent(); + + LoadApplication(new ICGUI.App()); + } + } +} diff --git a/ICGUI/ICGUI.UWP/Package.appxmanifest b/ICGUI/ICGUI.UWP/Package.appxmanifest new file mode 100644 index 00000000..39bb8e69 --- /dev/null +++ b/ICGUI/ICGUI.UWP/Package.appxmanifest @@ -0,0 +1,55 @@ + + + + + + + + + + ICGUI.UWP + ee4606bf-f9d0-43e8-92cd-3a1495bc3afa + Assets\StoreLogo.png + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.UWP/Properties/AssemblyInfo.cs b/ICGUI/ICGUI.UWP/Properties/AssemblyInfo.cs new file mode 100644 index 00000000..8806fd00 --- /dev/null +++ b/ICGUI/ICGUI.UWP/Properties/AssemblyInfo.cs @@ -0,0 +1,29 @@ +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyTitle("ICGUI.UWP")] +[assembly: AssemblyDescription("")] +[assembly: AssemblyConfiguration("")] +[assembly: AssemblyCompany("")] +[assembly: AssemblyProduct("ICGUI.UWP")] +[assembly: AssemblyCopyright("Copyright © 2015")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Version information for an assembly consists of the following four values: +// +// Major Version +// Minor Version +// Build Number +// Revision +// +// You can specify all the values or you can default the Build and Revision Numbers +// by using the '*' as shown below: +// [assembly: AssemblyVersion("1.0.*")] +[assembly: AssemblyVersion("1.0.0.0")] +[assembly: AssemblyFileVersion("1.0.0.0")] +[assembly: ComVisible(false)] \ No newline at end of file diff --git a/ICGUI/ICGUI.UWP/Properties/Default.rd.xml b/ICGUI/ICGUI.UWP/Properties/Default.rd.xml new file mode 100644 index 00000000..7c40ffeb --- /dev/null +++ b/ICGUI/ICGUI.UWP/Properties/Default.rd.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.iOS/AppDelegate.cs b/ICGUI/ICGUI.iOS/AppDelegate.cs new file mode 100644 index 00000000..28e125b2 --- /dev/null +++ b/ICGUI/ICGUI.iOS/AppDelegate.cs @@ -0,0 +1,31 @@ +using System; +using System.Collections.Generic; +using System.Linq; + +using Foundation; +using UIKit; + +namespace ICGUI.iOS +{ + // The UIApplicationDelegate for the application. This class is responsible for launching the + // User Interface of the application, as well as listening (and optionally responding) to + // application events from iOS. + [Register("AppDelegate")] + public partial class AppDelegate : global::Xamarin.Forms.Platform.iOS.FormsApplicationDelegate + { + // + // This method is invoked when the application has loaded and is ready to run. In this + // method you should instantiate the window, load the UI into it and then make the window + // visible. + // + // You have 17 seconds to return from this method, or iOS will terminate your application. + // + public override bool FinishedLaunching(UIApplication app, NSDictionary options) + { + global::Xamarin.Forms.Forms.Init(); + LoadApplication(new App()); + + return base.FinishedLaunching(app, options); + } + } +} diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Contents.json b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 00000000..98f4d035 --- /dev/null +++ b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,117 @@ +{ + "images": [ + { + "scale": "2x", + "size": "20x20", + "idiom": "iphone", + "filename": "Icon40.png" + }, + { + "scale": "3x", + "size": "20x20", + "idiom": "iphone", + "filename": "Icon60.png" + }, + { + "scale": "2x", + "size": "29x29", + "idiom": "iphone", + "filename": "Icon58.png" + }, + { + "scale": "3x", + "size": "29x29", + "idiom": "iphone", + "filename": "Icon87.png" + }, + { + "scale": "2x", + "size": "40x40", + "idiom": "iphone", + "filename": "Icon80.png" + }, + { + "scale": "3x", + "size": "40x40", + "idiom": "iphone", + "filename": "Icon120.png" + }, + { + "scale": "2x", + "size": "60x60", + "idiom": "iphone", + "filename": "Icon120.png" + }, + { + "scale": "3x", + "size": "60x60", + "idiom": "iphone", + "filename": "Icon180.png" + }, + { + "scale": "1x", + "size": "20x20", + "idiom": "ipad", + "filename": "Icon20.png" + }, + { + "scale": "2x", + "size": "20x20", + "idiom": "ipad", + "filename": "Icon40.png" + }, + { + "scale": "1x", + "size": "29x29", + "idiom": "ipad", + "filename": "Icon29.png" + }, + { + "scale": "2x", + "size": "29x29", + "idiom": "ipad", + "filename": "Icon58.png" + }, + { + "scale": "1x", + "size": "40x40", + "idiom": "ipad", + "filename": "Icon40.png" + }, + { + "scale": "2x", + "size": "40x40", + "idiom": "ipad", + "filename": "Icon80.png" + }, + { + "scale": "1x", + "size": "76x76", + "idiom": "ipad", + "filename": "Icon76.png" + }, + { + "scale": "2x", + "size": "76x76", + "idiom": "ipad", + "filename": "Icon152.png" + }, + { + "scale": "2x", + "size": "83.5x83.5", + "idiom": "ipad", + "filename": "Icon167.png" + }, + { + "scale": "1x", + "size": "1024x1024", + "idiom": "ios-marketing", + "filename": "Icon1024.png" + } + ], + "properties": {}, + "info": { + "version": 1, + "author": "xcode" + } +} \ No newline at end of file diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon1024.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon1024.png new file mode 100644 index 00000000..9174c989 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon1024.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon120.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon120.png new file mode 100644 index 00000000..9c60a176 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon120.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon152.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon152.png new file mode 100644 index 00000000..448d6efb Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon152.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon167.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon167.png new file mode 100644 index 00000000..8524768f Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon167.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon180.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon180.png new file mode 100644 index 00000000..60a64703 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon180.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon20.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon20.png new file mode 100644 index 00000000..45268a64 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon20.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon29.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon29.png new file mode 100644 index 00000000..6a6c77a8 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon29.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon40.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon40.png new file mode 100644 index 00000000..cc7edcf5 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon40.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon58.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon58.png new file mode 100644 index 00000000..1ad04f00 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon58.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon60.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon60.png new file mode 100644 index 00000000..2dd52620 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon60.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon76.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon76.png new file mode 100644 index 00000000..b058cae2 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon76.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon80.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon80.png new file mode 100644 index 00000000..02e47a26 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon80.png differ diff --git a/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon87.png b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon87.png new file mode 100644 index 00000000..4954a4bd Binary files /dev/null and b/ICGUI/ICGUI.iOS/Assets.xcassets/AppIcon.appiconset/Icon87.png differ diff --git a/ICGUI/ICGUI.iOS/Entitlements.plist b/ICGUI/ICGUI.iOS/Entitlements.plist new file mode 100644 index 00000000..e9a3005f --- /dev/null +++ b/ICGUI/ICGUI.iOS/Entitlements.plist @@ -0,0 +1,7 @@ + + + + + + + diff --git a/ICGUI/ICGUI.iOS/ICGUI.iOS.csproj b/ICGUI/ICGUI.iOS/ICGUI.iOS.csproj new file mode 100644 index 00000000..e0342dbc --- /dev/null +++ b/ICGUI/ICGUI.iOS/ICGUI.iOS.csproj @@ -0,0 +1,140 @@ + + + + Debug + iPhoneSimulator + 8.0.30703 + 2.0 + {D152FAFE-5C23-4BDA-8FD7-44931F91076C} + {FEACFBD2-3405-455C-9665-78FE426C6842};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC} + {6143fdea-f3c2-4a09-aafa-6e230626515e} + Exe + ICGUI.iOS + Resources + ICGUI.iOS + true + NSUrlSessionHandler + automatic + + + true + full + false + bin\iPhoneSimulator\Debug + DEBUG + prompt + 4 + x86_64 + None + true + + + none + true + bin\iPhoneSimulator\Release + prompt + 4 + None + x86_64 + + + true + full + false + bin\iPhone\Debug + DEBUG + prompt + 4 + ARM64 + iPhone Developer + true + Entitlements.plist + None + -all + + + none + true + bin\iPhone\Release + prompt + 4 + ARM64 + iPhone Developer + Entitlements.plist + + + + + + + + + + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + false + + + + + + + + + + + + + + + + + + {d3885c87-d8f7-4523-857c-7a5b36b5bd10} + GenericLogPicker + + + {51B9AB64-D654-45D2-8F28-A193D3E6D6BA} + ICGUI + + + \ No newline at end of file diff --git a/ICGUI/ICGUI.iOS/Info.plist b/ICGUI/ICGUI.iOS/Info.plist new file mode 100644 index 00000000..1dc93b2c --- /dev/null +++ b/ICGUI/ICGUI.iOS/Info.plist @@ -0,0 +1,38 @@ + + + + + UIDeviceFamily + + 1 + 2 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UISupportedInterfaceOrientations~ipad + + UIInterfaceOrientationPortrait + UIInterfaceOrientationPortraitUpsideDown + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + MinimumOSVersion + 8.0 + CFBundleDisplayName + ICGUI + CFBundleIdentifier + com.companyname.ICGUI + CFBundleVersion + 1.0 + UILaunchStoryboardName + LaunchScreen + CFBundleName + ICGUI + XSAppIconAssets + Assets.xcassets/AppIcon.appiconset + + diff --git a/ICGUI/ICGUI.iOS/Main.cs b/ICGUI/ICGUI.iOS/Main.cs new file mode 100644 index 00000000..9591da53 --- /dev/null +++ b/ICGUI/ICGUI.iOS/Main.cs @@ -0,0 +1,21 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using Ambrosia; +using Foundation; +using UIKit; + +namespace ICGUI.iOS +{ + public class Application + { + // This is the main entry point of the application. + static void Main(string[] args) + { + // if you want to use a different Application Delegate class from "AppDelegate" + // you can specify it here. + GenericLogsInterface.SetToGenericLogs(); + UIApplication.Main(args, null, "AppDelegate"); + } + } +} diff --git a/Samples/StreamingDemo/ThrowAway/TwitterHostAPI/Properties/AssemblyInfo.cs b/ICGUI/ICGUI.iOS/Properties/AssemblyInfo.cs similarity index 80% rename from Samples/StreamingDemo/ThrowAway/TwitterHostAPI/Properties/AssemblyInfo.cs rename to ICGUI/ICGUI.iOS/Properties/AssemblyInfo.cs index 9bf750f9..5bb904a1 100644 --- a/Samples/StreamingDemo/ThrowAway/TwitterHostAPI/Properties/AssemblyInfo.cs +++ b/ICGUI/ICGUI.iOS/Properties/AssemblyInfo.cs @@ -2,34 +2,34 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; -// General Information about an assembly is controlled through the following +// General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. -[assembly: AssemblyTitle("TwitterHostAPI")] +[assembly: AssemblyTitle("ICGUI.iOS")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("TwitterHostAPI")] -[assembly: AssemblyCopyright("Copyright © 2018")] +[assembly: AssemblyProduct("ICGUI.iOS")] +[assembly: AssemblyCopyright("Copyright © 2014")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] -// Setting ComVisible to false makes the types in this assembly not visible -// to COM components. If you need to access a type in this assembly from +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM -[assembly: Guid("3962824f-9a7d-47af-ba9b-4a852ad7e8cc")] +[assembly: Guid("72bdc44f-c588-44f3-b6df-9aace7daafdd")] // Version information for an assembly consists of the following four values: // // Major Version -// Minor Version +// Minor Version // Build Number // Revision // -// You can specify all the values or you can default the Build and Revision Numbers +// You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] diff --git a/ICGUI/ICGUI.iOS/Resources/Default-568h@2x.png b/ICGUI/ICGUI.iOS/Resources/Default-568h@2x.png new file mode 100644 index 00000000..26c6461e Binary files /dev/null and b/ICGUI/ICGUI.iOS/Resources/Default-568h@2x.png differ diff --git a/ICGUI/ICGUI.iOS/Resources/Default-Portrait.png b/ICGUI/ICGUI.iOS/Resources/Default-Portrait.png new file mode 100644 index 00000000..5d0d1ab4 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Resources/Default-Portrait.png differ diff --git a/ICGUI/ICGUI.iOS/Resources/Default-Portrait@2x.png b/ICGUI/ICGUI.iOS/Resources/Default-Portrait@2x.png new file mode 100644 index 00000000..0ee2688e Binary files /dev/null and b/ICGUI/ICGUI.iOS/Resources/Default-Portrait@2x.png differ diff --git a/ICGUI/ICGUI.iOS/Resources/Default.png b/ICGUI/ICGUI.iOS/Resources/Default.png new file mode 100644 index 00000000..b74643c0 Binary files /dev/null and b/ICGUI/ICGUI.iOS/Resources/Default.png differ diff --git a/ICGUI/ICGUI.iOS/Resources/Default@2x.png b/ICGUI/ICGUI.iOS/Resources/Default@2x.png new file mode 100644 index 00000000..dbd6bd3e Binary files /dev/null and b/ICGUI/ICGUI.iOS/Resources/Default@2x.png differ diff --git a/ICGUI/ICGUI.iOS/Resources/LaunchScreen.storyboard b/ICGUI/ICGUI.iOS/Resources/LaunchScreen.storyboard new file mode 100644 index 00000000..a639c2f1 --- /dev/null +++ b/ICGUI/ICGUI.iOS/Resources/LaunchScreen.storyboard @@ -0,0 +1,39 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/ICGUI/ICGUI/App.xaml b/ICGUI/ICGUI/App.xaml new file mode 100644 index 00000000..b391be5c --- /dev/null +++ b/ICGUI/ICGUI/App.xaml @@ -0,0 +1,11 @@ + + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI/App.xaml.cs b/ICGUI/ICGUI/App.xaml.cs new file mode 100644 index 00000000..f9e6a20b --- /dev/null +++ b/ICGUI/ICGUI/App.xaml.cs @@ -0,0 +1,28 @@ +using System; +using Xamarin.Forms; +using Xamarin.Forms.Xaml; + +namespace ICGUI +{ + public partial class App : Application + { + public App() + { + InitializeComponent(); + + MainPage = new MainPage(); + } + + protected override void OnStart() + { + } + + protected override void OnSleep() + { + } + + protected override void OnResume() + { + } + } +} diff --git a/ICGUI/ICGUI/AssemblyInfo.cs b/ICGUI/ICGUI/AssemblyInfo.cs new file mode 100644 index 00000000..c859952e --- /dev/null +++ b/ICGUI/ICGUI/AssemblyInfo.cs @@ -0,0 +1,3 @@ +using Xamarin.Forms.Xaml; + +[assembly: XamlCompilation(XamlCompilationOptions.Compile)] \ No newline at end of file diff --git a/ICGUI/ICGUI/ICGUI.csproj b/ICGUI/ICGUI/ICGUI.csproj new file mode 100644 index 00000000..33f95cdb --- /dev/null +++ b/ICGUI/ICGUI/ICGUI.csproj @@ -0,0 +1,22 @@ + + + + netstandard2.0 + true + + + + portable + true + + + + + + + + + + + + \ No newline at end of file diff --git a/ICGUI/ICGUI/MainPage.xaml b/ICGUI/ICGUI/MainPage.xaml new file mode 100644 index 00000000..c809a647 --- /dev/null +++ b/ICGUI/ICGUI/MainPage.xaml @@ -0,0 +1,30 @@ + + + + + +