From 274fe9f3759552f1cde5bcc440e5e301b30080bf Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 5 Feb 2021 01:32:17 -0800 Subject: [PATCH 01/35] Superpmi on Microbenchmarks --- eng/pipelines/coreclr/superpmi.yml | 21 ++ src/coreclr/scripts/superpmi.proj | 27 ++- src/coreclr/scripts/superpmi.py | 9 +- src/coreclr/scripts/superpmi_benchmarks.py | 180 ++++++++++++++++++ .../{superpmi-setup.py => superpmi_setup.py} | 61 ++++-- 5 files changed, 280 insertions(+), 18 deletions(-) create mode 100644 src/coreclr/scripts/superpmi_benchmarks.py rename src/coreclr/scripts/{superpmi-setup.py => superpmi_setup.py} (88%) diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml index 163c8184c1e820..0e650dd29fb6ad 100644 --- a/eng/pipelines/coreclr/superpmi.yml +++ b/eng/pipelines/coreclr/superpmi.yml @@ -138,3 +138,24 @@ jobs: collectionType: pmi collectionName: tests +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml + buildConfig: checked + platforms: + # Linux tests are built on the OSX machines. + # - OSX_x64 + - Linux_arm + - Linux_arm64 + - Linux_x64 + - windows_x64 + - windows_x86 + - windows_arm64 + - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 + helixQueueGroup: ci + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release + collectionType: run + collectionName: benchmarks diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 5fa51d7f055c2a..0e3f7f696bd52f 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -21,6 +21,12 @@ $(WorkItemDirectory)\pmiAssembliesDirectory %HELIX_WORKITEM_PAYLOAD%\binaries %HELIX_CORRELATION_PAYLOAD%\superpmi + + + %HELIX_CORRELATION_PAYLOAD%\performance + %PerformanceDirectory%\tools\dotnet\dotnet.exe + superpmi-shim-collector.dll + %HELIX_WORKITEM_UPLOAD_ROOT% $(BUILD_SOURCESDIRECTORY)\artifacts\helixresults @@ -31,6 +37,12 @@ $(WorkItemDirectory)/pmiAssembliesDirectory $HELIX_WORKITEM_PAYLOAD/binaries $HELIX_CORRELATION_PAYLOAD/superpmi + + + $HELIX_CORRELATION_PAYLOAD/performance + $PerformanceDirectory/tools/dotnet/dotnet + libsuperpmi-shim-collector.so + $HELIX_WORKITEM_UPLOAD_ROOT $(BUILD_SOURCESDIRECTORY)/artifacts/helixresults @@ -41,6 +53,10 @@ $(Python) $(WorkItemCommand) -assemblies $(PmiAssembliesDirectory) -arch $(Architecture) -build_type $(BuildConfig) -core_root $(SuperPMIDirectory) + + $(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -python_path $(Python) -core_root $(SuperPMIDirectory) -shim_name $(SpmiCollectorName) + + false false @@ -68,12 +84,12 @@ - + - + $(CollectionName).$(CollectionType).%(HelixWorkItem.PartitionId).$(MchFileTag) $(PmiAssembliesPayload)$(FileSeparatorChar)$(CollectionName)$(FileSeparatorChar)%(HelixWorkItem.PmiAssemblies) @@ -82,6 +98,13 @@ %(OutputFileName).mch;%(OutputFileName).mch.mct;%(OutputFileName).log + + + + $(CollectionName).$(CollectionType).$(MchFileTag) + $(WorkItemCommand) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log + + + + \ @@ -138,35 +164,9 @@ $(CollectionName).$(CollectionType).%(HelixWorkItem.Index).$(MchFileTag) - $(WorkItemCommand) --partition-count $(PartitionCount) --partition-index %(HelixWorkItem.Index) -output_mch_path 
$(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log + $(WorkItemCommand) --partition_count $(PartitionCount) --partition_index %(HelixWorkItem.Index) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log $(WorkItemTimeout) %(OutputFileName).mch;%(OutputFileName).mch.mct;%(OutputFileName).log - - - - diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index eaf27e1be91a72..e3982a9e367dd5 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -31,6 +31,8 @@ parser.add_argument("-shim_name", help="Name of collector shim") parser.add_argument("-output_mch_path", help="Absolute path to the mch file to produce") parser.add_argument("-log_file", help="Name of the log file") +parser.add_argument("-partition_count", help="Total number of partitions") +parser.add_argument("-partition_index", help="Partition index to do the collection for") def setup_args(args): @@ -83,12 +85,12 @@ def setup_args(args): coreclr_args.verify(args, "partition_count", - lambda unused: True, + lambda partition_count: partition_count.isnumeric(), "Unable to set partition_count") coreclr_args.verify(args, "partition_index", - lambda unused: True, + lambda partition_index: partition_index.isnumeric(), "Unable to set partition_index") return coreclr_args From 492bc68e235c4829fc8e7c7f31de1fe0899e58d6 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 5 Feb 2021 12:37:14 -0800 Subject: [PATCH 06/35] fix the name of partition_index and partition_count --- src/coreclr/scripts/superpmi.proj | 2 +- src/coreclr/scripts/superpmi_setup.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 6b78000dc4b804..aaa47e61086971 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -164,7 +164,7 @@ $(CollectionName).$(CollectionType).%(HelixWorkItem.Index).$(MchFileTag) - $(WorkItemCommand) --partition_count $(PartitionCount) --partition_index %(HelixWorkItem.Index) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log + $(WorkItemCommand) -partition_count $(PartitionCount) -partition_index %(HelixWorkItem.Index) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log $(WorkItemTimeout) %(OutputFileName).mch;%(OutputFileName).mch.mct;%(OutputFileName).log diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index edb893f247e9f6..90a726775e969f 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -449,7 +449,7 @@ def main(main_args): # Workitem directories workitem_directory = path.join(source_directory, "workitem") pmiassemblies_directory = path.join(workitem_directory, "pmiAssembliesDirectory") - input_artifacts = path.join(pmiassemblies_directory, coreclr_args.collection_name) + input_artifacts = "" # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2. 
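The partition arguments are forwarded to BenchmarkDotNet, which performs the actual split of the benchmark set across Helix work items; the scripts above only check that both values are numeric. Conceptually the split is a deterministic slice of an ordered benchmark list, as in the sketch below (select_partition is a hypothetical helper for illustration, not part of these scripts):

    # Hypothetical illustration of partitioned execution: every work item sees the
    # same ordered list and keeps only the entries assigned to its partition index.
    def select_partition(benchmarks, partition_count, partition_index):
        if not 0 <= partition_index < partition_count:
            raise ValueError("partition_index must be in [0, partition_count)")
        return [b for pos, b in enumerate(benchmarks)
                if pos % partition_count == partition_index]

    names = ["Bench.A", "Bench.B", "Bench.C", "Bench.D", "Bench.E"]
    print(select_partition(names, 2, 0))  # ['Bench.A', 'Bench.C', 'Bench.E']
    print(select_partition(names, 2, 1))  # ['Bench.B', 'Bench.D']
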
@@ -463,6 +463,7 @@ def main(main_args): # copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False) # payload + input_artifacts = path.join(pmiassemblies_directory, coreclr_args.collection_name) exclude_directory = ['Core_Root'] if coreclr_args.collection_name == "tests" else [] exclude_files = native_binaries_to_ignore partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directory) From 911a76dc84579744d08ac287943ba2564ecd6c0f Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 5 Feb 2021 15:27:35 -0800 Subject: [PATCH 07/35] Point the core_root path to the superpmi script --- src/coreclr/scripts/superpmi_benchmarks.py | 14 +++++++++++++- src/coreclr/scripts/superpmi_setup.py | 13 ++++++++----- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index e3982a9e367dd5..b19ba5cb92297c 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -126,7 +126,19 @@ def execute(coreclr_args, output_mch_name): " --envvar COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart\"", # superpmi.py collect arguments - "-temp_dir", performance_directory, "--skip_cleanup", "-output_mch_path", output_mch_name, "--use_zapdisable", "-log_file", log_file]) + + # Path to core_root because the script will be ran from "performance" repo. + "-core_root", core_root, + + # Specify that temp_dir is current performance directory, because in order to execute + # microbenchmarks, it needs access to the source code. + # Also, skip cleaning up once done, because the superpmi script is being + # executed from the same folder. + "-temp_dir", performance_directory, "--skip_cleanup", + + # Disable ReadyToRun so we always JIT R2R methods and collect them + "--use_zapdisable", + "-output_mch_path", output_mch_name, "-log_file", log_file]) def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 90a726775e969f..7887f404d06678 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -375,10 +375,16 @@ def setup_microbenchmark(workitem_directory, arch): build_env_vars["DOTNET_MULTILEVEL_LOOKUP"] = "0" build_env_vars["UseSharedCompilation"] = "false" - run_command(["python", "scripts/dotnet.py", "install", "--architecture", arch, "--install-dir", dotnet_directory]) + run_command([get_python_name(), "scripts/dotnet.py", "install", "--architecture", arch, "--install-dir", dotnet_directory]) run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", artifacts_packages_directory], _env=build_env_vars) run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o", artifacts_directory], _env=build_env_vars) +def get_python_name(): + if is_windows: + return "py -3" + else: + return "python3" + def set_pipeline_variable(name, value): """ This method sets pipeline variable. 
@@ -478,10 +484,7 @@ def main(main_args): set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory) set_pipeline_variable("WorkItemDirectory", workitem_directory) set_pipeline_variable("InputArtifacts", input_artifacts) - if is_windows: - set_pipeline_variable("Python", "py -3") - else: - set_pipeline_variable("Python", "python3") + set_pipeline_variable("Python", get_python_name()) set_pipeline_variable("Architecture", arch) set_pipeline_variable("Creator", creator) set_pipeline_variable("Queue", helix_queue) From 78fd0a2a91f26a0a60b1fd8f20876f0c7991cbc2 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 5 Feb 2021 16:15:50 -0800 Subject: [PATCH 08/35] fix python to invoke for setup --- src/coreclr/scripts/superpmi_setup.py | 37 +++++++++++++++++++-------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 7887f404d06678..499cd138c157e0 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -42,6 +42,7 @@ from os.path import isfile, join, getsize from coreclr_arguments import * from superpmi import ChangeDir + # Start of parser object creation. parser = argparse.ArgumentParser(description="description") @@ -116,6 +117,7 @@ MAX_FILES_COUNT = 1500 + def setup_args(args): """ Setup the args for SuperPMI to use. @@ -330,7 +332,8 @@ def copy_files(src_path, dst_path, file_names): shutil.copy2(f, dst_path_of_file) -def partition_files(src_directory, dst_directory, max_size, exclude_directories=[], exclude_files=native_binaries_to_ignore): +def partition_files(src_directory, dst_directory, max_size, exclude_directories=[], + exclude_files=native_binaries_to_ignore): """ Copy bucketized files based on size to destination folder. 
Args: @@ -352,6 +355,7 @@ def partition_files(src_directory, dst_directory, max_size, exclude_directories= copy_files(src_directory, curr_dst_path, file_names) index += 1 + def setup_microbenchmark(workitem_directory, arch): """ Perform setup of microbenchmarks @@ -364,26 +368,37 @@ def setup_microbenchmark(workitem_directory, arch): ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) with ChangeDir(performance_directory): - + print("Inside directory: " + performance_directory) dotnet_directory = os.path.join(performance_directory, "tools", "dotnet") + dotnet_install_script = os.path.join(performance_directory, "scripts", "dotnet.py") dotnet_exe = os.path.join(dotnet_directory, "dotnet") - artifacts_directory = os.path.join(performance_directory, "artifacts") + artifacts_directory = os.path.join(performance_directory, "artifacts") artifacts_packages_directory = os.path.join(artifacts_directory, "packages") + if not isfile(dotnet_install_script): + print("Missing " + dotnet_install_script) + return + build_env_vars = os.environ.copy() build_env_vars["DOTNET_CLI_TELEMETRY_OPTOUT"] = "1" build_env_vars["DOTNET_MULTILEVEL_LOOKUP"] = "0" build_env_vars["UseSharedCompilation"] = "false" - run_command([get_python_name(), "scripts/dotnet.py", "install", "--architecture", arch, "--install-dir", dotnet_directory]) - run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", artifacts_packages_directory], _env=build_env_vars) - run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o", artifacts_directory], _env=build_env_vars) + run_command( + get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir", dotnet_directory]) + run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", + artifacts_packages_directory], _env=build_env_vars) + run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", + "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + "-o", artifacts_directory], _env=build_env_vars) + def get_python_name(): if is_windows: - return "py -3" + return ["py", "-3"] else: - return "python3" + return ["python3"] + def set_pipeline_variable(name, value): """ This method sets pipeline variable. @@ -393,8 +408,8 @@ def set_pipeline_variable(name, value): value (string): Value of the variable. 
""" define_variable_format = "##vso[task.setvariable variable={0}]{1}" - print("{0} -> {1}".format(name, value)) # logging - print(define_variable_format.format(name, value)) # set variable + print("{0} -> {1}".format(name, value)) # logging + print(define_variable_format.format(name, value)) # set variable def main(main_args): @@ -484,7 +499,7 @@ def main(main_args): set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory) set_pipeline_variable("WorkItemDirectory", workitem_directory) set_pipeline_variable("InputArtifacts", input_artifacts) - set_pipeline_variable("Python", get_python_name()) + set_pipeline_variable("Python", ' '.join(get_python_name())) set_pipeline_variable("Architecture", arch) set_pipeline_variable("Creator", creator) set_pipeline_variable("Queue", helix_queue) From 94cafa214270605e5f679309abd48021e924cb45 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 5 Feb 2021 23:15:58 -0800 Subject: [PATCH 09/35] Add verbosity and include all benchmarks --- src/coreclr/scripts/superpmi_benchmarks.py | 6 +++--- src/coreclr/scripts/superpmi_setup.py | 7 ++++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index b19ba5cb92297c..555313c0d743fa 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -15,10 +15,10 @@ import re import sys +from os import path from coreclr_arguments import * from superpmi import ChangeDir from superpmi_setup import run_command -from os import path # Start of parser object creation. is_windows = platform.system() == "Windows" @@ -121,7 +121,7 @@ def execute(coreclr_args, output_mch_name): python_path, path.join(superpmi_directory, "superpmi.py"), "collect", # dotnet command to execute Microbenchmarks.dll - dotnet_exe, "\"artifacts/Microbenchmarks.dll --filter *IniArray* --corerun " + path.join(core_root, corerun_exe_name) + + dotnet_exe, "\"artifacts/Microbenchmarks.dll --filter * --corerun " + path.join(core_root, corerun_exe_name) + " --partition_count " + partition_count + " --partition_index " + partition_index + " --envvar COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart\"", @@ -138,7 +138,7 @@ def execute(coreclr_args, output_mch_name): # Disable ReadyToRun so we always JIT R2R methods and collect them "--use_zapdisable", - "-output_mch_path", output_mch_name, "-log_file", log_file]) + "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug"]) def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 499cd138c157e0..2fde643dd15aa7 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -385,7 +385,7 @@ def setup_microbenchmark(workitem_directory, arch): build_env_vars["UseSharedCompilation"] = "false" run_command( - get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir", dotnet_directory]) + get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir", dotnet_directory, "--verbose"]) run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", artifacts_packages_directory], _env=build_env_vars) run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", 
"Release", @@ -394,6 +394,11 @@ def setup_microbenchmark(workitem_directory, arch): def get_python_name(): + """Gets the python name + + Returns: + string: Returns the appropriate python name depending on the OS. + """ if is_windows: return ["py", "-3"] else: From 31f4d8a510426371bf5ae0c265aba3982d78ccd9 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Sat, 6 Feb 2021 00:45:49 -0800 Subject: [PATCH 10/35] Fix the benchmarks invocation --- src/coreclr/scripts/superpmi_benchmarks.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 555313c0d743fa..f449ca173723a8 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -111,6 +111,7 @@ def execute(coreclr_args, output_mch_name): log_file = coreclr_args.log_file partition_count = coreclr_args.partition_count partition_index = coreclr_args.partition_index + benchmarks_dll = path.join(performance_directory, "artifacts", "Microbenchmarks.dll") with ChangeDir(performance_directory): print("Inside " + performance_directory) @@ -121,9 +122,9 @@ def execute(coreclr_args, output_mch_name): python_path, path.join(superpmi_directory, "superpmi.py"), "collect", # dotnet command to execute Microbenchmarks.dll - dotnet_exe, "\"artifacts/Microbenchmarks.dll --filter * --corerun " + path.join(core_root, corerun_exe_name) + - " --partition_count " + partition_count + " --partition_index " + partition_index + - " --envvar COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart\"", + dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + + " --partition-count " + partition_count + " --partition-index " + partition_index + + " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", # superpmi.py collect arguments From 8a44aa6ecd7f8d64f6f9202b46a991334b881620 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 10:16:52 -0800 Subject: [PATCH 11/35] use benchmarks_ci.py script --- src/coreclr/scripts/superpmi.proj | 4 +- src/coreclr/scripts/superpmi.py | 14 +-- src/coreclr/scripts/superpmi_benchmarks.py | 129 +++++++++++++++------ src/coreclr/scripts/superpmi_setup.py | 44 ++++--- 4 files changed, 129 insertions(+), 62 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index aaa47e61086971..38f7dc75e2905a 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -51,7 +51,6 @@ %HELIX_CORRELATION_PAYLOAD%\performance %PerformanceDirectory%\tools\dotnet\dotnet.exe - superpmi-shim-collector.dll %HELIX_WORKITEM_UPLOAD_ROOT% @@ -67,7 +66,6 @@ $HELIX_CORRELATION_PAYLOAD/performance $PerformanceDirectory/tools/dotnet/dotnet - libsuperpmi-shim-collector.so $HELIX_WORKITEM_UPLOAD_ROOT @@ -80,7 +78,7 @@ - $(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -python_path $(Python) -core_root $(SuperPMIDirectory) -shim_name $(SpmiCollectorName) + $(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -python_path $(Python) -core_root $(SuperPMIDirectory) -arch $(Architecture) diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py 
index 04326b947a2535..a2e7c6f69b43e3 100755 --- a/src/coreclr/scripts/superpmi.py +++ b/src/coreclr/scripts/superpmi.py @@ -543,12 +543,13 @@ class TempDir: Use with: "with TempDir() as temp_dir" to change to that directory and then automatically change back to the original working directory afterwards and remove the temporary - directory and its contents (if args.skip_cleanup is False). + directory and its contents (if skip_cleanup is False). """ - def __init__(self, path=None): + def __init__(self, path=None, skip_cleanup=False): self.mydir = tempfile.mkdtemp() if path is None else path self.cwd = None + self._skip_cleanup = skip_cleanup def __enter__(self): self.cwd = os.getcwd() @@ -557,10 +558,7 @@ def __enter__(self): def __exit__(self, exc_type, exc_val, exc_tb): os.chdir(self.cwd) - # Note: we are using the global `args`, not coreclr_args. This works because - # the `skip_cleanup` argument is not processed by CoreclrArguments, but is - # just copied there. - if not args.skip_cleanup: + if not self._skip_cleanup: shutil.rmtree(self.mydir) @@ -758,7 +756,7 @@ def collect(self): passed = False try: - with TempDir(self.coreclr_args.temp_dir) as temp_location: + with TempDir(self.coreclr_args.temp_dir, self.coreclr_args.skip_cleanup) as temp_location: # Setup all of the temp locations self.base_fail_mcl_file = os.path.join(temp_location, "basefail.mcl") self.base_mch_file = os.path.join(temp_location, "base.mch") @@ -1576,7 +1574,7 @@ def replay_with_asm_diffs(self): files_with_asm_diffs = [] files_with_replay_failures = [] - with TempDir(self.coreclr_args.temp_dir) as temp_location: + with TempDir(self.coreclr_args.temp_dir, self.coreclr_args.skip_cleanup) as temp_location: logging.debug("") logging.debug("Temp Location: %s", temp_location) logging.debug("") diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index f449ca173723a8..87c5a2f273ae3b 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -17,7 +17,7 @@ from os import path from coreclr_arguments import * -from superpmi import ChangeDir +from superpmi import ChangeDir, TempDir from superpmi_setup import run_command # Start of parser object creation. 
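The TempDir rework above moves the cleanup decision from the global args into the instance, so callers such as superpmi_benchmarks.py can decide per use whether the temporary directory survives. A reduced sketch of the pattern (names follow the patch, bodies trimmed to the essentials):

    import os
    import shutil
    import tempfile

    class TempDir:
        """Change into a fresh temp directory; delete it on exit unless skip_cleanup is set."""
        def __init__(self, path=None, skip_cleanup=False):
            self.mydir = tempfile.mkdtemp() if path is None else path
            self.cwd = None
            self._skip_cleanup = skip_cleanup

        def __enter__(self):
            self.cwd = os.getcwd()
            os.chdir(self.mydir)
            return self.mydir

        def __exit__(self, exc_type, exc_val, exc_tb):
            os.chdir(self.cwd)
            if not self._skip_cleanup:
                shutil.rmtree(self.mydir)

    # Keep the collection artifacts around for later inspection:
    with TempDir(skip_cleanup=True) as temp_location:
        print("collecting into", temp_location)
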
@@ -28,11 +28,11 @@ parser.add_argument("-superpmi_directory", help="Path to superpmi directory") parser.add_argument("-python_path", help="Path to python") parser.add_argument("-core_root", help="Path to Core_Root directory") -parser.add_argument("-shim_name", help="Name of collector shim") parser.add_argument("-output_mch_path", help="Absolute path to the mch file to produce") parser.add_argument("-log_file", help="Name of the log file") parser.add_argument("-partition_count", help="Total number of partitions") parser.add_argument("-partition_index", help="Partition index to do the collection for") +parser.add_argument("-arch", help="Architecture") def setup_args(args): @@ -65,7 +65,7 @@ def setup_args(args): coreclr_args.verify(args, "log_file", - lambda log_file: not os.path.isfile(log_file), + lambda log_file: True, #not os.path.isfile(log_file), "log_file already exist") coreclr_args.verify(args, @@ -78,11 +78,6 @@ def setup_args(args): lambda python_path: os.path.isfile(python_path), "python_path doesn't exist") - coreclr_args.verify(args, - "shim_name", - lambda unused: True, - "Unable to set shim_name") - coreclr_args.verify(args, "partition_count", lambda partition_count: partition_count.isnumeric(), @@ -93,6 +88,11 @@ def setup_args(args): lambda partition_index: partition_index.isnumeric(), "Unable to set partition_index") + coreclr_args.verify(args, + "arch", + lambda arch: arch.lower() in ["x86", "x64", "arm", "arm64"], + "Unable to set arch") + return coreclr_args @@ -107,40 +107,99 @@ def execute(coreclr_args, output_mch_name): core_root = coreclr_args.core_root superpmi_directory = coreclr_args.superpmi_directory performance_directory = coreclr_args.performance_directory - shim_name = coreclr_args.shim_name log_file = coreclr_args.log_file partition_count = coreclr_args.partition_count partition_index = coreclr_args.partition_index - benchmarks_dll = path.join(performance_directory, "artifacts", "Microbenchmarks.dll") + arch = coreclr_args.arch + if is_windows: + shim_name = "superpmi-shim-collector.dll" + corerun_exe = "CoreRun.exe" + script_name = "run_microbenchmarks.bat" + else: + shim_name = "libsuperpmi-shim-collector.so" + corerun_exe = "corerun" + script_name = "run_microbenchmarks.sh" + # benchmarks_dll = path.join(performance_directory, "artifacts", "Microbenchmarks.dll") + + benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") + microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") + corerun_exe = path.join(core_root, corerun_exe) + script_args = f"--csproj {microbenchmark_proj} -f net6.0 --incremental no --filter *CheckArray* --architecture {arch}" + bdn_artifacts = f"--bdn-artifacts {path.join(performance_directory, 'artifacts', 'BenchmarkDotNet.Artifacts')}" + bdn_arguments = f"--bdn-arguments=\"--corerun {corerun_exe} --partition-count {partition_count} --partition-index {partition_index} " \ + f"--envVars COMPlus_JitName:{shim_name} --iterationCount 1 --warmupCount 0 --invocationCount 1 " \ + "--unrollFactor 1 --strategy ColdStart\"" + collection_command = f"{python_path} {benchmarks_ci_script} {script_args} {bdn_artifacts} {bdn_arguments}" + + with TempDir() as temp_location: + script_name = path.join(temp_location, script_name) + with open(script_name, "w") as collection_script: + contents = ["echo off", f"echo Invoking {collection_command}", collection_command] + collection_script.write(os.linesep.join(contents)) - with ChangeDir(performance_directory): - 
print("Inside " + performance_directory) - dotnet_exe_name = "dotnet.exe" if is_windows else "dotnet" - corerun_exe_name = "CoreRun.exe" if is_windows else "corerun" - dotnet_exe = path.join(performance_directory, "tools", "dotnet", dotnet_exe_name) run_command([ - python_path, path.join(superpmi_directory, "superpmi.py"), "collect", - - # dotnet command to execute Microbenchmarks.dll - dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + - " --partition-count " + partition_count + " --partition-index " + partition_index + - " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", - - # superpmi.py collect arguments - - # Path to core_root because the script will be ran from "performance" repo. - "-core_root", core_root, - - # Specify that temp_dir is current performance directory, because in order to execute - # microbenchmarks, it needs access to the source code. - # Also, skip cleaning up once done, because the superpmi script is being - # executed from the same folder. - "-temp_dir", performance_directory, "--skip_cleanup", - - # Disable ReadyToRun so we always JIT R2R methods and collect them - "--use_zapdisable", + python_path, path.join(superpmi_directory, "superpmi.py"), "collect", script_name, "-core_root", core_root, + "-temp_dir", temp_location, "--skip_cleanup", # cleanup will happen once this block is done executing. + "--use_zapdisable", # Disable ReadyToRun so we always JIT R2R methods and collect them "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug"]) + # with ChangeDir(performance_directory): + # print("Inside " + performance_directory) + # # dotnet_exe_name = "dotnet.exe" if is_windows else "dotnet" + + # # dotnet_exe = path.join(performance_directory, "tools", "dotnet", dotnet_exe_name) + # benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") + # microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") + # corerun_exe = path.join(core_root, "CoreRun.exe" if is_windows else "corerun") + # # script_args = " --csproj " + microbenchmark_proj + " -f net6.0 --incremental no --architecture x64" + # # bdn_artifacts = " --bdn-artifacts " + path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts") + # # bdn_arguments = " --bdn-arguments=\"--filter * --corerun {} --partition-count {} --partition-index {} " \ + # # "--envVars COMPlus_JitName:{} --iterationCount 1 --warmupCount 0 --invocationCount 1 " \ + # # "--unrollFactor 1 --strategy ColdStart\"".format(corerun_exe, partition_count, + # # partition_index, shim_name) + + # script_args = ["--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64"] + # bdn_artifacts = ["--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts")] + # bdn_arguments = ["--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, "--partition-index", partition_index, + # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", "--invocationCount", "1", + # "--unrollFactor", "1", "--strategy", "ColdStart\""] + + # run_command([ + # python_path, path.join(superpmi_directory, "superpmi.py"), "collect", + + # # dotnet command to execute Microbenchmarks.dll + # # dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + 
+ # # " --partition-count " + partition_count + " --partition-index " + partition_index + + # # " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", + + # # dotnet_exe, "\"" + benchmarks_dll, "--filter", "*", "--corerun", path.join(core_root, corerun_exe_name), "--partition-count", + # # partition_count, "--partition-index", partition_index, "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", + # # "--warmupCount", "0", "--invocationCount", "1", "--unrollFactor", "1", "--strategy", "ColdStart\"", + + # # python_path, benchmarks_ci_script, script_args + bdn_artifacts + bdn_arguments, + # python_path, benchmarks_ci_script, "--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64", + # "--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts"), + # "--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, + # "--partition-index", partition_index, + # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", + # "--invocationCount", "1", + # "--unrollFactor", "1", "--strategy", "ColdStart\"", + + # # superpmi.py collect arguments + + # # Path to core_root because the script will be ran from "performance" repo. + # "-core_root", core_root, + + # # Specify that temp_dir is current performance directory, because in order to execute + # # microbenchmarks, it needs access to the source code. + # # Also, skip cleaning up once done, because the superpmi script is being + # # executed from the same folder. + # "-temp_dir", performance_directory, "--skip_cleanup", + + # # Disable ReadyToRun so we always JIT R2R methods and collect them + # "--use_zapdisable", + # "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug"]) + def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): """Perform the post processing of produced .mch file by stripping the method contexts diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 2fde643dd15aa7..9e4140f500a77f 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -457,26 +457,37 @@ def main(main_args): print('Copying {} -> {}'.format(coreclr_args.core_root_directory, superpmi_dst_directory)) copy_directory(coreclr_args.core_root_directory, superpmi_dst_directory, match_func=acceptable_copy) - # Clone and build jitutils - try: - with tempfile.TemporaryDirectory() as jitutils_directory: - run_command( - ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory]) - # Set dotnet path to run bootstrap - os.environ["PATH"] = path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"] - bootstrap_file = "bootstrap.cmd" if is_windows else "bootstrap.sh" - run_command([path.join(jitutils_directory, bootstrap_file)], jitutils_directory) - - copy_files(path.join(jitutils_directory, "bin"), superpmi_dst_directory, [path.join(jitutils_directory, "bin", "pmi.dll")]) - except PermissionError as pe_error: - # Details: https://bugs.python.org/issue26660 - print('Ignoring PermissionError: {0}'.format(pe_error)) - # Workitem directories workitem_directory = path.join(source_directory, "workitem") - pmiassemblies_directory = path.join(workitem_directory, "pmiAssembliesDirectory") input_artifacts = "" + if coreclr_args.collection_name == 
"benchmarks": + # Setup microbenchmarks + performance_directory = path.join(workitem_directory, "performance") + run_command( + ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) + else: + # Setup for pmi/crossgen runs + + # Clone and build jitutils + try: + with tempfile.TemporaryDirectory() as jitutils_directory: + run_command( + ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory]) + # Set dotnet path to run bootstrap + os.environ["PATH"] = path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"] + bootstrap_file = "bootstrap.cmd" if is_windows else "bootstrap.sh" + run_command([path.join(jitutils_directory, bootstrap_file)], jitutils_directory) + + copy_files(path.join(jitutils_directory, "bin"), superpmi_dst_directory, [path.join(jitutils_directory, "bin", "pmi.dll")]) + except PermissionError as pe_error: + # Details: https://bugs.python.org/issue26660 + print('Ignoring PermissionError: {0}'.format(pe_error)) + + # Setup microbenchmarks + if coreclr_args.collection_name == "benchmarks": + setup_microbenchmark(correlation_payload_directory, arch) + else: # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2. # The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios. @@ -489,6 +500,7 @@ def main(main_args): # copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False) # payload + pmiassemblies_directory = path.join(workitem_directory, "pmiAssembliesDirectory") input_artifacts = path.join(pmiassemblies_directory, coreclr_args.collection_name) exclude_directory = ['Core_Root'] if coreclr_args.collection_name == "tests" else [] exclude_files = native_binaries_to_ignore From bb59d9f1bf2474b914b948af1948e050b28b9223 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 10:18:43 -0800 Subject: [PATCH 12/35] run all benchmarks --- src/coreclr/scripts/superpmi_benchmarks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 87c5a2f273ae3b..0c0c3251401d37 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -124,7 +124,7 @@ def execute(coreclr_args, output_mch_name): benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") corerun_exe = path.join(core_root, corerun_exe) - script_args = f"--csproj {microbenchmark_proj} -f net6.0 --incremental no --filter *CheckArray* --architecture {arch}" + script_args = f"--csproj {microbenchmark_proj} -f net6.0 --incremental no --filter * --architecture {arch}" bdn_artifacts = f"--bdn-artifacts {path.join(performance_directory, 'artifacts', 'BenchmarkDotNet.Artifacts')}" bdn_arguments = f"--bdn-arguments=\"--corerun {corerun_exe} --partition-count {partition_count} --partition-index {partition_index} " \ f"--envVars COMPlus_JitName:{shim_name} --iterationCount 1 --warmupCount 0 --invocationCount 1 " \ From ce26da6ba897b58bd08136262d60d3a35ac7d385 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 11:29:55 -0800 Subject: 
[PATCH 13/35] fix the performance source code path --- src/coreclr/scripts/superpmi_setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 9e4140f500a77f..eb03fbc61ec1b2 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -463,7 +463,7 @@ def main(main_args): if coreclr_args.collection_name == "benchmarks": # Setup microbenchmarks - performance_directory = path.join(workitem_directory, "performance") + performance_directory = path.join(correlation_payload_directory, "performance") run_command( ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) else: From 8a5b218fe76282402de6cb2878f2319bb1692ac7 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 22:58:42 -0800 Subject: [PATCH 14/35] see why dotnet install fails --- src/coreclr/scripts/superpmi_benchmarks.py | 4 ++++ src/coreclr/scripts/superpmi_setup.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 0c0c3251401d37..4327f8ee82be4f 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -15,6 +15,7 @@ import re import sys +import stat from os import path from coreclr_arguments import * from superpmi import ChangeDir, TempDir @@ -137,6 +138,9 @@ def execute(coreclr_args, output_mch_name): contents = ["echo off", f"echo Invoking {collection_command}", collection_command] collection_script.write(os.linesep.join(contents)) + if not is_windows: + os.chmod(script_name, stat.S_IRWXU) + run_command([ python_path, path.join(superpmi_directory, "superpmi.py"), "collect", script_name, "-core_root", core_root, "-temp_dir", temp_location, "--skip_cleanup", # cleanup will happen once this block is done executing. 
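On non-Windows machines the generated wrapper has to be marked executable before superpmi.py can launch it, which is what the stat.S_IRWXU change above adds (owner read/write/execute; a later patch in this series widens the mode to group and others). The same pattern in isolation, with a placeholder command line:

    import os
    import stat

    script_name = "run_microbenchmarks.sh"
    collection_command = "dotnet MicroBenchmarks.dll --filter *"  # placeholder, not the real command

    with open(script_name, "w") as collection_script:
        collection_script.write(os.linesep.join(["#!/bin/bash", collection_command]))

    # chmod u+rwx so the shell script can be invoked as the collection command.
    os.chmod(script_name, stat.S_IRWXU)
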
diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index eb03fbc61ec1b2..1b2b3c704a7b39 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -365,7 +365,7 @@ def setup_microbenchmark(workitem_directory, arch): performance_directory = path.join(workitem_directory, "performance") run_command( - ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) + ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) with ChangeDir(performance_directory): print("Inside directory: " + performance_directory) From f1a2dc22f67450c1b2979bdab9de38dad5d667cc Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 23:00:04 -0800 Subject: [PATCH 15/35] Comment all jobs except benchmarks --- eng/pipelines/coreclr/superpmi.yml | 142 ++++++++++++++--------------- 1 file changed, 71 insertions(+), 71 deletions(-) diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml index 0e650dd29fb6ad..75a5567cc06dba 100644 --- a/eng/pipelines/coreclr/superpmi.yml +++ b/eng/pipelines/coreclr/superpmi.yml @@ -42,57 +42,79 @@ jobs: jobParameters: testGroup: outerloop -- template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml - buildConfig: checked - platforms: - - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 - jobParameters: - testGroup: outerloop - liveLibrariesBuildConfig: Release +# - template: /eng/pipelines/common/platform-matrix.yml +# parameters: +# jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml +# buildConfig: checked +# platforms: +# - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 +# jobParameters: +# testGroup: outerloop +# liveLibrariesBuildConfig: Release -- template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml - buildConfig: checked - platforms: - # Linux tests are built on the OSX machines. - # - OSX_x64 - - Linux_arm - - Linux_arm64 - - Linux_x64 - - windows_x64 - - windows_x86 - - windows_arm64 - helixQueueGroup: ci - helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - jobParameters: - testGroup: outerloop - liveLibrariesBuildConfig: Release - collectionType: pmi - collectionName: libraries +# - template: /eng/pipelines/common/platform-matrix.yml +# parameters: +# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml +# buildConfig: checked +# platforms: +# # Linux tests are built on the OSX machines. +# # - OSX_x64 +# - Linux_arm +# - Linux_arm64 +# - Linux_x64 +# - windows_x64 +# - windows_x86 +# - windows_arm64 +# helixQueueGroup: ci +# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml +# jobParameters: +# testGroup: outerloop +# liveLibrariesBuildConfig: Release +# collectionType: pmi +# collectionName: libraries -- template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml - buildConfig: checked - platforms: - # Linux tests are built on the OSX machines. 
- # - OSX_x64 - - Linux_arm - - Linux_arm64 - - Linux_x64 - - windows_x64 - - windows_x86 - - windows_arm64 - helixQueueGroup: ci - helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - jobParameters: - testGroup: outerloop - liveLibrariesBuildConfig: Release - collectionType: crossgen - collectionName: libraries +# - template: /eng/pipelines/common/platform-matrix.yml +# parameters: +# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml +# buildConfig: checked +# platforms: +# # Linux tests are built on the OSX machines. +# # - OSX_x64 +# - Linux_arm +# - Linux_arm64 +# - Linux_x64 +# - windows_x64 +# - windows_x86 +# - windows_arm64 +# helixQueueGroup: ci +# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml +# jobParameters: +# testGroup: outerloop +# liveLibrariesBuildConfig: Release +# collectionType: crossgen +# collectionName: libraries + +# - template: /eng/pipelines/common/platform-matrix.yml +# parameters: +# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml +# buildConfig: checked +# platforms: +# # Linux tests are built on the OSX machines. +# # - OSX_x64 +# - Linux_arm +# - Linux_arm64 +# - Linux_x64 +# - windows_x64 +# - windows_x86 +# - windows_arm64 +# - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 +# helixQueueGroup: ci +# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml +# jobParameters: +# testGroup: outerloop +# liveLibrariesBuildConfig: Release +# collectionType: pmi +# collectionName: tests - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -116,28 +138,6 @@ jobs: collectionType: crossgen2 collectionName: libraries -- template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml - buildConfig: checked - platforms: - # Linux tests are built on the OSX machines. 
- # - OSX_x64 - - Linux_arm - - Linux_arm64 - - Linux_x64 - - windows_x64 - - windows_x86 - - windows_arm64 - - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 - helixQueueGroup: ci - helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - jobParameters: - testGroup: outerloop - liveLibrariesBuildConfig: Release - collectionType: pmi - collectionName: tests - - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml From c04d9d1e88497ce248ce9a8ff5c038072ffe45a5 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Tue, 9 Feb 2021 23:13:50 -0800 Subject: [PATCH 16/35] update the right fork --- src/coreclr/scripts/superpmi_setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 1b2b3c704a7b39..69e110132db8f3 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -465,7 +465,7 @@ def main(main_args): # Setup microbenchmarks performance_directory = path.join(correlation_payload_directory, "performance") run_command( - ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) + ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) else: # Setup for pmi/crossgen runs From 15ef3f293b1c3fa1b9b19934bc56c3c3baf9964b Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Wed, 10 Feb 2021 10:49:51 -0800 Subject: [PATCH 17/35] Switch back to doing installing dotnet on azure machine --- src/coreclr/scripts/superpmi.proj | 2 +- src/coreclr/scripts/superpmi_benchmarks.py | 107 +++++++++++++++++++-- src/coreclr/scripts/superpmi_setup.py | 37 ++++--- 3 files changed, 117 insertions(+), 29 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 38f7dc75e2905a..69613a0df7940a 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -78,7 +78,7 @@ - $(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -python_path $(Python) -core_root $(SuperPMIDirectory) -arch $(Architecture) + $(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -core_root $(SuperPMIDirectory) -arch $(Architecture) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 4327f8ee82be4f..939d2beecac4ee 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -17,6 +17,7 @@ import stat from os import path +from os.path import isfile from coreclr_arguments import * from superpmi import ChangeDir, TempDir from superpmi_setup import run_command @@ -27,7 +28,6 @@ parser.add_argument("-performance_directory", help="Path to performance directory") parser.add_argument("-superpmi_directory", help="Path to superpmi directory") -parser.add_argument("-python_path", help="Path to python") parser.add_argument("-core_root", help="Path to Core_Root directory") parser.add_argument("-output_mch_path", help="Absolute path to the mch file to produce") parser.add_argument("-log_file", help="Name of the log file") @@ -74,11 +74,6 @@ def setup_args(args): lambda core_root: os.path.isdir(core_root), "core_root doesn't exist") - coreclr_args.verify(args, - 
"python_path", - lambda python_path: os.path.isfile(python_path), - "python_path doesn't exist") - coreclr_args.verify(args, "partition_count", lambda partition_count: partition_count.isnumeric(), @@ -96,6 +91,98 @@ def setup_args(args): return coreclr_args +def build_and_run(coreclr_args, output_mch_name): + """Build the microbenchmarks and run them under "superpmi collect" + + Args: + coreclr_args (CoreClrArguments): Arguments use to drive + output_mch_name (string): Name of output mch file name + """ + arch = coreclr_args.arch + python_path = sys.executable + core_root = coreclr_args.core_root + superpmi_directory = coreclr_args.superpmi_directory + performance_directory = coreclr_args.performance_directory + log_file = coreclr_args.log_file + partition_count = coreclr_args.partition_count + partition_index = coreclr_args.partition_index + dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) + dotnet_exe = os.path.join(dotnet_directory, "dotnet") + + #TODO: Use TMP instead of os.path.join(performance_directory, "artifacts") + artifacts_directory = os.path.join(performance_directory, "artifacts") + artifacts_packages_directory = os.path.join(artifacts_directory, "packages") + benchmarks_dll = path.join(artifacts_directory, "Microbenchmarks.dll") + + if is_windows: + shim_name = "superpmi-shim-collector.dll" + corerun_exe = "CoreRun.exe" + else: + shim_name = "libsuperpmi-shim-collector.so" + corerun_exe = "corerun" + + run_command( + [dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", + artifacts_packages_directory]) + + run_command( + [dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", + "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + "-o", artifacts_directory]) + + with ChangeDir(performance_directory): + run_command([ + python_path, path.join(superpmi_directory, "superpmi.py"), "collect", "-core_root", core_root, + # Specify that temp_dir is current performance directory, because in order to execute + # microbenchmarks, it needs access to the source code. + # Also, skip cleaning up once done, because the superpmi script is being + # executed from the same folder. 
+ "-temp_dir", performance_directory, "--skip_cleanup", + # Disable ReadyToRun so we always JIT R2R methods and collect them + "--use_zapdisable", + "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug", + + # collection_command + dotnet_exe, + + # collection_args + f"{benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ + "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", + + # dotnet command to execute Microbenchmarks.dll + # dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + + # " --partition-count " + partition_count + " --partition-index " + partition_index + + # " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", + + # dotnet_exe, "\"" + benchmarks_dll, "--filter", "*", "--corerun", path.join(core_root, corerun_exe_name), "--partition-count", + # partition_count, "--partition-index", partition_index, "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", + # "--warmupCount", "0", "--invocationCount", "1", "--unrollFactor", "1", "--strategy", "ColdStart\"", + + # python_path, benchmarks_ci_script, script_args + bdn_artifacts + bdn_arguments, + # python_path, benchmarks_ci_script, "--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64", + # "--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts"), + # "--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, + # "--partition-index", partition_index, + # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", + # "--invocationCount", "1", + # "--unrollFactor", "1", "--strategy", "ColdStart\"", + + # # superpmi.py collect arguments + + # # Path to core_root because the script will be ran from "performance" repo. + # "-core_root", core_root, + + # # Specify that temp_dir is current performance directory, because in order to execute + # # microbenchmarks, it needs access to the source code. + # # Also, skip cleaning up once done, because the superpmi script is being + # # executed from the same folder. + # "-temp_dir", performance_directory, "--skip_cleanup", + + # # Disable ReadyToRun so we always JIT R2R methods and collect them + # "--use_zapdisable", + # "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug" + ]) def execute(coreclr_args, output_mch_name): """Execute the superpmi collection for Microbenchmarks @@ -104,7 +191,7 @@ def execute(coreclr_args, output_mch_name): coreclr_args (CoreclrArguments): Arguments output_mch_name (string): The name of output mch file name. 
""" - python_path = coreclr_args.python_path + python_path = sys.executable core_root = coreclr_args.core_root superpmi_directory = coreclr_args.superpmi_directory performance_directory = coreclr_args.performance_directory @@ -112,6 +199,7 @@ def execute(coreclr_args, output_mch_name): partition_count = coreclr_args.partition_count partition_index = coreclr_args.partition_index arch = coreclr_args.arch + if is_windows: shim_name = "superpmi-shim-collector.dll" corerun_exe = "CoreRun.exe" @@ -120,7 +208,7 @@ def execute(coreclr_args, output_mch_name): shim_name = "libsuperpmi-shim-collector.so" corerun_exe = "corerun" script_name = "run_microbenchmarks.sh" - # benchmarks_dll = path.join(performance_directory, "artifacts", "Microbenchmarks.dll") + benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") @@ -255,7 +343,8 @@ def main(main_args): coreclr_args = setup_args(main_args) all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") - execute(coreclr_args, all_output_mch_name) + # execute(coreclr_args, all_output_mch_name) + build_and_run(coreclr_args, all_output_mch_name) if os.path.isfile(all_output_mch_name): pass else: diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 69e110132db8f3..95287368b1ec8e 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -365,33 +365,31 @@ def setup_microbenchmark(workitem_directory, arch): performance_directory = path.join(workitem_directory, "performance") run_command( - ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) + ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/performance", performance_directory]) with ChangeDir(performance_directory): - print("Inside directory: " + performance_directory) - dotnet_directory = os.path.join(performance_directory, "tools", "dotnet") + dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) dotnet_install_script = os.path.join(performance_directory, "scripts", "dotnet.py") - dotnet_exe = os.path.join(dotnet_directory, "dotnet") - artifacts_directory = os.path.join(performance_directory, "artifacts") - artifacts_packages_directory = os.path.join(artifacts_directory, "packages") + # dotnet_exe = os.path.join(dotnet_directory, "dotnet") + # artifacts_directory = os.path.join(performance_directory, "artifacts") + # artifacts_packages_directory = os.path.join(artifacts_directory, "packages") if not isfile(dotnet_install_script): print("Missing " + dotnet_install_script) return - build_env_vars = os.environ.copy() - build_env_vars["DOTNET_CLI_TELEMETRY_OPTOUT"] = "1" - build_env_vars["DOTNET_MULTILEVEL_LOOKUP"] = "0" - build_env_vars["UseSharedCompilation"] = "false" + # build_env_vars = os.environ.copy() + # build_env_vars["DOTNET_CLI_TELEMETRY_OPTOUT"] = "1" + # build_env_vars["DOTNET_MULTILEVEL_LOOKUP"] = "0" + # build_env_vars["UseSharedCompilation"] = "false" run_command( get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir", dotnet_directory, "--verbose"]) - run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", - artifacts_packages_directory], _env=build_env_vars) - run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", 
"--configuration", "Release", - "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory], _env=build_env_vars) - + # run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", + # artifacts_packages_directory], _env=build_env_vars) + # run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", + # "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + # "-o", artifacts_directory], _env=build_env_vars) def get_python_name(): """Gets the python name @@ -463,9 +461,10 @@ def main(main_args): if coreclr_args.collection_name == "benchmarks": # Setup microbenchmarks - performance_directory = path.join(correlation_payload_directory, "performance") - run_command( - ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) + setup_microbenchmark(correlation_payload_directory, arch) + # performance_directory = path.join(correlation_payload_directory, "performance") + # run_command( + # ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) else: # Setup for pmi/crossgen runs From d28093abf6b8291ca9cb6bb4ec0431f586ffbc0b Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Wed, 10 Feb 2021 13:02:01 -0800 Subject: [PATCH 18/35] Put dotnet in script --- src/coreclr/scripts/superpmi_benchmarks.py | 60 ++++++++++++++++------ 1 file changed, 43 insertions(+), 17 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 939d2beecac4ee..8573530b999c89 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -112,43 +112,69 @@ def build_and_run(coreclr_args, output_mch_name): #TODO: Use TMP instead of os.path.join(performance_directory, "artifacts") artifacts_directory = os.path.join(performance_directory, "artifacts") artifacts_packages_directory = os.path.join(artifacts_directory, "packages") - benchmarks_dll = path.join(artifacts_directory, "Microbenchmarks.dll") + project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") + # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") + benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") if is_windows: shim_name = "superpmi-shim-collector.dll" corerun_exe = "CoreRun.exe" + script_name = "run_microbenchmarks.bat" else: shim_name = "libsuperpmi-shim-collector.so" corerun_exe = "corerun" + script_name = "run_microbenchmarks.sh" run_command( - [dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", + [dotnet_exe, "restore", project_file, "--packages", artifacts_packages_directory]) run_command( - [dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", + [dotnet_exe, "build", project_file, "--configuration", "Release", "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o", artifacts_directory]) - with ChangeDir(performance_directory): + collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ + "--iterationCount 1 --warmupCount 0 
--invocationCount 1 --unrollFactor 1 --strategy ColdStart" + + with TempDir() as temp_location: + # script_name = path.join('/home/kpathak/temp/output', script_name) + script_name = path.join(temp_location, script_name) + with open(script_name, "w") as collection_script: + contents = [] + if not is_windows: + contents.append("#!/bin/bash") + contents.append(f"pushd {performance_directory}") + contents.append(collection_command) + + print() + print(f"{script_name} contents:") + print("******************************************") + print(os.linesep.join(contents)) + print("******************************************") + + collection_script.write(os.linesep.join(contents)) + + if not is_windows: + os.chmod(script_name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + + # with ChangeDir(performance_directory): run_command([ python_path, path.join(superpmi_directory, "superpmi.py"), "collect", "-core_root", core_root, - # Specify that temp_dir is current performance directory, because in order to execute - # microbenchmarks, it needs access to the source code. - # Also, skip cleaning up once done, because the superpmi script is being - # executed from the same folder. - "-temp_dir", performance_directory, "--skip_cleanup", # Disable ReadyToRun so we always JIT R2R methods and collect them "--use_zapdisable", "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug", + script_name, + #---------------------------- + # # collection_command + # dotnet_exe, - # collection_command - dotnet_exe, - - # collection_args - f"{benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ - f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ - "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", + # # collection_args + # f"\"{benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + # f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ + # "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart\"", + #---------------------------- # dotnet command to execute Microbenchmarks.dll # dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + @@ -221,7 +247,7 @@ def execute(coreclr_args, output_mch_name): collection_command = f"{python_path} {benchmarks_ci_script} {script_args} {bdn_artifacts} {bdn_arguments}" with TempDir() as temp_location: - script_name = path.join(temp_location, script_name) + script_name = path.join(temp_location, script_name) with open(script_name, "w") as collection_script: contents = ["echo off", f"echo Invoking {collection_command}", collection_command] collection_script.write(os.linesep.join(contents)) From 8725b856146ab0bbbdba5040f3635d906b2a8b93 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Wed, 10 Feb 2021 15:48:03 -0800 Subject: [PATCH 19/35] fix dumpMap, revert change in superpmi.py --- src/coreclr/scripts/superpmi.py | 9 +++------ src/coreclr/scripts/superpmi_benchmarks.py | 8 ++++++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py index a2e7c6f69b43e3..5ed019662d2ffd 100755 --- a/src/coreclr/scripts/superpmi.py +++ b/src/coreclr/scripts/superpmi.py @@ -146,8 +146,8 @@ """ superpmi_collect_help = """\ -Command to run SuperPMI collect over. 
If using this option, the user should set the COMPlus_JitName to the shim collector -whenever appropriate. +Command to run SuperPMI collect over. Note that there cannot be any dotnet CLI commands +invoked inside this command, as they will fail due to the shim JIT being set. """ replay_mch_files_help = """\ @@ -868,10 +868,7 @@ def set_and_report_env(env, root_env, complus_env = None): collection_command_env = env_copy.copy() collection_complus_env = complus_env.copy() - # Note: Do not set COMPlus_JitName to collector when doing collection using command because - # the command might use dotnet CLI and setting the variable here would fail the dotnet CLI. - # Instead, let the caller set this variable at appropriate time. - # collection_complus_env["JitName"] = self.collection_shim_name + collection_complus_env["JitName"] = self.collection_shim_name set_and_report_env(collection_command_env, root_env, collection_complus_env) logging.info("Collecting using command:") diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 8573530b999c89..4d7a8ab56fa74d 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -143,8 +143,12 @@ def build_and_run(coreclr_args, output_mch_name): script_name = path.join(temp_location, script_name) with open(script_name, "w") as collection_script: contents = [] + # Unset the JitName so dotnet process will not fail if not is_windows: contents.append("#!/bin/bash") + contents.append("unset COMPlus_JitName") + else: + contents.append("set COMPlus_JitName=") contents.append(f"pushd {performance_directory}") contents.append(collection_command) @@ -333,12 +337,12 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): methods_to_strip_list = path.join(performance_directory, "methods_to_strip.mcl") mcs_exe = path.join(core_root, "mcs") - mcs_command = [mcs_exe, "/dumpMap", old_mch_filename] + mcs_command = [mcs_exe, "-dumpMap", old_mch_filename] # Gather method list to strip (mcs_out, mcs_error) = run_command(mcs_command, _capture_output=True) if len(mcs_error) > 0: - print("Error executing mcs /dumpMap") + print("Error executing mcs -dumpMap") return method_context_list = mcs_out.decode("utf-8").split(os.linesep) From 04ec9399fc1f4da9c695f451091fe0e5d735b324 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Wed, 10 Feb 2021 22:48:54 -0800 Subject: [PATCH 20/35] Produce artifacts in temp folder - Disable mcs -strip for now - Pass the JitName variable --- src/coreclr/scripts/superpmi_benchmarks.py | 76 ++++++++++++---------- 1 file changed, 42 insertions(+), 34 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 4d7a8ab56fa74d..a8b165b6829161 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -106,39 +106,43 @@ def build_and_run(coreclr_args, output_mch_name): log_file = coreclr_args.log_file partition_count = coreclr_args.partition_count partition_index = coreclr_args.partition_index - dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) - dotnet_exe = os.path.join(dotnet_directory, "dotnet") - #TODO: Use TMP instead of os.path.join(performance_directory, "artifacts") - artifacts_directory = os.path.join(performance_directory, "artifacts") - artifacts_packages_directory = os.path.join(artifacts_directory, "packages") - project_file = path.join(performance_directory, "src", "benchmarks", "micro", 
"MicroBenchmarks.csproj") - # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") - benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") - - if is_windows: - shim_name = "superpmi-shim-collector.dll" - corerun_exe = "CoreRun.exe" - script_name = "run_microbenchmarks.bat" - else: - shim_name = "libsuperpmi-shim-collector.so" - corerun_exe = "corerun" - script_name = "run_microbenchmarks.sh" - - run_command( + with TempDir() as temp_location: + dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) + dotnet_exe = os.path.join(dotnet_directory, "dotnet") + project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") + + # Produce artifacts in temp folder because on docker, payload directory (where performance + # directory resides) is read-only. + artifacts_directory = os.path.join(temp_location, "artifacts") + artifacts_packages_directory = os.path.join(artifacts_directory, "packages") + benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") + # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") + + if is_windows: + # shim_name = "superpmi-shim-collector.dll" + shim_name = "%JitName%" + corerun_exe = "CoreRun.exe" + script_name = "run_microbenchmarks.bat" + else: + # shim_name = "libsuperpmi-shim-collector.so" + shim_name = "$JitName" + corerun_exe = "corerun" + script_name = "run_microbenchmarks.sh" + + run_command( [dotnet_exe, "restore", project_file, "--packages", artifacts_packages_directory]) - run_command( - [dotnet_exe, "build", project_file, "--configuration", "Release", - "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory]) + run_command( + [dotnet_exe, "build", project_file, "--configuration", "Release", + "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + "-o", artifacts_directory]) - collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ - f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ - "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" + collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ + "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" - with TempDir() as temp_location: # script_name = path.join('/home/kpathak/temp/output', script_name) script_name = path.join(temp_location, script_name) with open(script_name, "w") as collection_script: @@ -146,8 +150,10 @@ def build_and_run(coreclr_args, output_mch_name): # Unset the JitName so dotnet process will not fail if not is_windows: contents.append("#!/bin/bash") + contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") else: + contents.append("set JitName=%COMPlus_JitName%") contents.append("set COMPlus_JitName=") contents.append(f"pushd {performance_directory}") contents.append(collection_command) @@ -372,15 +378,17 @@ def main(main_args): """ coreclr_args = setup_args(main_args) - all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") - # execute(coreclr_args, all_output_mch_name) - 
build_and_run(coreclr_args, all_output_mch_name) - if os.path.isfile(all_output_mch_name): - pass - else: + # all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") + # build_and_run(coreclr_args, all_output_mch_name) + build_and_run(coreclr_args, coreclr_args.output_mch_path) + + # if not os.path.isfile(all_output_mch_name): + # print("No mch file generated.") + if not os.path.isfile(coreclr_args.output_mch_path): print("No mch file generated.") - strip_unrelated_mc(coreclr_args, all_output_mch_name, coreclr_args.output_mch_path) + #TODO: Disable stripping for now until we solve mcs.exe problems + # strip_unrelated_mc(coreclr_args, all_output_mch_name, coreclr_args.output_mch_path) if __name__ == "__main__": From 8def38a812d68c33fad938aff0872e2c19453c74 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Wed, 10 Feb 2021 23:34:24 -0800 Subject: [PATCH 21/35] Experimental: Exit on failure --- src/coreclr/scripts/superpmi_benchmarks.py | 21 ++++++++++++--------- src/coreclr/scripts/superpmi_setup.py | 20 ++++++++++++-------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index a8b165b6829161..fa1300433c1957 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -18,6 +18,7 @@ import stat from os import path from os.path import isfile +from shutil import copyfile from coreclr_arguments import * from superpmi import ChangeDir, TempDir from superpmi_setup import run_command @@ -131,13 +132,13 @@ def build_and_run(coreclr_args, output_mch_name): script_name = "run_microbenchmarks.sh" run_command( - [dotnet_exe, "restore", project_file, "--packages", - artifacts_packages_directory]) + [dotnet_exe, "restore", project_file, "--packages", + artifacts_packages_directory], _exit_on_fail=True) run_command( [dotnet_exe, "build", project_file, "--configuration", "Release", "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory]) + "-o", artifacts_directory], _exit_on_fail=True) collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ @@ -218,7 +219,7 @@ def build_and_run(coreclr_args, output_mch_name): # # Disable ReadyToRun so we always JIT R2R methods and collect them # "--use_zapdisable", # "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug" - ]) + ], _exit_on_fail=True) def execute(coreclr_args, output_mch_name): """Execute the superpmi collection for Microbenchmarks @@ -346,10 +347,7 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): mcs_command = [mcs_exe, "-dumpMap", old_mch_filename] # Gather method list to strip - (mcs_out, mcs_error) = run_command(mcs_command, _capture_output=True) - if len(mcs_error) > 0: - print("Error executing mcs -dumpMap") - return + (mcs_out, _, _) = run_command(mcs_command, _exit_on_fail=True) method_context_list = mcs_out.decode("utf-8").split(os.linesep) filtered_context_list = [] @@ -363,8 +361,13 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): with open(methods_to_strip_list, "w") as f: f.write('\n'.join(filtered_context_list)) + print(f"Stripping {len(filtered_context_list)} entries.") + # Strip and produce new .mcs file - run_command([mcs_exe, "-strip", 
methods_to_strip_list, old_mch_filename, new_mch_filename]) + if run_command([mcs_exe, "-strip", methods_to_strip_list, old_mch_filename, new_mch_filename])[2] != 0: + # If strip command fails, then just copy the old_mch to new_mch + copyfile(old_mch_filename, new_mch_filename) + return # Create toc file run_command([mcs_exe, "-toc", new_mch_filename]) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 95287368b1ec8e..f474a06458a64a 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -256,28 +256,32 @@ def first_fit(sorted_by_size, max_size): return partitions -def run_command(command_to_run, _cwd=None, _env=None, _capture_output=False): +def run_command(command_to_run, _cwd=None, _env=None, _exit_on_fail=False): """ Runs the command. Args: command_to_run ([string]): Command to run along with arguments. _cwd (string): Current working directory. _env (string): Environment variables, if any. - _capture_output (bool): If should capture output. + _exit_on_fail (bool): If it should exit on failure. Returns: (string, string): Returns a tuple of stdout and stderr """ print("Running: " + " ".join(command_to_run)) command_stdout = "" command_stderr = "" + return_code = 1 with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd, env=_env) as proc: command_stdout, command_stderr = proc.communicate() - if not _capture_output: - if len(command_stdout) > 0: - print(command_stdout.decode("utf-8")) - if len(command_stderr) > 0: - print(command_stderr.decode("utf-8")) - return (command_stdout, command_stderr) + return_code = proc.returncode + if _exit_on_fail and return_code != 0: + print("Command failed. Exiting.") + sys.exit(1) + if len(command_stdout) > 0: + print(command_stdout.decode("utf-8")) + if len(command_stderr) > 0: + print(command_stderr.decode("utf-8")) + return (command_stdout, command_stderr, return_code) def copy_directory(src_path, dst_path, verbose_output=True, match_func=lambda path: True): From c5d47a4831299b009dc10fa757871a1863f91a6b Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 07:29:29 -0800 Subject: [PATCH 22/35] Revert "Produce artifacts in temp folder" This reverts commit afdfbd4b03a684d780ef06f644dba0dcf0621438. --- src/coreclr/scripts/superpmi_benchmarks.py | 76 ++++++++++------------ 1 file changed, 34 insertions(+), 42 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index fa1300433c1957..701b89f7a7193f 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -107,43 +107,39 @@ def build_and_run(coreclr_args, output_mch_name): log_file = coreclr_args.log_file partition_count = coreclr_args.partition_count partition_index = coreclr_args.partition_index + dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) + dotnet_exe = os.path.join(dotnet_directory, "dotnet") - with TempDir() as temp_location: - dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) - dotnet_exe = os.path.join(dotnet_directory, "dotnet") - project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") - - # Produce artifacts in temp folder because on docker, payload directory (where performance - # directory resides) is read-only. 
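The ChangeDir and TempDir context managers imported from superpmi.py (and used in the with-blocks of superpmi_benchmarks.py) behave roughly like the sketch below. This is an approximation for illustration only; the real implementations live in src/coreclr/scripts/superpmi.py and their exact signatures may differ.

    import os
    import shutil
    import tempfile

    class ChangeDir:
        """Switch into a directory for the duration of a 'with' block."""
        def __init__(self, new_dir):
            self.new_dir = new_dir
            self.old_dir = None

        def __enter__(self):
            self.old_dir = os.getcwd()
            os.chdir(self.new_dir)
            return self.new_dir

        def __exit__(self, exc_type, exc_value, traceback):
            os.chdir(self.old_dir)

    class TempDir(ChangeDir):
        """Create a temporary directory, switch into it, and remove it on exit
        unless cleanup is suppressed (compare the --skip_cleanup flag used with
        'superpmi.py collect')."""
        def __init__(self, skip_cleanup=False):
            super().__init__(tempfile.mkdtemp())
            self.skip_cleanup = skip_cleanup

        def __exit__(self, exc_type, exc_value, traceback):
            super().__exit__(exc_type, exc_value, traceback)
            if not self.skip_cleanup:
                shutil.rmtree(self.new_dir, ignore_errors=True)

This is why the generated .bat/.sh wrapper scripts can be written into a throwaway location and left for the context manager to clean up.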
- artifacts_directory = os.path.join(temp_location, "artifacts") - artifacts_packages_directory = os.path.join(artifacts_directory, "packages") - benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") - # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") - - if is_windows: - # shim_name = "superpmi-shim-collector.dll" - shim_name = "%JitName%" - corerun_exe = "CoreRun.exe" - script_name = "run_microbenchmarks.bat" - else: - # shim_name = "libsuperpmi-shim-collector.so" - shim_name = "$JitName" - corerun_exe = "corerun" - script_name = "run_microbenchmarks.sh" - - run_command( + #TODO: Use TMP instead of os.path.join(performance_directory, "artifacts") + artifacts_directory = os.path.join(performance_directory, "artifacts") + artifacts_packages_directory = os.path.join(artifacts_directory, "packages") + project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") + # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") + benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") + + if is_windows: + shim_name = "superpmi-shim-collector.dll" + corerun_exe = "CoreRun.exe" + script_name = "run_microbenchmarks.bat" + else: + shim_name = "libsuperpmi-shim-collector.so" + corerun_exe = "corerun" + script_name = "run_microbenchmarks.sh" + + run_command( [dotnet_exe, "restore", project_file, "--packages", artifacts_packages_directory], _exit_on_fail=True) - run_command( - [dotnet_exe, "build", project_file, "--configuration", "Release", - "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory], _exit_on_fail=True) + run_command( + [dotnet_exe, "build", project_file, "--configuration", "Release", + "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + "-o", artifacts_directory]) - collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ - f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ - "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" + collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ + "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" + with TempDir() as temp_location: # script_name = path.join('/home/kpathak/temp/output', script_name) script_name = path.join(temp_location, script_name) with open(script_name, "w") as collection_script: @@ -151,10 +147,8 @@ def build_and_run(coreclr_args, output_mch_name): # Unset the JitName so dotnet process will not fail if not is_windows: contents.append("#!/bin/bash") - contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") else: - contents.append("set JitName=%COMPlus_JitName%") contents.append("set COMPlus_JitName=") contents.append(f"pushd {performance_directory}") contents.append(collection_command) @@ -381,17 +375,15 @@ def main(main_args): """ coreclr_args = setup_args(main_args) - # all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") - # build_and_run(coreclr_args, all_output_mch_name) - build_and_run(coreclr_args, coreclr_args.output_mch_path) - - # 
if not os.path.isfile(all_output_mch_name): - # print("No mch file generated.") - if not os.path.isfile(coreclr_args.output_mch_path): + all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") + # execute(coreclr_args, all_output_mch_name) + build_and_run(coreclr_args, all_output_mch_name) + if os.path.isfile(all_output_mch_name): + pass + else: print("No mch file generated.") - #TODO: Disable stripping for now until we solve mcs.exe problems - # strip_unrelated_mc(coreclr_args, all_output_mch_name, coreclr_args.output_mch_path) + strip_unrelated_mc(coreclr_args, all_output_mch_name, coreclr_args.output_mch_path) if __name__ == "__main__": From 2d8d1b2e1116c8d5afebe7ffd3b4ceca98b9187a Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 07:31:56 -0800 Subject: [PATCH 23/35] Use JitName --- src/coreclr/scripts/superpmi_benchmarks.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 701b89f7a7193f..494a8ca978a0f8 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -118,11 +118,11 @@ def build_and_run(coreclr_args, output_mch_name): benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") if is_windows: - shim_name = "superpmi-shim-collector.dll" + shim_name = "%JitName%" corerun_exe = "CoreRun.exe" script_name = "run_microbenchmarks.bat" else: - shim_name = "libsuperpmi-shim-collector.so" + shim_name = "$JitName" corerun_exe = "corerun" script_name = "run_microbenchmarks.sh" @@ -135,7 +135,7 @@ def build_and_run(coreclr_args, output_mch_name): "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o", artifacts_directory]) - collection_command = f"{dotnet_exe} {benchmarks_dll} --filter *Array* --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + collection_command = f"{dotnet_exe} {benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" @@ -147,8 +147,10 @@ def build_and_run(coreclr_args, output_mch_name): # Unset the JitName so dotnet process will not fail if not is_windows: contents.append("#!/bin/bash") + contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") else: + contents.append("set JitName=%COMPlus_JitName%") contents.append("set COMPlus_JitName=") contents.append(f"pushd {performance_directory}") contents.append(collection_command) From df48a3b63742fbadcaa7aed0aef268b0773401a4 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 07:36:13 -0800 Subject: [PATCH 24/35] Use workitem folder instead of correlation --- src/coreclr/scripts/superpmi.proj | 4 ++-- src/coreclr/scripts/superpmi_setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 69613a0df7940a..36808a3a98f28c 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -49,7 +49,7 @@ %HELIX_CORRELATION_PAYLOAD%\superpmi - %HELIX_CORRELATION_PAYLOAD%\performance + %WorkItemDirectory%\performance %PerformanceDirectory%\tools\dotnet\dotnet.exe %HELIX_WORKITEM_UPLOAD_ROOT% @@ -64,7 +64,7 @@ $HELIX_CORRELATION_PAYLOAD/superpmi - 
$HELIX_CORRELATION_PAYLOAD/performance + $WorkItemDirectory/performance $PerformanceDirectory/tools/dotnet/dotnet $HELIX_WORKITEM_UPLOAD_ROOT diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index f474a06458a64a..b7d3cf916de97e 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -465,7 +465,7 @@ def main(main_args): if coreclr_args.collection_name == "benchmarks": # Setup microbenchmarks - setup_microbenchmark(correlation_payload_directory, arch) + setup_microbenchmark(workitem_directory, arch) # performance_directory = path.join(correlation_payload_directory, "performance") # run_command( # ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) From 29d6cdff1b70ed052e20b8eb5f46cb7e950aa057 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 09:17:22 -0800 Subject: [PATCH 25/35] fix typo in WorkItemDirectory --- src/coreclr/scripts/superpmi.proj | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 36808a3a98f28c..7541e56be86ff6 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -49,7 +49,7 @@ %HELIX_CORRELATION_PAYLOAD%\superpmi - %WorkItemDirectory%\performance + $(WorkItemDirectory)\performance %PerformanceDirectory%\tools\dotnet\dotnet.exe %HELIX_WORKITEM_UPLOAD_ROOT% @@ -64,7 +64,7 @@ $HELIX_CORRELATION_PAYLOAD/superpmi - $WorkItemDirectory/performance + $(WorkItemDirectory)/performance $PerformanceDirectory/tools/dotnet/dotnet $HELIX_WORKITEM_UPLOAD_ROOT From 114e03925e8940c9913afe11dca394b8e5e65581 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 10:41:35 -0800 Subject: [PATCH 26/35] Set the payload directory --- src/coreclr/scripts/superpmi.proj | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/coreclr/scripts/superpmi.proj b/src/coreclr/scripts/superpmi.proj index 7541e56be86ff6..05ad302eba2de3 100644 --- a/src/coreclr/scripts/superpmi.proj +++ b/src/coreclr/scripts/superpmi.proj @@ -1,4 +1,4 @@ - + - $(WorkItemDirectory)\performance - %PerformanceDirectory%\tools\dotnet\dotnet.exe + %HELIX_WORKITEM_PAYLOAD%\performance %HELIX_WORKITEM_UPLOAD_ROOT% @@ -64,8 +63,7 @@ $HELIX_CORRELATION_PAYLOAD/superpmi - $(WorkItemDirectory)/performance - $PerformanceDirectory/tools/dotnet/dotnet + $HELIX_WORKITEM_PAYLOAD/performance $HELIX_WORKITEM_UPLOAD_ROOT @@ -162,6 +160,7 @@ $(CollectionName).$(CollectionType).%(HelixWorkItem.Index).$(MchFileTag) + $(WorkItemDirectory) $(WorkItemCommand) -partition_count $(PartitionCount) -partition_index %(HelixWorkItem.Index) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log $(WorkItemTimeout) %(OutputFileName).mch;%(OutputFileName).mch.mct;%(OutputFileName).log From eb40f58956723168d3d20d981ea15866272f954e Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 12:28:38 -0800 Subject: [PATCH 27/35] print error message before exiting --- src/coreclr/scripts/superpmi_setup.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index b7d3cf916de97e..21e0c8e1be4688 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -274,13 +274,14 @@ def run_command(command_to_run, 
_cwd=None, _env=None, _exit_on_fail=False): with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd, env=_env) as proc: command_stdout, command_stderr = proc.communicate() return_code = proc.returncode - if _exit_on_fail and return_code != 0: - print("Command failed. Exiting.") - sys.exit(1) + if len(command_stdout) > 0: print(command_stdout.decode("utf-8")) if len(command_stderr) > 0: print(command_stderr.decode("utf-8")) + if _exit_on_fail and return_code != 0: + print("Command failed. Exiting.") + sys.exit(1) return (command_stdout, command_stderr, return_code) From 6ffc43f61a273f44a298c2bcda19eb6707de6346 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 16:57:34 -0800 Subject: [PATCH 28/35] fix some linux issues --- src/coreclr/scripts/superpmi_benchmarks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 494a8ca978a0f8..cae123ff30f87d 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -135,7 +135,7 @@ def build_and_run(coreclr_args, output_mch_name): "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, "-o", artifacts_directory]) - collection_command = f"{dotnet_exe} {benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ + collection_command = f"{dotnet_exe} {benchmarks_dll} --filter \"*\" --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" @@ -149,6 +149,7 @@ def build_and_run(coreclr_args, output_mch_name): contents.append("#!/bin/bash") contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") + contents.append(f"chmod +x {dotnet_exe}") else: contents.append("set JitName=%COMPlus_JitName%") contents.append("set COMPlus_JitName=") From da184e10d59ebe39c86b6fc45faaa55817d53d37 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 18:40:28 -0800 Subject: [PATCH 29/35] Make dotnet executable --- src/coreclr/scripts/superpmi_benchmarks.py | 36 +++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index cae123ff30f87d..30a36dc9247f8d 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -92,6 +92,22 @@ def setup_args(args): return coreclr_args +def make_executable(file_name): + """Make file executable by changing the permission + + Args: + file_name (string): file to execute + """ + if not is_windows: + return + os.chmod(file_name, + # read+execute for owner + (stat.S_IRUSR | stat.S_IXUSR) | + # read+execute for group + (stat.S_IRGRP | stat.S_IXGRP) | + # read+execute for other + (stat.S_IROTH | stat.S_IXOTH)) + def build_and_run(coreclr_args, output_mch_name): """Build the microbenchmarks and run them under "superpmi collect" @@ -126,6 +142,8 @@ def build_and_run(coreclr_args, output_mch_name): corerun_exe = "corerun" script_name = "run_microbenchmarks.sh" + make_executable(dotnet_exe) + run_command( [dotnet_exe, "restore", project_file, "--packages", artifacts_packages_directory], _exit_on_fail=True) @@ -133,7 +151,7 @@ def 
build_and_run(coreclr_args, output_mch_name): run_command( [dotnet_exe, "build", project_file, "--configuration", "Release", "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory]) + "-o", artifacts_directory], _exit_on_fail=True) collection_command = f"{dotnet_exe} {benchmarks_dll} --filter \"*\" --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ @@ -149,7 +167,6 @@ def build_and_run(coreclr_args, output_mch_name): contents.append("#!/bin/bash") contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") - contents.append(f"chmod +x {dotnet_exe}") else: contents.append("set JitName=%COMPlus_JitName%") contents.append("set COMPlus_JitName=") @@ -164,8 +181,7 @@ def build_and_run(coreclr_args, output_mch_name): collection_script.write(os.linesep.join(contents)) - if not is_windows: - os.chmod(script_name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) + make_executable(script_name) # with ChangeDir(performance_directory): run_command([ @@ -344,25 +360,31 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): mcs_command = [mcs_exe, "-dumpMap", old_mch_filename] # Gather method list to strip - (mcs_out, _, _) = run_command(mcs_command, _exit_on_fail=True) + (mcs_out, _, return_code) = run_command(mcs_command) + if return_code != 0: + # If strip command fails, then just copy the old_mch to new_mch + print(f"-dumpMap failed. Copying {old_mch_filename} to {new_mch_filename}.") + copyfile(old_mch_filename, new_mch_filename) + return method_context_list = mcs_out.decode("utf-8").split(os.linesep) filtered_context_list = [] match_pattern = re.compile('^(\\d+),(BenchmarkDotNet|Perfolizer)') + print("Stripping following entries:") for mc_entry in method_context_list: matched = match_pattern.match(mc_entry) if matched: + print(matched.group(1)) filtered_context_list.append(matched.group(1)) with open(methods_to_strip_list, "w") as f: f.write('\n'.join(filtered_context_list)) - print(f"Stripping {len(filtered_context_list)} entries.") - # Strip and produce new .mcs file if run_command([mcs_exe, "-strip", methods_to_strip_list, old_mch_filename, new_mch_filename])[2] != 0: # If strip command fails, then just copy the old_mch to new_mch + print(f"-strip failed. Copying {old_mch_filename} to {new_mch_filename}.") copyfile(old_mch_filename, new_mch_filename) return From 8d8c7b56175342458296820b2edaef931c86d409 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 22:52:42 -0800 Subject: [PATCH 30/35] resolve merge conflicts --- eng/pipelines/coreclr/superpmi.yml | 42 +++++++++++++-------------- src/coreclr/scripts/superpmi_setup.py | 35 ++++++++++------------ 2 files changed, 36 insertions(+), 41 deletions(-) diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml index 75a5567cc06dba..8ab8a59c0fd8b1 100644 --- a/eng/pipelines/coreclr/superpmi.yml +++ b/eng/pipelines/coreclr/superpmi.yml @@ -116,27 +116,27 @@ jobs: # collectionType: pmi # collectionName: tests -- template: /eng/pipelines/common/platform-matrix.yml - parameters: - jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml - buildConfig: checked - platforms: - # Linux tests are built on the OSX machines. - # - OSX_x64 - # TODO: Linux crossgen2 jobs crash during collection, and need to be investigated. 
- # - Linux_arm - # - Linux_arm64 - # - Linux_x64 - - windows_x64 - - windows_x86 - - windows_arm64 - helixQueueGroup: ci - helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml - jobParameters: - testGroup: outerloop - liveLibrariesBuildConfig: Release - collectionType: crossgen2 - collectionName: libraries +# - template: /eng/pipelines/common/platform-matrix.yml +# parameters: +# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml +# buildConfig: checked +# platforms: +# # Linux tests are built on the OSX machines. +# # - OSX_x64 +# # TODO: Linux crossgen2 jobs crash during collection, and need to be investigated. +# # - Linux_arm +# # - Linux_arm64 +# # - Linux_x64 +# - windows_x64 +# - windows_x86 +# - windows_arm64 +# helixQueueGroup: ci +# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml +# jobParameters: +# testGroup: outerloop +# liveLibrariesBuildConfig: Release +# collectionType: crossgen2 +# collectionName: libraries - template: /eng/pipelines/common/platform-matrix.yml parameters: diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 21e0c8e1be4688..36cf076387b99d 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -488,32 +488,27 @@ def main(main_args): # Details: https://bugs.python.org/issue26660 print('Ignoring PermissionError: {0}'.format(pe_error)) - # Setup microbenchmarks - if coreclr_args.collection_name == "benchmarks": - setup_microbenchmark(correlation_payload_directory, arch) - else: - # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a - # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2. - # The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios. + # NOTE: we can't use the build machine ".dotnet" to run on all platforms. E.g., the Windows x86 build uses a + # Windows x64 .dotnet\dotnet.exe that can't load a 32-bit shim. Thus, we always use corerun from Core_Root to invoke crossgen2. + # The following will copy .dotnet to the correlation payload in case we change our mind, and need or want to use it for some scenarios. 
- # # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll - # if coreclr_args.collection_type == "crossgen2": - # dotnet_src_directory = path.join(source_directory, ".dotnet") - # dotnet_dst_directory = path.join(correlation_payload_directory, ".dotnet") - # print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory)) - # copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False) + # # Copy ".dotnet" to correlation_payload_directory for crossgen2 job; it is needed to invoke crossgen2.dll + # if coreclr_args.collection_type == "crossgen2": + # dotnet_src_directory = path.join(source_directory, ".dotnet") + # dotnet_dst_directory = path.join(correlation_payload_directory, ".dotnet") + # print('Copying {} -> {}'.format(dotnet_src_directory, dotnet_dst_directory)) + # copy_directory(dotnet_src_directory, dotnet_dst_directory, verbose_output=False) # payload pmiassemblies_directory = path.join(workitem_directory, "pmiAssembliesDirectory") input_artifacts = path.join(pmiassemblies_directory, coreclr_args.collection_name) exclude_directory = ['Core_Root'] if coreclr_args.collection_name == "tests" else [] - exclude_files = native_binaries_to_ignore - partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directory) - if coreclr_args.collection_type == "crossgen2": - print('Adding exclusions for crossgen2') - # Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it. - exclude_files += [ "Microsoft.Build.dll" ] - partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directory, exclude_files) + exclude_files = native_binaries_to_ignore + if coreclr_args.collection_type == "crossgen2": + print('Adding exclusions for crossgen2') + # Currently, trying to crossgen2 R2RTest\Microsoft.Build.dll causes a pop-up failure, so exclude it. 
+ exclude_files += [ "Microsoft.Build.dll" ] + partition_files(coreclr_args.input_directory, input_artifacts, coreclr_args.max_size, exclude_directory, exclude_files) # Set variables print('Setting pipeline variables:') From 05bb56fa0be878bd3918978c7cec291bd8996a31 Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Thu, 11 Feb 2021 23:49:05 -0800 Subject: [PATCH 31/35] fix typo from merge conflict --- eng/pipelines/coreclr/templates/run-superpmi-job.yml | 2 +- src/coreclr/scripts/superpmi_benchmarks.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/eng/pipelines/coreclr/templates/run-superpmi-job.yml b/eng/pipelines/coreclr/templates/run-superpmi-job.yml index 5c66eaeb48ac44..5478133963b3a9 100644 --- a/eng/pipelines/coreclr/templates/run-superpmi-job.yml +++ b/eng/pipelines/coreclr/templates/run-superpmi-job.yml @@ -106,7 +106,7 @@ jobs: steps: - ${{ parameters.steps }} - - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi-setup.py -source_directory $(Build.SourcesDirectory) -core_root_directory $(Core_Root_Dir) -arch $(archType) -mch_file_tag $(MchFileTag) -input_directory $(InputDirectory) -collection_name $(CollectionName) -collection_type $(CollectionType) -max_size 50 # size in MB + - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi_setup.py -source_directory $(Build.SourcesDirectory) -core_root_directory $(Core_Root_Dir) -arch $(archType) -mch_file_tag $(MchFileTag) -input_directory $(InputDirectory) -collection_name $(CollectionName) -collection_type $(CollectionType) -max_size 50 # size in MB displayName: ${{ format('SuperPMI setup ({0})', parameters.osGroup) }} # Run superpmi collection in helix diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 30a36dc9247f8d..60dc1e23a1ca8f 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -126,7 +126,6 @@ def build_and_run(coreclr_args, output_mch_name): dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) dotnet_exe = os.path.join(dotnet_directory, "dotnet") - #TODO: Use TMP instead of os.path.join(performance_directory, "artifacts") artifacts_directory = os.path.join(performance_directory, "artifacts") artifacts_packages_directory = os.path.join(artifacts_directory, "packages") project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") @@ -258,7 +257,6 @@ def execute(coreclr_args, output_mch_name): shim_name = "libsuperpmi-shim-collector.so" corerun_exe = "corerun" script_name = "run_microbenchmarks.sh" - benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") From de9594ed3f890a284eb77f27f5ec49537eaed8ec Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 12 Feb 2021 00:56:29 -0800 Subject: [PATCH 32/35] add logging around chmod --- src/coreclr/scripts/superpmi_benchmarks.py | 42 ++++++++++++---------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 60dc1e23a1ca8f..d1a03f13db090b 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -100,6 +100,9 @@ def make_executable(file_name): """ if not is_windows: return + + print("Inside make_executable") + run_command(["ls", "-l", 
file_name]) os.chmod(file_name, # read+execute for owner (stat.S_IRUSR | stat.S_IXUSR) | @@ -107,6 +110,7 @@ def make_executable(file_name): (stat.S_IRGRP | stat.S_IXGRP) | # read+execute for other (stat.S_IROTH | stat.S_IXOTH)) + run_command(["ls", "-l", file_name]) def build_and_run(coreclr_args, output_mch_name): """Build the microbenchmarks and run them under "superpmi collect" @@ -157,29 +161,29 @@ def build_and_run(coreclr_args, output_mch_name): "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" with TempDir() as temp_location: - # script_name = path.join('/home/kpathak/temp/output', script_name) script_name = path.join(temp_location, script_name) - with open(script_name, "w") as collection_script: - contents = [] - # Unset the JitName so dotnet process will not fail - if not is_windows: - contents.append("#!/bin/bash") - contents.append("export JitName=$COMPlus_JitName") - contents.append("unset COMPlus_JitName") - else: - contents.append("set JitName=%COMPlus_JitName%") - contents.append("set COMPlus_JitName=") - contents.append(f"pushd {performance_directory}") - contents.append(collection_command) - - print() - print(f"{script_name} contents:") - print("******************************************") - print(os.linesep.join(contents)) - print("******************************************") + contents = [] + # Unset the JitName so dotnet process will not fail + if not is_windows: + contents.append("#!/bin/bash") + contents.append("export JitName=$COMPlus_JitName") + contents.append("unset COMPlus_JitName") + else: + contents.append("set JitName=%COMPlus_JitName%") + contents.append("set COMPlus_JitName=") + contents.append(f"pushd {performance_directory}") + contents.append(collection_command) + + with open(script_name, "w") as collection_script: collection_script.write(os.linesep.join(contents)) + print() + print(f"{script_name} contents:") + print("******************************************") + print(os.linesep.join(contents)) + print("******************************************") + make_executable(script_name) # with ChangeDir(performance_directory): From 4ac45bd081b52d01d4dd52eb676becd9206c406b Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 12 Feb 2021 09:49:55 -0800 Subject: [PATCH 33/35] fix the is_windows condition --- src/coreclr/scripts/superpmi_benchmarks.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index d1a03f13db090b..7a378b8a97a274 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -98,7 +98,7 @@ def make_executable(file_name): Args: file_name (string): file to execute """ - if not is_windows: + if is_windows: return print("Inside make_executable") @@ -165,13 +165,13 @@ def build_and_run(coreclr_args, output_mch_name): contents = [] # Unset the JitName so dotnet process will not fail - if not is_windows: + if is_windows: + contents.append("set JitName=%COMPlus_JitName%") + contents.append("set COMPlus_JitName=") + else: contents.append("#!/bin/bash") contents.append("export JitName=$COMPlus_JitName") contents.append("unset COMPlus_JitName") - else: - contents.append("set JitName=%COMPlus_JitName%") - contents.append("set COMPlus_JitName=") contents.append(f"pushd {performance_directory}") contents.append(collection_command) From 58e1c8dcde8332401c1f46d4c127ab26ce453f6c Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 12 Feb 2021 13:18:57 
-0800 Subject: [PATCH 34/35] cleanup and disable linux arm/arm64 --- eng/pipelines/coreclr/superpmi.yml | 187 ++++++++++---------- src/coreclr/scripts/superpmi_benchmarks.py | 195 +++------------------ src/coreclr/scripts/superpmi_setup.py | 31 +--- 3 files changed, 125 insertions(+), 288 deletions(-) diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml index 8ab8a59c0fd8b1..5927afe2d28738 100644 --- a/eng/pipelines/coreclr/superpmi.yml +++ b/eng/pipelines/coreclr/superpmi.yml @@ -42,101 +42,57 @@ jobs: jobParameters: testGroup: outerloop -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml -# buildConfig: checked -# platforms: -# - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 -# jobParameters: -# testGroup: outerloop -# liveLibrariesBuildConfig: Release - -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml -# buildConfig: checked -# platforms: -# # Linux tests are built on the OSX machines. -# # - OSX_x64 -# - Linux_arm -# - Linux_arm64 -# - Linux_x64 -# - windows_x64 -# - windows_x86 -# - windows_arm64 -# helixQueueGroup: ci -# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml -# jobParameters: -# testGroup: outerloop -# liveLibrariesBuildConfig: Release -# collectionType: pmi -# collectionName: libraries - -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml -# buildConfig: checked -# platforms: -# # Linux tests are built on the OSX machines. -# # - OSX_x64 -# - Linux_arm -# - Linux_arm64 -# - Linux_x64 -# - windows_x64 -# - windows_x86 -# - windows_arm64 -# helixQueueGroup: ci -# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml -# jobParameters: -# testGroup: outerloop -# liveLibrariesBuildConfig: Release -# collectionType: crossgen -# collectionName: libraries +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml + buildConfig: checked + platforms: + - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml -# buildConfig: checked -# platforms: -# # Linux tests are built on the OSX machines. -# # - OSX_x64 -# - Linux_arm -# - Linux_arm64 -# - Linux_x64 -# - windows_x64 -# - windows_x86 -# - windows_arm64 -# - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 -# helixQueueGroup: ci -# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml -# jobParameters: -# testGroup: outerloop -# liveLibrariesBuildConfig: Release -# collectionType: pmi -# collectionName: tests +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml + buildConfig: checked + platforms: + # Linux tests are built on the OSX machines. 
+ # - OSX_x64 + - Linux_arm + - Linux_arm64 + - Linux_x64 + - windows_x64 + - windows_x86 + - windows_arm64 + helixQueueGroup: ci + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release + collectionType: pmi + collectionName: libraries -# - template: /eng/pipelines/common/platform-matrix.yml -# parameters: -# jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml -# buildConfig: checked -# platforms: -# # Linux tests are built on the OSX machines. -# # - OSX_x64 -# # TODO: Linux crossgen2 jobs crash during collection, and need to be investigated. -# # - Linux_arm -# # - Linux_arm64 -# # - Linux_x64 -# - windows_x64 -# - windows_x86 -# - windows_arm64 -# helixQueueGroup: ci -# helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml -# jobParameters: -# testGroup: outerloop -# liveLibrariesBuildConfig: Release -# collectionType: crossgen2 -# collectionName: libraries +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml + buildConfig: checked + platforms: + # Linux tests are built on the OSX machines. + # - OSX_x64 + - Linux_arm + - Linux_arm64 + - Linux_x64 + - windows_x64 + - windows_x86 + - windows_arm64 + helixQueueGroup: ci + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release + collectionType: crossgen + collectionName: libraries - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -154,6 +110,51 @@ jobs: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 helixQueueGroup: ci helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release + collectionType: pmi + collectionName: tests + +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml + buildConfig: checked + platforms: + # Linux tests are built on the OSX machines. + # - OSX_x64 + # TODO: Linux crossgen2 jobs crash during collection, and need to be investigated. + # - Linux_arm + # - Linux_arm64 + # - Linux_x64 + - windows_x64 + - windows_x86 + - windows_arm64 + helixQueueGroup: ci + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml + jobParameters: + testGroup: outerloop + liveLibrariesBuildConfig: Release + collectionType: crossgen2 + collectionName: libraries + +- template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml + buildConfig: checked + platforms: + # Linux tests are built on the OSX machines. + # - OSX_x64 + #TODO: Need special handling of running "benchmark build" from inside TMP folder on helix machine. 
+ # - Linux_arm + # - Linux_arm64 + - Linux_x64 + - windows_x64 + - windows_x86 + - windows_arm64 + - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 + helixQueueGroup: ci + helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: outerloop liveLibrariesBuildConfig: Release diff --git a/src/coreclr/scripts/superpmi_benchmarks.py b/src/coreclr/scripts/superpmi_benchmarks.py index 7a378b8a97a274..5f9725f9aec180 100644 --- a/src/coreclr/scripts/superpmi_benchmarks.py +++ b/src/coreclr/scripts/superpmi_benchmarks.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 # -## Licensed to the .NET Foundation under one or more agreements. -## The .NET Foundation licenses this file to you under the MIT license. +# Licensed to the .NET Foundation under one or more agreements. +# The .NET Foundation licenses this file to you under the MIT license. +# # -## # Title: superpmi_benchmarks.py # # Notes: @@ -67,7 +67,7 @@ def setup_args(args): coreclr_args.verify(args, "log_file", - lambda log_file: True, #not os.path.isfile(log_file), + lambda log_file: True, # not os.path.isfile(log_file), "log_file already exist") coreclr_args.verify(args, @@ -92,6 +92,7 @@ def setup_args(args): return coreclr_args + def make_executable(file_name): """Make file executable by changing the permission @@ -104,14 +105,15 @@ def make_executable(file_name): print("Inside make_executable") run_command(["ls", "-l", file_name]) os.chmod(file_name, - # read+execute for owner - (stat.S_IRUSR | stat.S_IXUSR) | - # read+execute for group - (stat.S_IRGRP | stat.S_IXGRP) | - # read+execute for other - (stat.S_IROTH | stat.S_IXOTH)) + # read+execute for owner + (stat.S_IRUSR | stat.S_IXUSR) | + # read+execute for group + (stat.S_IRGRP | stat.S_IXGRP) | + # read+execute for other + (stat.S_IROTH | stat.S_IXOTH)) run_command(["ls", "-l", file_name]) + def build_and_run(coreclr_args, output_mch_name): """Build the microbenchmarks and run them under "superpmi collect" @@ -133,7 +135,6 @@ def build_and_run(coreclr_args, output_mch_name): artifacts_directory = os.path.join(performance_directory, "artifacts") artifacts_packages_directory = os.path.join(artifacts_directory, "packages") project_file = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") - # benchmarks_dll = path.join('/home/kpathak/temp/artifacts', "MicroBenchmarks.dll") benchmarks_dll = path.join(artifacts_directory, "MicroBenchmarks.dll") if is_windows: @@ -148,18 +149,19 @@ def build_and_run(coreclr_args, output_mch_name): make_executable(dotnet_exe) run_command( - [dotnet_exe, "restore", project_file, "--packages", - artifacts_packages_directory], _exit_on_fail=True) + [dotnet_exe, "restore", project_file, "--packages", + artifacts_packages_directory], _exit_on_fail=True) run_command( [dotnet_exe, "build", project_file, "--configuration", "Release", - "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - "-o", artifacts_directory], _exit_on_fail=True) + "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, + "-o", artifacts_directory], _exit_on_fail=True) collection_command = f"{dotnet_exe} {benchmarks_dll} --filter \"*\" --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ - f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ - "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" + f"--partition-index 
{partition_index} --envVars COMPlus_JitName:{shim_name} " \ + "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart" + # Generate the execution script in Temp location with TempDir() as temp_location: script_name = path.join(temp_location, script_name) @@ -186,163 +188,12 @@ def build_and_run(coreclr_args, output_mch_name): make_executable(script_name) - # with ChangeDir(performance_directory): run_command([ python_path, path.join(superpmi_directory, "superpmi.py"), "collect", "-core_root", core_root, # Disable ReadyToRun so we always JIT R2R methods and collect them "--use_zapdisable", "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug", - script_name, - #---------------------------- - # # collection_command - # dotnet_exe, - - # # collection_args - # f"\"{benchmarks_dll} --filter * --corerun {path.join(core_root, corerun_exe)} --partition-count {partition_count} " \ - # f"--partition-index {partition_index} --envVars COMPlus_JitName:{shim_name} " \ - # "--iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart\"", - #---------------------------- - - # dotnet command to execute Microbenchmarks.dll - # dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + - # " --partition-count " + partition_count + " --partition-index " + partition_index + - # " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", - - # dotnet_exe, "\"" + benchmarks_dll, "--filter", "*", "--corerun", path.join(core_root, corerun_exe_name), "--partition-count", - # partition_count, "--partition-index", partition_index, "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", - # "--warmupCount", "0", "--invocationCount", "1", "--unrollFactor", "1", "--strategy", "ColdStart\"", - - # python_path, benchmarks_ci_script, script_args + bdn_artifacts + bdn_arguments, - # python_path, benchmarks_ci_script, "--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64", - # "--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts"), - # "--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, - # "--partition-index", partition_index, - # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", - # "--invocationCount", "1", - # "--unrollFactor", "1", "--strategy", "ColdStart\"", - - # # superpmi.py collect arguments - - # # Path to core_root because the script will be ran from "performance" repo. - # "-core_root", core_root, - - # # Specify that temp_dir is current performance directory, because in order to execute - # # microbenchmarks, it needs access to the source code. - # # Also, skip cleaning up once done, because the superpmi script is being - # # executed from the same folder. - # "-temp_dir", performance_directory, "--skip_cleanup", - - # # Disable ReadyToRun so we always JIT R2R methods and collect them - # "--use_zapdisable", - # "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug" - ], _exit_on_fail=True) - -def execute(coreclr_args, output_mch_name): - """Execute the superpmi collection for Microbenchmarks - - Args: - coreclr_args (CoreclrArguments): Arguments - output_mch_name (string): The name of output mch file name. 
- """ - python_path = sys.executable - core_root = coreclr_args.core_root - superpmi_directory = coreclr_args.superpmi_directory - performance_directory = coreclr_args.performance_directory - log_file = coreclr_args.log_file - partition_count = coreclr_args.partition_count - partition_index = coreclr_args.partition_index - arch = coreclr_args.arch - - if is_windows: - shim_name = "superpmi-shim-collector.dll" - corerun_exe = "CoreRun.exe" - script_name = "run_microbenchmarks.bat" - else: - shim_name = "libsuperpmi-shim-collector.so" - corerun_exe = "corerun" - script_name = "run_microbenchmarks.sh" - - benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") - microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") - corerun_exe = path.join(core_root, corerun_exe) - script_args = f"--csproj {microbenchmark_proj} -f net6.0 --incremental no --filter * --architecture {arch}" - bdn_artifacts = f"--bdn-artifacts {path.join(performance_directory, 'artifacts', 'BenchmarkDotNet.Artifacts')}" - bdn_arguments = f"--bdn-arguments=\"--corerun {corerun_exe} --partition-count {partition_count} --partition-index {partition_index} " \ - f"--envVars COMPlus_JitName:{shim_name} --iterationCount 1 --warmupCount 0 --invocationCount 1 " \ - "--unrollFactor 1 --strategy ColdStart\"" - collection_command = f"{python_path} {benchmarks_ci_script} {script_args} {bdn_artifacts} {bdn_arguments}" - - with TempDir() as temp_location: - script_name = path.join(temp_location, script_name) - with open(script_name, "w") as collection_script: - contents = ["echo off", f"echo Invoking {collection_command}", collection_command] - collection_script.write(os.linesep.join(contents)) - - if not is_windows: - os.chmod(script_name, stat.S_IRWXU) - - run_command([ - python_path, path.join(superpmi_directory, "superpmi.py"), "collect", script_name, "-core_root", core_root, - "-temp_dir", temp_location, "--skip_cleanup", # cleanup will happen once this block is done executing. 
- "--use_zapdisable", # Disable ReadyToRun so we always JIT R2R methods and collect them - "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug"]) - - # with ChangeDir(performance_directory): - # print("Inside " + performance_directory) - # # dotnet_exe_name = "dotnet.exe" if is_windows else "dotnet" - - # # dotnet_exe = path.join(performance_directory, "tools", "dotnet", dotnet_exe_name) - # benchmarks_ci_script = path.join(performance_directory, "scripts", "benchmarks_ci.py") - # microbenchmark_proj = path.join(performance_directory, "src", "benchmarks", "micro", "MicroBenchmarks.csproj") - # corerun_exe = path.join(core_root, "CoreRun.exe" if is_windows else "corerun") - # # script_args = " --csproj " + microbenchmark_proj + " -f net6.0 --incremental no --architecture x64" - # # bdn_artifacts = " --bdn-artifacts " + path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts") - # # bdn_arguments = " --bdn-arguments=\"--filter * --corerun {} --partition-count {} --partition-index {} " \ - # # "--envVars COMPlus_JitName:{} --iterationCount 1 --warmupCount 0 --invocationCount 1 " \ - # # "--unrollFactor 1 --strategy ColdStart\"".format(corerun_exe, partition_count, - # # partition_index, shim_name) - - # script_args = ["--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64"] - # bdn_artifacts = ["--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts")] - # bdn_arguments = ["--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, "--partition-index", partition_index, - # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", "--invocationCount", "1", - # "--unrollFactor", "1", "--strategy", "ColdStart\""] - - # run_command([ - # python_path, path.join(superpmi_directory, "superpmi.py"), "collect", - - # # dotnet command to execute Microbenchmarks.dll - # # dotnet_exe, benchmarks_dll + " --filter * --corerun " + path.join(core_root, corerun_exe_name) + - # # " --partition-count " + partition_count + " --partition-index " + partition_index + - # # " --envVars COMPlus_JitName:" + shim_name + " --iterationCount 1 --warmupCount 0 --invocationCount 1 --unrollFactor 1 --strategy ColdStart", - - # # dotnet_exe, "\"" + benchmarks_dll, "--filter", "*", "--corerun", path.join(core_root, corerun_exe_name), "--partition-count", - # # partition_count, "--partition-index", partition_index, "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", - # # "--warmupCount", "0", "--invocationCount", "1", "--unrollFactor", "1", "--strategy", "ColdStart\"", - - # # python_path, benchmarks_ci_script, script_args + bdn_artifacts + bdn_arguments, - # python_path, benchmarks_ci_script, "--csproj", microbenchmark_proj, "-f", "net6.0", "--incremental", "no", "--architecture", "x64", - # "--bdn-artifacts", path.join(performance_directory, "artifacts", "BenchmarkDotNet.Artifacts"), - # "--bdn-arguments=\"--filter *", "--corerun", corerun_exe, "--partition-count", partition_count, - # "--partition-index", partition_index, - # "--envVars", "COMPlus_JitName:" + shim_name, "--iterationCount", "1", "--warmupCount", "0", - # "--invocationCount", "1", - # "--unrollFactor", "1", "--strategy", "ColdStart\"", - - # # superpmi.py collect arguments - - # # Path to core_root because the script will be ran from "performance" repo. 
- # "-core_root", core_root, - - # # Specify that temp_dir is current performance directory, because in order to execute - # # microbenchmarks, it needs access to the source code. - # # Also, skip cleaning up once done, because the superpmi script is being - # # executed from the same folder. - # "-temp_dir", performance_directory, "--skip_cleanup", - - # # Disable ReadyToRun so we always JIT R2R methods and collect them - # "--use_zapdisable", - # "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug"]) + script_name], _exit_on_fail=True) def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): @@ -367,18 +218,20 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): # If strip command fails, then just copy the old_mch to new_mch print(f"-dumpMap failed. Copying {old_mch_filename} to {new_mch_filename}.") copyfile(old_mch_filename, new_mch_filename) + copyfile(old_mch_filename + ".mct", new_mch_filename + ".mct") return method_context_list = mcs_out.decode("utf-8").split(os.linesep) filtered_context_list = [] match_pattern = re.compile('^(\\d+),(BenchmarkDotNet|Perfolizer)') - print("Stripping following entries:") + print("Method indices to strip:") for mc_entry in method_context_list: matched = match_pattern.match(mc_entry) if matched: print(matched.group(1)) filtered_context_list.append(matched.group(1)) + print(f"Total {len(filtered_context_list)} methods.") with open(methods_to_strip_list, "w") as f: f.write('\n'.join(filtered_context_list)) @@ -388,6 +241,7 @@ def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename): # If strip command fails, then just copy the old_mch to new_mch print(f"-strip failed. Copying {old_mch_filename} to {new_mch_filename}.") copyfile(old_mch_filename, new_mch_filename) + copyfile(old_mch_filename + ".mct", new_mch_filename + ".mct") return # Create toc file @@ -403,7 +257,6 @@ def main(main_args): coreclr_args = setup_args(main_args) all_output_mch_name = path.join(coreclr_args.output_mch_path + "_all.mch") - # execute(coreclr_args, all_output_mch_name) build_and_run(coreclr_args, all_output_mch_name) if os.path.isfile(all_output_mch_name): pass diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index 36cf076387b99d..c28a5e39fcafc8 100644 --- a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 # -## Licensed to the .NET Foundation under one or more agreements. -## The .NET Foundation licenses this file to you under the MIT license. +# Licensed to the .NET Foundation under one or more agreements. +# The .NET Foundation licenses this file to you under the MIT license. # ## # Title : superpmi_setup.py @@ -238,7 +238,7 @@ def first_fit(sorted_by_size, max_size): if file_size < max_size: for p_index in partitions: total_in_curr_par = sum(n for _, n in partitions[p_index]) - if (((total_in_curr_par + file_size) < max_size) and (len(partitions[p_index]) < MAX_FILES_COUNT)): + if ((total_in_curr_par + file_size) < max_size) and (len(partitions[p_index]) < MAX_FILES_COUNT): partitions[p_index].append(curr_file) found_bucket = True break @@ -256,13 +256,11 @@ def first_fit(sorted_by_size, max_size): return partitions -def run_command(command_to_run, _cwd=None, _env=None, _exit_on_fail=False): +def run_command(command_to_run, _exit_on_fail=False): """ Runs the command. Args: command_to_run ([string]): Command to run along with arguments. 
- _cwd (string): Current working directory. - _env (string): Environment variables, if any. _exit_on_fail (bool): If it should exit on failure. Returns: (string, string): Returns a tuple of stdout and stderr @@ -271,7 +269,7 @@ def run_command(command_to_run, _cwd=None, _env=None, _exit_on_fail=False): command_stdout = "" command_stderr = "" return_code = 1 - with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd, env=_env) as proc: + with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd) as proc: command_stdout, command_stderr = proc.communicate() return_code = proc.returncode @@ -282,7 +280,7 @@ def run_command(command_to_run, _cwd=None, _env=None, _exit_on_fail=False): if _exit_on_fail and return_code != 0: print("Command failed. Exiting.") sys.exit(1) - return (command_stdout, command_stderr, return_code) + return command_stdout, command_stderr, return_code def copy_directory(src_path, dst_path, verbose_output=True, match_func=lambda path: True): @@ -366,6 +364,7 @@ def setup_microbenchmark(workitem_directory, arch): Args: workitem_directory (string): Path to work + arch (string): Architecture for which dotnet will be installed """ performance_directory = path.join(workitem_directory, "performance") @@ -375,26 +374,13 @@ def setup_microbenchmark(workitem_directory, arch): with ChangeDir(performance_directory): dotnet_directory = os.path.join(performance_directory, "tools", "dotnet", arch) dotnet_install_script = os.path.join(performance_directory, "scripts", "dotnet.py") - # dotnet_exe = os.path.join(dotnet_directory, "dotnet") - # artifacts_directory = os.path.join(performance_directory, "artifacts") - # artifacts_packages_directory = os.path.join(artifacts_directory, "packages") if not isfile(dotnet_install_script): print("Missing " + dotnet_install_script) return - # build_env_vars = os.environ.copy() - # build_env_vars["DOTNET_CLI_TELEMETRY_OPTOUT"] = "1" - # build_env_vars["DOTNET_MULTILEVEL_LOOKUP"] = "0" - # build_env_vars["UseSharedCompilation"] = "false" - run_command( get_python_name() + [dotnet_install_script, "install", "--architecture", arch, "--install-dir", dotnet_directory, "--verbose"]) - # run_command([dotnet_exe, "restore", "src/benchmarks/micro/MicroBenchmarks.csproj", "--packages", - # artifacts_packages_directory], _env=build_env_vars) - # run_command([dotnet_exe, "build", "src/benchmarks/micro/MicroBenchmarks.csproj", "--configuration", "Release", - # "--framework", "net6.0", "--no-restore", "/p:NuGetPackageRoot=" + artifacts_packages_directory, - # "-o", artifacts_directory], _env=build_env_vars) def get_python_name(): """Gets the python name @@ -467,9 +453,6 @@ def main(main_args): if coreclr_args.collection_name == "benchmarks": # Setup microbenchmarks setup_microbenchmark(workitem_directory, arch) - # performance_directory = path.join(correlation_payload_directory, "performance") - # run_command( - # ["git", "clone", "--quiet", "--depth", "1", "https://github.com/kunalspathak/performance", performance_directory]) else: # Setup for pmi/crossgen runs From 8af31517ccc2ec8b148f4de542e629d538fbf15f Mon Sep 17 00:00:00 2001 From: Kunal Pathak Date: Fri, 12 Feb 2021 16:42:32 -0800 Subject: [PATCH 35/35] remove the unwanted parameter --- src/coreclr/scripts/superpmi_setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/coreclr/scripts/superpmi_setup.py b/src/coreclr/scripts/superpmi_setup.py index c28a5e39fcafc8..21f51daf44ccb0 100644 --- 
a/src/coreclr/scripts/superpmi_setup.py +++ b/src/coreclr/scripts/superpmi_setup.py @@ -269,7 +269,7 @@ def run_command(command_to_run, _exit_on_fail=False): command_stdout = "" command_stderr = "" return_code = 1 - with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd) as proc: + with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: command_stdout, command_stderr = proc.communicate() return_code = proc.returncode
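
For reference, a minimal standalone sketch of what run_command in superpmi_setup.py reduces to after PATCH 35/35: the _cwd/_env plumbing is gone and the child process simply inherits the caller's working directory and environment. This is a sketch assembled from the hunks above, not part of the patch itself; the stdout/stderr logging that sits between the two hunks is omitted, and the sample invocation at the bottom is only an assumed illustration.

#!/usr/bin/env python3
# Sketch only: post-patch shape of run_command (output logging between the hunks omitted).
import subprocess
import sys


def run_command(command_to_run, _exit_on_fail=False):
    """Run a command and return (stdout, stderr, return_code)."""
    command_stdout = ""
    command_stderr = ""
    return_code = 1
    # cwd/env are no longer forwarded; the child inherits them from the caller.
    with subprocess.Popen(command_to_run, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE) as proc:
        command_stdout, command_stderr = proc.communicate()
        return_code = proc.returncode
    if _exit_on_fail and return_code != 0:
        print("Command failed. Exiting.")
        sys.exit(1)
    return command_stdout, command_stderr, return_code


# Illustrative call site (assumed, not taken from the patch):
run_command([sys.executable, "--version"], _exit_on_fail=True)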