diff --git a/eng/common/superpmi/superpmi.proj b/eng/common/superpmi/superpmi.proj
new file mode 100644
index 0000000000000..75797de16e86f
--- /dev/null
+++ b/eng/common/superpmi/superpmi.proj
@@ -0,0 +1,102 @@
+<Project Sdk="Microsoft.DotNet.Helix.Sdk" DefaultTargets="Test">
+
+  <PropertyGroup Condition="'$(AGENT_OS)' == 'Windows_NT'">
+    <FileSeparatorChar>\</FileSeparatorChar>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(AGENT_OS)' != 'Windows_NT'">
+    <FileSeparatorChar>/</FileSeparatorChar>
+  </PropertyGroup>
+
+  <PropertyGroup Condition="'$(AGENT_OS)' == 'Windows_NT'">
+    <Python>%HELIX_PYTHONPATH%</Python>
+    <PmiAssembliesPayload>$(WorkItemDirectory)\pmiAssembliesDirectory</PmiAssembliesPayload>
+    <PmiAssembliesDirectory>%HELIX_WORKITEM_PAYLOAD%\binaries</PmiAssembliesDirectory>
+    <SuperPMIDirectory>%HELIX_CORRELATION_PAYLOAD%\superpmi</SuperPMIDirectory>
+    <OutputMchPath>%HELIX_WORKITEM_UPLOAD_ROOT%</OutputMchPath>
+    <HelixResultsDestinationDir>$(BUILD_SOURCESDIRECTORY)\artifacts\helixresults</HelixResultsDestinationDir>
+    <WorkItemCommand>$(SuperPMIDirectory)\superpmi.py collect --pmi -pmi_location $(SuperPMIDirectory)\pmi.dll</WorkItemCommand>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(AGENT_OS)' != 'Windows_NT'">
+    <Python>$HELIX_PYTHONPATH</Python>
+    <PmiAssembliesPayload>$(WorkItemDirectory)/pmiAssembliesDirectory</PmiAssembliesPayload>
+    <PmiAssembliesDirectory>$HELIX_WORKITEM_PAYLOAD/binaries</PmiAssembliesDirectory>
+    <SuperPMIDirectory>$HELIX_CORRELATION_PAYLOAD/superpmi</SuperPMIDirectory>
+    <OutputMchPath>$HELIX_WORKITEM_UPLOAD_ROOT</OutputMchPath>
+    <HelixResultsDestinationDir>$(BUILD_SOURCESDIRECTORY)/artifacts/helixresults</HelixResultsDestinationDir>
+    <WorkItemCommand>$(SuperPMIDirectory)/superpmi.py collect --pmi -pmi_location $(SuperPMIDirectory)/pmi.dll</WorkItemCommand>
+  </PropertyGroup>
+
+  <PropertyGroup>
+    <WorkItemCommand>$(Python) $(WorkItemCommand) -pmi_assemblies $(PmiAssembliesDirectory) -arch $(Architecture) -build_type $(BuildConfig) -core_root $(SuperPMIDirectory)</WorkItemCommand>
+  </PropertyGroup>
+
+  <PropertyGroup>
+    <EnableAzurePipelinesReporter>false</EnableAzurePipelinesReporter>
+    <EnableXUnitReporter>false</EnableXUnitReporter>
+    <WorkItemTimeout Condition="'$(WorkItemTimeout)' == ''">5:00</WorkItemTimeout>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <HelixCorrelationPayload Include="$(CorrelationPayloadDirectory)">
+      <PayloadDirectory>%(Identity)</PayloadDirectory>
+    </HelixCorrelationPayload>
+  </ItemGroup>
+
+  <ItemGroup>
+    <!-- (HelixWorkItem Include definitions that populate OutputMchName, PmiAssemblies,
+         and PartitionId metadata are not recoverable from this extracted diff) -->
+    <HelixWorkItem Include="...">
+      <OutputFileName>%(HelixWorkItem.OutputMchName).$(MchFileTag).%(HelixWorkItem.PartitionId)</OutputFileName>
+      <PayloadDirectory>$(PmiAssembliesPayload)$(FileSeparatorChar)%(HelixWorkItem.PmiAssemblies)</PayloadDirectory>
+      <Command>$(WorkItemCommand) -output_mch_path $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).mch -log_file $(OutputMchPath)$(FileSeparatorChar)%(OutputFileName).log</Command>
+      <Timeout>$(WorkItemTimeout)</Timeout>
+      <DownloadFilesFromResults>%(OutputFileName).mch;%(OutputFileName).mch.mct;%(OutputFileName).log</DownloadFilesFromResults>
+    </HelixWorkItem>
+  </ItemGroup>
+
+</Project>
\ No newline at end of file
diff --git a/eng/common/templates/steps/superpmi-send-to-helix.yml b/eng/common/templates/steps/superpmi-send-to-helix.yml
new file mode 100644
index 0000000000000..7f31bfbdb022a
--- /dev/null
+++ b/eng/common/templates/steps/superpmi-send-to-helix.yml
@@ -0,0 +1,53 @@
+# Please remember to update the documentation if you make changes to these parameters!
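+#
+# A minimal usage sketch (parameter values here are illustrative, not taken from this change):
+#
+#   steps:
+#   - template: /eng/common/templates/steps/superpmi-send-to-helix.yml
+#     parameters:
+#       HelixTargetQueues: Windows.10.Amd64
+#       HelixAccessToken: $(HelixApiAccessToken)
+#       WorkItemDirectory: $(WorkItemDirectory)
+#       CorrelationPayloadDirectory: $(CorrelationPayloadDirectory)
+#       BuildConfig: checked
+#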
+parameters:
+  HelixSource: 'pr/default' # required -- sources must start with pr/, official/, prodcon/, or agent/
+  HelixType: 'tests/default/' # required -- Helix telemetry which identifies what type of data this is; should include "test" for clarity and must end in '/'
+  HelixBuild: $(Build.BuildNumber) # required -- the build number Helix will use to identify this -- automatically set to the AzDO build number
+  HelixTargetQueues: '' # required -- semicolon-delimited list of Helix queues to test on; see https://helix.dot.net/ for a list of queues
+  HelixAccessToken: '' # required -- access token to make Helix API requests; should be provided by the appropriate variable group
+  HelixPreCommands: '' # optional -- commands to run before Helix work item execution
+  HelixPostCommands: '' # optional -- commands to run after Helix work item execution
+  WorkItemDirectory: '' # optional -- a payload directory to zip up and send to Helix; requires WorkItemCommand; incompatible with XUnitProjects
+  CorrelationPayloadDirectory: '' # optional -- a directory to zip up and send to Helix as a correlation payload
+  IncludeDotNetCli: false # optional -- true will download a version of the .NET CLI onto the Helix machine as a correlation payload; requires DotNetCliPackageType and DotNetCliVersion
+  DotNetCliPackageType: '' # optional -- either 'sdk' or 'runtime'; determines whether the sdk or runtime will be sent to Helix; see https://raw.githubusercontent.com/dotnet/core/master/release-notes/releases.json
+  DotNetCliVersion: '' # optional -- version of the CLI to send to Helix; based on this: https://raw.githubusercontent.com/dotnet/core/master/release-notes/releases.json
+  EnableXUnitReporter: false # optional -- true enables XUnit result reporting to Mission Control
+  WaitForWorkItemCompletion: true # optional -- true will make the task wait until work items have been completed and fail the build if work items fail; false is "fire and forget"
+  Creator: '' # optional -- if the build is external, use this to specify who is sending the job
+  DisplayNamePrefix: 'Send job to Helix' # optional -- rename the beginning of the displayName of the steps in AzDO
+  condition: succeeded() # optional -- condition for step to execute; defaults to succeeded()
+  continueOnError: false # optional -- determines whether to continue the build if the step errors; defaults to false
+  BuildConfig: 'checked' # optional -- SuperPMI is typically run on checked builds
+  LibrariesArtifacts: ''
+  TestsArtifacts: ''
+
+steps:
+- template: /eng/pipelines/common/templates/runtimes/send-to-helix-inner-step.yml
+  parameters:
+    osGroup: ${{ parameters.osGroup }}
+    sendParams: $(Build.SourcesDirectory)/eng/common/superpmi/superpmi.proj /restore /t:Test /bl:$(Build.SourcesDirectory)/artifacts/log/$(BuildConfig)/SendToHelix.binlog
+    displayName: ${{ parameters.DisplayNamePrefix }}
+    condition: ${{ parameters.condition }}
+    continueOnError: ${{ parameters.continueOnError }}
+    environment:
+      MchFileTag: $(MchFileTag)
+      BuildConfig: ${{ parameters.BuildConfig }}
+      LibrariesArtifacts: ${{ parameters.LibrariesArtifacts }}
+      TestsArtifacts: ${{ parameters.TestsArtifacts }}
+      HelixSource: ${{ parameters.HelixSource }}
+      HelixType: ${{ parameters.HelixType }}
+      HelixBuild: ${{ parameters.HelixBuild }}
+      HelixTargetQueues: ${{ parameters.HelixTargetQueues }}
+      HelixAccessToken: ${{ parameters.HelixAccessToken }}
+      HelixPreCommands: ${{ parameters.HelixPreCommands }}
+      HelixPostCommands: ${{ parameters.HelixPostCommands }}
+      WorkItemDirectory: ${{ parameters.WorkItemDirectory }}
+      CorrelationPayloadDirectory: ${{ parameters.CorrelationPayloadDirectory }}
+      IncludeDotNetCli: ${{ parameters.IncludeDotNetCli }}
+      DotNetCliPackageType: ${{ parameters.DotNetCliPackageType }}
+      DotNetCliVersion: ${{ parameters.DotNetCliVersion }}
+      EnableXUnitReporter: ${{ parameters.EnableXUnitReporter }}
+      WaitForWorkItemCompletion: ${{ parameters.WaitForWorkItemCompletion }}
+      Creator: ${{ parameters.Creator }}
+      SYSTEM_ACCESSTOKEN: $(System.AccessToken)
\ No newline at end of file
diff --git a/eng/pipelines/coreclr/superpmi.yml b/eng/pipelines/coreclr/superpmi.yml
new file mode 100644
index 0000000000000..6666a3a4d290a
--- /dev/null
+++ b/eng/pipelines/coreclr/superpmi.yml
@@ -0,0 +1,66 @@
+trigger: none
+
+pr: none
+
+# schedules:
+# - cron: "0 4 * * *"
+#   displayName: Daily at 8:00 PM (UTC-8:00)
+#   branches:
+#     include:
+#     - master
+#   always: true
+
+jobs:
+#
+# Checkout repository
+#
+- template: /eng/pipelines/common/checkout-job.yml
+
+- template: /eng/pipelines/common/platform-matrix.yml
+  parameters:
+    jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml
+    buildConfig: checked
+    platforms:
+    # Linux tests are built on the OSX machines.
+    # - OSX_x64
+    - Linux_arm
+    - Linux_arm64
+    - Linux_x64
+    - Windows_NT_x64
+    - Windows_NT_x86
+    - Windows_NT_arm64
+    - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64
+    jobParameters:
+      testGroup: outerloop
+
+# TODO: Disable SPMI for P1 tests
+# - template: /eng/pipelines/common/platform-matrix.yml
+#   parameters:
+#     jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml
+#     buildConfig: checked
+#     platforms:
+#     - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64
+#     jobParameters:
+#       testGroup: outerloop
+#       liveLibrariesBuildConfig: Release
+
+- template: /eng/pipelines/common/platform-matrix.yml
+  parameters:
+    jobTemplate: /eng/pipelines/coreclr/templates/superpmi-job.yml
+    buildConfig: checked
+    platforms:
+    # Linux tests are built on the OSX machines.
+    # - OSX_x64
+    - Linux_arm
+    - Linux_arm64
+    - Linux_x64
+    - Windows_NT_x64
+    - Windows_NT_x86
+    - Windows_NT_arm64
+    - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64
+    helixQueueGroup: ci
+    helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+    jobParameters:
+      testGroup: outerloop
+      liveLibrariesBuildConfig: Release
+
diff --git a/eng/pipelines/coreclr/templates/run-superpmi-job.yml b/eng/pipelines/coreclr/templates/run-superpmi-job.yml
new file mode 100644
index 0000000000000..703c384108d9c
--- /dev/null
+++ b/eng/pipelines/coreclr/templates/run-superpmi-job.yml
@@ -0,0 +1,143 @@
+parameters:
+  steps: [] # optional -- any additional steps that need to happen before pulling down the jitutils repo and sending the jitutils to helix (i.e., building your repo)
+  variables: [] # optional -- list of additional variables to send to the template
+  jobName: '' # required -- job name
+  displayName: '' # optional -- display name for the job; uses jobName if not passed
+  pool: '' # required -- name of the Build pool
+  container: '' # required -- name of the container
+  buildConfig: '' # required -- build configuration
+  archType: '' # required -- targeting CPU architecture
+  osGroup: '' # required -- operating system for the job
+  osSubgroup: '' # optional -- operating system subgroup
+  extraSetupParameters: '' # optional -- extra arguments to pass to the setup script
+  frameworks: ['netcoreapp3.0'] # optional -- list of frameworks to run against
+  continueOnError: 'false' # optional -- determines whether to continue the build if the step errors
+  dependsOn: '' # optional -- dependencies of the job
+  timeoutInMinutes: 320 # optional -- timeout for the job
+  enableTelemetry: false # optional -- enable telemetry
+  liveLibrariesBuildConfig: '' # optional -- live-built libraries configuration to use for the run
+  runtimeType: 'coreclr' # optional -- sets the runtime as coreclr or mono
+  codeGenType: 'JIT' # optional -- decides on the codegen technology when running on mono
+  projectFile: 'superpmi.proj' # optional -- project file to build helix workitems
+  runKind: '' # required -- test category
+
+jobs:
+- template: xplat-pipeline-job.yml
+  parameters:
+    dependsOn: ${{ parameters.dependsOn }}
+    buildConfig: ${{ parameters.buildConfig }}
+    archType: ${{ parameters.archType }}
+    osGroup: ${{ parameters.osGroup }}
+    osSubgroup: ${{ parameters.osSubgroup }}
+    liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
+    enableTelemetry: ${{ parameters.enableTelemetry }}
+    enablePublishBuildArtifacts: true
+    continueOnError: ${{ parameters.continueOnError }}
+
+    ${{ if ne(parameters.displayName, '') }}:
+      displayName: '${{ parameters.displayName }}'
+    ${{ if eq(parameters.displayName, '') }}:
+      displayName: '${{ parameters.jobName }}'
+
+    timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
+
+    variables:
+
+    - ${{ each variable in parameters.variables }}:
+      - ${{ if ne(variable.name, '') }}:
+        - name: ${{ variable.name }}
+          value: ${{ variable.value }}
+      - ${{ if ne(variable.group, '') }}:
+        - group: ${{ variable.group }}
+
+    - HelixApiAccessToken: ''
+    - HelixPreCommand: ''
+    - MchFileTag: '${{ parameters.osGroup }}.${{ parameters.archType }}.${{ parameters.buildConfig }}'
+
+    - ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
+      - name: PythonScript
+        value: 'py -3'
+      - name: PipScript
+        value: 'py -3 -m pip'
+      - name: Core_Root_Dir
+        value: '$(Build.SourcesDirectory)\artifacts\tests\coreclr\${{ parameters.osGroup }}.${{ parameters.archType }}.${{ parameters.buildConfig }}\Tests\Core_Root'
+      - name: MchFilesLocation
+        value: '$(Build.SourcesDirectory)\artifacts\helixresults\'
+    - ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
+      - name: PythonScript
+        value: 'python3'
+      - name: PipScript
+        value: 'pip3'
+      - name: Core_Root_Dir
+        value: '$(Build.SourcesDirectory)/artifacts/tests/coreclr/${{ parameters.osGroup }}.${{ parameters.archType }}.$(buildConfigUpper)/Tests/Core_Root'
+      - name: MchFilesLocation
+        value: '$(Build.SourcesDirectory)/artifacts/helixresults/'
+    workspace:
+      clean: all
+    pool:
+      ${{ parameters.pool }}
+    container: ${{ parameters.container }}
+    strategy:
+      matrix:
+        ${{ each framework in parameters.frameworks }}:
+          ${{ framework }}:
+            _Framework: ${{ framework }}
+    steps:
+    - ${{ parameters.steps }}
+
+    - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi-setup.py -source_directory $(Build.SourcesDirectory) -core_root_directory $(Core_Root_Dir) -arch $(archType) -mch_file_tag $(MchFileTag) -libraries_directory $(Core_Root_Dir) -tests_directory $(managedTestArtifactRootFolderPath) -max_size 50 # size in MB
+      displayName: ${{ format('SuperPMI setup ({0})', parameters.osGroup) }}
+
+    # Run superpmi collection in helix
+    - template: /eng/common/templates/steps/superpmi-send-to-helix.yml
+      parameters:
+        HelixSource: '$(HelixSourcePrefix)/$(Build.Repository.Name)/$(Build.SourceBranch)' # sources must start with pr/, official/, prodcon/, or agent/
+        HelixType: 'test/superpmi/$(Kind)/$(_Framework)/$(Architecture)'
+        HelixAccessToken: $(HelixApiAccessToken)
+        HelixTargetQueues: $(Queue)
+        HelixPreCommands: $(HelixPreCommand)
+        Creator: $(Creator)
+        WorkItemTimeout: 4:00 # 4 hours
+        WorkItemDirectory: '$(WorkItemDirectory)'
+        CorrelationPayloadDirectory: '$(CorrelationPayloadDirectory)'
+        ProjectFile: ${{ parameters.projectFile }}
+        BuildConfig: ${{ parameters.buildConfig }}
+        osGroup: ${{ parameters.osGroup }}
+        LibrariesArtifacts: '$(LibrariesArtifacts)'
+        TestsArtifacts: '$(TestsArtifacts)'
+
+    - task: PublishPipelineArtifact@1
+      displayName: Publish SuperPMI collection
+      inputs:
+        targetPath: $(Build.SourcesDirectory)/artifacts/helixresults
+        artifactName: 'SuperPMI_Result_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}'
+      continueOnError: true
+      condition: always()
+
+    - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi.py merge-mch -pattern $(MchFilesLocation)libraries.pmi*.mch -output_mch_path $(MchFilesLocation)libraries.pmi.$(MchFileTag).mch
+      displayName: Merge libraries SuperPMI collections
+
+    # TODO: Disable SPMI for P1 tests
+    # - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi.py merge-mch -pattern $(MchFilesLocation)tests.pmi*.mch -output_mch_path $(MchFilesLocation)tests.pmi.$(MchFileTag).mch
+    #   displayName: Merge tests SuperPMI collections
+
+    # For now, we won't upload the merged collection as an artifact.
+
+    - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi.py upload -arch $(archType) -build_type $(buildConfig) -mch_files $(MchFilesLocation)libraries.pmi.$(MchFileTag).mch -core_root $(Build.SourcesDirectory)/artifacts/bin/coreclr/$(osGroup).x64.$(buildConfigUpper)
+      displayName: Upload SuperPMI libraries collection to Azure Storage
+      env:
+        CLRJIT_AZ_KEY: $(clrjit_key1) # secret key stored as a variable in the pipeline
+
+    # TODO: Disable SPMI for P1 tests
+    # - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi.py upload -arch $(archType) -build_type $(buildConfig) -mch_files $(MchFilesLocation)tests.pmi.$(MchFileTag).mch -core_root $(Build.SourcesDirectory)/artifacts/bin/coreclr/$(osGroup).x64.$(buildConfigUpper)
+    #   displayName: Upload SuperPMI tests collection to Azure Storage
+    #   env:
+    #     CLRJIT_AZ_KEY: $(clrjit_key1) # secret key stored as a variable in the pipeline
+
+    - task: PublishPipelineArtifact@1
+      displayName: Publish Logs
+      inputs:
+        targetPath: $(Build.SourcesDirectory)/artifacts/log
+        artifactName: 'SuperPMI_Logs_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}'
+      continueOnError: true
+      condition: always()
\ No newline at end of file
diff --git a/eng/pipelines/coreclr/templates/superpmi-job.yml b/eng/pipelines/coreclr/templates/superpmi-job.yml
new file mode 100644
index 0000000000000..3844206d8e8bd
--- /dev/null
+++ b/eng/pipelines/coreclr/templates/superpmi-job.yml
@@ -0,0 +1,104 @@
+parameters:
+  buildConfig: ''
+  archType: ''
+  osGroup: ''
+  osSubgroup: ''
+  container: ''
+  runtimeVariant: ''
+  testGroup: ''
+  framework: net5.0 # Specify the appropriate framework when running release branches (i.e., netcoreapp3.0 for release/3.0)
+  liveLibrariesBuildConfig: ''
+  variables: {}
+  runtimeType: 'coreclr'
+  pool: ''
+  codeGenType: 'JIT'
+  projectFile: ''
+  runKind: ''
+  runJobTemplate: '/eng/pipelines/coreclr/templates/run-superpmi-job.yml'
+  additionalSetupParameters: ''
+
+### SuperPMI job
+
+### Each superpmi job depends on a corresponding build job with the same
+### buildConfig and archType.
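+###
+### For example (names are illustrative, derived from the format strings below): a
+### Linux x64 checked coreclr/JIT job would be named
+### 'superpmibuild_Linux_x64_checked_coreclr_JIT_<runKind>' and would depend on
+### 'coreclr_<runtimeVariant>_product_build_Linux_x64_checked', plus the x64 coreclr
+### and live-built libraries build jobs where applicable.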
+
+jobs:
+- template: ${{ parameters.runJobTemplate }}
+  parameters:
+    # Compute job name from template parameters
+    jobName: ${{ format('superpmibuild_{0}{1}_{2}_{3}_{4}_{5}_{6}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.runtimeType, parameters.codeGenType, parameters.runKind) }}
+    displayName: ${{ format('SuperPMI {0}{1} {2} {3} {4} {5} {6}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.runtimeType, parameters.codeGenType, parameters.runKind) }}
+    pool: ${{ parameters.pool }}
+    buildConfig: ${{ parameters.buildConfig }}
+    archType: ${{ parameters.archType }}
+    osGroup: ${{ parameters.osGroup }}
+    osSubgroup: ${{ parameters.osSubgroup }}
+    runtimeVariant: ${{ parameters.runtimeVariant }}
+    liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
+    runtimeType: ${{ parameters.runtimeType }}
+    codeGenType: ${{ parameters.codeGenType }}
+    projectFile: ${{ parameters.projectFile }}
+    runKind: ${{ parameters.runKind }}
+    testGroup: ${{ parameters.testGroup }}
+    additionalSetupParameters: ${{ parameters.additionalSetupParameters }}
+    # Test job depends on the corresponding build job
+    dependsOn:
+    - ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
+    # Depend on coreclr x64 so we can download it and use mcs.exe from it while publishing a non-x64 arch SPMI collection
+    - ${{ if ne(parameters.archType, 'x64') }}:
+      - ${{ format('coreclr_{0}_product_build_{1}{2}_x64_{3}', parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.buildConfig) }}
+    - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
+      - ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveLibrariesBuildConfig) }}
+    # TODO: Disable SPMI for P1 tests
+    # - ${{ if eq(parameters.testGroup, 'innerloop') }}:
+    #   - '${{ parameters.runtimeType }}_common_test_build_p0_AnyOS_AnyCPU_${{ parameters.buildConfig }}'
+    # - ${{ if ne(parameters.testGroup, 'innerloop') }}:
+    #   - '${{ parameters.runtimeType }}_common_test_build_p1_AnyOS_AnyCPU_${{ parameters.buildConfig }}'
+
+    variables: ${{ parameters.variables }}
+
+    frameworks:
+    - ${{ parameters.framework }}
+    steps:
+    # Extra steps that will be passed to the superpmi template and run before sending the job to helix (all of which is done in the template)
+
+    # Optionally download live-built libraries
+    - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
+      - template: /eng/pipelines/common/download-artifact-step.yml
+        parameters:
+          unpackFolder: $(librariesDownloadDir)
+          cleanUnpackFolder: false
+          artifactFileName: '$(librariesBuildArtifactName)$(archiveExtension)'
+          artifactName: '$(librariesBuildArtifactName)'
+          displayName: 'live-built libraries'
+
+    # Download coreclr
+    - template: /eng/pipelines/common/download-artifact-step.yml
+      parameters:
+        unpackFolder: $(buildProductRootFolderPath)
+        artifactFileName: '$(buildProductArtifactName)$(archiveExtension)'
+        artifactName: '$(buildProductArtifactName)'
+        displayName: 'Coreclr product build'
+
+    # Download x64 coreclr if running on a non-x64 configuration
+    - ${{ if ne(parameters.archType, 'x64') }}:
+      - template: /eng/pipelines/common/download-artifact-step.yml
+        parameters:
+          unpackFolder: '$(Build.SourcesDirectory)/artifacts/bin/coreclr/$(osGroup).x64.$(buildConfigUpper)'
+          artifactFileName: 'CoreCLRProduct_${{ parameters.runtimeVariant }}_$(osGroup)$(osSubgroup)_x64_$(buildConfig)$(archiveExtension)'
+          artifactName: 'CoreCLRProduct_${{ parameters.runtimeVariant }}_$(osGroup)$(osSubgroup)_x64_$(buildConfig)'
+          displayName: 'Coreclr product build (x64)'
+
+    # TODO: Disable SPMI for P1 tests
+    # # Download and unzip managed test artifacts
+    # - template: /eng/pipelines/common/download-artifact-step.yml
+    #   parameters:
+    #     unpackFolder: '$(managedTestArtifactRootFolderPath)'
+    #     artifactFileName: '$(managedGenericTestArtifactName).tar.gz'
+    #     artifactName: '$(managedGenericTestArtifactName)'
+    #     displayName: 'generic managed test artifacts'
+
+    # Create Core_Root
+    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(buildConfig) $(archType) generatelayoutonly $(librariesOverrideArg)
+      displayName: Create Core_Root
+      condition: succeeded()
diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml
index 3e0f2ca420b81..b687a65b089da 100644
--- a/eng/pipelines/runtime.yml
+++ b/eng/pipelines/runtime.yml
@@ -1041,4 +1041,4 @@ jobs:
       or(
         eq(dependencies.checkout.outputs['SetPathVars_coreclr.containsChange'], true),
         eq(dependencies.checkout.outputs['SetPathVars_libraries.containsChange'], true),
-        eq(variables['isFullMatrix'], true))
+        eq(variables['isFullMatrix'], true))
\ No newline at end of file
diff --git a/src/coreclr/scripts/superpmi-setup.py b/src/coreclr/scripts/superpmi-setup.py
new file mode 100644
index 0000000000000..73de8cff73eec
--- /dev/null
+++ b/src/coreclr/scripts/superpmi-setup.py
@@ -0,0 +1,412 @@
+#!/usr/bin/env python3
+#
+## Licensed to the .NET Foundation under one or more agreements.
+## The .NET Foundation licenses this file to you under the MIT license.
+#
+##
+# Title: superpmi-setup.py
+#
+# Notes:
+#
+# Script to set up the directory structure required to perform SuperPMI collection in CI.
+# It does the following steps:
+# 1. Creates `correlation_payload_directory`, which contains files from CORE_ROOT and src\coreclr\scripts.
+#    This directory is sent to all the helix machines that perform the SPMI collection.
+# 2. Clones dotnet/jitutils, builds it, and then copies `pmi.dll` to the `correlation_payload_directory`
+#    folder. This file is needed to do pmi SPMI runs.
+# 3. Takes the `libraries_artifacts` and `tests_artifacts` parameters, which contain the managed .dlls and
+#    .exes on which SPMI needs to be run. The script partitions those folders into buckets of approximately
+#    `max_size` bytes each and stores them under the `payload` directory. Each sub-folder inside the
+#    `payload` directory is sent to an individual helix machine to do the SPMI collection on. E.g. for
+#    `libraries_artifacts`, the parameter would be the path to the `CORE_ROOT` folder, and this script will
+#    copy `max_size` bytes of those files under `payload/Core_Root/libraries_0/binaries`,
+#    `payload/Core_Root/libraries_1/binaries`, and so forth.
+# 4. Lastly, it sets the pipeline variables.
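+#
+# For example (sizes illustrative), with -max_size 50 the partitioning below lays out:
+#
+#   workitem/pmiAssembliesDirectory/Core_Root/0/binaries/...   (at most ~50 MB of .dll/.exe files)
+#   workitem/pmiAssembliesDirectory/Core_Root/1/binaries/...
+#   ...
+#
+# and each numbered bucket becomes a separate helix work item.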
+
+# Below are the helix queues it sets depending on the OS/architecture:
+# | Arch  | Windows_NT       | Linux                                                                                                                                |
+# |-------|------------------|--------------------------------------------------------------------------------------------------------------------------------------|
+# | x86   | Windows.10.Amd64 | -                                                                                                                                    |
+# | x64   | Windows.10.Amd64 | Ubuntu.1804.Amd64                                                                                                                    |
+# | arm   | -                | (Ubuntu.1804.Arm32)Ubuntu.1804.Armarch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440 |
+# | arm64 | Windows.10.Arm64 | (Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855 |
+################################################################################
+################################################################################
+
+
+import argparse
+import os
+import platform
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from os import listdir, path, walk
+from os.path import isfile, join, getsize
+from coreclr_arguments import *
+
+# Start of parser object creation.
+
+parser = argparse.ArgumentParser(description="Set up the directory structure required to perform SuperPMI collection in CI.")
+
+parser.add_argument("-source_directory", help="path to source directory")
+parser.add_argument("-core_root_directory", help="path to core_root directory")
+parser.add_argument("-arch", help="Architecture")
+parser.add_argument("-mch_file_tag", help="Tag to be used for mch files")
+parser.add_argument("-libraries_directory", help="directory containing assemblies on which the superpmi collection is to be done")
+parser.add_argument("-tests_directory", help="path to managed test artifacts directory")
+parser.add_argument("-max_size", help="Max size of each partition in MB")
+
+is_windows = platform.system() == "Windows"
+
+native_binaries_to_ignore = [
+    "clrcompression.dll",
+    "clretwrc.dll",
+    "clrgc.dll",
+    "clrjit.dll",
+    "clrjit_unix_arm_x64.dll",
+    "clrjit_unix_arm64_x64.dll",
+    "clrjit_unix_x64_x64.dll",
+    "clrjit_win_arm_x64.dll",
+    "clrjit_win_arm64_x64.dll",
+    "clrjit_win_x64_x64.dll",
+    "clrjit_win_x86_x64.dll",
+    "coreclr.dll",
+    "CoreConsole.exe",
+    "coredistools.dll",
+    "CoreRun.exe",
+    "CoreShim.dll",
+    "createdump.exe",
+    "crossgen.exe",
+    "dbgshim.dll",
+    "ilasm.exe",
+    "ildasm.exe",
+    "jitinterface_x64.dll",
+    "linuxnonjit.dll",
+    "mcs.exe",
+    "mscordaccore.dll",
+    "mscordbi.dll",
+    "mscorrc.dll",
+    "protononjit.dll",
+    "superpmi.exe",
+    "superpmi-shim-collector.dll",
+    "superpmi-shim-counter.dll",
+    "superpmi-shim-simple.dll",
+]
+
+
+def setup_args(args):
+    """ Set up the args for SuperPMI to use.
+
+    Args:
+        args (ArgParse): args parsed by the arg parser
+
+    Returns:
+        args (CoreclrArguments)
+    """
+    coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
+                                    require_built_test_dir=False, default_build_type="Checked")
+
+    coreclr_args.verify(args,
+                        "source_directory",
+                        lambda source_directory: os.path.isdir(source_directory),
+                        "source_directory doesn't exist")
+
+    coreclr_args.verify(args,
+                        "core_root_directory",
+                        lambda core_root_directory: os.path.isdir(core_root_directory),
+                        "core_root_directory doesn't exist")
+
+    coreclr_args.verify(args,
+                        "arch",
+                        lambda unused: True,
+                        "Unable to set arch")
+
+    coreclr_args.verify(args,
+                        "mch_file_tag",
+                        lambda unused: True,
+                        "Unable to set mch_file_tag")
+
+    coreclr_args.verify(args,
+                        "libraries_directory",
+                        lambda libraries_directory: os.path.isdir(libraries_directory),
+                        "libraries_directory doesn't exist")
+
+    coreclr_args.verify(args,
+                        "tests_directory",
+                        lambda tests_directory: os.path.isdir(tests_directory),
+                        "tests_directory doesn't exist")
+
+    coreclr_args.verify(args,
+                        "max_size",
+                        lambda max_size: max_size > 0,
+                        "Please enter a valid positive numeric max_size",
+                        modify_arg=lambda max_size: int(
+                            max_size) * 1000 * 1000 if max_size is not None and max_size.isnumeric() else 0
+                        # Convert from MB to bytes
+                        )
+    return coreclr_args
+
+
+def get_files_sorted_by_size(src_directory, exclude_directories, exclude_files):
+    """ For a given src_directory, returns all the .dll and .exe files sorted by size (descending).
+
+    Args:
+        src_directory (string): Path of directory to enumerate.
+        exclude_directories ([string]): Directory names to exclude.
+        exclude_files ([string]): File names to exclude.
+    """
+
+    def sorter_by_size(pair):
+        """ Sorts the (file_name, file_size) tuples in descending order of file_size.
+
+        Args:
+            pair ([(string, int)]): List of (file_name, file_size) tuples.
+        """
+        pair.sort(key=lambda x: x[1], reverse=True)
+        return pair
+
+    filename_with_size = []
+
+    for file_path, dirs, files in walk(src_directory, topdown=True):
+        # Credit: https://stackoverflow.com/a/19859907
+        dirs[:] = [d for d in dirs if d not in exclude_directories]
+        for name in files:
+            if name in exclude_files:
+                continue
+            curr_file_path = path.join(file_path, name)
+
+            if not isfile(curr_file_path):
+                continue
+            if not name.endswith(".dll") and not name.endswith(".exe"):
+                continue
+
+            size = getsize(curr_file_path)
+            filename_with_size.append((curr_file_path, size))
+
+    return sorter_by_size(filename_with_size)
+
+
+def first_fit(sorted_by_size, max_size):
+    """ Given a list of file names along with sizes in descending order, divides the files
+    into a number of buckets such that no bucket exceeds max_size. Since this is a first-fit
+    approach, it doesn't guarantee finding the bucket with the tightest spot available.
+
+    Args:
+        sorted_by_size ((string, int)): (file_name, file_size) tuples
+        max_size (int): Maximum size (in bytes) of each bucket.
+
+    Returns:
+        [{int, [string]}]: Returns a dictionary mapping each partition index to the list of files falling in that bucket.
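+
+        Example (illustrative sizes, not from a real run): with max_size=100 and file sizes
+        [60, 50, 40, 30], the result is {0: [60, 30], 1: [50, 40]}: 60 opens bucket 0; 50
+        doesn't fit there (60+50 >= 100) and opens bucket 1; 40 fits in bucket 1; 30 fits
+        back in bucket 0.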
+ """ + partitions = {} + for curr_file in sorted_by_size: + _, file_size = curr_file + + # Find the right bucket + found_bucket = False + + if file_size < max_size: + for p_index in partitions: + total_in_curr_par = sum(n for _, n in partitions[p_index]) + if (total_in_curr_par + file_size) < max_size: + partitions[p_index].append(curr_file) + found_bucket = True + break + + if not found_bucket: + partitions[len(partitions)] = [curr_file] + + total_size = 0 + for p_index in partitions: + partition_size = sum(n for _, n in partitions[p_index]) + print("Partition {0}: {1} bytes.".format(p_index, partition_size)) + total_size += partition_size + print("Total {0} partitions with {1} bytes.".format(str(len(partitions)), total_size)) + + return partitions + + +def run_command(command_to_run, _cwd=None): + """ Runs the command. + + Args: + command_to_run ([string]): Command to run along with arguments. + _cmd (string): Current working directory. + """ + print("Running: " + " ".join(command_to_run)) + with subprocess.Popen(command_to_run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_cwd) as proc: + stdout, stderr = proc.communicate() + if len(stdout) > 0: + print(stdout.decode("utf-8")) + if len(stderr) > 0: + print(stderr.decode("utf-8")) + + +def copy_directory(src_path, dst_path): + """Copies directory in 'src_path' to 'dst_path' maintaining the directory + structure. https://docs.python.org/3.5/library/shutil.html#shutil.copytree can't + be used in this case because it expects the destination directory should not + exist, however we do call copy_directory() to copy files to same destination directory. + + It only copied *.dll, *.exe and *.py files. + + Args: + src_path (string): Path of source directory that need to be copied. + dst_path (string): Path where directory should be copied. + """ + if not os.path.exists(dst_path): + os.makedirs(dst_path) + for item in os.listdir(src_path): + src_item = os.path.join(src_path, item) + dst_item = os.path.join(dst_path, item) + if os.path.isdir(src_item): + copy_directory(src_item, dst_item) + else: + should_copy_file = dst_item.endswith('.dll') or dst_item.endswith('.py') + if is_windows: + should_copy_file = should_copy_file or dst_item.endswith('.exe') + else: + should_copy_file = should_copy_file or dst_item.endswith('.so') or item.find(".") == -1 + if not should_copy_file: + continue + shutil.copy2(src_item, dst_item) + + +def copy_files(src_path, dst_path, file_names): + """Copy files from 'file_names' list from 'src_path' to 'dst_path'. + It retains the original directory structure of src_path. + + Args: + src_path (string): Source directory from where files are copied. + dst_path (string): Destination directory where files to be copied. + file_names ([string]): List of full path file names to be copied. + """ + + print('### Copying below files to {0}:'.format(dst_path)) + print('') + print(file_names) + for f in file_names: + # Create same structure in dst so we don't clobber same files names present in different directories + dst_path_of_file = f.replace(src_path, dst_path) + + dst_directory = path.dirname(dst_path_of_file) + if not os.path.exists(dst_directory): + os.makedirs(dst_directory) + shutil.copy2(f, dst_path_of_file) + + +def partition_files(src_directory, dst_directory, max_size, exclude_directories=[], exclude_files=native_binaries_to_ignore): + """ Copy bucketized files based on size to destination folder. + + Args: + src_directory (string): Source folder containing files to be copied. 
+        dst_directory (string): Destination folder where files should be copied.
+        max_size (int): Maximum partition size in bytes.
+        exclude_directories ([string]): List of folder names to be excluded.
+        exclude_files ([string]): List of file names to be excluded.
+    """
+
+    sorted_by_size = get_files_sorted_by_size(src_directory, exclude_directories, exclude_files)
+    partitions = first_fit(sorted_by_size, max_size)
+
+    index = 0
+    for p_index in partitions:
+        file_names = [curr_file[0] for curr_file in partitions[p_index]]
+        curr_dst_path = path.join(dst_directory, str(index), "binaries")
+        copy_files(src_directory, curr_dst_path, file_names)
+        index += 1
+
+
+def set_pipeline_variable(name, value):
+    """ Sets a pipeline variable.
+
+    Args:
+        name (string): Name of the variable.
+        value (string): Value of the variable.
+    """
+    define_variable_format = "##vso[task.setvariable variable={0}]{1}"
+    print("{0} -> {1}".format(name, value))  # logging
+    print(define_variable_format.format(name, value))  # set variable
+
+
+def main(main_args):
+    """ Main entrypoint.
+
+    Args:
+        main_args ([str]): Arguments to the script.
+    """
+    coreclr_args = setup_args(main_args)
+    source_directory = coreclr_args.source_directory
+
+    # CorrelationPayload directories
+    correlation_payload_directory = path.join(coreclr_args.source_directory, "payload")
+    superpmi_src_directory = path.join(source_directory, 'src', 'coreclr', 'scripts')
+    superpmi_dst_directory = path.join(correlation_payload_directory, "superpmi")
+    arch = coreclr_args.arch
+    helix_source_prefix = "official"
+    creator = ""
+    ci = True
+    if is_windows:
+        helix_queue = "Windows.10.Arm64" if arch == "arm64" else "Windows.10.Amd64"
+    else:
+        if arch == "arm":
+            helix_queue = "(Ubuntu.1804.Arm32)Ubuntu.1804.Armarch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm32v7-bfcd90a-20200121150440"
+        elif arch == "arm64":
+            helix_queue = "(Ubuntu.1804.Arm64)Ubuntu.1804.ArmArch@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-helix-arm64v8-a45aeeb-20190620155855"
+        else:
+            helix_queue = "Ubuntu.1804.Amd64"
+
+    # Create the superpmi directory
+    print('Copying {} -> {}'.format(superpmi_src_directory, superpmi_dst_directory))
+    copy_directory(superpmi_src_directory, superpmi_dst_directory)
+    print('Copying {} -> {}'.format(coreclr_args.core_root_directory, superpmi_dst_directory))
+    copy_directory(coreclr_args.core_root_directory, superpmi_dst_directory)
+
+    # Clone and build jitutils
+    try:
+        with tempfile.TemporaryDirectory() as jitutils_directory:
+            run_command(
+                ["git", "clone", "--quiet", "--depth", "1", "https://github.com/dotnet/jitutils", jitutils_directory])
+            # Set dotnet path to run bootstrap
+            os.environ["PATH"] = path.join(source_directory, ".dotnet") + os.pathsep + os.environ["PATH"]
+            bootstrap_file = "bootstrap.cmd" if is_windows else "bootstrap.sh"
+            run_command([path.join(jitutils_directory, bootstrap_file)], jitutils_directory)
+
+            copy_files(path.join(jitutils_directory, "bin"), superpmi_dst_directory, [path.join(jitutils_directory, "bin", "pmi.dll")])
+    except PermissionError as pe_error:
+        # Details: https://bugs.python.org/issue26660
+        print('Ignoring PermissionError: {0}'.format(pe_error))
+
+    # Workitem directories
+    workitem_directory = path.join(source_directory, "workitem")
+    pmiassemblies_directory = path.join(workitem_directory, "pmiAssembliesDirectory")
+
+    # libraries
+    libraries_artifacts = path.join(pmiassemblies_directory, "Core_Root")
+    partition_files(coreclr_args.libraries_directory, libraries_artifacts, coreclr_args.max_size)
+
+    # tests
+    tests_artifacts = path.join(pmiassemblies_directory, "Tests")
+    # TODO: Disable SPMI for P1 tests
+    # partition_files(coreclr_args.tests_directory, tests_artifacts, coreclr_args.max_size, ["Core_Root"])
+
+    # Set variables
+    print('Setting pipeline variables:')
+    set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
+    set_pipeline_variable("WorkItemDirectory", workitem_directory)
+    set_pipeline_variable("LibrariesArtifacts", libraries_artifacts)
+    set_pipeline_variable("TestsArtifacts", tests_artifacts)
+    if is_windows:
+        set_pipeline_variable("Python", "py -3")
+    else:
+        set_pipeline_variable("Python", "python3")
+    set_pipeline_variable("Architecture", arch)
+    set_pipeline_variable("Creator", creator)
+    set_pipeline_variable("Queue", helix_queue)
+    set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
+    set_pipeline_variable("MchFileTag", coreclr_args.mch_file_tag)
+
+
+################################################################################
+# __main__
+################################################################################
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    sys.exit(main(args))
diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py
index bc8d75c6b97b5..f53ebd37f0a02 100755
--- a/src/coreclr/scripts/superpmi.py
+++ b/src/coreclr/scripts/superpmi.py
@@ -184,7 +184,8 @@
 parser = argparse.ArgumentParser(description=description)
 
-subparsers = parser.add_subparsers(dest='mode', required=True, help="Command to invoke")
+subparsers = parser.add_subparsers(dest='mode', help="Command to invoke")
+subparsers.required = True
 
 # Create a parser for core_root. It can be specified directly,
 # or computed from the script location and host OS, architecture, and build type:
@@ -534,7 +535,7 @@ def __init__(self, items, subproc_count=multiprocessing.cpu_count(), verbose=Fal
 
         if 'win32' in sys.platform:
             # Windows specific event-loop policy & cmd
-            asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
+            asyncio.set_event_loop(asyncio.ProactorEventLoop())
 
     async def __get_item__(self, item, index, size, async_callback, *extra_args):
         """ Wrapper to the async callback which will schedule based on the queue
@@ -593,7 +594,8 @@ def run_to_completion(self, async_callback, *extra_args):
         """
 
         reset_env = os.environ.copy()
-        asyncio.run(self.__run_to_completion__(async_callback, *extra_args))
+        loop = asyncio.get_event_loop()
+        loop.run_until_complete(self.__run_to_completion__(async_callback, *extra_args))
         os.environ.update(reset_env)
 
 ################################################################################