Updated .github/actionlint.yaml to replace linux.rocm.gpu.mi300.2 with linux.rocm.gpu.mi300.1 in the supported runner list. Modified all affected workflows (inductor-perf-test-nightly-rocm.yml, inductor-periodic.yml, inductor-rocm-mi300.yml, and rocm-mi300.yml) to run jobs on 1-GPU MI300 runners instead of 2-GPU runners. This should help increase the number of available runners even with the same number of CI nodes.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158882
Approved by: https://github.com/jeffdaily
Co-authored-by: Jeff Daily <jeff.daily@amd.com>
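In practice, the workflow change amounts to a runner-label swap in each job's test matrix. A minimal sketch of the pattern described above; the config name and shard numbers are illustrative, not copied from any of the modified files:

# Before: each shard reserved a 2-GPU MI300 runner
# - { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.mi300.2" }
# After: the same shard runs on a 1-GPU MI300 runner, so the same CI nodes expose more runner slots
- { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.mi300.1" }

Note that the test matrix in the file below currently targets linux.rocm.gpu.gfx942.1 runners; the single-GPU (".1") suffix is the part this change is about.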
129 lines · 5.8 KiB · YAML
name: inductor-perf-nightly-rocm

on:
  push:
    tags:
      - ciflow/inductor-perf-test-nightly-rocm/*
  schedule:
    - cron: 0 7 * * 0,3
  # NB: GitHub has an upper limit of 10 inputs here, so before we can sort it
  # out, let try to run torchao cudagraphs_low_precision as part of cudagraphs
  workflow_dispatch:
    inputs:
      training:
        description: Run training (on by default)?
        required: false
        type: boolean
        default: true
      inference:
        description: Run inference (on by default)?
        required: false
        type: boolean
        default: true
      default:
        description: Run inductor_default?
        required: false
        type: boolean
        default: false
      dynamic:
        description: Run inductor_dynamic_shapes?
        required: false
        type: boolean
        default: false
      cppwrapper:
        description: Run inductor_cpp_wrapper?
        required: false
        type: boolean
        default: false
      cudagraphs:
        description: Run inductor_cudagraphs?
        required: false
        type: boolean
        default: true
      freezing_cudagraphs:
        description: Run inductor_cudagraphs with freezing for inference?
        required: false
        type: boolean
        default: false
      aotinductor:
        description: Run aot_inductor for inference?
        required: false
        type: boolean
        default: false
      maxautotune:
        description: Run inductor_max_autotune?
        required: false
        type: boolean
        default: false
      benchmark_configs:
        description: The list of configs used the benchmark
        required: false
        type: string
        default: inductor_huggingface_perf_rocm,inductor_timm_perf_rocm,inductor_torchbench_perf_rocm

concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

permissions: read-all

jobs:
  get-label-type:
    name: get-label-type
    uses: pytorch/pytorch/.github/workflows/_runner-determinator.yml@main
    if: ${{ (github.event_name != 'schedule' || github.repository == 'pytorch/pytorch') && github.repository_owner == 'pytorch' }}
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}
      opt_out_experiments: lf

  linux-jammy-rocm-py3_10-inductor-benchmark-build:
    if: github.repository_owner == 'pytorch'
    name: rocm-py3_10-inductor-benchmark-build
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "inductor_huggingface_perf_rocm", shard: 1, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 2, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 3, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_huggingface_perf_rocm", shard: 4, num_shards: 4, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 1, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 2, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 3, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 4, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_timm_perf_rocm", shard: 5, num_shards: 5, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 1, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 2, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 3, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 4, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 5, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 6, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 7, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
          { config: "inductor_torchbench_perf_rocm", shard: 8, num_shards: 8, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit

  linux-jammy-rocm-py3_10-inductor-benchmark-test:
    permissions:
      id-token: write
      contents: read
    name: rocm-py3_10-inductor-benchmark-test
    uses: ./.github/workflows/_rocm-test.yml
    needs: linux-jammy-rocm-py3_10-inductor-benchmark-build
    with:
      build-environment: linux-jammy-rocm-py3_10
      dashboard-tag: training-true-inference-true-default-true-dynamic-true-cudagraphs-true-cppwrapper-true-aotinductor-true-freezing_cudagraphs-true-cudagraphs_low_precision-true
      docker-image: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-jammy-rocm-py3_10-inductor-benchmark-build.outputs.test-matrix }}
      timeout-minutes: 720
      # Disable monitor in perf tests for more investigation
      disable-monitor: true
      monitor-log-interval: 10
      monitor-data-collect-interval: 2
    secrets: inherit