pulumi/scripts/get-job-matrix.py

#!/usr/bin/env python3
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=missing-class-docstring
"""
Compute a GitHub Actions job matrix, or in the case of build and lint jobs, a set of versions to
build and produce.
Uses `gotestsum tool ci-matrix` to divide up Go packages into partitions to reduce execution time.
"""
import argparse
import itertools
import json
import os
import subprocess as sp
import sys
from dataclasses import dataclass
from enum import Enum
from pprint import pformat
from typing import Any, Dict, List, Optional, Set, TypedDict, Union
global_verbosity = 0
VersionSet = Dict[str, str]

class JobKind(str, Enum):
    """Output kinds supported with this utility."""

    INTEGRATION_TEST = "integration-test"
    ACCEPTANCE_TEST = "acceptance-test"
    UNIT_TEST = "unit-test"
    ALL_TEST = "all-test"

@dataclass
class PartitionModule:
    """Go modules to partition into jobs by package."""

    module_dir: str
    partitions: int

@dataclass
class PartitionPackage:
    """Go packages to subdivide into jobs by test."""

    package: str
    package_dir: str
    partitions: int
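
# Illustrative mapping from CLI flags to the dataclasses above (the values are examples only):
#   --partition-module pkg 3              -> PartitionModule(module_dir="pkg", partitions=3)
#   --partition-package <go-pkg> <dir> 2  -> PartitionPackage(package=<go-pkg>, package_dir=<dir>, partitions=2)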

INTEGRATION_TEST_PACKAGES = {
    "github.com/pulumi/pulumi/pkg/v3/cmd/pulumi",
    "github.com/pulumi/pulumi/pkg/v3/codegen/testing/utils",
    "github.com/pulumi/pulumi/pkg/v3/graph/dotconv",
    "github.com/pulumi/pulumi/pkg/v3/testing/integration",
    "github.com/pulumi/pulumi/sdk/v3/go/auto",
    "github.com/pulumi/pulumi/sdk/v3/go/auto/debug",
    "github.com/pulumi/pulumi/sdk/v3/go/auto/optdestroy",
    "github.com/pulumi/pulumi/sdk/v3/go/auto/optremove",
    "github.com/pulumi/pulumi/sdk/v3/go/common/constant",
    "github.com/pulumi/pulumi/sdk/v3/go/common/util/retry",
    "github.com/pulumi/pulumi/sdk/v3/nodejs/npm",
    # And the entirety of the 'tests' module
}

def is_unit_test(pkg: str) -> bool:
    """Checks if the package is a unit test"""
    return not (
        pkg.startswith("github.com/pulumi/pulumi/tests")
        or pkg in INTEGRATION_TEST_PACKAGES
    )

# Keep this in sync with filters defined in .github/workflows/on-pr.yml.
CODEGEN_TEST_PACKAGES = {
    "github.com/pulumi/pulumi/pkg/v3/codegen/docs",
    "github.com/pulumi/pulumi/pkg/v3/codegen/dotnet",
    "github.com/pulumi/pulumi/pkg/v3/codegen/go",
    "github.com/pulumi/pulumi/pkg/v3/codegen/nodejs",
    "github.com/pulumi/pulumi/pkg/v3/codegen/python",
}

def is_codegen_test(pkg: str) -> bool:
    """Checks if a package is a per-language codegen test"""
    if pkg in CODEGEN_TEST_PACKAGES:
        return True
    for codegen_pkg in CODEGEN_TEST_PACKAGES:
        if pkg.startswith(codegen_pkg + "/"):
            return True
    return False

class MakefileTest(TypedDict):
    name: str
    run: str
    eta: int

MAKEFILE_INTEGRATION_TESTS: List[MakefileTest] = [
    {"name": "sdk/nodejs test_auto", "run": "cd sdk/nodejs && ../../scripts/retry make test_auto", "eta": 3},
    {"name": "sdk/nodejs unit_tests", "run": "cd sdk/nodejs && ../../scripts/retry make unit_tests", "eta": 4},
{"name": "sdk/nodejs test_integration", "run": "cd sdk/nodejs && ../../scripts/retry make test_integration", "eta": 3},
{"name": "sdk/python test_auto", "run": "cd sdk/python && ../../scripts/retry make test_auto", "eta": 6},
{"name": "sdk/python test_fast", "run": "cd sdk/python && ../../scripts/retry make test_fast", "eta": 3},
]
MAKEFILE_ACCEPTANCE_TESTS: List[MakefileTest] = [
{"name": "sdk/nodejs test_integration", "run": "cd sdk/nodejs && ../../scripts/retry make test_integration", "eta": 3},
]

MAKEFILE_UNIT_TESTS: List[MakefileTest] = [
    {"name": "sdk/nodejs sxs_tests", "run": "cd sdk/nodejs && ../../scripts/retry make sxs_tests", "eta": 3},
]

ALL_PLATFORMS = ["ubuntu-latest", "windows-latest", "macos-latest"]

MINIMUM_SUPPORTED_VERSION_SET = {
    "name": "minimum",
    "dotnet": "6",
    "go": "1.21.x",
    "nodejs": "18.x",
    "python": "3.8.x",
}

CURRENT_VERSION_SET = {
    "name": "current",
    "dotnet": "8",
    "go": "1.22.x",
    "nodejs": "21.x",
"python": "3.12.x",
}

def run_list_packages(module_dir: str, tags: List[str]) -> Set[str]:
    """Runs go list on pkg, sdk, and tests"""
    try:
        cmd = sp.run(
            ["go", "list", "-tags", " ".join(tags), "-find", "./..."],
            cwd=module_dir,
            check=True,
            capture_output=True,
            text=True,
        )
    except sp.CalledProcessError as err:
        message = f"Failed to list packages in module at path '{module_dir}', usually this implies a Go compilation error. Check that `make lint` succeeds."
        print(f"::error {message}", file=sys.stderr)
        raise Exception(message) from err

    return set(cmd.stdout.split())

def run_list_tests(pkg_dir: str, tags: List[str]) -> List[str]:
    """Runs `go test --list` on a given package."""
    # This Go command is finicky. It must be run from inside the directory being queried, with '.'
    # as the path argument, and the output is unstructured, mixing diagnostics & test names on
    # stdout. The output typically looks like this:
    #
    # ```sh
    # $ go test --list .
    # TestStackTagValidation
    # ...
    # TestPassphrasePrompting
    # ok github.com/pulumi/pulumi/tests/integration 0.093s
    # ```
    #
    # That last summary line is emitted on stdout as well, so we stop reading once we hit the "ok"
    # token.
    #
    # Neither relative paths nor package paths will work, as shown below:
    #
    # ```sh
    # $ go test -tags all --list github.com/pulumi/pulumi/tests/integration
    # no Go files in /home/friel/c/github.com/pulumi/pulumi
    #
    # $ go test -tags all --list ./tests/integration
    # no Go files in /home/friel/c/github.com/pulumi/pulumi
    # ```
    try:
        cmd = sp.run(
            ["go", "test", "-tags", " ".join(tags), "--list", "."],
            check=True,
            cwd=pkg_dir,
            capture_output=True,
            text=True,
        )
    except sp.CalledProcessError as err:
        message = f"Failed to list tests in package dir '{pkg_dir}', usually this implies a Go compilation error. Check that `make lint` succeeds."
        print(f"::error {message}", file=sys.stderr)
        raise Exception(message) from err

    tests: List[str] = []
    for line in cmd.stdout.split():
        if line.startswith("ok"):
            break
        tests.append(line)
    return tests

class GotestsumInclude(TypedDict):
    """Job entry from `gotestsum tool ci-matrix`"""

    id: int
    estimatedRuntime: str
    packages: str
    tests: Optional[str]
    description: str

class GotestsumOutput(TypedDict):
    """Type of value returned via `gotestsum tool ci-matrix`"""

    include: List[GotestsumInclude]

class TestSuite(TypedDict):
    """Commands passed to jobs"""

    name: str
    command: str
Matrix = TypedDict("Matrix", {
"test-suite": List[TestSuite],
"platform": List[str],
"version-set": VersionSet,
})
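
# Illustrative (abbreviated) shape of the matrix JSON printed by the generate-matrix subcommand:
#   {
#     "test-suite": [{"name": "pkg 1/3", "command": "..."}, ...],
#     "platform": ["ubuntu-latest", ...],
#     "version-set": [{"name": "current", "dotnet": "8", "go": "1.22.x", ...}]
#   }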

def run_gotestsum_ci_matrix_packages(go_packages: List[str], partition_module: PartitionModule, tags: List[str]) -> List[TestSuite]:
    """Runs `gotestsum tool ci-matrix` to compute Go test partitions"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    test_reports_dir = os.path.join(script_dir, "..", "test-results")
    os.makedirs(test_reports_dir, exist_ok=True)

    if partition_module.partitions == 1:
        pkgs = " ".join(go_packages)
        return [{
            "name": f"{partition_module.module_dir}",
            "command": f'GO_TEST_TAGS="{" ".join(tags)}" PKGS="{pkgs}" ./scripts/retry make gotestsum/{partition_module.module_dir}'
        }]

    gotestsum_matrix_args = [
        "gotestsum",
        "tool",
        "ci-matrix",
        "--partitions",
        f"{partition_module.partitions}",
        "--timing-files",
        f"{test_reports_dir}/*.json",
        "--debug",
    ]
    try:
        cmd = sp.run(
            gotestsum_matrix_args,
            input="\n".join(go_packages),
            check=True,
            capture_output=True,
            text=True,
        )
    except sp.CalledProcessError as err:
        raise Exception(f"Failed to run gotestsum ci-matrix: {err.stderr}") from err

    if global_verbosity >= 3:
        print(cmd.stderr, file=sys.stderr)

    gotestsum_matrix: GotestsumOutput = json.loads(cmd.stdout)
    if global_verbosity >= 3:
        print(pformat(gotestsum_matrix), file=sys.stderr)

    matrix_jobs = gotestsum_matrix["include"]
    buckets_len = len(f"{len(matrix_jobs)}")
    test_suites: List[TestSuite] = []
    for idx, include in enumerate(matrix_jobs):
        idx_str = f"{idx+1}".zfill(buckets_len)
        test_command = f'GO_TEST_TAGS="{" ".join(tags)}" PKGS="{include["packages"]}" ./scripts/retry make gotestsum/{partition_module.module_dir}'
        if global_verbosity >= 1:
            print(test_command, file=sys.stderr)
        test_suites.append(
            {
                "name": f"{partition_module.module_dir} {idx_str}/{partition_module.partitions}",
                "command": test_command,
            }
        )
    return test_suites

def run_gotestsum_ci_matrix_single_package(
    partition_pkg: PartitionPackage, tests: List[str], tags: List[str]
) -> List[TestSuite]:
    """Runs `gotestsum tool ci-matrix` to compute Go test partitions for a single package"""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    test_reports_dir = os.path.join(script_dir, "..", "test-results")
    os.makedirs(test_reports_dir, exist_ok=True)

    gotestsum_matrix_args = [
        "gotestsum",
        "tool",
        "ci-matrix",
        "--partitions",
        f"{partition_pkg.partitions}",
        "--timing-files",
        f"{test_reports_dir}/*.json",
        "--partition-tests-in-package",
        partition_pkg.package,
        "--debug",
    ]
    try:
        cmd = sp.run(
            gotestsum_matrix_args,
            input="\n".join(tests),
            check=True,
            capture_output=True,
            text=True,
        )
    except sp.CalledProcessError as err:
        raise Exception(f"Failed to run gotestsum ci-matrix: {err.stderr}") from err

    if global_verbosity >= 3:
        print(cmd.stderr, file=sys.stderr)

    gotestsum_matrix: GotestsumOutput = json.loads(cmd.stdout)
    if global_verbosity >= 3:
        print(pformat(gotestsum_matrix), file=sys.stderr)

    include = gotestsum_matrix["include"]
    buckets_len = len(f"{len(include)}")
    test_suites: List[TestSuite] = []
    for idx, include in enumerate(include):
        idx_str = f"{idx+1}".zfill(buckets_len)
        # the test list is formatted like so: -run='^TestOne$,^TestTwo$'
        # but we want: -run ^(TestOne|TestTwo)$
        test_list = include["tests"]
        if not test_list:
            continue
        test_list = test_list.replace("$,^", "|")
        test_list = test_list.replace("='^", " ^(")
        test_list = test_list.replace("$'", ")$")
        env = f'GO_TEST_TAGS="{" ".join(tags)}" PKGS="{include["packages"]}" OPTS="{test_list}"'
        test_command = f'{env} ./scripts/retry make gotestsum/{partition_pkg.package_dir}'
        if global_verbosity >= 1:
            print(test_command, file=sys.stderr)
        test_suites.append(
            {
                "name": f"{partition_pkg.package_dir} {idx_str}/{partition_pkg.partitions}",
                "command": test_command,
            }
        )
    return test_suites

# pylint: disable=too-many-arguments
def get_matrix(
    kind: JobKind,
    tags: List[str],
    partition_modules: List[PartitionModule],
    partition_packages: List[PartitionPackage],
    platforms: List[str],
    version_sets: List[VersionSet],
    fast: bool = False,
    codegen_tests: bool = False,
) -> Matrix:
    """Compute a job matrix"""
    if kind == JobKind.INTEGRATION_TEST:
        makefile_tests = MAKEFILE_INTEGRATION_TESTS
    elif kind == JobKind.UNIT_TEST:
        makefile_tests = MAKEFILE_UNIT_TESTS
    elif kind == JobKind.ACCEPTANCE_TEST:
        makefile_tests = MAKEFILE_ACCEPTANCE_TESTS
    elif kind == JobKind.ALL_TEST:
        makefile_tests = MAKEFILE_INTEGRATION_TESTS + MAKEFILE_UNIT_TESTS
    else:
        raise Exception(f"Unknown job kind {kind}")

    test_suites: List[TestSuite] = []
    for test in makefile_tests:
        if fast and test["eta"] > 5:
            continue
        test_suites.append({"name": test["name"], "command": test["run"]})

    partitioned_packages = {part.package for part in partition_packages}
    for item in partition_modules:
        go_packages = run_list_packages(item.module_dir, tags)
        go_packages = set(go_packages) - partitioned_packages
        if not codegen_tests:
            go_packages = {pkg for pkg in go_packages if not is_codegen_test(pkg)}

        if kind == JobKind.INTEGRATION_TEST or kind == JobKind.ACCEPTANCE_TEST:
            go_packages = {pkg for pkg in go_packages if not is_unit_test(pkg)}
        elif kind == JobKind.UNIT_TEST:
            go_packages = {pkg for pkg in go_packages if is_unit_test(pkg)}
        elif kind == JobKind.ALL_TEST:
            pass

        test_suites += run_gotestsum_ci_matrix_packages(list(go_packages), item, tags)

    for item in partition_packages:
        pkg_tests = run_list_tests(item.package_dir, tags)
        test_suites += run_gotestsum_ci_matrix_single_package(item, pkg_tests, tags)

    return {
        "test-suite": test_suites,
        "platform": platforms,
        "version-set": version_sets,
    }

def get_version_sets(args: argparse.Namespace):
    """Read version set arguments into valid sets"""
    version_sets: List[VersionSet] = []
    for named_version_set in args.version_set:
        if named_version_set == "minimum":
            version_sets.append(MINIMUM_SUPPORTED_VERSION_SET)
        elif named_version_set == "current":
            version_sets.append(CURRENT_VERSION_SET)
        else:
            raise argparse.ArgumentError(argument=None, message=f"Unknown version set {named_version_set}")
    for version_arg in args.versions or []:
        this_set = {**MINIMUM_SUPPORTED_VERSION_SET}
        version_arg = version_arg.split(",")
        for version in version_arg:
            lang, version = version.split("=")
            if lang not in ["dotnet", "go", "node", "python"]:
                raise argparse.ArgumentError(argument=None, message=f"Unknown language {lang}")
            this_set[lang] = version
        version_sets.append(this_set)

    return version_sets

def generate_version_set(args: argparse.Namespace):
    version_sets = get_version_sets(args)
    if len(version_sets) != 1:
        raise argparse.ArgumentError(
            argument=None,
            message=f"Exactly one version set must be specified (received {len(version_sets)}) for build and lint."
        )
    print(json.dumps(version_sets[0]))

def generate_matrix(args: argparse.Namespace):
    partition_modules: List[PartitionModule] = []
    for mod_dir, partitions in args.partition_module:
        partition_modules.append(PartitionModule(mod_dir, int(partitions)))

    partition_packages: List[PartitionPackage] = []
    for pkg, pkg_dir, partitions in args.partition_package:
        partition_packages.append(PartitionPackage(pkg, pkg_dir, int(partitions)))

    version_sets = get_version_sets(args)

    matrix = get_matrix(
        kind=args.kind,
        platforms=args.platform,
        fast=args.fast,
        tags=args.tags,
        partition_modules=partition_modules,
        partition_packages=partition_packages,
        version_sets=version_sets,
        codegen_tests=args.codegen_tests,
    )

    if not matrix["platform"] or not matrix["test-suite"] or not matrix["version-set"]:
        print('{}')  # Empty output because one of the vectors is empty.
        return

    print(json.dumps(matrix))

def add_generate_matrix_args(parser: argparse.ArgumentParser):
    parser.set_defaults(func=generate_matrix)
    parser.add_argument(
        "--kind",
        required=True,
        choices=[kind.value for kind in JobKind],
        help="Kind of output to generate",
    )
    parser.add_argument(
        "--codegen-tests",
        required=False,
        default=True,
        action=argparse.BooleanOptionalAction,  # adds --no-codegen-tests
        help="Whether to include per-language codegen tests",
    )
    parser.add_argument(
        "--fast", action="store_true", default=False, help="Exclude slow tests"
    )
    parser.add_argument(
        "--partition-module",
        action="append",
        nargs=2,
        default=[],
        metavar=("MODULE_DIR", "PARTITIONS"),
        help="Partition the tests in a single module, by module directory.",
    )
    parser.add_argument(
        "--partition-package",
        action="append",
        nargs=3,
        default=[],
        metavar=("GO_PACKAGE", "PACKAGE_DIR", "PARTITIONS"),
        help="Partition the tests in a single package, instead of by package. "
        + "Must specify a package name, the directory containing the package, "
        + "and the number of partitions to divide the tests into. Tests added "
        + "are automatically excluded from modules.",
    )
    parser.add_argument(
        "--tags",
        action="store",
        nargs="*",
        default=["all"],
        help="Go build tags",
    )
    parser.add_argument(
        "--platform",
        action="store",
        nargs="*",
        default=ALL_PLATFORMS,
        choices=ALL_PLATFORMS,
        help="Platforms to test",
    )
    parser.add_argument(
        "--version-set",
        action="store",
        nargs="*",
        default=["minimum"],
        choices=["minimum", "current"],
        help="Named set of versions to use. Defaults to minimum supported versions. Available sets: minimum, current",
    )
    default_versions = ",".join(
        [f"{lang}={version}" for lang, version in MINIMUM_SUPPORTED_VERSION_SET.items()]
    )
    parser.add_argument(
        "--versions",
        action="store",
        type=str,
        nargs="*",
        help=(
            "Set of language versions to use, in the form of lang=version,lang=version. "
            + "Spaces separate distinct sets, creating separate sets of jobs. Prefer using .x or semver ranges. "
            + "For supported version strings, see, e.g., www.github.com/actions/setup-go for each language. "
            + "Languages not included in a set use the default. "
            + f"Defaults: {default_versions}."
        ),
    )

def add_version_set_args(parser: argparse.ArgumentParser):
    parser.add_argument(
        "--version-set",
        action="store",
        nargs="*",
        default=["minimum"],
        choices=["minimum", "current"],
        help="Named set of versions to use. Defaults to minimum supported versions. Available sets: minimum, current",
    )
    default_versions = ",".join(
        [f"{lang}={version}" for lang, version in MINIMUM_SUPPORTED_VERSION_SET.items()]
    )
    parser.add_argument(
        "--versions",
        action="store",
        type=str,
        nargs="*",
        help=(
            "Set of language versions to use, in the form of lang=version,lang=version. "
            + "Spaces separate distinct sets, creating separate sets of jobs. Prefer using .x or semver ranges. "
            + "For supported version strings, see, e.g., www.github.com/actions/setup-go for each language. "
            + "Languages not included in a set use the default. "
            + f"Defaults: {default_versions}."
        ),
    )
    parser.set_defaults(func=generate_version_set)
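
# combine_matrices takes one or more matrix JSON objects (such as the output of generate-matrix),
# computes the cross product of each matrix's axes, and prints a single {"include": [...]} object
# for GitHub Actions. Illustrative example (values are made up):
#   input:  '{"platform": ["ubuntu-latest", "macos-latest"], "version-set": [{"name": "current"}]}'
#   output: {"include": [{"platform": "ubuntu-latest", "version-set": {"name": "current"}},
#                        {"platform": "macos-latest", "version-set": {"name": "current"}}]}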
def combine_matrices(args: argparse.Namespace):
    matrix_includes = []
    for json_obj in args.matrices:
        matrix: Dict[str, List[Any]] = json.loads(json_obj)
        keys = list(matrix.keys())
        combinations = list(itertools.product(*matrix.values()))
        for combination in combinations:
            include = dict(zip(keys, combination))
            matrix_includes.append(include)

    print(json.dumps({
        "include": matrix_includes
    }))

def main():
    parser = argparse.ArgumentParser(description="Generate job and version matrices")
    parser.add_argument("-v", "--verbosity", action="count", default=0, help="logging verbosity, specify multiple times for higher levels, e.g.: -vvv")
    subparsers = parser.add_subparsers()

    gen_matrix_parser = subparsers.add_parser("generate-matrix",
                                              help="Generate a matrix of jobs.")
    add_generate_matrix_args(gen_matrix_parser)

    version_set_parser = subparsers.add_parser("generate-version-set",
                                               help="Generate a version set only.")
    add_version_set_args(version_set_parser)

    combine_matrices_parser = subparsers.add_parser("combine-matrices",
                                                    help="Combine one or more matrices, computing all combinations of each and generating a list of includes.")
    combine_matrices_parser.add_argument("matrices", nargs=argparse.REMAINDER)
    combine_matrices_parser.set_defaults(func=combine_matrices)

    args = parser.parse_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        sys.exit(1)

    global global_verbosity  # pylint: disable=global-statement
    global_verbosity = args.verbosity
    args.func(args)

if __name__ == "__main__":
    main()