PROJECT_NAME := Pulumi Node.JS SDK
NODE_MODULE_NAME := @pulumi/pulumi
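
# The version comes from PULUMI_VERSION when set, otherwise from the shared pulumi-version.sh script.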
VERSION := $(if ${PULUMI_VERSION},${PULUMI_VERSION},$(shell ../../scripts/pulumi-version.sh javascript))
LANGHOST_PKG := github.com/pulumi/pulumi/sdk/nodejs/cmd/pulumi-language-nodejs/v3
TEST_FAST_TIMEOUT := 2m
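
# Print the resolved version when DEBUG is enabled.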
ifeq ($(DEBUG),"true")
$(info VERSION = $(VERSION))
endif
# Motivation: running `make TEST_ALL_DEPS= test_all` permits running
# `test_all` without the dependencies.
TEST_ALL_DEPS ?= build
include ../../build/common.mk
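
# Put the project-local yarn bin directory first on PATH so locally installed tools are preferred.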
export PATH:=$(shell yarn bin 2>/dev/null):$(PATH)
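
# Restore Node dependencies; yarn.ensure and node.ensure are expected to be provided by the shared ../../build/common.mk.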
ensure:: yarn.ensure node.ensure .ensure.phony
.ensure.phony: package.json
	yarn install --frozen-lockfile
	@touch .ensure.phony
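
# Format TypeScript sources in place with Biome.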
format:: ensure
	yarn biome format --write .
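
# Lint with ESLint and check formatting with Biome.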
lint:: ensure
	yarn run eslint -c .eslintrc.js --ext .ts .
	yarn biome ci .
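
# Compile the TypeScript SDK into bin/ and stage the assets (README, LICENSE, dist scripts, protos, test data) alongside it.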
build_package:: ensure
	yarn run tsc
	mkdir -p bin/tests/automation/data/
	cp -R tests/automation/data/. bin/tests/automation/data/
	cp README.md ../../LICENSE ./dist/* bin/
	cp -R ./vendor/ ./bin/vendor/
	node ../../scripts/reversion.js bin/package.json ${VERSION}
	node ../../scripts/reversion.js bin/version.js ${VERSION}
	mkdir -p bin/proto
	cp -R proto/. bin/proto/
	mkdir -p bin/tests/runtime/langhost/cases/
	find tests/runtime/langhost/cases/* -type d -exec cp -R {} bin/tests/runtime/langhost/cases/ \;
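
# Build the Node.js language host plugin, stamping the version via -ldflags.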
build_plugin::
	cd cmd/pulumi-language-nodejs && \
		go install -ldflags "-X github.com/pulumi/pulumi/sdk/v3/go/common/version.Version=${VERSION}" ${LANGHOST_PKG}
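
# Build both the npm package and the language plugin.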
build:: build_package build_plugin
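
# Copy the pulumi-resource-pulumi-nodejs and pulumi-analyzer-policy scripts into $(PULUMI_BIN).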
install_package:: build
	cp dist/pulumi-resource-pulumi-nodejs* "$(PULUMI_BIN)"
	cp dist/pulumi-analyzer-policy* "$(PULUMI_BIN)"
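
# Install the language plugin into $(PULUMI_BIN).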
install_plugin:: build
	cd cmd/pulumi-language-nodejs && \
		GOBIN=$(PULUMI_BIN) go install -ldflags "-X github.com/pulumi/pulumi/sdk/v3/go/common/version.Version=${VERSION}" ${LANGHOST_PKG}
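
# Install both the package scripts and the language plugin.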
install:: install_package install_plugin
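
# Run the mocha unit tests under nyc; automation API and closure integration tests are excluded here and run via their own targets below.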
unit_tests:: $(TEST_ALL_DEPS)
	yarn run nyc -s mocha --timeout 120000 \
		--exclude 'bin/tests/automation/**/*.spec.js' \
		--exclude 'bin/tests/runtime/closure-integration-tests.js' \
		'bin/tests/**/*.spec.js'
	yarn run nyc -s mocha 'bin/tests_with_mocks/**/*.spec.js'
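
# Automation API tests; these can take several minutes, hence the generous per-test timeout.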
test_auto:: $(TEST_ALL_DEPS)
	yarn run nyc -s mocha --timeout 300000 'bin/tests/automation/**/*.spec.js'
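
# Integration tests that run against the compiled output in bin/.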
test_integration:: $(TEST_ALL_DEPS)
	node 'bin/tests/runtime/closure-integration-tests.js'
	node 'bin/tests/runtime/install-package-tests.js'
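
# Side-by-side (sxs) TypeScript compatibility checks: each entry in TSC_SUPPORTED_VERSIONS maps to an
# sxs_test_<version> target that compiles tests/sxs_ts_test against that TypeScript version
# (e.g. `make 'sxs_test_^4'`).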
TSC_SUPPORTED_VERSIONS = ~3.8.3 ^3 ^4
version=$(subst sxs_test_,,$(word 1,$(subst !, ,$@)))
sxs_test_%:
	@cd tests/sxs_ts_test && ( \
		cp -f package$(version).json package.json && \
		yarn install && \
		yarn run tsc --version && \
		yarn run tsc && \
		rm package.json && \
		echo "✅ TypeScript $(version) passed" \
	) || ( \
		echo "🚫 TypeScript $(version) failed"; \
		exit 1; \
	)
sxs_tests:: $(TSC_SUPPORTED_VERSIONS:%=sxs_test_%)
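
# Fast test suite: sxs and unit tests plus the quick Go tests; emits a cobertura report when PULUMI_TEST_COVERAGE_PATH is set.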
test_fast:: sxs_tests unit_tests
ifneq ($(PULUMI_TEST_COVERAGE_PATH),)
	if [ -e .nyc_output ]; then yarn run nyc report -r cobertura --report-dir $(PULUMI_TEST_COVERAGE_PATH); fi
endif
	@cd cmd/pulumi-language-nodejs && $(GO_TEST_FAST) $(shell go list ./... | grep -v /vendor/ | grep -v templates)
	$(GO_TEST_FAST) $(shell go list ./...)
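
# Go tests for the language host and the Go packages in this directory.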
test_go:: $(TEST_ALL_DEPS)
	@cd cmd/pulumi-language-nodejs && $(GO_TEST) $(shell go list ./... | grep -v /vendor/ | grep -v templates)
	$(GO_TEST) $(shell go list ./...)
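
# Full test suite.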
test_all:: sxs_tests unit_tests test_auto test_go test_integration
ifneq ($(PULUMI_TEST_COVERAGE_PATH),)
	if [ -e .nyc_output ]; then yarn run nyc report -r cobertura --report-dir $(PULUMI_TEST_COVERAGE_PATH); fi
endif
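
# Copy the helper scripts next to the Go-installed binaries in $(GOPATH)/bin.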
dist:: build
	cp dist/pulumi-resource-pulumi-nodejs "$$(go env GOPATH)"/bin/
	cp dist/pulumi-analyzer-policy "$$(go env GOPATH)"/bin/
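
# Build the language plugin and copy the helper scripts into $(GOPATH)/bin (used by the Homebrew-based install, judging by the target name).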
brew:: BREW_VERSION := $(shell ../../scripts/get-version HEAD)
brew::
	cd cmd/pulumi-language-nodejs && \
		go install -ldflags "-X github.com/pulumi/pulumi/sdk/v3/go/common/version.Version=${VERSION}" ${LANGHOST_PKG}
	cp dist/pulumi-resource-pulumi-nodejs "$$(go env GOPATH)"/bin/
	cp dist/pulumi-analyzer-policy "$$(go env GOPATH)"/bin/
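
# Publish the package to npm via the shared publish script.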
publish::
	bash -c ../../scripts/publish_npm.sh