// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package engine

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"path/filepath"
	"sort"
	"strings"
	"sync"

	"github.com/pulumi/pulumi/pkg/v3/display"
	resourceanalyzer "github.com/pulumi/pulumi/pkg/v3/resource/analyzer"
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
	"github.com/pulumi/pulumi/sdk/v3/go/common/diag"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
	"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
	"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
)

// RequiredPolicy represents a set of policies to apply during an update.
type RequiredPolicy interface {
	// Name provides the user-specified name of the PolicyPack.
	Name() string
	// Version of the PolicyPack.
	Version() string
	// Install will install the PolicyPack locally, returning the path it was installed to.
	Install(ctx context.Context) (string, error)
	// Config returns the PolicyPack's configuration.
	Config() map[string]*json.RawMessage
}
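
// For illustration only (not part of the engine API): a minimal sketch of an
// in-memory RequiredPolicy implementation, assuming the pack is already on
// disk so Install can simply return its path. The type name is hypothetical.
//
//	type prebuiltPolicy struct {
//		name, version, path string
//		config              map[string]*json.RawMessage
//	}
//
//	func (p prebuiltPolicy) Name() string                        { return p.name }
//	func (p prebuiltPolicy) Version() string                     { return p.version }
//	func (p prebuiltPolicy) Config() map[string]*json.RawMessage { return p.config }
//	func (p prebuiltPolicy) Install(ctx context.Context) (string, error) {
//		return p.path, nil
//	}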

// LocalPolicyPack represents a set of local Policy Packs to apply during an update.
type LocalPolicyPack struct {
	// Name provides the user-specified name of the Policy Pack.
	Name string
	// Version of the local Policy Pack.
	Version string
	// Path of the local Policy Pack.
	Path string
	// Path of the local Policy Pack's JSON config file.
	Config string
}

// NameForEvents encodes a local policy pack's information in a single string which can
// be used for engine events. It is done this way so we don't lose path information.
func (pack LocalPolicyPack) NameForEvents() string {
	path := abbreviateFilePath(pack.Path)
	return fmt.Sprintf("%s|local|%s", pack.Name, path)
}

// GetLocalPolicyPackInfoFromEventName round trips the NameForEvents back into a name/path pair.
func GetLocalPolicyPackInfoFromEventName(name string) (string, string) {
	parts := strings.Split(name, "|")
	if len(parts) != 3 {
		return "", ""
	}
	return parts[0], parts[2]
}
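
// A sketch of the round trip, for illustration only; the encoded path segment
// is whatever abbreviateFilePath produces for the pack's Path:
//
//	pack := LocalPolicyPack{Name: "aws-sec", Path: "/work/policies/aws-sec"}
//	encoded := pack.NameForEvents() // e.g. "aws-sec|local|/work/policies/aws-sec"
//	name, path := GetLocalPolicyPackInfoFromEventName(encoded)
//	// name == "aws-sec", path == "/work/policies/aws-sec"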

// MakeLocalPolicyPacks is a helper function for converting the list of local Policy
// Pack paths to a list of LocalPolicyPacks. The name of the Local Policy Pack is not set
// since we must load up the Policy Pack plugin to determine its name.
func MakeLocalPolicyPacks(localPaths []string, configPaths []string) []LocalPolicyPack {
	// If we have any configPaths, we should have already validated that the length of
	// the localPaths and configPaths are the same.
	contract.Assertf(len(configPaths) == 0 || len(configPaths) == len(localPaths),
		"configPaths must be empty or match localPaths count (%d), got %d", len(localPaths), len(configPaths))

	r := make([]LocalPolicyPack, len(localPaths))
	for i, p := range localPaths {
		var config string
		if len(configPaths) > 0 {
			config = configPaths[i]
		}
		r[i] = LocalPolicyPack{
			Path:   p,
			Config: config,
		}
	}
	return r
}
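
// Illustrative usage with hypothetical paths: each pack path is paired with the
// config path at the same index, and Name/Version are filled in later once the
// policy plugin has been loaded.
//
//	packs := MakeLocalPolicyPacks(
//		[]string{"./policy/aws", "./policy/tags"},
//		[]string{"./policy/aws/config.json", "./policy/tags/config.json"},
//	)
//	// packs[0].Path == "./policy/aws", packs[0].Config == "./policy/aws/config.json"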

// ConvertLocalPolicyPacksToPaths is a helper function for converting the list of LocalPolicyPacks
// to a list of paths.
func ConvertLocalPolicyPacksToPaths(localPolicyPack []LocalPolicyPack) []string {
	r := make([]string, len(localPolicyPack))
	for i, p := range localPolicyPack {
		r[i] = p.Name
	}
	return r
}

// UpdateOptions contains all the settings for customizing how an update (deploy, preview, or destroy) is performed.
//
// This structure is embedded in another which uses some of the unexported fields, which trips up the `structcheck`
// linter.
//
//nolint:structcheck
type UpdateOptions struct {
	// LocalPolicyPacks contains an optional set of policy packs to run as part of this deployment.
	LocalPolicyPacks []LocalPolicyPack

	// RequiredPolicies is the set of policies that are required to run as part of the update.
	RequiredPolicies []RequiredPolicy

	// the degree of parallelism for resource operations (<=1 for serial).
	Parallel int

	// true if debugging output is enabled
	Debug bool

	// true if the plan should refresh before executing.
	Refresh bool

	// Specific resources to replace during an update operation.
	ReplaceTargets deploy.UrnTargets

	// Specific resources to update during a deployment.
	Targets deploy.UrnTargets

	// true if we're allowing dependent targets to change, even if not specified in one of the above
	// XXXTargets lists.
	TargetDependents bool

	// true if the engine should use legacy diffing behavior during an update.
	UseLegacyDiff bool

	// true if the engine should disable provider previews.
	DisableProviderPreview bool

	// true if the engine should disable resource reference support.
	DisableResourceReferences bool

	// true if the engine should disable output value support.
	DisableOutputValues bool

	// the plugin host to use for this update
	Host plugin.Host

	// The plan to use for the update, if any.
	Plan *deploy.Plan

	// GeneratePlan when true causes plans to be generated; we skip this if we know they're not needed (e.g. during up).
	GeneratePlan bool

	// Experimental is true if the engine is in experimental mode (i.e. PULUMI_EXPERIMENTAL was set)
	Experimental bool
}
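
// A sketch, for illustration only, of how a caller might populate a few of
// these options (the values are hypothetical):
//
//	opts := UpdateOptions{
//		Parallel:     8,    // run up to 8 resource operations concurrently
//		Refresh:      true, // refresh state before executing the plan
//		GeneratePlan: false,
//	}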

// HasChanges returns true if there are any non-same changes in the resulting summary.
func HasChanges(changes display.ResourceChanges) bool {
	var c int
	for op, count := range changes {
		if op != deploy.OpSame &&
			op != deploy.OpRead &&
			op != deploy.OpReadDiscard &&
			op != deploy.OpReadReplacement {
			c += count
		}
	}
	return c > 0
}
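
// For illustration (hypothetical counts): a summary containing only same/read
// operations reports no changes, while any other operation counts as a change.
//
//	changes := display.ResourceChanges{
//		deploy.OpSame: 3,
//		deploy.OpRead: 1,
//	}
//	HasChanges(changes) // false
//
//	changes[deploy.OpUpdate] = 1
//	HasChanges(changes) // true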

func Update(u UpdateInfo, ctx *Context, opts UpdateOptions, dryRun bool) (
	*deploy.Plan, display.ResourceChanges, error,
) {
	contract.Requiref(u != nil, "update", "cannot be nil")
	contract.Requiref(ctx != nil, "ctx", "cannot be nil")

	defer func() { ctx.Events <- NewCancelEvent() }()

	info, err := newDeploymentContext(u, "update", ctx.ParentSpan)
	if err != nil {
		return nil, nil, err
	}
	defer info.Close()

	emitter, err := makeEventEmitter(ctx.Events, u)
	if err != nil {
		return nil, nil, err
	}
	defer emitter.Close()

	logging.V(7).Infof("*** Starting Update(preview=%v) ***", dryRun)
	defer logging.V(7).Infof("*** Update(preview=%v) complete ***", dryRun)

	// We skip the target check here because the targeted resource may not exist yet.

	return update(ctx, info, &deploymentOptions{
		UpdateOptions: opts,
		SourceFunc:    newUpdateSource,
		Events:        emitter,
		Diag:          newEventSink(emitter, false),
		StatusDiag:    newEventSink(emitter, true),
	}, dryRun)
}

// RunInstallPlugins calls installPlugins and just returns the error (avoids having to export pluginSet).
func RunInstallPlugins(
	proj *workspace.Project, pwd, main string, target *deploy.Target, plugctx *plugin.Context,
) error {
	_, _, err := installPlugins(context.Background(), proj, pwd, main, target, plugctx, true /*returnInstallErrors*/)
	return err
}

func installPlugins(ctx context.Context,
	proj *workspace.Project, pwd, main string, target *deploy.Target,
	plugctx *plugin.Context, returnInstallErrors bool,
) (pluginSet, map[tokens.Package]workspace.PluginSpec, error) {
	// Before launching the source, ensure that we have all of the plugins that we need in order to proceed.
	//
	// There are two places that we need to look for plugins:
	// 1. The language host, which reports to us the set of plugins that the program that's about to execute
	//    needs in order to create new resources. This is purely advisory by the language host and not all
	//    languages implement this (notably Python).
	// 2. The snapshot. The snapshot contains plugins in two locations: first, in the manifest, all plugins
	//    that were loaded are recorded. Second, all first class providers record the version of the plugin
	//    to which they are bound.
	//
	// In order to get a complete view of the set of plugins that we need for an update or query, we must
	// consult both sources and merge their results into a list of plugins.
	runtime := proj.Runtime.Name()
	programInfo := plugin.NewProgramInfo(
		/* rootDirectory */ plugctx.Root,
		/* programDirectory */ pwd,
		/* entryPoint */ main,
		/* options */ proj.Runtime.Options(),
	)
	languagePlugins, err := gatherPluginsFromProgram(plugctx, runtime, programInfo)
	if err != nil {
		return nil, nil, err
	}
	snapshotPlugins, err := gatherPluginsFromSnapshot(plugctx, target)
	if err != nil {
		return nil, nil, err
	}

	allPlugins := languagePlugins.Union(snapshotPlugins)

	// If there are any plugins that are not available, we can attempt to install them here.
	//
	// Note that this is purely a best-effort thing. If we can't install missing plugins, just proceed; we'll fail later
	// with an error message indicating exactly what plugins are missing. If `returnInstallErrors` is set, then return
	// the error.
	if err := ensurePluginsAreInstalled(ctx, plugctx.Diag, allPlugins.Deduplicate(),
		plugctx.Host.GetProjectPlugins()); err != nil {
		if returnInstallErrors {
			return nil, nil, err
		}
		logging.V(7).Infof("newUpdateSource(): failed to install missing plugins: %v", err)
	}

	// Collect the version information for default providers.
	defaultProviderVersions := computeDefaultProviderPlugins(languagePlugins, allPlugins)

	return allPlugins, defaultProviderVersions, nil
}

// installAndLoadPolicyPlugins loads and installs all required policy plugins and packages as well as any
// local policy packs. It returns fully populated metadata about those policy plugins.
func installAndLoadPolicyPlugins(ctx context.Context, plugctx *plugin.Context,
	deployOpts *deploymentOptions, analyzerOpts *plugin.PolicyAnalyzerOptions,
) error {
	var allValidationErrors []string
	appendValidationErrors := func(policyPackName, policyPackVersion string, validationErrors []string) {
		for _, validationError := range validationErrors {
			allValidationErrors = append(allValidationErrors,
				fmt.Sprintf("validating policy config: %s %s %s",
					policyPackName, policyPackVersion, validationError))
		}
	}

	var wg sync.WaitGroup
	errs := make(chan error, len(deployOpts.RequiredPolicies)+len(deployOpts.LocalPolicyPacks))

	// Install and load required policy packs.
	for _, policy := range deployOpts.RequiredPolicies {
		deployOpts.Events.PolicyLoadEvent()
		policyPath, err := policy.Install(ctx)
		if err != nil {
			return err
		}

		wg.Add(1)
		go func(policy RequiredPolicy, policyPath string) {
			defer wg.Done()
			analyzer, err := plugctx.Host.PolicyAnalyzer(tokens.QName(policy.Name()), policyPath, analyzerOpts)
			if err != nil {
				errs <- err
				return
			}

			analyzerInfo, err := analyzer.GetAnalyzerInfo()
			if err != nil {
				errs <- err
				return
			}

			// Parse the config, reconcile & validate it, and pass it to the policy pack.
			if !analyzerInfo.SupportsConfig {
				if len(policy.Config()) > 0 {
					logging.V(7).Infof("policy pack %q does not support config; skipping configure", analyzerInfo.Name)
				}
				return
			}
			configFromAPI, err := resourceanalyzer.ParsePolicyPackConfigFromAPI(policy.Config())
			if err != nil {
				errs <- err
				return
			}
			config, validationErrors, err := resourceanalyzer.ReconcilePolicyPackConfig(
				analyzerInfo.Policies, analyzerInfo.InitialConfig, configFromAPI)
			if err != nil {
				errs <- fmt.Errorf("reconciling config for %q: %w", analyzerInfo.Name, err)
				return
			}
			appendValidationErrors(analyzerInfo.Name, analyzerInfo.Version, validationErrors)
			if err = analyzer.Configure(config); err != nil {
				errs <- fmt.Errorf("configuring policy pack %q: %w", analyzerInfo.Name, err)
				return
			}
		}(policy, policyPath)
	}
|
|
|
|
|
|
|
|
// Load local policy packs.
|
2023-10-09 18:31:17 +00:00
|
|
|
for i, pack := range deployOpts.LocalPolicyPacks {
|
2023-11-20 14:08:32 +00:00
|
|
|
wg.Add(1)
|
|
|
|
go func(i int, pack LocalPolicyPack) {
|
|
|
|
defer wg.Done()
|
|
|
|
deployOpts.Events.PolicyLoadEvent()
|
|
|
|
abs, err := filepath.Abs(pack.Path)
|
|
|
|
if err != nil {
|
|
|
|
errs <- err
|
|
|
|
return
|
|
|
|
}
|
2019-06-30 23:34:39 +00:00
|
|
|
|
2023-11-20 14:08:32 +00:00
|
|
|
analyzer, err := plugctx.Host.PolicyAnalyzer(tokens.QName(abs), pack.Path, analyzerOpts)
|
|
|
|
if err != nil {
|
|
|
|
errs <- err
|
|
|
|
return
|
|
|
|
} else if analyzer == nil {
|
|
|
|
errs <- fmt.Errorf("policy analyzer could not be loaded from path %q", pack.Path)
|
|
|
|
return
|
|
|
|
}
|
2023-10-09 18:31:17 +00:00
|
|
|
|
2023-11-20 14:08:32 +00:00
|
|
|
// Update the Policy Pack names now that we have loaded the plugins and can access the name.
|
|
|
|
analyzerInfo, err := analyzer.GetAnalyzerInfo()
|
|
|
|
if err != nil {
|
|
|
|
errs <- err
|
|
|
|
return
|
|
|
|
}
|
2020-03-08 21:11:55 +00:00
|
|
|
|
2023-11-20 14:08:32 +00:00
|
|
|
// Read and store the name and version since it won't have been supplied by anyone else yet.
|
|
|
|
deployOpts.LocalPolicyPacks[i].Name = analyzerInfo.Name
|
|
|
|
deployOpts.LocalPolicyPacks[i].Version = analyzerInfo.Version
|
|
|
|
|
|
|
|
// Load config, reconcile & validate it, and pass it to the policy pack.
|
|
|
|
if !analyzerInfo.SupportsConfig {
|
|
|
|
if pack.Config != "" {
|
|
|
|
errs <- fmt.Errorf("policy pack %q at %q does not support config", analyzerInfo.Name, pack.Path)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
var configFromFile map[string]plugin.AnalyzerPolicyConfig
|
2020-03-08 21:11:55 +00:00
|
|
|
if pack.Config != "" {
|
2023-11-20 14:08:32 +00:00
|
|
|
configFromFile, err = resourceanalyzer.LoadPolicyPackConfigFromFile(pack.Config)
|
|
|
|
if err != nil {
|
|
|
|
errs <- err
|
|
|
|
return
|
|
|
|
}
|
2020-03-08 21:11:55 +00:00
|
|
|
}
|
2023-11-20 14:08:32 +00:00
|
|
|
config, validationErrors, err := resourceanalyzer.ReconcilePolicyPackConfig(
|
|
|
|
analyzerInfo.Policies, analyzerInfo.InitialConfig, configFromFile)
|
2020-03-08 21:11:55 +00:00
|
|
|
if err != nil {
|
2023-11-20 14:08:32 +00:00
|
|
|
errs <- fmt.Errorf("reconciling policy config for %q at %q: %w", analyzerInfo.Name, pack.Path, err)
|
|
|
|
return
|
2020-03-08 21:11:55 +00:00
|
|
|
}
|
2023-11-20 14:08:32 +00:00
|
|
|
appendValidationErrors(analyzerInfo.Name, analyzerInfo.Version, validationErrors)
|
|
|
|
if err = analyzer.Configure(config); err != nil {
|
|
|
|
errs <- fmt.Errorf("configuring policy pack %q at %q: %w", analyzerInfo.Name, pack.Path, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}(i, pack)
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
if len(errs) > 0 {
|
|
|
|
// If we have any errors return the first one. Even
|
|
|
|
// if we have more than one error, we only return the
|
|
|
|
// first to not overwhelm the user.
|
|
|
|
return <-errs
|
2020-01-30 21:31:41 +00:00
|
|
|
}
|
2020-03-08 21:11:55 +00:00
|
|
|
|
|
|
|
// Report any policy config validation errors and return an error.
|
|
|
|
if len(allValidationErrors) > 0 {
|
|
|
|
sort.Strings(allValidationErrors)
|
|
|
|
for _, validationError := range allValidationErrors {
|
|
|
|
plugctx.Diag.Errorf(diag.Message("", validationError))
|
|
|
|
}
|
|
|
|
return errors.New("validating policy config")
|
|
|
|
}
|
|
|
|
|
2019-06-30 23:34:39 +00:00
|
|
|
return nil
|
|
|
|
}
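// newUpdateSource creates the deploy.Source that drives an update: it installs and loads the
// program's plugins and policy packs, then returns an eval source that evaluates the program.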
func newUpdateSource(ctx context.Context,
	client deploy.BackendClient, opts *deploymentOptions, proj *workspace.Project, pwd, main, projectRoot string,
	target *deploy.Target, plugctx *plugin.Context, dryRun bool,
) (deploy.Source, error) {
	//
	// Step 1: Install and load plugins.
	//
	allPlugins, defaultProviderVersions, err := installPlugins(ctx, proj, pwd, main, target,
		plugctx, false /*returnInstallErrors*/)
	if err != nil {
		return nil, err
	}

	// Once we've installed all of the plugins we need, make sure that all analyzers and language plugins are
	// loaded up and ready to go. Provider plugins are loaded lazily by the provider registry and thus don't
	// need to be loaded here.
	const kinds = plugin.AnalyzerPlugins | plugin.LanguagePlugins
	if err := ensurePluginsAreLoaded(plugctx, allPlugins, kinds); err != nil {
		return nil, err
	}

	//
	// Step 2: Install and load policy plugins.
	//

	// Decrypt the configuration.
	config, err := target.Config.Decrypt(target.Decrypter)
	if err != nil {
		return nil, err
	}
	analyzerOpts := &plugin.PolicyAnalyzerOptions{
		Organization: target.Organization.String(),
		Project:      proj.Name.String(),
		Stack:        target.Name.String(),
		Config:       config,
		DryRun:       dryRun,
	}
	if err := installAndLoadPolicyPlugins(ctx, plugctx, opts, analyzerOpts); err != nil {
		return nil, err
	}

	// If we are connecting to an existing client, stash the address of the engine in its arguments.
	var args []string
	if proj.Runtime.Name() == clientRuntimeName {
		args = []string{plugctx.Host.ServerAddr()}
	}

	// If that succeeded, create a new source that will perform interpretation of the compiled program.
	return deploy.NewEvalSource(plugctx, &deploy.EvalRunInfo{
		Proj:        proj,
		Pwd:         pwd,
		Program:     main,
		ProjectRoot: projectRoot,
		Args:        args,
		Target:      target,
	}, defaultProviderVersions, dryRun), nil
}

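// update executes (or, when preview is true, previews) the given deployment, wiring up the
// appropriate event listeners and returning the resulting plan and resource change summary.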
func update(ctx *Context, info *deploymentContext, opts *deploymentOptions,
	preview bool,
) (*deploy.Plan, display.ResourceChanges, error) {
	// Create an appropriate set of event listeners.
	var actions runActions
	if preview {
		actions = newPreviewActions(opts)
	} else {
		actions = newUpdateActions(ctx, info.Update, opts)
	}

	// Initialize our deployment object with the context and options.
	deployment, err := newDeployment(ctx, info, opts, preview)
	if err != nil {
		return nil, nil, err
	}
	defer contract.IgnoreClose(deployment)

	// Execute the deployment.
	return deployment.run(ctx, actions, preview)
}

// abbreviateFilePath is a helper function that cleans up and shortens a provided file path.
// If the path is long, it will keep the first two and last two directories and then replace the
// middle directories with `...`.
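// For example (illustrative), an absolute path such as
//
//	/Users/alice/go/src/github.com/pulumi/very/long/nested/policy/pack/directory
//
// is abbreviated to /Users/.../pack/directory: splitting an absolute path yields an empty leading
// element, which counts as one of the "first two" entries that are kept.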
func abbreviateFilePath(path string) string {
	path = filepath.Clean(path)
	if len(path) > 75 {
		// Do some shortening.
		separator := "/"
		dirs := strings.Split(path, separator)

		// If we get no splits, we will try to use the backslashes in support of a Windows path.
		if len(dirs) == 1 {
			separator = `\`
			dirs = strings.Split(path, separator)
		}

		if len(dirs) > 4 {
			back := dirs[len(dirs)-2:]
			dirs = append(dirs[:2], "...")
			dirs = append(dirs, back...)
		}
		path = strings.Join(dirs, separator)
	}
	return path
}

// updateActions pretty-prints the plan application process as it goes.
type updateActions struct {
	Context *Context
	Steps   int
	Ops     map[display.StepOp]int
	Seen    map[resource.URN]deploy.Step
	MapLock sync.Mutex
	Update  UpdateInfo
	Opts    *deploymentOptions

	maybeCorrupt bool
}

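// newUpdateActions creates the runActions implementation used for a real update, which both
// displays progress and persists each step's results to the snapshot.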
func newUpdateActions(context *Context, u UpdateInfo, opts *deploymentOptions) *updateActions {
	return &updateActions{
		Context: context,
		Ops:     make(map[display.StepOp]int),
		Seen:    make(map[resource.URN]deploy.Step),
		Update:  u,
		Opts:    opts,
	}
}

func (acts *updateActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
	// Ensure we've marked this step as observed.
	acts.MapLock.Lock()
	acts.Seen[step.URN()] = step
	acts.MapLock.Unlock()

	acts.Opts.Events.resourcePreEvent(step, false /*planning*/, acts.Opts.Debug, isInternalStep(step))

	// Inform the snapshot service that we are about to perform a step.
	return acts.Context.SnapshotManager.BeginMutation(step)
}

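// OnResourceStepPost records the outcome of a step: it reports failures, updates the operation
// counters, emits output events, and writes the result to the snapshot via the pending mutation.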
func (acts *updateActions) OnResourceStepPost(
	ctx interface{}, step deploy.Step,
	status resource.Status, err error,
) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	// If we've already been terminated, exit without writing the checkpoint. We explicitly want to leave the
	// checkpoint in an inconsistent state in this event.
	if acts.Context.Cancel.TerminateErr() != nil {
		return nil
	}

	isInternalStep := isInternalStep(step)

	// Report the result of the step.
	if err != nil {
		if status == resource.StatusUnknown {
			acts.maybeCorrupt = true
		}

		errorURN := resource.URN("")
		if !isInternalStep {
			errorURN = step.URN()
		}

		// Issue a true, bona fide error.
		acts.Opts.Diag.Errorf(diag.GetResourceOperationFailedError(errorURN), err)
		acts.Opts.Events.resourceOperationFailedEvent(step, status, acts.Steps, acts.Opts.Debug)
	} else {
		op, record := step.Op(), step.Logical()
		if acts.Opts.isRefresh && op == deploy.OpRefresh {
			// Refreshes are handled specially.
			op, record = step.(*deploy.RefreshStep).ResultOp(), true
		}

		if step.Op() == deploy.OpRead {
			record = ShouldRecordReadStep(step)
		}

		if record && !isInternalStep {
			// Increment the counters.
			acts.MapLock.Lock()
			acts.Steps++
			acts.Ops[op]++
			acts.MapLock.Unlock()
		}

		// Also show outputs here for custom resources, since there might be some from the initial registration. We do
		// not show outputs for component resources at this point: any that exist must be from a previous execution of
		// the Pulumi program, as component resources only report outputs via calls to RegisterResourceOutputs.
		// Deletions emit the resourceOutputEvent so the display knows when to stop the time elapsed counter.
		if step.Res().Custom || (acts.Opts.Refresh && step.Op() == deploy.OpRefresh) || step.Op() == deploy.OpDelete {
			acts.Opts.Events.resourceOutputsEvent(op, step, false /*planning*/, acts.Opts.Debug, isInternalStep)
		}
	}

	// See pulumi/pulumi#2011 for details. Terraform always returns the existing state with the diff applied to it in
	// the event of an update failure. It's appropriate that we save this new state in the output of the resource, but
	// it is not appropriate to save the inputs, because the resource that exists was not created or updated
	// successfully with those inputs.
	//
	// If we were doing an update and got a `StatusPartialFailure`, the resource that ultimately gets persisted in the
	// snapshot should be old inputs and new outputs. We accomplish that here by clobbering the new resource's inputs
	// with the old inputs.
	//
	// This is a little kludgy given that these resources are global state. However, given the way that we have
	// implemented the snapshot manager and engine today, it's the easiest way to accomplish what we are trying to do.
	if status == resource.StatusPartialFailure && step.Op() == deploy.OpUpdate {
		logging.V(7).Infof(
			"OnResourceStepPost(%s): Step is partially-failed update, saving old inputs instead of new inputs",
			step.URN())
		new := step.New()
		old := step.Old()
		contract.Assertf(new != nil, "new state should not be nil for partially-failed update")
		contract.Assertf(old != nil, "old state should not be nil for partially-failed update")
		new.Inputs = make(resource.PropertyMap)
		for key, value := range old.Inputs {
			new.Inputs[key] = value
		}
	}

	// Write out the current snapshot. Note that even if a failure has occurred, we should still have a
	// safe checkpoint. Note that any error that occurs when writing the checkpoint trumps the error
	// reported above.
	return ctx.(SnapshotMutation).End(step, err == nil || status == resource.StatusPartialFailure)
}

func (acts *updateActions) OnResourceOutputs(step deploy.Step) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	acts.Opts.Events.resourceOutputsEvent(step.Op(), step, false /*planning*/, acts.Opts.Debug, isInternalStep(step))

	// There's a chance there are new outputs that weren't written out last time.
	// We need to perform another snapshot write to ensure they get written out.
	return acts.Context.SnapshotManager.RegisterResourceOutputs(step)
}

func (acts *updateActions) OnPolicyViolation(urn resource.URN, d plugin.AnalyzeDiagnostic) {
	acts.Opts.Events.policyViolationEvent(urn, d)
}

func (acts *updateActions) OnPolicyRemediation(urn resource.URN, t plugin.Remediation,
	before resource.PropertyMap, after resource.PropertyMap,
) {
	acts.Opts.Events.policyRemediationEvent(urn, t, before, after)
}

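// MaybeCorrupt reports whether a step failed in a way that may have left the checkpoint out of
// sync with the provider's actual state (i.e. a failure with an unknown resource status).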
func (acts *updateActions) MaybeCorrupt() bool {
	return acts.maybeCorrupt
}

func (acts *updateActions) Changes() display.ResourceChanges {
	return display.ResourceChanges(acts.Ops)
}

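// previewActions is the runActions implementation used during previews: it tracks the operations
// that would be performed, but never writes to the snapshot.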
type previewActions struct {
	Ops     map[display.StepOp]int
	Opts    *deploymentOptions
	Seen    map[resource.URN]deploy.Step
	MapLock sync.Mutex
}

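// isInternalStep reports whether a step is engine-internal bookkeeping (removing a pending
// replacement or operating on a default provider) and should therefore not be attributed to the
// user's resources in events and counters.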
func isInternalStep(step deploy.Step) bool {
	return step.Op() == deploy.OpRemovePendingReplace || isDefaultProviderStep(step)
}

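// ShouldRecordReadStep reports whether a Read step should be counted in the change summary:
// only reads that actually changed the resource's outputs are recorded.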
func ShouldRecordReadStep(step deploy.Step) bool {
	contract.Assertf(step.Op() == deploy.OpRead, "Only call this on a Read step")

	// If reading a resource didn't result in any change to the resource, we then want to
	// record this as a 'same'. That way, when things haven't actually changed but a user
	// app did perform 'reads', these don't show up in the resource summary at the end.
	return step.Old() != nil &&
		step.New() != nil &&
		step.Old().Outputs != nil &&
		step.New().Outputs != nil &&
		step.Old().Outputs.Diff(step.New().Outputs) != nil
}

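// newPreviewActions creates the runActions implementation used while previewing a deployment.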
func newPreviewActions(opts *deploymentOptions) *previewActions {
	return &previewActions{
		Ops:  make(map[display.StepOp]int),
		Opts: opts,
		Seen: make(map[resource.URN]deploy.Step),
	}
}

func (acts *previewActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
	acts.MapLock.Lock()
	acts.Seen[step.URN()] = step
	acts.MapLock.Unlock()

	acts.Opts.Events.resourcePreEvent(step, true /*planning*/, acts.Opts.Debug, isInternalStep(step))

	return nil, nil
}

func (acts *previewActions) OnResourceStepPost(ctx interface{},
	step deploy.Step, status resource.Status, err error,
) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	isInternalStep := isInternalStep(step)

	if err != nil {
		// We always want to report a failure. If we intend to elide this step overall, though, we report it as a
		// global message.
		reportedURN := resource.URN("")
		if !isInternalStep {
			reportedURN = step.URN()
		}

		acts.Opts.Diag.Errorf(diag.GetPreviewFailedError(reportedURN), err)
	} else {
		op, record := step.Op(), step.Logical()
		if acts.Opts.isRefresh && op == deploy.OpRefresh {
			// Refreshes are handled specially.
			op, record = step.(*deploy.RefreshStep).ResultOp(), true
		}

		if step.Op() == deploy.OpRead {
			record = ShouldRecordReadStep(step)
		}

		// Track the operation if shown and/or if it is a logically meaningful operation.
		if record && !isInternalStep {
			acts.MapLock.Lock()
			acts.Ops[op]++
			acts.MapLock.Unlock()
		}

		acts.Opts.Events.resourceOutputsEvent(op, step, true /*planning*/, acts.Opts.Debug, isInternalStep)
	}

	return nil
}

func (acts *previewActions) OnResourceOutputs(step deploy.Step) error {
	acts.MapLock.Lock()
	assertSeen(acts.Seen, step)
	acts.MapLock.Unlock()

	// Print the resource outputs separately.
	acts.Opts.Events.resourceOutputsEvent(step.Op(), step, true /*planning*/, acts.Opts.Debug, isInternalStep(step))

	return nil
}

func (acts *previewActions) OnPolicyViolation(urn resource.URN, d plugin.AnalyzeDiagnostic) {
	acts.Opts.Events.policyViolationEvent(urn, d)
}

func (acts *previewActions) OnPolicyRemediation(urn resource.URN, t plugin.Remediation,
	before resource.PropertyMap, after resource.PropertyMap,
) {
	acts.Opts.Events.policyRemediationEvent(urn, t, before, after)
}

func (acts *previewActions) MaybeCorrupt() bool {
	return false
}

func (acts *previewActions) Changes() display.ResourceChanges {
	return display.ResourceChanges(acts.Ops)
}