// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package engine

import (
"context"
|
|
|
|
|
2023-09-18 11:01:28 +00:00
|
|
|
"github.com/pulumi/pulumi/pkg/v3/display"
|
2021-03-17 13:20:05 +00:00
|
|
|
"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
|
2017-10-02 18:14:27 +00:00
|
|
|
)
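
// Destroy deletes all of a stack's existing resources. It drives the same update
// machinery as a regular update, but uses newDestroySource to supply the "new" state:
// that source produces nothing, so the engine removes everything in the current
// snapshot. When dryRun is true, the destroy is previewed rather than applied.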
func Destroy(
	u UpdateInfo,
	ctx *Context,
	opts UpdateOptions,
	dryRun bool,
) (*deploy.Plan, display.ResourceChanges, error) {
	contract.Requiref(u != nil, "u", "cannot be nil")
	contract.Requiref(ctx != nil, "ctx", "cannot be nil")
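
	// Send a cancel event when Destroy returns, whatever the outcome, so that
	// consumers of ctx.Events know that no further events are coming.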
	defer func() { ctx.Events <- NewCancelEvent() }()

	info, err := newDeploymentContext(u, "destroy", ctx.ParentSpan)
	if err != nil {
		return nil, nil, err
	}
	defer info.Close()

	emitter, err := makeEventEmitter(ctx.Events, u)
	if err != nil {
		return nil, nil, err
	}
	defer emitter.Close()

	logging.V(7).Infof("*** Starting Destroy(preview=%v) ***", dryRun)
	defer logging.V(7).Infof("*** Destroy(preview=%v) complete ***", dryRun)
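
	// Validate any explicitly requested targets against the current snapshot
	// before doing any work.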
	if err := checkTargets(opts.Targets, u.GetTarget().Snapshot); err != nil {
		return nil, nil, err
	}
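
	// Run the destroy through the shared update machinery; newDestroySource
	// supplies an empty "new" state, so the engine deletes everything in the
	// existing snapshot.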
	return update(ctx, info, &deploymentOptions{
		UpdateOptions: opts,
		SourceFunc:    newDestroySource,
		Events:        emitter,
		Diag:          newEventSink(emitter, false),
		StatusDiag:    newEventSink(emitter, true),
	}, dryRun)
}
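
// newDestroySource is the SourceFunc used by Destroy. It ensures that the plugins
// referenced by the existing snapshot are installed and loaded, then returns a
// NullSource: an empty "new" state that causes the engine to delete everything in
// the current snapshot.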
func newDestroySource(
	ctx context.Context,
	client deploy.BackendClient, opts *deploymentOptions, proj *workspace.Project, pwd, main, projectRoot string,
	target *deploy.Target, plugctx *plugin.Context, dryRun bool,
) (deploy.Source, error) {
	// Like Update, we need to gather the set of plugins necessary to delete everything in the snapshot.
	// Unlike Update, we don't actually run the user's program so we only need the set of plugins described
	// in the snapshot.
	plugins, err := gatherPluginsFromSnapshot(plugctx, target)
	if err != nil {
		return nil, err
	}

	// Like Update, if we're missing plugins, attempt to download and install them.
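	// A failed installation is only logged here; it is not returned as an error.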
	if err := ensurePluginsAreInstalled(ctx, plugctx.Diag, plugins.Deduplicate(),
		plugctx.Host.GetProjectPlugins()); err != nil {
		logging.V(7).Infof("newDestroySource(): failed to install missing plugins: %v", err)
	}

	// We don't need the language plugin, since destroy doesn't run code, so we will leave that out.
	if err := ensurePluginsAreLoaded(plugctx, plugins, plugin.AnalyzerPlugins); err != nil {
		return nil, err
	}

	// Create a nil source. This simply returns "nothing" as the new state, which will cause the
	// engine to destroy the entire existing state.
	return deploy.NewNullSource(proj.Name), nil
}