2017-08-30 01:24:12 +00:00
|
|
|
// Copyright 2017, Pulumi Corporation. All rights reserved.
|
|
|
|
|
2017-08-22 23:56:15 +00:00
|
|
|
package engine
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"time"
|
|
|
|
|
2017-09-07 14:25:08 +00:00
|
|
|
goerr "github.com/pkg/errors"
|
|
|
|
|
2017-09-22 02:18:21 +00:00
|
|
|
"github.com/pulumi/pulumi/pkg/compiler/errors"
|
|
|
|
"github.com/pulumi/pulumi/pkg/diag"
|
|
|
|
"github.com/pulumi/pulumi/pkg/diag/colors"
|
|
|
|
"github.com/pulumi/pulumi/pkg/resource"
|
|
|
|
"github.com/pulumi/pulumi/pkg/resource/deploy"
|
|
|
|
"github.com/pulumi/pulumi/pkg/util/contract"
|
2017-08-22 23:56:15 +00:00
|
|
|
)
|
|
|
|
|
2018-01-18 19:10:15 +00:00
|
|
|
// UpdateOptions contains all the settings for customizing how an update (deploy, preview, or destroy) is performed.
// These are the user-facing knobs shared by every update-style operation; deployOptions embeds this type and adds
// engine-internal settings.
type UpdateOptions struct {
	Analyzers []string // an optional set of analyzers to run as part of this deployment.
	DryRun bool // true if we should just print the plan without performing it.
	Parallel int // the degree of parallelism for resource operations (<=1 for serial).
	ShowConfig bool // true to show the configuration variables being used.
	ShowReplacementSteps bool // true to show the replacement steps in the plan.
	ShowSames bool // true to show the resources that aren't updated in addition to updates.
	Summary bool // true if we should only summarize resources and operations.
}
|
|
|
|
|
2018-01-20 20:07:03 +00:00
|
|
|
// ResourceChanges contains the aggregate resource changes by operation type.
// It maps each step operation kind (create, update, delete, ...) to the number
// of logical steps of that kind performed (or planned) during an update.
type ResourceChanges map[deploy.StepOp]int
|
|
|
|
|
|
|
|
func Deploy(update Update, events chan<- Event, opts UpdateOptions) (ResourceChanges, error) {
|
2018-01-08 21:01:40 +00:00
|
|
|
contract.Require(update != nil, "update")
|
2017-10-05 21:08:46 +00:00
|
|
|
contract.Require(events != nil, "events")
|
2017-09-09 20:43:51 +00:00
|
|
|
|
2017-10-22 22:52:00 +00:00
|
|
|
defer func() { events <- cancelEvent() }()
|
|
|
|
|
2018-01-08 22:20:51 +00:00
|
|
|
info, err := planContextFromUpdate(update)
|
2017-08-22 23:56:15 +00:00
|
|
|
if err != nil {
|
2018-01-20 20:07:03 +00:00
|
|
|
return nil, err
|
2017-08-22 23:56:15 +00:00
|
|
|
}
|
2017-11-09 01:08:51 +00:00
|
|
|
defer info.Close()
|
2017-10-05 21:08:46 +00:00
|
|
|
|
2018-01-08 22:20:51 +00:00
|
|
|
return deployLatest(info, deployOptions{
|
2018-01-18 19:10:15 +00:00
|
|
|
UpdateOptions: opts,
|
|
|
|
|
|
|
|
Destroy: false,
|
|
|
|
|
|
|
|
Events: events,
|
2018-01-31 17:41:42 +00:00
|
|
|
Diag: newEventSink(events),
|
2017-08-22 23:56:15 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// deployOptions is the engine-internal settings bag for a deployment. It embeds the user-facing
// UpdateOptions and adds the plumbing (event channel, diagnostics sink) and engine-only flags.
type deployOptions struct {
	UpdateOptions

	Destroy bool // true if we are destroying the stack.

	Detailed bool // true to show very detailed output, like properties that haven't changed.
	DOT bool // true if we should print the DOT file for this plan.
	Events chan<- Event // the channel to write events from the engine to.
	Diag diag.Sink // the sink to use for diag'ing.
}
|
|
|
|
|
2018-01-20 20:07:03 +00:00
|
|
|
// deployLatest computes a plan for the given context and then either prints it (dry run) or walks
// and executes it, emitting progress events along the way. It returns the aggregate resource
// changes performed (nil if no plan result was produced) along with any error.
func deployLatest(info *planContext, opts deployOptions) (ResourceChanges, error) {
	result, err := plan(info, opts)
	if err != nil {
		return nil, err
	}

	var resourceChanges ResourceChanges
	if result != nil {
		defer contract.IgnoreClose(result)

		// Make the current working directory the same as the program's, and restore it upon exit.
		// NOTE: this `err` shadows the outer one for the remainder of this block.
		done, err := result.Chdir()
		if err != nil {
			return nil, err
		}
		defer done()

		if opts.DryRun {
			// If a dry run, just print the plan, don't actually carry out the deployment.
			resourceChanges, err = printPlan(result)
			if err != nil {
				return resourceChanges, err
			}
		} else {
			// Otherwise, we will actually deploy the latest bits.
			var header bytes.Buffer
			printPrelude(&header, result, false)
			header.WriteString(fmt.Sprintf("%vPerforming changes:%v\n", colors.SpecUnimportant, colors.Reset))
			opts.Events <- stdOutEventWithColor(&header)

			// Walk the plan, reporting progress and executing the actual operations as we go.
			start := time.Now()
			actions := newDeployActions(info.Update, opts)
			summary, _, _, err := result.Walk(actions, false)
			if err != nil && summary == nil {
				// Something went wrong, and no changes were made.
				return resourceChanges, err
			}
			// A non-nil summary means some steps ran even if err != nil; we still report and
			// checkpoint what happened before surfacing the error further below.
			contract.Assert(summary != nil)

			// Print a summary.
			var footer bytes.Buffer

			// Print out the total number of steps performed (and their kinds), the duration, and any summary info.
			resourceChanges = ResourceChanges(actions.Ops)
			if c := printChangeSummary(&footer, resourceChanges, false); c != 0 {
				footer.WriteString(fmt.Sprintf("%vUpdate duration: %v%v\n",
					colors.SpecUnimportant, time.Since(start), colors.Reset))
			}

			if actions.MaybeCorrupt {
				footer.WriteString(fmt.Sprintf(
					"%vA catastrophic error occurred; resources states may be unknown%v\n",
					colors.SpecAttention, colors.Reset))
			}

			opts.Events <- stdOutEventWithColor(&footer)

			// Now that the summary has been emitted, propagate any error from the walk.
			if err != nil {
				return resourceChanges, err
			}
		}
	}

	if !opts.Diag.Success() {
		// If any error that wasn't printed above, be sure to make it evident in the output.
		return resourceChanges, goerr.New("One or more errors occurred during this update")
	}
	return resourceChanges, nil
}
|
|
|
|
|
2017-10-02 21:26:51 +00:00
|
|
|
// deployActions pretty-prints the plan application process as it goes.
// It implements the engine's step event-handler interface (see OnResourceStepPre/Post and
// OnResourceOutputs below), accumulating counters as steps complete.
type deployActions struct {
	Steps int // the number of logical steps performed so far.
	Ops map[deploy.StepOp]int // counts of completed steps, keyed by operation kind.
	Seen map[resource.URN]deploy.Step // steps seen so far, keyed by resource URN.
	Shown map[resource.URN]bool // tracks which resources have already been displayed.
	MaybeCorrupt bool // set when a catastrophic provider failure means resource state may be unknown.
	Update Update // the update being applied; used to begin/end snapshot mutations.
	Opts deployOptions // the options governing this deployment.
}
|
|
|
|
|
2018-01-08 21:01:40 +00:00
|
|
|
func newDeployActions(update Update, opts deployOptions) *deployActions {
|
2017-11-17 02:21:41 +00:00
|
|
|
return &deployActions{
|
|
|
|
Ops: make(map[deploy.StepOp]int),
|
|
|
|
Seen: make(map[resource.URN]deploy.Step),
|
|
|
|
Shown: make(map[resource.URN]bool),
|
2018-01-08 21:01:40 +00:00
|
|
|
Update: update,
|
2017-11-17 02:21:41 +00:00
|
|
|
Opts: opts,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
func (acts *deployActions) OnResourceStepPre(step deploy.Step) (interface{}, error) {
|
2017-10-03 17:27:59 +00:00
|
|
|
// Report the beginning of the step if appropriate.
|
2017-12-07 21:33:16 +00:00
|
|
|
if shouldShow(acts.Seen, step, acts.Opts) || isRootStack(step) {
|
2017-10-02 21:26:51 +00:00
|
|
|
var b bytes.Buffer
|
2017-11-17 02:21:41 +00:00
|
|
|
printStep(&b, step, acts.Seen, acts.Shown, acts.Opts.Summary, acts.Opts.Detailed, false, 0 /*indent*/)
|
2018-01-31 17:41:42 +00:00
|
|
|
acts.Opts.Events <- stdOutEventWithColor(&b)
|
2017-08-22 23:56:15 +00:00
|
|
|
}
|
|
|
|
|
2017-10-21 16:31:01 +00:00
|
|
|
// Inform the snapshot service that we are about to perform a step.
|
2018-01-08 21:01:40 +00:00
|
|
|
return acts.Update.BeginMutation()
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
}
|
2017-08-22 23:56:15 +00:00
|
|
|
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
// OnResourceStepPost is invoked immediately after a step finishes. On failure it diagnoses the
// error and reports whether the provider recovered; on success it bumps the step counters and
// shows any output properties. In both cases it then ends the snapshot mutation begun in
// OnResourceStepPre (ctx), checkpointing the current state.
func (acts *deployActions) OnResourceStepPost(ctx interface{},
	step deploy.Step, status resource.Status, err error) error {
	var b bytes.Buffer

	// Report the result of the step.
	stepop := step.Op()
	if err != nil {
		// Issue a true, bonafide error.
		acts.Opts.Diag.Errorf(errors.ErrorPlanApplyFailed, err)

		// Print the state of the resource; we don't issue the error, because the deploy above will do that.
		stepnum := acts.Steps + 1
		b.WriteString(fmt.Sprintf("Step #%v failed [%v]: ", stepnum, stepop))
		switch status {
		case resource.StatusOK:
			b.WriteString(colors.SpecNote)
			b.WriteString("provider successfully recovered from this failure")
		case resource.StatusUnknown:
			b.WriteString(colors.SpecAttention)
			b.WriteString("this failure was catastrophic and the provider cannot guarantee recovery")
			// Remember the potential corruption so deployLatest can warn in its footer.
			acts.MaybeCorrupt = true
		default:
			contract.Failf("Unrecognized resource state: %v", status)
		}
		b.WriteString(colors.Reset)
		b.WriteString("\n")
	} else {
		if step.Logical() {
			// Increment the counters.
			acts.Steps++
			acts.Ops[stepop]++
		}

		// Also show outputs here, since there might be some from the initial registration.
		if shouldShow(acts.Seen, step, acts.Opts) && !acts.Opts.Summary {
			printResourceOutputProperties(&b, step, acts.Seen, acts.Shown, false, 0 /*indent*/)
		}
	}

	acts.Opts.Events <- stdOutEventWithColor(&b)

	// Write out the current snapshot. Note that even if a failure has occurred, we should still have a
	// safe checkpoint. Note that any error that occurs when writing the checkpoint trumps the error reported above.
	return ctx.(SnapshotMutation).End(step.Iterator().Snap())
}
|
2017-10-03 17:27:59 +00:00
|
|
|
|
2017-11-29 23:05:58 +00:00
|
|
|
func (acts *deployActions) OnResourceOutputs(step deploy.Step) error {
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
// Print this step's output properties.
|
2017-12-07 21:33:16 +00:00
|
|
|
if (shouldShow(acts.Seen, step, acts.Opts) || isRootStack(step)) && !acts.Opts.Summary {
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
var b bytes.Buffer
|
2017-12-16 15:33:58 +00:00
|
|
|
printResourceOutputProperties(&b, step, acts.Seen, acts.Shown, false, 0 /*indent*/)
|
2018-01-31 17:41:42 +00:00
|
|
|
acts.Opts.Events <- stdOutEventWithColor(&b)
|
Bring back component outputs
This change brings back component outputs to the overall system again.
In doing so, it generally overhauls the way we do resource RPCs a bit:
* Instead of RegisterResource and CompleteResource, we call these
BeginRegisterResource and EndRegisterResource, which begins to model
these as effectively "asynchronous" resource requests. This should also
help with parallelism (https://github.com/pulumi/pulumi/issues/106).
* Flip the CLI/engine a little on its head. Rather than it driving the
planning and deployment process, we move more to a model where it
simply observes it. This is done by implementing an event handler
interface with three events: OnResourceStepPre, OnResourceStepPost,
and OnResourceComplete. The first two are invoked immediately before
and after any step operation, and the latter is invoked whenever a
EndRegisterResource comes in. The reason for the asymmetry here is
that the checkpointing logic in the deployment engine is largely
untouched (intentionally, as this is a sensitive part of the system),
and so the "begin"/"end" nature doesn't flow through faithfully.
* Also make the engine more event-oriented in its terminology and the
way it handles the incoming BeginRegisterResource and
EndRegisterResource events from the language host. This is the first
step down a long road of incrementally refactoring the engine to work
this way, a necessary prerequisite for parallelism.
2017-11-29 15:42:14 +00:00
|
|
|
}
|
2017-11-29 16:36:04 +00:00
|
|
|
|
2017-11-29 23:05:58 +00:00
|
|
|
// There's a chance there are new outputs that weren't written out last time.
|
2017-11-29 16:36:04 +00:00
|
|
|
// We need to perform another snapshot write to ensure they get written out.
|
2018-01-08 21:01:40 +00:00
|
|
|
mutation, err := acts.Update.BeginMutation()
|
2017-11-29 23:05:58 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-11-29 16:36:04 +00:00
|
|
|
|
2018-01-25 02:22:41 +00:00
|
|
|
return mutation.End(step.Iterator().Snap())
|
2017-08-22 23:56:15 +00:00
|
|
|
}
|