mirror of https://github.com/pulumi/pulumi.git

Switch to a resource-progress oriented view for pulumi preview/update/destroy (#1116)

parent 085e8fb412, commit a759f2e085

Changed paths: Gopkg.lock, cmd, pkg, sdk, nodejs, proto, tests/integration/diff
@@ -1,6 +1,21 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/Azure/go-ansiterm"
|
||||
packages = [
|
||||
".",
|
||||
"winterm"
|
||||
]
|
||||
revision = "d6e3b3328b783f23731bc4d058875b0371ff8109"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/apache/thrift"
|
||||
packages = ["lib/go/thrift"]
|
||||
|
@@ -78,6 +93,25 @@
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/docker"
|
||||
packages = [
|
||||
"pkg/jsonlog",
|
||||
"pkg/jsonmessage",
|
||||
"pkg/progress",
|
||||
"pkg/streamformatter",
|
||||
"pkg/term",
|
||||
"pkg/term/windows"
|
||||
]
|
||||
revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
|
||||
version = "v1.13.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/go-units"
|
||||
packages = ["."]
|
||||
revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52"
|
||||
version = "v0.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/dustin/go-humanize"
|
||||
|
@@ -369,6 +403,12 @@
]
|
||||
revision = "88f656faf3f37f690df1a32515b479415e1a6769"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
|
@@ -485,6 +525,6 @@
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "1de7bd262e1e77d3cc0d7d0bba2a51e124c40a6b7e18f0fcc0566121b364f068"
|
||||
inputs-digest = "bd2f79a09b9058e1d57ad165c4ae96028620c3521b994e66e615f505b47a7fa4"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
|
@@ -206,7 +206,7 @@ func newConfigSetCmd(stack *string) *cobra.Command {
// If we saved a plaintext configuration value, and --plaintext was not passed, warn the user.
|
||||
if !secret && !plaintext {
|
||||
cmdutil.Diag().Warningf(
|
||||
diag.Message(
|
||||
diag.Message("", /*urn*/
|
||||
"saved config key '%s' value '%s' as plaintext; "+
|
||||
"re-run with --secret to encrypt the value instead. Use "+
|
||||
"--plaintext to avoid this warning"),
|
||||
|
|
|
@@ -29,7 +29,7 @@ func newDestroyCmd() *cobra.Command {
var showConfig bool
|
||||
var showReplacementSteps bool
|
||||
var showSames bool
|
||||
var summary bool
|
||||
var diffDisplay bool
|
||||
|
||||
var cmd = &cobra.Command{
|
||||
Use: "destroy",
|
||||
|
@@ -64,7 +64,7 @@ func newDestroyCmd() *cobra.Command {
return errors.New("confirmation declined")
|
||||
}
|
||||
|
||||
return s.Destroy(proj, root, debug, m, engine.UpdateOptions{
|
||||
return s.Destroy(proj, root, m, engine.UpdateOptions{
|
||||
Analyzers: analyzers,
|
||||
DryRun: preview,
|
||||
Parallel: parallel,
|
||||
|
@@ -74,7 +74,8 @@ func newDestroyCmd() *cobra.Command {
ShowConfig: showConfig,
|
||||
ShowReplacementSteps: showReplacementSteps,
|
||||
ShowSames: showSames,
|
||||
Summary: summary,
|
||||
DiffDisplay: diffDisplay,
|
||||
Debug: debug,
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
@@ -115,8 +116,8 @@ func newDestroyCmd() *cobra.Command {
&showSames, "show-sames", false,
|
||||
"Show resources that needn't be updated because they haven't changed, alongside those that do")
|
||||
cmd.PersistentFlags().BoolVar(
|
||||
&summary, "summary", false,
|
||||
"Only display summarization of resources and operations")
|
||||
&diffDisplay, "diff", false,
|
||||
"Display operation as a rich diff showing the overall change")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
|
|
@@ -22,7 +22,7 @@ func newPreviewCmd() *cobra.Command {
var showConfig bool
|
||||
var showReplacementSteps bool
|
||||
var showSames bool
|
||||
var summary bool
|
||||
var diffDisplay bool
|
||||
|
||||
var cmd = &cobra.Command{
|
||||
Use: "preview",
|
||||
|
@@ -52,7 +52,7 @@ func newPreviewCmd() *cobra.Command {
return err
|
||||
}
|
||||
|
||||
return s.Preview(proj, root, debug, engine.UpdateOptions{
|
||||
return s.Preview(proj, root, engine.UpdateOptions{
|
||||
Analyzers: analyzers,
|
||||
DryRun: true,
|
||||
Parallel: parallel,
|
||||
|
@@ -62,7 +62,8 @@ func newPreviewCmd() *cobra.Command {
ShowConfig: showConfig,
|
||||
ShowReplacementSteps: showReplacementSteps,
|
||||
ShowSames: showSames,
|
||||
Summary: summary,
|
||||
DiffDisplay: diffDisplay,
|
||||
Debug: debug,
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
@@ -93,8 +94,8 @@ func newPreviewCmd() *cobra.Command {
&showSames, "show-sames", false,
|
||||
"Show resources that needn't be updated because they haven't changed, alongside those that do")
|
||||
cmd.PersistentFlags().BoolVar(
|
||||
&summary, "summary", false,
|
||||
"Only display summarization of resources and operations")
|
||||
&diffDisplay, "diff", false,
|
||||
"Display operation as a rich diff showing the overall change")
|
||||
|
||||
return cmd
|
||||
}
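The new --diff flag is what lets users opt back into the diff-style output once the progress view takes over as the default. As a hedged, standalone illustration (not an excerpt from this repository), wiring such a boolean flag with cobra looks roughly like this:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var diffDisplay bool

	cmd := &cobra.Command{
		Use: "preview",
		RunE: func(cmd *cobra.Command, args []string) error {
			// In the real CLI this value would flow into backend.DisplayOptions.DiffDisplay.
			fmt.Println("diff display requested:", diffDisplay)
			return nil
		},
	}
	cmd.PersistentFlags().BoolVar(
		&diffDisplay, "diff", false,
		"Display operation as a rich diff showing the overall change")

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}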
|
||||
|
|
|
@@ -66,7 +66,10 @@ func newStackImportCmd() *cobra.Command {
res.URN, res.URN.Stack(), s.Name())
|
||||
if force {
|
||||
// If --force was passed, just issue a warning and proceed anyway.
|
||||
cmdutil.Diag().Warningf(diag.Message(msg))
|
||||
// Note: we could associate this diagnostic with the resource URN
|
||||
// we have. However, this sort of message seems to be better as
|
||||
// something associated with the stack as a whole.
|
||||
cmdutil.Diag().Warningf(diag.Message("" /*urn*/, msg))
|
||||
} else {
|
||||
// Otherwise, gather up an error so that we can quit before doing damage.
|
||||
result = multierror.Append(result, errors.New(msg))
|
||||
|
|
|
@@ -26,7 +26,7 @@ func newUpdateCmd() *cobra.Command {
var showConfig bool
|
||||
var showReplacementSteps bool
|
||||
var showSames bool
|
||||
var summary bool
|
||||
var diffDisplay bool
|
||||
|
||||
var cmd = &cobra.Command{
|
||||
Use: "update",
|
||||
|
@@ -61,7 +61,7 @@ func newUpdateCmd() *cobra.Command {
return errors.Wrap(err, "gathering environment metadata")
|
||||
}
|
||||
|
||||
return s.Update(proj, root, debug, m, engine.UpdateOptions{
|
||||
return s.Update(proj, root, m, engine.UpdateOptions{
|
||||
Analyzers: analyzers,
|
||||
DryRun: preview,
|
||||
Parallel: parallel,
|
||||
|
@@ -71,7 +71,8 @@ func newUpdateCmd() *cobra.Command {
ShowConfig: showConfig,
|
||||
ShowReplacementSteps: showReplacementSteps,
|
||||
ShowSames: showSames,
|
||||
Summary: summary,
|
||||
DiffDisplay: diffDisplay,
|
||||
Debug: debug,
|
||||
})
|
||||
}),
|
||||
}
|
||||
|
@@ -109,8 +110,8 @@ func newUpdateCmd() *cobra.Command {
&showSames, "show-sames", false,
|
||||
"Show resources that needn't be updated because they haven't changed, alongside those that do")
|
||||
cmd.PersistentFlags().BoolVar(
|
||||
&summary, "summary", false,
|
||||
"Only display summarization of resources and operations")
|
||||
&diffDisplay, "diff", false,
|
||||
"Display operation as a rich diff showing the overall change")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
|
|
@@ -34,13 +34,13 @@ type Backend interface {
|
||||
// Preview initiates a preview of the current workspace's contents.
|
||||
Preview(stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
// Update updates the target stack with the current workspace's contents (config and code).
|
||||
Update(stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
// Destroy destroys all of this stack's resources.
|
||||
Destroy(stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
|
||||
|
||||
// GetHistory returns all updates for the stack. The returned UpdateInfo slice will be in
|
||||
// descending order (newest first).
|
||||
|
|
|
@@ -404,27 +404,27 @@ var actionLabels = map[string]string{
}
|
||||
|
||||
func (b *cloudBackend) Preview(stackName tokens.QName, pkg *workspace.Project, root string,
|
||||
debug bool, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
return b.updateStack(client.UpdateKindPreview, stackName, pkg, root, debug, backend.UpdateMetadata{}, opts,
|
||||
return b.updateStack(client.UpdateKindPreview, stackName, pkg, root, backend.UpdateMetadata{}, opts,
|
||||
displayOpts)
|
||||
}
|
||||
|
||||
func (b *cloudBackend) Update(stackName tokens.QName, pkg *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
return b.updateStack(client.UpdateKindUpdate, stackName, pkg, root, debug, m, opts, displayOpts)
|
||||
return b.updateStack(client.UpdateKindUpdate, stackName, pkg, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (b *cloudBackend) Destroy(stackName tokens.QName, pkg *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
return b.updateStack(client.UpdateKindDestroy, stackName, pkg, root, debug, m, opts, displayOpts)
|
||||
return b.updateStack(client.UpdateKindDestroy, stackName, pkg, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (b *cloudBackend) createAndStartUpdate(action client.UpdateKind, stackName tokens.QName, pkg *workspace.Project,
|
||||
root string, debug bool, m backend.UpdateMetadata,
|
||||
opts engine.UpdateOptions) (client.UpdateIdentifier, int, string, error) {
|
||||
func (b *cloudBackend) createAndStartUpdate(
|
||||
action client.UpdateKind, stackName tokens.QName, pkg *workspace.Project,
|
||||
root string, m backend.UpdateMetadata, opts engine.UpdateOptions) (client.UpdateIdentifier, int, string, error) {
|
||||
|
||||
stack, err := getCloudStackIdentifier(stackName)
|
||||
if err != nil {
|
||||
|
@@ -470,7 +470,7 @@ func (b *cloudBackend) createAndStartUpdate(action client.UpdateKind, stackName
|
||||
// updateStack performs the provided type of update on a stack hosted in the Pulumi Cloud.
|
||||
func (b *cloudBackend) updateStack(action client.UpdateKind, stackName tokens.QName, pkg *workspace.Project,
|
||||
root string, debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions,
|
||||
root string, m backend.UpdateMetadata, opts engine.UpdateOptions,
|
||||
displayOpts backend.DisplayOptions) error {
|
||||
|
||||
// Print a banner so it's clear this is going to the cloud.
|
||||
|
@@ -494,7 +494,7 @@ func (b *cloudBackend) updateStack(action client.UpdateKind, stackName tokens.QN
var version int
|
||||
var token string
|
||||
if !stack.(Stack).RunLocally() || action != client.UpdateKindPreview {
|
||||
update, version, token, err = b.createAndStartUpdate(action, stackName, pkg, root, debug, m, opts)
|
||||
update, version, token, err = b.createAndStartUpdate(action, stackName, pkg, root, m, opts)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
|
@@ -513,7 +513,7 @@ func (b *cloudBackend) updateStack(action client.UpdateKind, stackName tokens.QN
|
||||
// If we are targeting a stack that uses local operations, run the appropriate engine action locally.
|
||||
if stack.(Stack).RunLocally() {
|
||||
return b.runEngineAction(action, stackName, pkg, root, debug, opts, displayOpts, update, token)
|
||||
return b.runEngineAction(action, stackName, pkg, root, opts, displayOpts, update, token)
|
||||
}
|
||||
|
||||
// Otherwise, wait for the update to complete while rendering its events to stdout/stderr.
|
||||
|
@@ -552,8 +552,9 @@ func getUpdateContents(context string, useDefaultIgnores bool, progress bool) (i
return archiveReader, int64(archiveContents.Len()), nil
|
||||
}
|
||||
|
||||
func (b *cloudBackend) runEngineAction(action client.UpdateKind, stackName tokens.QName, pkg *workspace.Project,
|
||||
root string, debug bool, opts engine.UpdateOptions, displayOpts backend.DisplayOptions,
|
||||
func (b *cloudBackend) runEngineAction(
|
||||
action client.UpdateKind, stackName tokens.QName, pkg *workspace.Project,
|
||||
root string, opts engine.UpdateOptions, displayOpts backend.DisplayOptions,
|
||||
update client.UpdateIdentifier, token string) error {
|
||||
|
||||
u, err := b.newUpdate(stackName, pkg, root, update, token)
|
||||
|
@@ -566,7 +567,7 @@ func (b *cloudBackend) runEngineAction(action client.UpdateKind, stackName token
|
||||
actionLabel, ok := actionLabels[string(action)]
|
||||
contract.Assertf(ok, "unsupported update kind: %v", action)
|
||||
go u.RecordAndDisplayEvents(actionLabel, events, done, debug, displayOpts)
|
||||
go u.RecordAndDisplayEvents(actionLabel, events, done, displayOpts)
|
||||
|
||||
switch action {
|
||||
case client.UpdateKindPreview:
|
||||
|
@@ -845,7 +846,7 @@ func (b *cloudBackend) waitForUpdate(actionLabel string, update client.UpdateIde
|
||||
func displayEvents(action string, events <-chan displayEvent, done chan<- bool, opts backend.DisplayOptions) {
|
||||
prefix := fmt.Sprintf("%s%s...", cmdutil.EmojiOr("✨ ", "@ "), action)
|
||||
spinner, ticker := cmdutil.NewSpinnerAndTicker(prefix, nil)
|
||||
spinner, ticker := cmdutil.NewSpinnerAndTicker(prefix, nil, 8 /*timesPerSecond*/)
|
||||
|
||||
defer func() {
|
||||
spinner.Reset()
|
||||
|
@@ -926,8 +927,8 @@ func (b *cloudBackend) tryNextUpdate(update client.UpdateIdentifier, afterIndex
|
||||
// Issue a warning if appropriate.
|
||||
if warn {
|
||||
b.d.Warningf(diag.Message("error querying update status: %v"), err)
|
||||
b.d.Warningf(diag.Message("retrying in %vs... ^C to stop (this will not cancel the update)"),
|
||||
b.d.Warningf(diag.Message("" /*urn*/, "error querying update status: %v"), err)
|
||||
b.d.Warningf(diag.Message("" /*urn*/, "retrying in %vs... ^C to stop (this will not cancel the update)"),
|
||||
nextRetryTime.Seconds())
|
||||
}
|
||||
|
||||
|
|
|
@@ -87,18 +87,18 @@ func (s *cloudStack) Remove(force bool) (bool, error) {
}
|
||||
|
||||
func (s *cloudStack) Preview(proj *workspace.Project, root string,
|
||||
debug bool, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.PreviewStack(s, proj, root, debug, opts, displayOpts)
|
||||
opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.PreviewStack(s, proj, root, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *cloudStack) Update(proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.UpdateStack(s, proj, root, debug, m, opts, displayOpts)
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.UpdateStack(s, proj, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *cloudStack) Destroy(proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.DestroyStack(s, proj, root, debug, m, opts, displayOpts)
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.DestroyStack(s, proj, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *cloudStack) GetLogs(query operations.LogQuery) ([]operations.LogEntry, error) {
|
||||
|
|
|
@@ -148,7 +148,8 @@ func (u *cloudUpdate) Complete(status apitype.UpdateStatus) error {
}
|
||||
|
||||
func (u *cloudUpdate) recordEvent(
|
||||
event engine.Event, seen map[resource.URN]engine.StepEventMetadata, debug bool, opts backend.DisplayOptions) error {
|
||||
event engine.Event, seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions) error {
|
||||
|
||||
// If we don't have a token source, we can't perform any mutations.
|
||||
if u.tokenSource == nil {
|
||||
|
@@ -163,9 +164,10 @@ func (u *cloudUpdate) recordEvent(
}
|
||||
}
|
||||
|
||||
// Ensure we render events with raw colorization tags.
|
||||
// Ensure we render events with raw colorization tags. Also, render these as 'diff' events so
|
||||
// the user has a rich diff-log they can see when they look at their logs in the service.
|
||||
opts.Color = colors.Raw
|
||||
msg := local.RenderEvent(event, seen, debug, opts)
|
||||
msg := local.RenderDiffEvent(event, seen, opts)
|
||||
if msg == "" {
|
||||
return nil
|
||||
}
|
||||
|
@@ -180,20 +182,20 @@ func (u *cloudUpdate) recordEvent(
}
|
||||
|
||||
func (u *cloudUpdate) RecordAndDisplayEvents(action string,
|
||||
events <-chan engine.Event, done chan<- bool, debug bool, opts backend.DisplayOptions) {
|
||||
events <-chan engine.Event, done chan<- bool, opts backend.DisplayOptions) {
|
||||
|
||||
// Start the local display processor.
|
||||
// Start the local display processor, displaying things however the options dictate
// (i.e. diff vs. progress).
|
||||
displayEvents := make(chan engine.Event)
|
||||
go local.DisplayEvents(action, displayEvents, done, debug, opts)
|
||||
go local.DisplayEvents(action, displayEvents, done, opts)
|
||||
|
||||
seen := make(map[resource.URN]engine.StepEventMetadata)
|
||||
|
||||
for e := range events {
|
||||
// First echo the event to the local display.
|
||||
displayEvents <- e
|
||||
|
||||
// Then render and record the event for posterity.
|
||||
if err := u.recordEvent(e, seen, debug, opts); err != nil {
|
||||
if err := u.recordEvent(e, seen, opts); err != nil {
|
||||
diagEvent := engine.Event{
|
||||
Type: engine.DiagEvent,
|
||||
Payload: engine.DiagEventPayload{
|
||||
|
|
|
@@ -10,5 +10,6 @@ type DisplayOptions struct {
ShowConfig bool // true if we should show configuration information.
|
||||
ShowReplacementSteps bool // true to show the replacement steps in the plan.
|
||||
ShowSames bool // true to show the resources that aren't updated in addition to updates.
|
||||
Summary bool // true if we should only summarize resources and operations.
|
||||
DiffDisplay bool // true if we should display things as a rich diff.
Debug bool // true if we should display debug-severity diagnostic messages.
|
||||
}
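The recurring pattern in this change is that debug stops being its own positional parameter and instead rides along in DisplayOptions. A hedged before/after sketch of the call shape (names follow the hunks in this commit, but it is not a literal excerpt):

// Before: debug threaded through as a separate argument.
// err := s.Preview(proj, root, debug, opts, displayOpts)

// After: debug (and the new DiffDisplay toggle) live on the display options.
displayOpts := backend.DisplayOptions{DiffDisplay: diffDisplay, Debug: debug}
err := s.Preview(proj, root, opts, displayOpts)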
|
||||
|
|
|
@@ -128,7 +128,8 @@ func (b *localBackend) GetStackCrypter(stackName tokens.QName) (config.Crypter,
return symmetricCrypter(stackName)
|
||||
}
|
||||
|
||||
func (b *localBackend) Preview(stackName tokens.QName, proj *workspace.Project, root string, debug bool,
|
||||
func (b *localBackend) Preview(
|
||||
stackName tokens.QName, proj *workspace.Project, root string,
|
||||
opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
update, err := b.newUpdate(stackName, proj, root)
|
||||
|
@@ -139,7 +140,7 @@ func (b *localBackend) Preview(stackName tokens.QName, proj *workspace.Project,
events := make(chan engine.Event)
|
||||
done := make(chan bool)
|
||||
|
||||
go DisplayEvents("previewing", events, done, debug, displayOpts)
|
||||
go DisplayEvents("previewing", events, done, displayOpts)
|
||||
|
||||
if err = engine.Preview(update, events, opts); err != nil {
|
||||
return err
|
||||
|
@@ -151,11 +152,13 @@ func (b *localBackend) Preview(stackName tokens.QName, proj *workspace.Project,
return nil
|
||||
}
|
||||
|
||||
func (b *localBackend) Update(stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
func (b *localBackend) Update(
|
||||
stackName tokens.QName, proj *workspace.Project, root string,
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
return b.performEngineOp(
|
||||
"updating", backend.DeployUpdate,
|
||||
stackName, proj, root, debug, m, opts, displayOpts,
|
||||
stackName, proj, root, m, opts, displayOpts,
|
||||
func(update *update, events chan engine.Event) (engine.ResourceChanges, error) {
|
||||
return engine.Update(update, events, opts)
|
||||
},
|
||||
|
@@ -163,10 +166,11 @@ func (b *localBackend) Update(stackName tokens.QName, proj *workspace.Project, r
}
|
||||
|
||||
func (b *localBackend) Destroy(stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
|
||||
return b.performEngineOp(
|
||||
"destroying", backend.DestroyUpdate,
|
||||
stackName, proj, root, debug, m, opts, displayOpts,
|
||||
stackName, proj, root, m, opts, displayOpts,
|
||||
func(update *update, events chan engine.Event) (engine.ResourceChanges, error) {
|
||||
return engine.Destroy(update, events, opts)
|
||||
},
|
||||
|
@@ -175,8 +179,9 @@ func (b *localBackend) Destroy(stackName tokens.QName, proj *workspace.Project,
|
||||
func (b *localBackend) performEngineOp(op string, kind backend.UpdateKind,
|
||||
stackName tokens.QName, proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions,
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions,
|
||||
performEngineOp func(*update, chan engine.Event) (engine.ResourceChanges, error)) error {
|
||||
|
||||
update, err := b.newUpdate(stackName, proj, root)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@@ -185,7 +190,7 @@ func (b *localBackend) performEngineOp(op string, kind backend.UpdateKind,
events := make(chan engine.Event)
|
||||
done := make(chan bool)
|
||||
|
||||
go DisplayEvents(op, events, done, debug, displayOpts)
|
||||
go DisplayEvents(op, events, done, displayOpts)
|
||||
|
||||
// Perform the update
|
||||
start := time.Now().Unix()
|
||||
|
|
|
@@ -20,13 +20,26 @@ import (
"github.com/pulumi/pulumi/pkg/util/contract"
|
||||
)
|
||||
|
||||
// DisplayEvents reads events from the `events` channel until it is closed, displaying each event as it comes in.
|
||||
// Once all events have been read from the channel and displayed, it closes the `done` channel so the caller can
|
||||
// await all the events being written.
|
||||
func DisplayEvents(action string,
|
||||
events <-chan engine.Event, done chan<- bool, debug bool, opts backend.DisplayOptions) {
|
||||
// DisplayEvents reads events from the `events` channel until it is closed, displaying each event as
|
||||
// it comes in. Once all events have been read from the channel and displayed, it closes the `done`
|
||||
// channel so the caller can await all the events being written.
|
||||
func DisplayEvents(
|
||||
action string, events <-chan engine.Event,
|
||||
done chan<- bool, opts backend.DisplayOptions) {
|
||||
|
||||
if opts.DiffDisplay {
|
||||
DisplayDiffEvents(action, events, done, opts)
|
||||
} else {
|
||||
DisplayProgressEvents(action, events, done, opts)
|
||||
}
|
||||
}
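Callers hand DisplayEvents an events channel plus a done channel, run it on a goroutine, and block on done once the event stream is closed. A minimal self-contained sketch of that handoff (toy event type rather than the engine's real one):

package main

import "fmt"

type event struct{ msg string }

// displayEvents drains `events` until it is closed, then signals `done`,
// mirroring the contract described above.
func displayEvents(events <-chan event, done chan<- bool) {
	for e := range events {
		fmt.Println("rendering:", e.msg)
	}
	done <- true
}

func main() {
	events := make(chan event)
	done := make(chan bool)
	go displayEvents(events, done)

	events <- event{"previewing stack"}
	events <- event{`created aws:Function("hello")`}

	close(events)
	<-done // wait until everything has been rendered
}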
|
||||
|
||||
// DisplayDiffEvents displays the engine events with the diff view.
|
||||
func DisplayDiffEvents(action string,
|
||||
events <-chan engine.Event, done chan<- bool, opts backend.DisplayOptions) {
|
||||
|
||||
prefix := fmt.Sprintf("%s%s...", cmdutil.EmojiOr("✨ ", "@ "), action)
|
||||
spinner, ticker := cmdutil.NewSpinnerAndTicker(prefix, nil)
|
||||
spinner, ticker := cmdutil.NewSpinnerAndTicker(prefix, nil, 8 /*timesPerSecond*/)
|
||||
|
||||
defer func() {
|
||||
spinner.Reset()
|
||||
|
@@ -51,7 +64,7 @@ func DisplayEvents(action string,
}
|
||||
}
|
||||
|
||||
msg := RenderEvent(event, seen, debug, opts)
|
||||
msg := RenderDiffEvent(event, seen, opts)
|
||||
if msg != "" && out != nil {
|
||||
fprintIgnoreError(out, msg)
|
||||
}
|
||||
|
@@ -63,44 +76,46 @@ func DisplayEvents(action string,
}
|
||||
}
|
||||
|
||||
func RenderEvent(
|
||||
event engine.Event, seen map[resource.URN]engine.StepEventMetadata, debug bool, opts backend.DisplayOptions) string {
|
||||
func RenderDiffEvent(
|
||||
event engine.Event, seen map[resource.URN]engine.StepEventMetadata, opts backend.DisplayOptions) string {
|
||||
|
||||
switch event.Type {
|
||||
case engine.CancelEvent:
|
||||
return ""
|
||||
case engine.PreludeEvent:
|
||||
return RenderPreludeEvent(event.Payload.(engine.PreludeEventPayload), opts)
|
||||
return renderPreludeEvent(event.Payload.(engine.PreludeEventPayload), opts)
|
||||
case engine.SummaryEvent:
|
||||
return RenderSummaryEvent(event.Payload.(engine.SummaryEventPayload), opts)
|
||||
return renderSummaryEvent(event.Payload.(engine.SummaryEventPayload), opts)
|
||||
case engine.ResourceOperationFailed:
|
||||
return RenderResourceOperationFailedEvent(event.Payload.(engine.ResourceOperationFailedPayload), opts)
|
||||
return renderResourceOperationFailedEvent(event.Payload.(engine.ResourceOperationFailedPayload), opts)
|
||||
case engine.ResourceOutputsEvent:
|
||||
return RenderResourceOutputsEvent(event.Payload.(engine.ResourceOutputsEventPayload), seen, opts)
|
||||
return renderResourceOutputsEvent(event.Payload.(engine.ResourceOutputsEventPayload), seen, opts)
|
||||
case engine.ResourcePreEvent:
|
||||
return RenderResourcePreEvent(event.Payload.(engine.ResourcePreEventPayload), seen, opts)
|
||||
return renderResourcePreEvent(event.Payload.(engine.ResourcePreEventPayload), seen, opts)
|
||||
case engine.StdoutColorEvent:
|
||||
return RenderStdoutColorEvent(event.Payload.(engine.StdoutEventPayload), opts)
|
||||
return renderStdoutColorEvent(event.Payload.(engine.StdoutEventPayload), opts)
|
||||
case engine.DiagEvent:
|
||||
return RenderDiagEvent(event.Payload.(engine.DiagEventPayload), debug, opts)
|
||||
return renderDiagEvent(event.Payload.(engine.DiagEventPayload), opts)
|
||||
default:
|
||||
contract.Failf("unknown event type '%s'", event.Type)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func RenderDiagEvent(payload engine.DiagEventPayload, debug bool, opts backend.DisplayOptions) string {
|
||||
if payload.Severity == diag.Debug && !debug {
|
||||
func renderDiagEvent(payload engine.DiagEventPayload, opts backend.DisplayOptions) string {
|
||||
if payload.Severity == diag.Debug && !opts.Debug {
|
||||
return ""
|
||||
}
|
||||
return opts.Color.Colorize(payload.Message)
|
||||
}
|
||||
|
||||
func RenderStdoutColorEvent(payload engine.StdoutEventPayload, opts backend.DisplayOptions) string {
|
||||
func renderStdoutColorEvent(
|
||||
payload engine.StdoutEventPayload, opts backend.DisplayOptions) string {
|
||||
|
||||
return opts.Color.Colorize(payload.Message)
|
||||
}
|
||||
|
||||
func RenderSummaryEvent(event engine.SummaryEventPayload, opts backend.DisplayOptions) string {
|
||||
func renderSummaryEvent(event engine.SummaryEventPayload, opts backend.DisplayOptions) string {
|
||||
changes := event.ResourceChanges
|
||||
|
||||
changeCount := 0
|
||||
|
@@ -165,7 +180,7 @@ func RenderSummaryEvent(event engine.SummaryEventPayload, opts backend.DisplayOp
return out.String()
|
||||
}
|
||||
|
||||
func RenderPreludeEvent(event engine.PreludeEventPayload, opts backend.DisplayOptions) string {
|
||||
func renderPreludeEvent(event engine.PreludeEventPayload, opts backend.DisplayOptions) string {
|
||||
out := &bytes.Buffer{}
|
||||
|
||||
if opts.ShowConfig {
|
||||
|
@@ -192,7 +207,7 @@ func RenderPreludeEvent(event engine.PreludeEventPayload, opts backend.DisplayOp
return out.String()
|
||||
}
|
||||
|
||||
func RenderResourceOperationFailedEvent(
|
||||
func renderResourceOperationFailedEvent(
|
||||
payload engine.ResourceOperationFailedPayload, opts backend.DisplayOptions) string {
|
||||
|
||||
// It's not actually useful or interesting to print out any details about
|
||||
|
@@ -205,7 +220,7 @@ func RenderResourceOperationFailedEvent(
return ""
|
||||
}
|
||||
|
||||
func RenderResourcePreEvent(
|
||||
func renderResourcePreEvent(
|
||||
payload engine.ResourcePreEventPayload,
|
||||
seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions) string {
|
||||
|
@@ -220,25 +235,21 @@
details := engine.GetResourcePropertiesDetails(payload.Metadata, indent, payload.Planning, payload.Debug)
|
||||
|
||||
fprintIgnoreError(out, opts.Color.Colorize(summary))
|
||||
|
||||
if !opts.Summary {
|
||||
fprintIgnoreError(out, opts.Color.Colorize(details))
|
||||
}
|
||||
|
||||
fprintIgnoreError(out, opts.Color.Colorize(details))
|
||||
fprintIgnoreError(out, opts.Color.Colorize(colors.Reset))
|
||||
}
|
||||
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func RenderResourceOutputsEvent(
|
||||
func renderResourceOutputsEvent(
|
||||
payload engine.ResourceOutputsEventPayload,
|
||||
seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions) string {
|
||||
|
||||
out := &bytes.Buffer{}
|
||||
|
||||
if (shouldShow(payload.Metadata, opts) || isRootStack(payload.Metadata)) && !opts.Summary {
|
||||
if shouldShow(payload.Metadata, opts) || isRootStack(payload.Metadata) {
|
||||
indent := engine.GetIndent(payload.Metadata, seen)
|
||||
text := engine.GetResourceOutputsPropertiesString(payload.Metadata, indent+1, payload.Planning, payload.Debug)
|
||||
|
||||
|
@@ -250,7 +261,11 @@
|
||||
// isRootStack returns true if the step pertains to the rootmost stack component.
|
||||
func isRootStack(step engine.StepEventMetadata) bool {
|
||||
return step.URN.Type() == resource.RootStackType
|
||||
return isRootURN(step.URN)
|
||||
}
|
||||
|
||||
func isRootURN(urn resource.URN) bool {
|
||||
return urn != "" && urn.Type() == resource.RootStackType
|
||||
}
|
||||
|
||||
// shouldShow returns true if a step should show in the output.
|
||||
|
|
|
@@ -0,0 +1,666 @@
// Copyright 2016-2018, Pulumi Corporation. All rights reserved.
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
|
||||
"github.com/pulumi/pulumi/pkg/backend"
|
||||
"github.com/pulumi/pulumi/pkg/diag"
|
||||
"github.com/pulumi/pulumi/pkg/diag/colors"
|
||||
"github.com/pulumi/pulumi/pkg/engine"
|
||||
"github.com/pulumi/pulumi/pkg/resource"
|
||||
"github.com/pulumi/pulumi/pkg/resource/deploy"
|
||||
"github.com/pulumi/pulumi/pkg/tokens"
|
||||
"github.com/pulumi/pulumi/pkg/util/cmdutil"
|
||||
"github.com/pulumi/pulumi/pkg/util/contract"
|
||||
)
|
||||
|
||||
// Status helps us keep track of a resource as it is worked on by the engine.
|
||||
type Status struct {
|
||||
// The simple short ID we have generated for the resource to present it to the user.
|
||||
// Usually similar to the form: aws.Function("name")
|
||||
ID string
|
||||
|
||||
// The change that the engine wants to apply to that resource.
|
||||
Step engine.StepEventMetadata
|
||||
|
||||
// The tick we were on when we created this status. Purely used for generating an
// ellipsis to show progress for in-flight resources.
|
||||
Tick int
|
||||
|
||||
// Whether the engine has finished processing this resource.
|
||||
Done bool
|
||||
|
||||
// The progress message to print.
|
||||
Message string
|
||||
|
||||
// All the diagnostic events we've heard about this resource. We'll print the last
|
||||
// diagnostic in the status region while a resource is in progress. At the end we'll
|
||||
// print out all diagnostics for a resource.
|
||||
DiagEvents []engine.Event
|
||||
}
|
||||
|
||||
var (
|
||||
// simple regex to take our names like "aws:function:Function" and convert to
|
||||
// "aws:Function"
|
||||
typeNameRegex = regexp.MustCompile("^(.*):(.*):(.*)$")
|
||||
)
|
||||
|
||||
func simplifyTypeName(typ tokens.Type) string {
|
||||
typeString := string(typ)
|
||||
return typeNameRegex.ReplaceAllString(typeString, "$1:$3")
|
||||
}
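So a three-part type token collapses to just its package and member name. A quick standalone check of that regex, using the sample token from the comment above:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	typeNameRegex := regexp.MustCompile("^(.*):(.*):(.*)$")
	fmt.Println(typeNameRegex.ReplaceAllString("aws:function:Function", "$1:$3"))
	// Output: aws:Function
}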
|
||||
|
||||
// getEventUrn returns the resource URN associated with an event, or the empty URN if this is not an
|
||||
// event that has a URN.
|
||||
func getEventUrn(event engine.Event) resource.URN {
|
||||
if event.Type == engine.ResourcePreEvent {
|
||||
payload := event.Payload.(engine.ResourcePreEventPayload)
|
||||
return payload.Metadata.URN
|
||||
} else if event.Type == engine.ResourceOutputsEvent {
|
||||
payload := event.Payload.(engine.ResourceOutputsEventPayload)
|
||||
return payload.Metadata.URN
|
||||
} else if event.Type == engine.DiagEvent {
|
||||
payload := event.Payload.(engine.DiagEventPayload)
|
||||
return payload.URN
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func writeProgress(chanOutput progress.Output, progress progress.Progress) {
|
||||
err := chanOutput.WriteProgress(progress)
|
||||
if err != nil {
|
||||
contract.IgnoreError(err)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// We want to present a trimmed-down name to users for any URN. These maps, and the
// helper functions below, are used for that.
|
||||
urnToID = make(map[resource.URN]string)
|
||||
idToUrn = make(map[string]resource.URN)
|
||||
)
|
||||
|
||||
func makeIDWorker(urn resource.URN, suffix int) string {
|
||||
var id string
|
||||
if urn == "" {
|
||||
id = "global"
|
||||
} else {
|
||||
id = simplifyTypeName(urn.Type()) + "(\"" + string(urn.Name()) + "\")"
|
||||
}
|
||||
|
||||
if suffix > 0 {
|
||||
id += fmt.Sprintf("-%v", suffix)
|
||||
}
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
func makeID(urn resource.URN) string {
|
||||
if id, has := urnToID[urn]; !has {
|
||||
for i := 0; ; i++ {
|
||||
id = makeIDWorker(urn, i)
|
||||
|
||||
if _, has = idToUrn[id]; !has {
|
||||
urnToID[urn] = id
|
||||
idToUrn[id] = urn
|
||||
|
||||
return id
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return id
|
||||
}
|
||||
}
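Because two different URNs can simplify to the same display name, makeID keeps probing with numeric suffixes until it finds an unused slot in idToUrn. A small standalone sketch of that uniquifying loop (string keys instead of resource.URN, same idea):

package main

import "fmt"

var (
	urnToID = map[string]string{}
	idToUrn = map[string]string{}
)

func makeID(urn, base string) string {
	if id, has := urnToID[urn]; has {
		return id
	}
	for i := 0; ; i++ {
		id := base
		if i > 0 {
			id = fmt.Sprintf("%s-%v", base, i)
		}
		if _, taken := idToUrn[id]; !taken {
			urnToID[urn] = id
			idToUrn[id] = urn
			return id
		}
	}
}

func main() {
	fmt.Println(makeID("urn:a", `aws:Function("hello")`)) // aws:Function("hello")
	fmt.Println(makeID("urn:b", `aws:Function("hello")`)) // aws:Function("hello")-1
	fmt.Println(makeID("urn:a", `aws:Function("hello")`)) // cached: aws:Function("hello")
}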
|
||||
|
||||
// DisplayProgressEvents displays the engine events with docker's progress view.
|
||||
func DisplayProgressEvents(
|
||||
action string, events <-chan engine.Event,
|
||||
done chan<- bool, opts backend.DisplayOptions) {
|
||||
|
||||
// Create a ticker that will update all our status messages once a second. Any
|
||||
// in-flight resources will get a varying . .. ... ticker appended to them to
|
||||
// let the user know what is still being worked on.
|
||||
prefix := fmt.Sprintf("%s%s...", cmdutil.EmojiOr("✨ ", "@ "), action)
|
||||
_, ticker := cmdutil.NewSpinnerAndTicker(prefix, nil, 1 /*timesPerSecond*/)
|
||||
|
||||
// Whether or not we're previewing. We don't know what we are actually doing until
|
||||
// we get the initial 'prelude' event.
|
||||
//
|
||||
// this flag is only used to adjust how we describe what's going on to the user.
|
||||
// i.e. if we're previewing we say things like "Would update" instead of "Updating".
|
||||
isPreview := false
|
||||
|
||||
// The urn of the stack.
|
||||
var stackUrn resource.URN
|
||||
seen := make(map[resource.URN]engine.StepEventMetadata)
|
||||
|
||||
// The summary event from the engine. If we get one, we'll print it after all
// normal resource events are heard. That way we don't interfere with all the progress
// messages we're outputting for them.
|
||||
var summaryEvent *engine.Event
|
||||
|
||||
// The length of the largest ID we've seen. We use this so we can align status messages per
|
||||
// resource. i.e. status messages for shorter IDs will get padded with spaces so that
|
||||
// everything aligns.
|
||||
maxIDLength := 0
|
||||
|
||||
// What tick we're currently on. Used to determine the number of ellipses to concat to
|
||||
// a status message to help indicate that things are still working.
|
||||
currentTick := 0
|
||||
|
||||
// A mapping from each resource URN we are told about to its current status.
|
||||
eventUrnToStatus := make(map[resource.URN]Status)
|
||||
|
||||
// As we receive information from the engine, we convert it into Status objects
// that we track. In turn, every time we update our status (or our ticker fires) we'll
// update the "progress channel". This progress channel is what the Docker CLI code
// listens to, and it uses those messages to update the actual terminal display.
|
||||
_, stdout, _ := term.StdStreams()
|
||||
|
||||
// Remember if we're a terminal or not. In a terminal we get a little bit fancier.
|
||||
// For example, we'll go back and update previous status messages to make sure things
|
||||
// align. We don't need to do that in non-terminal situations.
|
||||
_, isTerminal := term.GetFdInfo(stdout)
|
||||
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
progressChan := make(chan progress.Progress, 100)
|
||||
|
||||
chanOutput := progress.ChanOutput(progressChan)
|
||||
|
||||
go func() {
|
||||
// Docker helper that reads progress messages in from progressChan and converts them
// into specially formatted messages that are written to the pipe writer.
|
||||
writeDistributionProgress(pipeWriter, progressChan)
|
||||
|
||||
// Once we've written everything to the pipe, we're done with it and can let it go.
|
||||
err := pipeWriter.Close()
|
||||
contract.IgnoreError(err)
|
||||
|
||||
ticker.Stop()
|
||||
|
||||
// let our caller know we're done.
|
||||
done <- true
|
||||
}()
|
||||
|
||||
ellipses := []string{"", ".", "..", "..."}
|
||||
createInProgressMessage := func(status Status) string {
|
||||
msg := status.Message
|
||||
|
||||
// if there are any diagnostics for this resource, add information about the
|
||||
// last diagnostic to the status message.
|
||||
if len(status.DiagEvents) > 0 {
|
||||
diagMsg := renderProgressEvent(
|
||||
status.DiagEvents[len(status.DiagEvents)-1], seen, opts, isPreview)
|
||||
|
||||
if diagMsg != "" {
|
||||
msg += ". " + diagMsg
|
||||
}
|
||||
}
|
||||
|
||||
// Add a changing ellipsis to help convey that progress is happening.
|
||||
msg += ellipses[(status.Tick+currentTick)%len(ellipses)]
|
||||
|
||||
return msg
|
||||
}
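The (status.Tick+currentTick) % len(ellipses) arithmetic is what makes each in-flight line appear to animate as the ticker fires; a tiny illustration of the cycle:

package main

import "fmt"

func main() {
	ellipses := []string{"", ".", "..", "..."}
	for tick := 0; tick < 6; tick++ {
		fmt.Printf("Creating aws:Function(\"hello\")%s\n", ellipses[tick%len(ellipses)])
	}
}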
|
||||
|
||||
createDoneMessage := func(status Status, isPreview bool) string {
|
||||
if status.Step.Op == "" {
|
||||
contract.Failf("Finishing a resource we never heard about: '%s'", status.ID)
|
||||
}
|
||||
|
||||
msg := getMetadataSummary(status.Step, opts, isPreview, true /*isComplete*/)
|
||||
|
||||
debugEvents := 0
|
||||
infoEvents := 0
|
||||
errorEvents := 0
|
||||
warningEvents := 0
|
||||
for _, ev := range status.DiagEvents {
|
||||
payload := ev.Payload.(engine.DiagEventPayload)
|
||||
|
||||
switch payload.Severity {
|
||||
case diag.Debug:
|
||||
debugEvents++
|
||||
case diag.Info:
|
||||
infoEvents++
|
||||
case diag.Infoerr:
|
||||
errorEvents++
|
||||
case diag.Warning:
|
||||
warningEvents++
|
||||
case diag.Error:
|
||||
errorEvents++
|
||||
}
|
||||
}
|
||||
|
||||
if debugEvents > 0 {
|
||||
msg += fmt.Sprintf(", %v debug message(s)", debugEvents)
|
||||
}
|
||||
|
||||
if infoEvents > 0 {
|
||||
msg += fmt.Sprintf(", %v info message(s)", infoEvents)
|
||||
}
|
||||
|
||||
if warningEvents > 0 {
|
||||
msg += fmt.Sprintf(", %v warning(s)", warningEvents)
|
||||
}
|
||||
|
||||
if errorEvents > 0 {
|
||||
msg += fmt.Sprintf(", %v error(s)", errorEvents)
|
||||
}
|
||||
|
||||
return opts.Color.Colorize(msg)
|
||||
}
|
||||
|
||||
writeAction := func(id string, msg string) {
|
||||
extraWhitespace := 0
|
||||
|
||||
// In the terminal we try to align the status messages for each resource.
|
||||
// do not bother with this in the non-terminal case.
|
||||
if isTerminal {
|
||||
extraWhitespace = maxIDLength - len(id)
|
||||
contract.Assertf(extraWhitespace >= 0, "Neg whitespace. %v %s", maxIDLength, id)
|
||||
}
|
||||
|
||||
writeProgress(chanOutput, progress.Progress{
|
||||
ID: id,
|
||||
Action: strings.Repeat(" ", extraWhitespace) + msg,
|
||||
})
|
||||
}
|
||||
|
||||
printStatusForTopLevelResource := func(status Status) {
|
||||
if !status.Done {
|
||||
writeAction(status.ID, createInProgressMessage(status))
|
||||
} else {
|
||||
writeAction(status.ID, createDoneMessage(status, isPreview))
|
||||
}
|
||||
}
|
||||
|
||||
printStatusForTopLevelResources := func(includeDone bool) {
|
||||
for _, v := range eventUrnToStatus {
|
||||
if v.Done && !includeDone {
|
||||
continue
|
||||
}
|
||||
|
||||
printStatusForTopLevelResource(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Performs all the work at the end once we've heard the last message
// from the engine. Specifically, this will update the status messages for
// any resources, then print out all final diagnostics, and
// finally print out the summary.
|
||||
processEndSteps := func() {
|
||||
// Mark all in progress resources as done.
|
||||
for k, v := range eventUrnToStatus {
|
||||
if !v.Done {
|
||||
v.Done = true
|
||||
eventUrnToStatus[k] = v
|
||||
printStatusForTopLevelResource(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Print all diagnostics at the end. We only need to do this if we were summarizing.
|
||||
// Otherwise, this would have been seen while we were receiving the events.
|
||||
|
||||
for _, status := range eventUrnToStatus {
|
||||
if len(status.DiagEvents) > 0 {
|
||||
wroteHeader := false
|
||||
for _, v := range status.DiagEvents {
|
||||
msg := renderProgressEvent(v, seen, opts, isPreview)
|
||||
if msg != "" {
|
||||
if !wroteHeader {
|
||||
wroteHeader = true
|
||||
writeProgress(chanOutput, progress.Progress{Message: " "})
|
||||
writeProgress(chanOutput, progress.Progress{ID: status.ID, Message: "Diagnostics"})
|
||||
}
|
||||
|
||||
writeProgress(chanOutput, progress.Progress{Message: " " + msg})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// print the summary
|
||||
if summaryEvent != nil {
|
||||
msg := renderProgressEvent(*summaryEvent, seen, opts, isPreview)
|
||||
if msg != "" {
|
||||
writeProgress(chanOutput, progress.Progress{Message: " "})
|
||||
writeProgress(chanOutput, progress.Progress{Message: msg})
|
||||
}
|
||||
}
|
||||
|
||||
// no more progress events from this point on. By closing the progress channel, this will
|
||||
// cause writeDistributionProgress to finish. This, in turn, will close the pipeWriter.
|
||||
// This will then cause DisplayJSONMessagesToStream to finish once it processes the last
|
||||
// message it receives from pipeReader, causing DisplayEvents to finally complete.
|
||||
close(progressChan)
|
||||
}
|
||||
|
||||
// Main processing loop. The purpose of this func is to read in events from the engine
|
||||
// and translate them into Status objects and progress messages to be presented to the
|
||||
// command line.
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// Got a tick. Update all the in-progress resources.
|
||||
currentTick++
|
||||
printStatusForTopLevelResources(false /*includeDone:*/)
|
||||
|
||||
case event := <-events:
|
||||
if event.Type == "" || event.Type == engine.CancelEvent {
|
||||
// Engine finished sending events. Do all the final processing and return
|
||||
// from this local func. This will print out things like full diagnostic
|
||||
// events, as well as the summary event from the engine.
|
||||
processEndSteps()
|
||||
return
|
||||
}
|
||||
|
||||
eventUrn := getEventUrn(event)
|
||||
if isRootURN(eventUrn) {
|
||||
stackUrn = eventUrn
|
||||
}
|
||||
|
||||
// First just make a string out of the event. If we get nothing back this isn't an
|
||||
// interesting event and we can just skip it.
|
||||
msg := renderProgressEvent(event, seen, opts, isPreview)
|
||||
if msg == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
switch event.Type {
|
||||
case engine.PreludeEvent:
|
||||
// A prelude event can just be printed out directly to the console.
|
||||
// Note: we should probably make sure we don't get any prelude events
|
||||
// once we start hearing about actual resource events.
|
||||
|
||||
isPreview = event.Payload.(engine.PreludeEventPayload).IsPreview
|
||||
writeProgress(chanOutput, progress.Progress{Message: " "})
|
||||
writeProgress(chanOutput, progress.Progress{Message: msg})
|
||||
continue
|
||||
case engine.SummaryEvent:
|
||||
// Keep track of the summary event so that we can display it after all other
// resource-related events we receive.
|
||||
summaryEvent = &event
|
||||
continue
|
||||
}
|
||||
|
||||
// At this point, all events should relate to resources.
|
||||
|
||||
if eventUrn == "" {
|
||||
// if the event doesn't have any URN associated with it, just associate
|
||||
// it with the stack.
|
||||
eventUrn = stackUrn
|
||||
}
|
||||
|
||||
refreshAllStatuses := false
|
||||
status, has := eventUrnToStatus[eventUrn]
|
||||
if !has {
|
||||
// first time we're hearing about this resource. Create an initial nearly-empty
|
||||
// status for it, assigning it a nice short ID.
|
||||
status = Status{Tick: currentTick}
|
||||
status.Step.Op = deploy.OpSame
|
||||
status.ID = makeID(eventUrn)
|
||||
|
||||
if isTerminal {
|
||||
// in the terminal we want to align the status portions of messages. If we
|
||||
// heard about a resource with a longer id, go and update all in-flight and
|
||||
// finished resources so that their statuses get aligned.
|
||||
if len(status.ID) > maxIDLength {
|
||||
maxIDLength = len(status.ID)
|
||||
refreshAllStatuses = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if event.Type == engine.ResourcePreEvent {
|
||||
status.Message = msg
|
||||
status.Step = event.Payload.(engine.ResourcePreEventPayload).Metadata
|
||||
if status.Step.Op == "" {
|
||||
contract.Failf("Got empty op for %s %s", event.Type, msg)
|
||||
}
|
||||
} else if event.Type == engine.ResourceOutputsEvent {
|
||||
// transition the status to done.
|
||||
status.Done = true
|
||||
} else if event.Type == engine.DiagEvent {
|
||||
// also record this diagnostic so we print it at the end.
|
||||
status.DiagEvents = append(status.DiagEvents, event)
|
||||
} else {
|
||||
contract.Failf("Unhandled event type '%s'", event.Type)
|
||||
}
|
||||
|
||||
// Ensure that this updated status is recorded.
|
||||
eventUrnToStatus[eventUrn] = status
|
||||
|
||||
// refresh the progress information for this resource. (or update all resources if
|
||||
// we need to realign everything)
|
||||
if refreshAllStatuses {
|
||||
printStatusForTopLevelResources(true /*includeDone*/)
|
||||
} else {
|
||||
printStatusForTopLevelResource(status)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Call into Docker to actually suck the progress messages out of pipeReader and display
|
||||
// them to the console.
|
||||
err := jsonmessage.DisplayJSONMessagesToStream(pipeReader, newOutStream(stdout), nil)
|
||||
if err != nil {
|
||||
contract.IgnoreError(err)
|
||||
}
|
||||
}
|
||||
|
||||
func renderProgressEvent(
|
||||
event engine.Event, seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions, isPreview bool) string {
|
||||
|
||||
dispatch := func() string {
|
||||
switch event.Type {
|
||||
case engine.CancelEvent:
|
||||
return ""
|
||||
case engine.PreludeEvent:
|
||||
return renderPreludeEvent(event.Payload.(engine.PreludeEventPayload), opts)
|
||||
case engine.SummaryEvent:
|
||||
return renderSummaryEvent(event.Payload.(engine.SummaryEventPayload), opts)
|
||||
case engine.ResourceOperationFailed:
|
||||
return renderResourceOperationFailedEvent(event.Payload.(engine.ResourceOperationFailedPayload), opts)
|
||||
case engine.ResourceOutputsEvent:
|
||||
return renderProgressResourceOutputsEvent(event.Payload.(engine.ResourceOutputsEventPayload), seen, opts, isPreview)
|
||||
case engine.ResourcePreEvent:
|
||||
return renderProgressResourcePreEvent(event.Payload.(engine.ResourcePreEventPayload), seen, opts, isPreview)
|
||||
case engine.StdoutColorEvent:
|
||||
return ""
|
||||
case engine.DiagEvent:
|
||||
return renderProgressDiagEvent(event.Payload.(engine.DiagEventPayload), opts)
|
||||
default:
|
||||
contract.Failf("unknown event type '%s'", event.Type)
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
msg := dispatch()
|
||||
return opts.Color.Colorize(strings.TrimSpace(msg))
|
||||
}
|
||||
|
||||
func renderProgressDiagEvent(
|
||||
payload engine.DiagEventPayload, opts backend.DisplayOptions) string {
|
||||
if payload.Severity == diag.Debug && !opts.Debug {
|
||||
// If this was a debug diagnostic and we're not displaying debug diagnostics,
|
||||
// then just return empty. Our callers will then filter out this message.
|
||||
return ""
|
||||
}
|
||||
|
||||
return payload.Message
|
||||
}
|
||||
|
||||
func getMetadataSummary(
|
||||
step engine.StepEventMetadata, opts backend.DisplayOptions,
|
||||
isPreview bool, isComplete bool) string {
|
||||
|
||||
out := &bytes.Buffer{}
|
||||
|
||||
if isComplete {
|
||||
writeString(out, getStepCompleteDescription(step.Op, isPreview))
|
||||
} else {
|
||||
writeString(out, getStepDescription(step.Op, isPreview))
|
||||
}
|
||||
writeString(out, colors.Reset)
|
||||
|
||||
if step.Old != nil && step.New != nil && step.Old.Inputs != nil && step.New.Inputs != nil {
|
||||
diff := step.Old.Inputs.Diff(step.New.Inputs)
|
||||
|
||||
if diff != nil {
|
||||
writeString(out, ". Changes:")
|
||||
|
||||
updates := make(resource.PropertyMap)
|
||||
for k := range diff.Updates {
|
||||
updates[k] = resource.PropertyValue{}
|
||||
}
|
||||
|
||||
writePropertyKeys(out, diff.Adds, deploy.OpCreate)
|
||||
writePropertyKeys(out, diff.Deletes, deploy.OpDelete)
|
||||
writePropertyKeys(out, updates, deploy.OpReplace)
|
||||
}
|
||||
}
|
||||
|
||||
fprintIgnoreError(out, colors.Reset)
|
||||
|
||||
return out.String()
|
||||
}
|
||||
|
||||
func getStepCompleteDescription(op deploy.StepOp, isPreview bool) string {
|
||||
if isPreview {
|
||||
return getStepDescription(op, isPreview)
|
||||
}
|
||||
|
||||
getDescription := func() string {
|
||||
switch op {
|
||||
case deploy.OpSame:
|
||||
return "Unchanged"
|
||||
case deploy.OpCreate:
|
||||
return "Created"
|
||||
case deploy.OpUpdate:
|
||||
return "Updated"
|
||||
case deploy.OpDelete:
|
||||
return "Deleted"
|
||||
case deploy.OpReplace:
|
||||
return "Replaced"
|
||||
case deploy.OpCreateReplacement:
|
||||
return "Created for replacement"
|
||||
case deploy.OpDeleteReplaced:
|
||||
return "Deleted for replacement"
|
||||
}
|
||||
|
||||
contract.Failf("Unrecognized resource step op: %v", op)
|
||||
return ""
|
||||
}
|
||||
|
||||
return op.Prefix() + getDescription() + colors.Reset
|
||||
}
|
||||
|
||||
func getStepDescription(op deploy.StepOp, isPreview bool) string {
|
||||
getDescription := func() string {
|
||||
if isPreview {
|
||||
switch op {
|
||||
case deploy.OpSame:
|
||||
return "Would not change"
|
||||
case deploy.OpCreate:
|
||||
return "Would create"
|
||||
case deploy.OpUpdate:
|
||||
return "Would update"
|
||||
case deploy.OpDelete:
|
||||
return "Would delete"
|
||||
case deploy.OpReplace:
|
||||
return "Would replace"
|
||||
case deploy.OpCreateReplacement:
|
||||
return "Would creating for replacement"
|
||||
case deploy.OpDeleteReplaced:
|
||||
return "Would delete for replacement"
|
||||
}
|
||||
} else {
|
||||
switch op {
|
||||
case deploy.OpSame:
|
||||
return "Unchanged"
|
||||
case deploy.OpCreate:
|
||||
return "Creating"
|
||||
case deploy.OpUpdate:
|
||||
return "Updating"
|
||||
case deploy.OpDelete:
|
||||
return "Deleting"
|
||||
case deploy.OpReplace:
|
||||
return "Replacing"
|
||||
case deploy.OpCreateReplacement:
|
||||
return "Creating for replacement"
|
||||
case deploy.OpDeleteReplaced:
|
||||
return "Deleting for replacement"
|
||||
}
|
||||
}
|
||||
|
||||
contract.Failf("Unrecognized resource step op: %v", op)
|
||||
return ""
|
||||
}
|
||||
|
||||
return op.Prefix() + getDescription() + colors.Reset
|
||||
}
|
||||
|
||||
func writePropertyKeys(b *bytes.Buffer, propMap resource.PropertyMap, op deploy.StepOp) {
|
||||
if len(propMap) > 0 {
|
||||
writeString(b, " ")
|
||||
writeString(b, op.Prefix())
|
||||
|
||||
index := 0
|
||||
for k := range propMap {
|
||||
if index != 0 {
|
||||
writeString(b, ",")
|
||||
}
|
||||
writeString(b, string(k))
|
||||
index++
|
||||
}
|
||||
|
||||
writeString(b, colors.Reset)
|
||||
}
|
||||
}
|
||||
|
||||
func renderProgressResourcePreEvent(
|
||||
payload engine.ResourcePreEventPayload,
|
||||
seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions,
|
||||
isPreview bool) string {
|
||||
|
||||
seen[payload.Metadata.URN] = payload.Metadata
|
||||
|
||||
if shouldShow(payload.Metadata, opts) {
|
||||
return getMetadataSummary(payload.Metadata, opts, isPreview, false /*isComplete*/)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func renderProgressResourceOutputsEvent(
|
||||
payload engine.ResourceOutputsEventPayload,
|
||||
seen map[resource.URN]engine.StepEventMetadata,
|
||||
opts backend.DisplayOptions,
|
||||
isPreview bool) string {
|
||||
|
||||
if shouldShow(payload.Metadata, opts) {
|
||||
return getMetadataSummary(payload.Metadata, opts, isPreview, true /*isComplete*/)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func writeString(b *bytes.Buffer, s string) {
|
||||
_, err := b.WriteString(s)
|
||||
contract.IgnoreError(err)
|
||||
}
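End to end, the new view is a pipeline: engine events become Status updates, Status updates become progress.Progress messages on a channel, and a Docker-style renderer turns those into in-place terminal updates. A much-simplified, self-contained sketch of that shape (plain channels and stdout standing in for the Docker jsonmessage machinery):

package main

import (
	"fmt"
	"sync"
)

type progressMsg struct {
	ID     string // which resource row this belongs to
	Action string // what to show for that row
}

func main() {
	progressChan := make(chan progressMsg, 100)
	var wg sync.WaitGroup

	// Renderer: in the real code this is writeDistributionProgress feeding
	// Docker's jsonmessage stream; here we just print each update.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for p := range progressChan {
			fmt.Printf("%-25s %s\n", p.ID, p.Action)
		}
	}()

	// Producer: in the real code these come from engine events via Status objects.
	progressChan <- progressMsg{ID: `aws:Function("hello")`, Action: "Creating..."}
	progressChan <- progressMsg{ID: `aws:Function("hello")`, Action: "Created"}

	close(progressChan) // closing the channel lets the renderer drain and exit
	wg.Wait()
}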
|
|
@@ -50,18 +50,18 @@ func (s *localStack) Remove(force bool) (bool, error) {
}
|
||||
|
||||
func (s *localStack) Preview(proj *workspace.Project, root string,
|
||||
debug bool, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.PreviewStack(s, proj, root, debug, opts, displayOpts)
|
||||
opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.PreviewStack(s, proj, root, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *localStack) Update(proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.UpdateStack(s, proj, root, debug, m, opts, displayOpts)
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.UpdateStack(s, proj, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *localStack) Destroy(proj *workspace.Project, root string,
|
||||
debug bool, m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.DestroyStack(s, proj, root, debug, m, opts, displayOpts)
|
||||
m backend.UpdateMetadata, opts engine.UpdateOptions, displayOpts backend.DisplayOptions) error {
|
||||
return backend.DestroyStack(s, proj, root, m, opts, displayOpts)
|
||||
}
|
||||
|
||||
func (s *localStack) GetLogs(query operations.LogQuery) ([]operations.LogEntry, error) {
|
||||
|
|
|
@@ -0,0 +1,92 @@
// Copyright 2016-2018, Pulumi Corporation. All rights reserved.
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/term"
|
||||
|
||||
"github.com/pulumi/pulumi/pkg/util/contract"
|
||||
)
|
||||
|
||||
// copied from: https://github.com/docker/cli/blob/master/cli/command/out.go
|
||||
// replace with usage of that library when we can figure out the right version story
|
||||
|
||||
type commonStream struct {
|
||||
fd uintptr
|
||||
isTerminal bool
|
||||
state *term.State
|
||||
}
|
||||
|
||||
// FD returns the file descriptor number for this stream
|
||||
func (s *commonStream) FD() uintptr {
|
||||
return s.fd
|
||||
}
|
||||
|
||||
// IsTerminal returns true if this stream is connected to a terminal
|
||||
func (s *commonStream) IsTerminal() bool {
|
||||
return s.isTerminal
|
||||
}
|
||||
|
||||
// RestoreTerminal restores normal mode to the terminal
|
||||
func (s *commonStream) RestoreTerminal() {
|
||||
if s.state != nil {
|
||||
err := term.RestoreTerminal(s.fd, s.state)
|
||||
contract.IgnoreError(err)
|
||||
}
|
||||
}
|
||||
|
||||
// SetIsTerminal sets the boolean used for isTerminal
|
||||
func (s *commonStream) SetIsTerminal(isTerminal bool) {
|
||||
s.isTerminal = isTerminal
|
||||
}
|
||||
|
||||
type outStream struct {
|
||||
commonStream
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (o *outStream) Write(p []byte) (int, error) {
|
||||
return o.out.Write(p)
|
||||
}
|
||||
|
||||
// SetRawTerminal sets raw mode on the input terminal
|
||||
func (o *outStream) SetRawTerminal() (err error) {
|
||||
if os.Getenv("NORAW") != "" || !o.commonStream.isTerminal {
|
||||
return nil
|
||||
}
|
||||
o.commonStream.state, err = term.SetRawTerminalOutput(o.commonStream.fd)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetTtySize returns the height and width in characters of the tty
|
||||
func (o *outStream) GetTtySize() (uint, uint) {
|
||||
if !o.isTerminal {
|
||||
return 0, 0
|
||||
}
|
||||
ws, err := term.GetWinsize(o.fd)
|
||||
if err != nil {
|
||||
if ws == nil {
|
||||
return 0, 0
|
||||
}
|
||||
}
|
||||
return uint(ws.Height), uint(ws.Width)
|
||||
}
|
||||
|
||||
// NewOutStream returns a new OutStream object from a Writer
|
||||
func newOutStream(out io.Writer) *outStream {
|
||||
fd, isTerminal := term.GetFdInfo(out)
|
||||
return &outStream{commonStream: commonStream{fd: fd, isTerminal: isTerminal}, out: out}
|
||||
}
|
||||
|
||||
func writeDistributionProgress(outStream io.Writer, progressChan <-chan progress.Progress) {
|
||||
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
|
||||
|
||||
for prog := range progressChan {
|
||||
writeProgress(progressOutput, prog)
|
||||
}
|
||||
}
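The helpers above are unexported, so any caller has to live in the same package. A minimal sketch of how they might be wired together (the function name and the progress values are hypothetical; the docker progress package is the one imported above):

package local

import (
    "os"

    "github.com/docker/docker/pkg/progress"
)

// exampleProgressToStdout pumps docker-style progress records through the
// copied stream helpers and onto stdout. Real callers feed many records.
func exampleProgressToStdout() {
    out := newOutStream(os.Stdout)

    progressChan := make(chan progress.Progress)
    done := make(chan struct{})
    go func() {
        writeDistributionProgress(out, progressChan)
        close(done)
    }()

    // A single hypothetical record; ID groups updates for one resource row.
    progressChan <- progress.Progress{ID: "res-1", Action: "creating", Current: 1, Total: 3}
    close(progressChan)
    <-done
}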
@@ -21,13 +21,13 @@ type Stack interface {

    // Preview changes to this stack.
    Preview(proj *workspace.Project, root string,
        debug bool, opts engine.UpdateOptions, displayOpts DisplayOptions) error
        opts engine.UpdateOptions, displayOpts DisplayOptions) error
    // Update this stack.
    Update(proj *workspace.Project, root string,
        debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
        m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
    // Destroy this stack's resources.
    Destroy(proj *workspace.Project, root string,
        debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error
        m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error

    Remove(force bool) (bool, error) // remove this stack.
    GetLogs(query operations.LogQuery) ([]operations.LogEntry, error) // list log entries for this stack.

@@ -42,20 +42,20 @@ func RemoveStack(s Stack, force bool) (bool, error) {

// PreviewStack initiates a preview of the current workspace's contents.
func PreviewStack(s Stack, proj *workspace.Project, root string,
    debug bool, opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Preview(s.Name(), proj, root, debug, opts, displayOpts)
    opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Preview(s.Name(), proj, root, opts, displayOpts)
}

// UpdateStack updates the target stack with the current workspace's contents (config and code).
func UpdateStack(s Stack, proj *workspace.Project, root string,
    debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Update(s.Name(), proj, root, debug, m, opts, displayOpts)
    m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Update(s.Name(), proj, root, m, opts, displayOpts)
}

// DestroyStack destroys all of this stack's resources.
func DestroyStack(s Stack, proj *workspace.Project, root string,
    debug bool, m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Destroy(s.Name(), proj, root, debug, m, opts, displayOpts)
    m UpdateMetadata, opts engine.UpdateOptions, displayOpts DisplayOptions) error {
    return s.Backend().Destroy(s.Name(), proj, root, m, opts, displayOpts)
}

// GetStackCrypter fetches the encrypter/decrypter for a stack.

@@ -2,23 +2,28 @@

package diag

import (
    "github.com/pulumi/pulumi/pkg/resource"
)

// ID is a unique diagnostics identifier.
type ID int

// Diag is an instance of an error or warning generated by the compiler.
type Diag struct {
    ID      ID     // a unique identifier for this diagnostic.
    Message string // a human-friendly message for this diagnostic.
    Raw     bool   // true if this diagnostic should not be formatted when displayed.
    URN     resource.URN // Resource this diagnostics is associated with. Empty if not associated with any resource.
    ID      ID           // a unique identifier for this diagnostic.
    Message string       // a human-friendly message for this diagnostic.
    Raw     bool         // true if this diagnostic should not be formatted when displayed.
}

// Message returns an anonymous diagnostic message without any source or ID information.
func Message(msg string) *Diag {
    return &Diag{Message: msg}
func Message(urn resource.URN, msg string) *Diag {
    return &Diag{URN: urn, Message: msg}
}

// RawMessage returns an anonymous diagnostic message without any source or ID information that will not be rendered
// with Sprintf.
func RawMessage(msg string) *Diag {
    return &Diag{Message: msg, Raw: true}
func RawMessage(urn resource.URN, msg string) *Diag {
    return &Diag{URN: urn, Message: msg, Raw: true}
}
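Message and RawMessage now take the URN as their first argument. A minimal sketch of the new call shape (the URN value is hypothetical; cmdutil.Diag() is the CLI-side sink used at other call sites in this commit):

package main

import (
    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/util/cmdutil"
)

func main() {
    // Global message: an empty URN, mirroring the diag.Message("", ...) call sites elsewhere in this diff.
    cmdutil.Diag().Warningf(diag.Message("", "something stack-wide happened: %v"), "details")

    // Resource-scoped message: the URN lets the progress view attach the line to that resource's row.
    urn := resource.URN("urn:pulumi:dev::proj::aws:s3/bucket:Bucket::my-bucket") // hypothetical URN
    cmdutil.Diag().Infof(diag.Message(urn, "created bucket %v"), "my-bucket")
}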
@@ -3,6 +3,7 @@
package diag

import (
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/util/contract"
)

@@ -10,22 +11,35 @@ import (
var errors = make(map[ID]*Diag)

// newError registers a new error message underneath the given unique ID.
func newError(id ID, message string) *Diag {
func newError(urn resource.URN, id ID, message string) *Diag {
    contract.Assert(errors[id] == nil)
    e := &Diag{ID: id, Message: message}
    e := &Diag{URN: urn, ID: id, Message: message}
    errors[id] = e
    return e
}

// Plan and apply errors are in the [2000,3000) range.
var (
    ErrorPlanApplyFailed              = newError(2000, "Plan apply failed: %v")
    ErrorDuplicateResourceURN         = newError(2001, "Duplicate resource URN '%v'; try giving it a unique name")
    ErrorResourceInvalid              = newError(2002, "%v resource '%v' has a problem: %v")
    ErrorResourcePropertyInvalidValue = newError(2003, "%v resource '%v's property '%v' value %v has a problem: %v")
    ErrorAnalyzeResourceFailure       = newError(2004,

func GetPlanApplyFailedError(urn resource.URN) *Diag {
    return newError(urn, 2000, "Plan apply failed: %v")
}

func GetDuplicateResourceURNError(urn resource.URN) *Diag {
    return newError(urn, 2001, "Duplicate resource URN '%v'; try giving it a unique name")
}

func GetResourceInvalidError(urn resource.URN) *Diag {
    return newError(urn, 2002, "%v resource '%v' has a problem: %v")
}

func GetResourcePropertyInvalidValueError(urn resource.URN) *Diag {
    return newError(urn, 2003, "%v resource '%v's property '%v' value %v has a problem: %v")
}

func GetAnalyzeResourceFailureError(urn resource.URN) *Diag {
    return newError(urn, 2004,
        "Analyzer '%v' reported a resource error:\n"+
        "\tResource: %v\n"+
        "\tProperty: %v\n"+
        "\tReason: %v")
)
}
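The error variables become per-URN constructors, and the call sites shown later in this diff pass the URN through. As a standalone sketch of that call shape (the function name is hypothetical; diag.Sink is assumed to be the sink interface these call sites already use):

package example

import (
    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
)

// reportDuplicateURN mirrors the plan-iterator call site further down: the
// constructor captures the URN for the progress display, while the format
// arguments still follow as before.
func reportDuplicateURN(sink diag.Sink, urn resource.URN) {
    sink.Errorf(diag.GetDuplicateResourceURNError(urn), urn)
}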
@@ -65,10 +65,10 @@ func TestEscape(t *testing.T) {
    sink := discardSink()

    // Passing % chars in the argument should not yield %!(MISSING)s.
    s := sink.Stringify(Error, Message("%s"), "lots of %v %s %d chars")
    s := sink.Stringify(Error, Message("", "%s"), "lots of %v %s %d chars")
    assert.Equal(t, "error: lots of %v %s %d chars\n", s)

    // Passing % chars in the format string, on the other hand, should.
    smiss := sink.Stringify(Error, Message("lots of %v %s %d chars"))
    smiss := sink.Stringify(Error, Message("", "lots of %v %s %d chars"))
    assert.Equal(t, "error: lots of %!v(MISSING) %!s(MISSING) %!d(MISSING) chars\n", smiss)
}

@@ -44,6 +44,7 @@ func cancelEvent() Event {

// DiagEventPayload is the payload for an event with type `diag`
type DiagEventPayload struct {
    URN      resource.URN
    Message  string
    Color    colors.Colorization
    Severity diag.Severity

@@ -423,12 +424,13 @@ func (e *eventEmitter) updateSummaryEvent(maybeCorrupt bool,
    }
}

func diagEvent(e *eventEmitter, msg string, sev diag.Severity) {
func diagEvent(e *eventEmitter, urn resource.URN, msg string, sev diag.Severity) {
    contract.Requiref(e != nil, "e", "!= nil")

    e.Chan <- Event{
        Type: DiagEvent,
        Payload: DiagEventPayload{
            URN:      urn,
            Message:  e.Filter.Filter(msg),
            Color:    colors.Raw,
            Severity: sev,

@@ -436,22 +438,22 @@ func diagEvent(e *eventEmitter, msg string, sev diag.Severity) {
    }
}

func (e *eventEmitter) diagDebugEvent(msg string) {
    diagEvent(e, msg, diag.Debug)
func (e *eventEmitter) diagDebugEvent(urn resource.URN, msg string) {
    diagEvent(e, urn, msg, diag.Debug)
}

func (e *eventEmitter) diagInfoEvent(msg string) {
    diagEvent(e, msg, diag.Info)
func (e *eventEmitter) diagInfoEvent(urn resource.URN, msg string) {
    diagEvent(e, urn, msg, diag.Info)
}

func (e *eventEmitter) diagInfoerrEvent(msg string) {
    diagEvent(e, msg, diag.Infoerr)
func (e *eventEmitter) diagInfoerrEvent(urn resource.URN, msg string) {
    diagEvent(e, urn, msg, diag.Infoerr)
}

func (e *eventEmitter) diagErrorEvent(msg string) {
    diagEvent(e, msg, diag.Error)
func (e *eventEmitter) diagErrorEvent(urn resource.URN, msg string) {
    diagEvent(e, urn, msg, diag.Error)
}

func (e *eventEmitter) diagWarningEvent(msg string) {
    diagEvent(e, msg, diag.Warning)
func (e *eventEmitter) diagWarningEvent(urn resource.URN, msg string) {
    diagEvent(e, urn, msg, diag.Warning)
}
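Because the diag event payload now carries the URN, a display layer can group messages per resource instead of printing one global stream. A minimal consumer sketch (the function and map names are hypothetical; it assumes Event.Payload is the usual interface{} payload the emitter above assigns):

package example

import (
    "github.com/pulumi/pulumi/pkg/engine"
    "github.com/pulumi/pulumi/pkg/resource"
)

// routeDiagEvent appends a diagnostic line to the row of the resource it
// belongs to; an empty URN lands in the "global" bucket.
func routeDiagEvent(e engine.Event, rows map[resource.URN][]string) {
    if e.Type == engine.DiagEvent {
        payload := e.Payload.(engine.DiagEventPayload)
        rows[payload.URN] = append(rows[payload.URN], payload.Message)
    }
}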
@@ -58,7 +58,7 @@ func (s *eventSink) Debugf(d *diag.Diag, args ...interface{}) {
    if glog.V(9) {
        glog.V(9).Infof("eventSink::Debug(%v)", msg[:len(msg)-1])
    }
    s.events.diagDebugEvent(msg)
    s.events.diagDebugEvent(d.URN, msg)
    s.incrementCount(diag.Debug)
}

@@ -67,7 +67,7 @@ func (s *eventSink) Infof(d *diag.Diag, args ...interface{}) {
    if glog.V(5) {
        glog.V(5).Infof("eventSink::Info(%v)", msg[:len(msg)-1])
    }
    s.events.diagInfoEvent(msg)
    s.events.diagInfoEvent(d.URN, msg)
    s.incrementCount(diag.Info)
}

@@ -76,7 +76,7 @@ func (s *eventSink) Infoerrf(d *diag.Diag, args ...interface{}) {
    if glog.V(5) {
        glog.V(5).Infof("eventSink::Infoerr(%v)", msg[:len(msg)-1])
    }
    s.events.diagInfoerrEvent(msg)
    s.events.diagInfoerrEvent(d.URN, msg)
    s.incrementCount(diag.Infoerr)
}

@@ -85,7 +85,7 @@ func (s *eventSink) Errorf(d *diag.Diag, args ...interface{}) {
    if glog.V(5) {
        glog.V(5).Infof("eventSink::Error(%v)", msg[:len(msg)-1])
    }
    s.events.diagErrorEvent(msg)
    s.events.diagErrorEvent(d.URN, msg)
    s.incrementCount(diag.Error)
}

@@ -94,7 +94,7 @@ func (s *eventSink) Warningf(d *diag.Diag, args ...interface{}) {
    if glog.V(5) {
        glog.V(5).Infof("eventSink::Warning(%v)", msg[:len(msg)-1])
    }
    s.events.diagWarningEvent(msg)
    s.events.diagWarningEvent(d.URN, msg)
    s.incrementCount(diag.Warning)
}

@@ -170,7 +170,7 @@ func (acts *updateActions) OnResourceStepPost(ctx interface{},
    }

    // Issue a true, bonafide error.
    acts.Opts.Diag.Errorf(diag.ErrorPlanApplyFailed, err)
    acts.Opts.Diag.Errorf(diag.GetPlanApplyFailedError(step.URN()), err)
    acts.Opts.Events.resourceOperationFailedEvent(step, status, acts.Steps, acts.Opts.Debug)
} else {
    if step.Logical() {

@@ -240,7 +240,7 @@ func (iter *PlanIterator) makeRegisterResouceSteps(e RegisterResourceEvent) ([]S
    if iter.urns[urn] {
        invalid = true
        // TODO[pulumi/pulumi-framework#19]: improve this error message!
        iter.p.Diag().Errorf(diag.ErrorDuplicateResourceURN, urn)
        iter.p.Diag().Errorf(diag.GetDuplicateResourceURNError(urn), urn)
    }
    iter.urns[urn] = true

@@ -299,7 +299,8 @@ func (iter *PlanIterator) makeRegisterResouceSteps(e RegisterResourceEvent) ([]S
    }
    for _, failure := range failures {
        invalid = true
        iter.p.Diag().Errorf(diag.ErrorAnalyzeResourceFailure, a, urn, failure.Property, failure.Reason)
        iter.p.Diag().Errorf(
            diag.GetAnalyzeResourceFailureError(urn), a, urn, failure.Property, failure.Reason)
    }
}

@@ -430,10 +431,11 @@ func (iter *PlanIterator) issueCheckErrors(new *resource.State, urn resource.URN
    inputs := new.Inputs
    for _, failure := range failures {
        if failure.Property != "" {
            iter.p.Diag().Errorf(diag.ErrorResourcePropertyInvalidValue,
            iter.p.Diag().Errorf(diag.GetResourcePropertyInvalidValueError(urn),
                new.Type, urn.Name(), failure.Property, inputs[failure.Property], failure.Reason)
        } else {
            iter.p.Diag().Errorf(diag.ErrorResourceInvalid, new.Type, urn.Name(), failure.Reason)
            iter.p.Diag().Errorf(
                diag.GetResourceInvalidError(urn), new.Type, urn.Name(), failure.Reason)
        }
    }
    return true

@@ -359,8 +359,8 @@ func (host *testProviderHost) ServerAddr() string {
    contract.Failf("Host RPC address not available")
    return ""
}
func (host *testProviderHost) Log(sev diag.Severity, msg string) {
    cmdutil.Diag().Logf(sev, diag.RawMessage(msg))
func (host *testProviderHost) Log(sev diag.Severity, urn resource.URN, msg string) {
    cmdutil.Diag().Logf(sev, diag.RawMessage(urn, msg))
}
func (host *testProviderHost) ReadLocation(tok tokens.Token) (resource.PropertyValue, error) {
    return resource.PropertyValue{}, errors.New("Invalid location")

@@ -413,7 +413,7 @@ func (op StepOp) Color() string {
    case OpDeleteReplaced:
        return colors.SpecDeleteReplaced
    default:
        contract.Failf("Unrecognized resource step op: %v", op)
        contract.Failf("Unrecognized resource step op: '%v'", op)
        return ""
    }
}

@@ -9,6 +9,7 @@ import (
    "github.com/pkg/errors"

    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/resource/config"
    "github.com/pulumi/pulumi/pkg/tokens"
    "github.com/pulumi/pulumi/pkg/util/contract"

@@ -20,8 +21,9 @@ type Host interface {
    // ServerAddr returns the address at which the host's RPC interface may be found.
    ServerAddr() string

    // Log logs a global message, including errors and warnings.
    Log(sev diag.Severity, msg string)
    // Log logs a message, including errors and warnings. Messages can have a resource URN
    // associated with them. If no urn is provided, the message is global.
    Log(sev diag.Severity, urn resource.URN, msg string)

    // Analyzer fetches the analyzer with a given name, possibly lazily allocating the plugins for it. If an analyzer
    // could not be found, or an error occurred while creating it, a non-nil error is returned.

@@ -109,8 +111,8 @@ func (host *defaultHost) ServerAddr() string {
    return host.server.Address()
}

func (host *defaultHost) Log(sev diag.Severity, msg string) {
    host.ctx.Diag.Logf(sev, diag.RawMessage(msg))
func (host *defaultHost) Log(sev diag.Severity, urn resource.URN, msg string) {
    host.ctx.Diag.Logf(sev, diag.RawMessage(urn, msg))
}

// loadPlugin sends an appropriate load request to the plugin loader and returns the loaded plugin (if any) and error.

@@ -196,7 +198,7 @@ func (host *defaultHost) Provider(pkg tokens.Package, version *semver.Version) (
        v = info.Version.String()
    }
    host.ctx.Diag.Warningf(
        diag.Message(
        diag.Message("", /*urn*/
            "resource plugin %s is expected to have version >=%s, but has %s; "+
            "the wrong version may be on your path, or this may be a bug in the plugin"),
        info.Name, version.String(), v)
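With the interface change above, every Host.Log call now says which resource it is talking about. A minimal caller-side sketch (the function name is hypothetical; the plugin import path is the pkg/resource/plugin package these hunks come from):

package example

import (
    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/resource/plugin"
)

// logBoth shows the two shapes: an empty URN keeps the old "global" behavior,
// while a real URN lets the progress display attach the line to that resource.
func logBoth(host plugin.Host, urn resource.URN) {
    host.Log(diag.Warning, "", "stack-wide warning, not tied to any resource")
    host.Log(diag.Info, urn, "waiting for the resource to become ready")
}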
@@ -11,6 +11,7 @@ import (
    "google.golang.org/grpc"

    "github.com/pulumi/pulumi/pkg/diag"
    "github.com/pulumi/pulumi/pkg/resource"
    "github.com/pulumi/pulumi/pkg/util/rpcutil"
    lumirpc "github.com/pulumi/pulumi/sdk/proto/go"
)

@@ -62,8 +63,7 @@ func (eng *hostServer) Cancel() error {
}

// Log logs a global message in the engine, including errors and warnings.
func (eng *hostServer) Log(ctx context.Context,
    req *lumirpc.LogRequest) (*pbempty.Empty, error) {
func (eng *hostServer) Log(ctx context.Context, req *lumirpc.LogRequest) (*pbempty.Empty, error) {
    var sev diag.Severity
    switch req.Severity {
    case lumirpc.LogSeverity_DEBUG:

@@ -77,6 +77,6 @@ func (eng *hostServer) Log(ctx context.Context,
    default:
        return nil, errors.Errorf("Unrecognized logging severity: %v", req.Severity)
    }
    eng.host.Log(sev, req.Message)
    eng.host.Log(sev, resource.URN(req.Urn), req.Message)
    return &pbempty.Empty{}, nil
}

@@ -97,9 +97,9 @@ func newPlugin(ctx *Context, bin string, prefix string, args []string) (*plugin,
        }
        msg := line[:len(line)-1]
        if stderr {
            ctx.Diag.Infoerrf(diag.RawMessage(msg))
            ctx.Diag.Infoerrf(diag.RawMessage("" /*urn*/, msg))
        } else {
            ctx.Diag.Infof(diag.RawMessage(msg))
            ctx.Diag.Infof(diag.RawMessage("" /*urn*/, msg))
        }
    }
    close(done)

@@ -73,12 +73,12 @@ func Exit(err error) {

// ExitError issues an error and exits with a standard error exit code.
func ExitError(msg string, args ...interface{}) {
    ExitErrorCode(-1, msg, args...)
    exitErrorCode(-1, msg, args...)
}

// ExitErrorCode issues an error and exists with the given error exit code.
func ExitErrorCode(code int, msg string, args ...interface{}) {
    Diag().Errorf(diag.Message(msg), args...)
// exitErrorCode issues an error and exists with the given error exit code.
func exitErrorCode(code int, msg string, args ...interface{}) {
    Diag().Errorf(diag.Message("", msg), args...)
    os.Exit(code)
}

@@ -11,11 +11,12 @@ import (
    "golang.org/x/crypto/ssh/terminal"
)

// NewSpinnerAndTicker returns a new Spinner and a ticker that will fire an event when the next call to Spinner.Tick()
// should be called. NewSpinnerAndTicket takes into account if stdout is connected to a tty or not and returns either
// a nice animated spinner that updates quickly, using the specified ttyFrames, or a simple spinner that just prints a
// dot on each tick and updates slowly.
func NewSpinnerAndTicker(prefix string, ttyFrames []string) (Spinner, *time.Ticker) {
// NewSpinnerAndTicker returns a new Spinner and a ticker that will fire an event when the next call
// to Spinner.Tick() should be called. NewSpinnerAndTicket takes into account if stdout is
// connected to a tty or not and returns either a nice animated spinner that updates quickly, using
// the specified ttyFrames, or a simple spinner that just prints a dot on each tick and updates
// slowly.
func NewSpinnerAndTicker(prefix string, ttyFrames []string, timesPerSecond time.Duration) (Spinner, *time.Ticker) {
    if ttyFrames == nil {
        // If explicit tick frames weren't specified, default to unicode for Mac and ASCII for Windows/Linux.
        if Emoji {

@@ -29,7 +30,7 @@ func NewSpinnerAndTicker(prefix string, ttyFrames []string) (Spinner, *time.Tick
        return &ttySpinner{
            prefix: prefix,
            frames: ttyFrames,
        }, time.NewTicker(time.Second / 8)
        }, time.NewTicker(time.Second / timesPerSecond)
    }

    return &dotSpinner{
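A caller-side sketch of the new signature (the prefix string and driver loop are hypothetical, and the cmdutil import path is assumed; passing 8 reproduces the tick rate the old code hard-coded):

package example

import (
    "github.com/pulumi/pulumi/pkg/util/cmdutil"
)

// runSpinner drives the spinner until done is closed; timesPerSecond is now a
// parameter, so the progress view can tick at a rate of its choosing.
func runSpinner(done <-chan struct{}) {
    spinner, ticker := cmdutil.NewSpinnerAndTicker("pulumi:updating", nil /*ttyFrames*/, 8)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            spinner.Tick()
        case <-done:
            return
        }
    }
}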
@@ -5,6 +5,7 @@
import * as fs from "fs";
import * as minimist from "minimist";
import * as path from "path";
import * as util from "util";
import * as pulumi from "../../";
import { RunError } from "../../errors";
import * as log from "../../log";

@@ -214,7 +215,7 @@ export function main(args: string[]): void {
    }
    else {
        log.error(`Running program '${program}' failed with an unhandled exception:`);
        log.error(err);
        log.error(util.format(err));
    }

    // Remember that we failed with an error. Don't quit just yet so we have a chance to drain the message loop.

@@ -1,5 +1,6 @@
// Copyright 2016-2018, Pulumi Corporation. All rights reserved.

import * as util from "util";
import { RunError } from "./errors";
import * as log from "./log";
import * as runtime from "./runtime";

@@ -22,9 +23,9 @@ export class Config {
    // just new Config("<package>") was called.
    if (name.endsWith(":config")) {
        const newName = name.replace(/:config$/, "");
        log.warn("`:config` is no longer required at the end of configuration " +
        log.warn(util.format("`:config` is no longer required at the end of configuration " +
            "bag names and support will be removed in a future version, please " +
            "use new Config(\"%s\") instead.", newName);
            "use new Config(\"%s\") instead.", newName));
        name = newName;
    }

@@ -3,6 +3,7 @@
// The log module logs messages in a way that tightly integrates with the resource engine's interface.

import * as util from "util";
import * as resourceTypes from "../resource";
import { getEngine, rpcKeepAlive } from "../runtime/settings";
const engproto = require("../proto/engine_pb.js");

@@ -19,11 +20,10 @@ export function hasErrors(): boolean {
/**
 * debug logs a debug-level message that is generally hidden from end-users.
 */
export function debug(format: any, ...args: any[]): void {
    const msg: string = util.format(format, ...args);
export async function debug(msg: string, resource?: resourceTypes.Resource) {
    const engine: Object | undefined = getEngine();
    if (engine) {
        log(engine, engproto.LogSeverity.DEBUG, msg);
        log(engine, engproto.LogSeverity.DEBUG, msg, resource);
    }
    else {
        // ignore debug messages when no engine is available.

@@ -33,57 +33,66 @@ export function debug(format: any, ...args: any[]): void {
/**
 * info logs an informational message that is generally printed to stdout during resource operations.
 */
export function info(format: any, ...args: any[]): void {
    const msg: string = util.format(format, ...args);
export function info(msg: string, resource?: resourceTypes.Resource) {
    const engine: Object | undefined = getEngine();
    if (engine) {
        log(engine, engproto.LogSeverity.INFO, msg);
        return log(engine, engproto.LogSeverity.INFO, msg, resource);
    }
    else {
        console.log(`info: [runtime] ${msg}`);
        return Promise.resolve();
    }
}

/**
 * warn logs a warning to indicate that something went wrong, but not catastrophically so.
 */
export function warn(format: any, ...args: any[]): void {
    const msg: string = util.format(format, ...args);
export function warn(msg: string, resource?: resourceTypes.Resource) {
    const engine: Object | undefined = getEngine();
    if (engine) {
        log(engine, engproto.LogSeverity.WARNING, msg);
        return log(engine, engproto.LogSeverity.WARNING, msg, resource);
    }
    else {
        console.warn(`warning: [runtime] ${msg}`);
        return Promise.resolve();
    }
}

/**
 * error logs a fatal error to indicate that the tool should stop processing resource operations immediately.
 */
export function error(format: any, ...args: any[]): void {
export function error(msg: string, resource?: resourceTypes.Resource) {
    errcnt++; // remember the error so we can suppress leaks.

    const msg: string = util.format(format, ...args);
    const engine: Object | undefined = getEngine();
    if (engine) {
        log(engine, engproto.LogSeverity.ERROR, msg);
        return log(engine, engproto.LogSeverity.ERROR, msg, resource);
    }
    else {
        console.error(`error: [runtime] ${msg}`);
        return Promise.resolve();
    }
}

export function log(engine: any, sev: any, format: any, ...args: any[]): void {
function log(
    engine: any, sev: any, msg: string,
    resource: resourceTypes.Resource | undefined): Promise<void> {

    // Ensure we log everything in serial order.
    const msg: string = util.format(format, ...args);
    const keepAlive: () => void = rpcKeepAlive();
    lastLog = lastLog.then(() => {

    const urnPromise = resource
        ? resource.urn.promise()
        : Promise.resolve("");

    lastLog = Promise.all([lastLog, urnPromise]).then(arr => {
        return new Promise((resolve, reject) => {
            try {
                const urn = arr[1];
                const req = new engproto.LogRequest();
                req.setSeverity(sev);
                req.setMessage(msg);
                req.setUrn(urn);
                engine.log(req, () => {
                    resolve(); // let the next log through
                    keepAlive(); // permit RPC channel tear-downs

@@ -94,5 +103,7 @@ export function log(engine: any, sev: any, format: any, ...args: any[]): void {
            }
        });
    });

    return lastLog;
}

@@ -62,7 +62,8 @@ proto.pulumirpc.LogRequest.prototype.toObject = function(opt_includeInstance) {
proto.pulumirpc.LogRequest.toObject = function(includeInstance, msg) {
    var f, obj = {
        severity: jspb.Message.getFieldWithDefault(msg, 1, 0),
        message: jspb.Message.getFieldWithDefault(msg, 2, "")
        message: jspb.Message.getFieldWithDefault(msg, 2, ""),
        urn: jspb.Message.getFieldWithDefault(msg, 3, "")
    };

    if (includeInstance) {

@@ -107,6 +108,10 @@ proto.pulumirpc.LogRequest.deserializeBinaryFromReader = function(msg, reader) {
    var value = /** @type {string} */ (reader.readString());
    msg.setMessage(value);
    break;
case 3:
    var value = /** @type {string} */ (reader.readString());
    msg.setUrn(value);
    break;
default:
    reader.skipField();
    break;

@@ -150,6 +155,13 @@ proto.pulumirpc.LogRequest.serializeBinaryToWriter = function(message, writer) {
        f
    );
}
f = message.getUrn();
if (f.length > 0) {
    writer.writeString(
        3,
        f
    );
}
};


@@ -183,6 +195,21 @@ proto.pulumirpc.LogRequest.prototype.setMessage = function(value) {
};


/**
 * optional string urn = 3;
 * @return {string}
 */
proto.pulumirpc.LogRequest.prototype.getUrn = function() {
    return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 3, ""));
};


/** @param {string} value */
proto.pulumirpc.LogRequest.prototype.setUrn = function(value) {
    jspb.Message.setProto3StringField(this, 3, value);
};


/**
 * @enum {number}
 */

@@ -176,7 +176,9 @@ class SerializedOutput<T> implements resource.Output<T> {
 * function's source code, suitable for execution. Unlike toString, it actually includes information
 * about the captured environment.
 */
export async function createFunctionInfoAsync(func: Function, serialize: (o: any) => boolean): Promise<FunctionInfo> {
export async function createFunctionInfoAsync(
    func: Function, serialize: (o: any) => boolean): Promise<FunctionInfo> {

    const context: Context = {
        cache: new Map(),
        classInstanceMemberToSuperEntry: new Map(),

@@ -21,6 +21,7 @@ enum LogSeverity {
}

message LogRequest {
    LogSeverity severity = 1; // the logging level of this message.
    string message = 2; // the contents of the logged message.
    LogSeverity severity = 1; // the logging level of this message.
    string message = 2; // the contents of the logged message.
    string urn = 3; // the (optional) resource urn this log is associated with.
}
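On the Go side the generated LogRequest (shown below) simply gains a Urn field. A client-side sketch of filling it in (the helper name is hypothetical; lumirpc is the alias this diff already uses for the generated package):

package example

import (
    lumirpc "github.com/pulumi/pulumi/sdk/proto/go"
)

// newResourceLog builds a log entry tied to a resource; an empty urn keeps the
// old "global" meaning described by the proto comment above.
func newResourceLog(urn, msg string) *lumirpc.LogRequest {
    return &lumirpc.LogRequest{
        Severity: lumirpc.LogSeverity_INFO,
        Message:  msg,
        Urn:      urn,
    }
}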
@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: analyzer.proto
// DO NOT EDIT!

/*
Package pulumirpc is a generated protocol buffer package.

@@ -255,7 +254,7 @@ func init() { proto.RegisterFile("analyzer.proto", fileDescriptor0) }

var fileDescriptor0 = []byte{
// 287 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x1b, 0x95, 0xda, 0x4e, 0xb5, 0xc2, 0x80, 0xb5, 0xae, 0x1e, 0x42, 0x4e, 0x39, 0x6d,
0x21, 0x22, 0x5e, 0x55, 0xfc, 0x7b, 0x93, 0x78, 0xf6, 0x90, 0x96, 0x49, 0x08, 0xa4, 0xd9, 0x75,
0xff, 0x1c, 0xe2, 0xa7, 0xf0, 0x23, 0x8b, 0xbb, 0x6b, 0x2c, 0xda, 0xdb, 0x0c, 0xef, 0xf1, 0x9b,

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: engine.proto
// DO NOT EDIT!

package pulumirpc

@@ -50,6 +49,7 @@ func (LogSeverity) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []
type LogRequest struct {
    Severity LogSeverity `protobuf:"varint,1,opt,name=severity,enum=pulumirpc.LogSeverity" json:"severity,omitempty"`
    Message  string      `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
    Urn      string      `protobuf:"bytes,3,opt,name=urn" json:"urn,omitempty"`
}

func (m *LogRequest) Reset() { *m = LogRequest{} }

@@ -71,6 +71,13 @@ func (m *LogRequest) GetMessage() string {
    return ""
}

func (m *LogRequest) GetUrn() string {
    if m != nil {
        return m.Urn
    }
    return ""
}

func init() {
    proto.RegisterType((*LogRequest)(nil), "pulumirpc.LogRequest")
    proto.RegisterEnum("pulumirpc.LogSeverity", LogSeverity_name, LogSeverity_value)

@@ -153,20 +160,20 @@ var _Engine_serviceDesc = grpc.ServiceDesc{
func init() { proto.RegisterFile("engine.proto", fileDescriptor1) }

var fileDescriptor1 = []byte{
// 227 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0xcd, 0x4b, 0xcf,
0xcc, 0x4b, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x28, 0xcd, 0x29, 0xcd, 0xcd,
0x2c, 0x2a, 0x48, 0x96, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x4b, 0x24, 0x95,
0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0xd4, 0x29, 0x45, 0x71, 0x71, 0xf9, 0xe4, 0xa7,
0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, 0x19, 0x71, 0x71, 0x14, 0xa7, 0x96, 0xa5, 0x16,
0x65, 0x96, 0x54, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x89, 0xe9, 0xc1, 0x0d, 0xd2, 0xf3,
0xc9, 0x4f, 0x0f, 0x86, 0xca, 0x06, 0xc1, 0xd5, 0x09, 0x49, 0x70, 0xb1, 0xe7, 0xa6, 0x16, 0x17,
0x27, 0xa6, 0xa7, 0x4a, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x5a, 0x56, 0x5c, 0xdc,
0x48, 0x5a, 0x84, 0x38, 0xb9, 0x58, 0x5d, 0x5c, 0x9d, 0x42, 0xdd, 0x05, 0x18, 0x84, 0x38, 0xb8,
0x58, 0x3c, 0xfd, 0xdc, 0xfc, 0x05, 0x18, 0x85, 0xb8, 0xb9, 0xd8, 0xc3, 0x1d, 0x83, 0xfc, 0x3c,
0xfd, 0xdc, 0x05, 0x98, 0x40, 0x2a, 0x5c, 0x83, 0x82, 0xfc, 0x83, 0x04, 0x98, 0x8d, 0x1c, 0xb8,
0xd8, 0x5c, 0xc1, 0xfe, 0x11, 0x32, 0xe3, 0x62, 0xf6, 0xc9, 0x4f, 0x17, 0x12, 0x45, 0x75, 0x08,
0xd4, 0xc5, 0x52, 0x62, 0x7a, 0x10, 0xdf, 0xe9, 0xc1, 0x7c, 0xa7, 0xe7, 0x0a, 0xf2, 0x9d, 0x12,
0x43, 0x12, 0x1b, 0x58, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x7e, 0x1b, 0x5c, 0x18,
0x01, 0x00, 0x00,
// 239 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x4b, 0x4b, 0xc3, 0x40,
0x14, 0x85, 0x9b, 0x46, 0xdb, 0xe6, 0x56, 0x64, 0xb8, 0x60, 0x09, 0x75, 0x53, 0xba, 0x2a, 0x2e,
0xa6, 0x10, 0xc1, 0x85, 0x2b, 0x15, 0x63, 0x29, 0x84, 0x14, 0x46, 0xc4, 0xb5, 0x95, 0xeb, 0x10,
0x48, 0x32, 0xe3, 0x3c, 0x84, 0xfe, 0x7b, 0xc9, 0xf4, 0x81, 0xee, 0x66, 0xce, 0xf9, 0x38, 0xdc,
0x0f, 0x2e, 0xa8, 0x95, 0x55, 0x4b, 0x5c, 0x1b, 0xe5, 0x14, 0x26, 0xda, 0xd7, 0xbe, 0xa9, 0x8c,
0xfe, 0x9c, 0x5e, 0x4b, 0xa5, 0x64, 0x4d, 0xcb, 0x50, 0x6c, 0xfd, 0xd7, 0x92, 0x1a, 0xed, 0x76,
0x7b, 0x6e, 0x5e, 0x03, 0x14, 0x4a, 0x0a, 0xfa, 0xf6, 0x64, 0x1d, 0x66, 0x30, 0xb2, 0xf4, 0x43,
0xa6, 0x72, 0xbb, 0x34, 0x9a, 0x45, 0x8b, 0xcb, 0x6c, 0xc2, 0x4f, 0x43, 0xbc, 0x50, 0xf2, 0xf5,
0xd0, 0x8a, 0x13, 0x87, 0x29, 0x0c, 0x1b, 0xb2, 0xf6, 0x43, 0x52, 0xda, 0x9f, 0x45, 0x8b, 0x44,
0x1c, 0xbf, 0xc8, 0x20, 0xf6, 0xa6, 0x4d, 0xe3, 0x90, 0x76, 0xcf, 0x9b, 0x7b, 0x18, 0xff, 0x19,
0xc1, 0x04, 0xce, 0x9f, 0xf3, 0xa7, 0xb7, 0x15, 0xeb, 0xe1, 0x08, 0xce, 0xd6, 0xe5, 0xcb, 0x86,
0x45, 0x38, 0x86, 0xe1, 0xfb, 0xa3, 0x28, 0xd7, 0xe5, 0x8a, 0xf5, 0x3b, 0x22, 0x17, 0x62, 0x23,
0x58, 0x9c, 0x3d, 0xc0, 0x20, 0x0f, 0x86, 0x78, 0x07, 0x71, 0xa1, 0x24, 0x5e, 0xfd, 0x3f, 0xed,
0xe0, 0x30, 0x9d, 0xf0, 0xbd, 0x2f, 0x3f, 0xfa, 0xf2, 0xbc, 0xf3, 0x9d, 0xf7, 0xb6, 0x83, 0x90,
0xdc, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x14, 0xb9, 0x67, 0xe2, 0x2a, 0x01, 0x00, 0x00,
}

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: errors.proto
// DO NOT EDIT!

package pulumirpc

@@ -45,7 +44,7 @@ func init() { proto.RegisterFile("errors.proto", fileDescriptor2) }

var fileDescriptor2 = []byte{
// 106 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2d, 0x2a, 0xca,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x49, 0x2d, 0x2a, 0xca,
0x2f, 0x2a, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x28, 0xcd, 0x29, 0xcd, 0xcd,
0x2c, 0x2a, 0x48, 0x56, 0x72, 0xe3, 0xe2, 0x72, 0x05, 0x49, 0x39, 0x27, 0x96, 0x16, 0xa7, 0x0a,
0x49, 0x70, 0xb1, 0xe7, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70,

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: language.proto
// DO NOT EDIT!

package pulumirpc

@@ -320,7 +319,7 @@ func init() { proto.RegisterFile("language.proto", fileDescriptor3) }

var fileDescriptor3 = []byte{
// 451 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6e, 0xd4, 0x30,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x6e, 0xd4, 0x30,
0x10, 0xc6, 0x9b, 0xa6, 0xfb, 0x6f, 0x16, 0x5a, 0x64, 0xb5, 0x2b, 0x93, 0x5e, 0x42, 0x00, 0x91,
0x53, 0x2a, 0x15, 0x81, 0x28, 0x27, 0x10, 0x54, 0x15, 0x12, 0x07, 0x64, 0x1e, 0x00, 0xb9, 0xc9,
0x6c, 0x14, 0x9a, 0xd8, 0xc6, 0xb1, 0x41, 0x79, 0x4a, 0x5e, 0x85, 0x47, 0x40, 0xb1, 0x93, 0x65,

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: plugin.proto
// DO NOT EDIT!

package pulumirpc

@@ -72,7 +71,7 @@ func init() { proto.RegisterFile("plugin.proto", fileDescriptor4) }

var fileDescriptor4 = []byte{
// 130 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d,
0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2c, 0x28, 0xcd, 0x29, 0xcd, 0xcd,
0x2c, 0x2a, 0x48, 0x56, 0x52, 0xe3, 0xe2, 0x0a, 0x00, 0x4b, 0x79, 0xe6, 0xa5, 0xe5, 0x0b, 0x49,
0x70, 0xb1, 0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06,

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: provider.proto
// DO NOT EDIT!

package pulumirpc

@@ -886,7 +885,7 @@ func init() { proto.RegisterFile("provider.proto", fileDescriptor5) }

var fileDescriptor5 = []byte{
// 839 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x56, 0xdd, 0x6e, 0xc3, 0x34,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xdd, 0x6e, 0xc3, 0x34,
0x14, 0x6e, 0xda, 0xae, 0x6b, 0x4f, 0x7f, 0x14, 0x19, 0xd8, 0xba, 0x8c, 0x8b, 0x29, 0xdc, 0x4c,
0x20, 0xa5, 0xa8, 0xbb, 0xe0, 0x47, 0x9b, 0x40, 0xdd, 0xba, 0x51, 0x4d, 0xeb, 0x46, 0xa6, 0x81,
0xe0, 0x06, 0x65, 0x8d, 0xdb, 0x85, 0xa6, 0x71, 0x70, 0x9c, 0xa2, 0xf2, 0x06, 0x88, 0x37, 0xe0,

@@ -1,6 +1,5 @@
// Code generated by protoc-gen-go.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: resource.proto
// DO NOT EDIT!

package pulumirpc

@@ -417,7 +416,7 @@ func init() { proto.RegisterFile("resource.proto", fileDescriptor6) }

var fileDescriptor6 = []byte{
// 472 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x54, 0x3b, 0x8f, 0xd3, 0x40,
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x3b, 0x8f, 0xd3, 0x40,
0x10, 0x3e, 0xdb, 0x87, 0x43, 0x86, 0x53, 0x38, 0x0d, 0x28, 0x67, 0x0c, 0x3a, 0x22, 0xd3, 0x84,
0xc6, 0x11, 0x47, 0x41, 0x49, 0x45, 0x41, 0x81, 0x10, 0xa6, 0x06, 0xc9, 0xb1, 0x87, 0xc8, 0x90,
0x78, 0x97, 0x7d, 0x9c, 0x74, 0x7f, 0x06, 0xfe, 0x1a, 0x05, 0x3f, 0x04, 0xad, 0xd7, 0x1b, 0xe2,

@@ -25,7 +25,7 @@ func TestDiffs(t *testing.T) {
    Dependencies: []string{"@pulumi/pulumi"},
    Quick: true,
    StackName: "diffstack",
    UpdateCommandlineFlags: []string{"--color=raw"},
    UpdateCommandlineFlags: []string{"--color=raw", "--diff"},
    ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
        assert.NotNil(t, stack.Deployment)
        assert.Equal(t, 5, len(stack.Deployment.Resources))