package lifecycletest

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"testing"

	"github.com/blang/semver"
	combinations "github.com/mxschmitt/golang-combinations"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/pulumi/pulumi/pkg/v3/display"
	. "github.com/pulumi/pulumi/pkg/v3/engine" //nolint:revive
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy/deploytest"
	"github.com/pulumi/pulumi/pkg/v3/resource/deploy/providers"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
	"github.com/pulumi/pulumi/sdk/v3/go/common/resource/plugin"
	"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
	"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
)

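// TestDestroyTarget exercises targeted destroys against the complex test
// dependency graph, both for small subsets of resources and for the root
// resource "A", with and without targetDependents.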
func TestDestroyTarget(t *testing.T) {
	t.Parallel()

	// Try destroying a stack with combinations of the above resources as targets.
	subsets := combinations.All(complexTestDependencyGraphNames)

	//nolint:paralleltest // false positive because range var isn't used directly in t.Run(name) arg
	for _, subset := range subsets {
		subset := subset
		// Limit to subsets of up to 3 resources to destroy. This keeps the test running time under
		// control as it only generates a few hundred combinations instead of several thousand.
		if len(subset) <= 3 {
			t.Run(fmt.Sprintf("%v", subset), func(t *testing.T) {
				t.Parallel()

				destroySpecificTargets(t, subset, true, /*targetDependents*/
					func(urns []resource.URN, deleted map[resource.URN]bool) {})
			})
		}
	}

	t.Run("destroy root", func(t *testing.T) {
		t.Parallel()

		destroySpecificTargets(
			t, []string{"A"}, true, /*targetDependents*/
			func(urns []resource.URN, deleted map[resource.URN]bool) {
				// When deleting 'A' we expect A, B, C, D, E, F, G, H, I, J, K, and L to be deleted.
				names := complexTestDependencyGraphNames
				assert.Equal(t, map[resource.URN]bool{
					pickURN(t, urns, names, "A"): true,
					pickURN(t, urns, names, "B"): true,
					pickURN(t, urns, names, "C"): true,
					pickURN(t, urns, names, "D"): true,
					pickURN(t, urns, names, "E"): true,
					pickURN(t, urns, names, "F"): true,
					pickURN(t, urns, names, "G"): true,
					pickURN(t, urns, names, "H"): true,
					pickURN(t, urns, names, "I"): true,
					pickURN(t, urns, names, "J"): true,
					pickURN(t, urns, names, "K"): true,
					pickURN(t, urns, names, "L"): true,
				}, deleted)
			})
	})

	destroySpecificTargets(
		t, []string{"A"}, false, /*targetDependents*/
		func(urns []resource.URN, deleted map[resource.URN]bool) {})
}

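// destroySpecificTargets performs a targeted destroy of the named resources and
// passes the set of URNs that were actually deleted to the validate callback.
// When targetDependents is false and a target still has dependents, the destroy
// is expected to fail.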
func destroySpecificTargets(
	t *testing.T, targets []string, targetDependents bool,
	validate func(urns []resource.URN, deleted map[resource.URN]bool),
) {
	//             A
	//    _________|_________
	//    B        C        D
	//          ___|___  ___|___
	//          E  F  G  H  I  J
	//             |__|
	//             K  L

	p := &TestPlan{}

	urns, old, programF := generateComplexTestDependencyGraph(t, p)

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				DiffConfigF: func(
					_ context.Context,
					req plugin.DiffConfigRequest,
				) (plugin.DiffResult, error) {
					if !req.OldOutputs["A"].DeepEquals(req.NewInputs["A"]) {
						return plugin.DiffResult{
							ReplaceKeys:         []resource.PropertyKey{"A"},
							DeleteBeforeReplace: true,
						}, nil
					}
					return plugin.DiffResult{}, nil
				},
				DiffF: func(
					_ context.Context,
					req plugin.DiffRequest,
				) (plugin.DiffResult, error) {
					if !req.OldOutputs["A"].DeepEquals(req.NewInputs["A"]) {
						return plugin.DiffResult{ReplaceKeys: []resource.PropertyKey{"A"}}, nil
					}
					return plugin.DiffResult{}, nil
				},
			}, nil
		}),
	}

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p.Options.TargetDependents = targetDependents

	destroyTargets := []resource.URN{}
	for _, target := range targets {
		destroyTargets = append(destroyTargets, pickURN(t, urns, complexTestDependencyGraphNames, target))
	}

	p.Options.Targets = deploy.NewUrnTargetsFromUrns(destroyTargets)
	p.Options.T = t
	// Skip the display tests: destroys can happen in different orders, which would make the
	// display output (and thus the test) flaky.
	p.Options.SkipDisplayTests = true
	t.Logf("Destroying targets: %v", destroyTargets)

	// If we're not forcing the targets to be destroyed, then expect to get a failure here, as
	// we'll have downstream resources to delete that weren't specified explicitly.
	p.Steps = []TestStep{{
		Op:            Destroy,
		ExpectFailure: !targetDependents,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			deleted := make(map[resource.URN]bool)
			for _, entry := range entries {
				assert.Equal(t, deploy.OpDelete, entry.Step.Op())
				deleted[entry.Step.URN()] = true
			}

			for _, target := range p.Options.Targets.Literals() {
				assert.Contains(t, deleted, target)
			}

			validate(urns, deleted)
			return err
		},
	}}

	p.Run(t, old)
}

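// TestUpdateTarget exercises targeted updates, the engine-level analogue of a
// CLI invocation along these lines (illustrative URN and stack names):
//
//	pulumi up --target 'urn:pulumi:test::test::pkgA:m:typA::C' --target-dependents
//
// It also covers glob targets such as "**C**" and updates of nonexistent targets.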
func TestUpdateTarget(t *testing.T) {
	t.Parallel()

	// Try updating a stack with combinations of the above resources as targets.
	subsets := combinations.All(complexTestDependencyGraphNames)

	//nolint:paralleltest // false positive because range var isn't used directly in t.Run(name) arg
	for _, subset := range subsets {
		subset := subset
		// Limit to subsets of up to 3 resources to update. This keeps the test running time under
		// control as it only generates a few hundred combinations instead of several thousand.
		if len(subset) <= 3 {
			t.Run(fmt.Sprintf("update %v", subset), func(t *testing.T) {
				t.Parallel()

				updateSpecificTargets(t, subset, nil, false /*targetDependents*/, -1)
			})
		}
	}

	updateSpecificTargets(t, []string{"A"}, nil, false /*targetDependents*/, -1)

	// Also update a target that doesn't exist to make sure we don't crash or otherwise go off the rails.
	updateInvalidTarget(t)

	// We want to check that targetDependents is respected.
	updateSpecificTargets(t, []string{"C"}, nil, true /*targetDependents*/, -1)

	// Glob targets should select matching resources as well.
	updateSpecificTargets(t, nil, []string{"**C**"}, false, 3)
	updateSpecificTargets(t, nil, []string{"**providers:pkgA**"}, false, 3)
}

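// updateSpecificTargets runs a targeted update with the given literal names and
// glob patterns. An expectedUpdates of -1 means "don't assert on the exact
// number of updated resources".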
func updateSpecificTargets(t *testing.T, targets, globTargets []string, targetDependents bool, expectedUpdates int) {
	//             A
	//    _________|_________
	//    B        C        D
	//          ___|___  ___|___
	//          E  F  G  H  I  J
	//             |__|
	//             K  L

	p := &TestPlan{}

	urns, old, programF := generateComplexTestDependencyGraph(t, p)

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				DiffF: func(context.Context, plugin.DiffRequest) (plugin.DiffResult, error) {
					// All resources will change.
					return plugin.DiffResult{
						Changes: plugin.DiffSome,
					}, nil
				},

				UpdateF: func(_ context.Context, req plugin.UpdateRequest) (plugin.UpdateResponse, error) {
					outputs := req.OldOutputs.Copy()

					outputs["output_prop"] = resource.NewPropertyValue(42)
					return plugin.UpdateResponse{
						Properties: outputs,
						Status:     resource.StatusOK,
					}, nil
				},
			}, nil
		}),
	}

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p.Options.TargetDependents = targetDependents
	p.Options.T = t
	updateTargets := globTargets
	for _, target := range targets {
		updateTargets = append(updateTargets,
			string(pickURN(t, urns, complexTestDependencyGraphNames, target)))
	}

	p.Options.Targets = deploy.NewUrnTargets(updateTargets)
	t.Logf("Updating targets: %v", updateTargets)

	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: false,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			updated := make(map[resource.URN]bool)
			sames := make(map[resource.URN]bool)
			for _, entry := range entries {
				if entry.Step.Op() == deploy.OpUpdate {
					updated[entry.Step.URN()] = true
				} else if entry.Step.Op() == deploy.OpSame {
					sames[entry.Step.URN()] = true
				} else {
					assert.FailNowf(t, "", "Got a step that wasn't a same/update: %v", entry.Step.Op())
				}
			}

			for _, target := range p.Options.Targets.Literals() {
				assert.Contains(t, updated, target)
			}

			if !targetDependents {
				// We should only perform updates on the entries we have targeted.
				for _, target := range p.Options.Targets.Literals() {
					assert.Contains(t, targets, target.Name())
				}
			} else {
				// We expect to find at least one other resource update.
				//
				// NOTE: The test only exercises a subset of valid behavior. If a URN with no
				// dependents is specified, no other URNs will be updated and the test will
				// fail (incorrectly).
				found := false
				updateList := []string{}
				for target := range updated {
					updateList = append(updateList, target.Name())
					if !contains(targets, target.Name()) {
						found = true
					}
				}
				assert.True(t, found, "Updates: %v", updateList)
			}

			for _, target := range p.Options.Targets.Literals() {
				assert.NotContains(t, sames, target)
			}
			if expectedUpdates > -1 {
				assert.Equal(t, expectedUpdates, len(updated), "Updates = %#v", updated)
			}
			return err
		},
	}}

	p.RunWithName(t, old, strings.Join(updateTargets, ","))
}

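// contains reports whether entry appears in list; equivalent to
// slices.Contains from the standard library.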
func contains(list []string, entry string) bool {
	for _, e := range list {
		if e == entry {
			return true
		}
	}
	return false
}

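// updateInvalidTarget verifies that targeting a URN that exists in neither the
// program nor the snapshot fails the update rather than crashing.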
func updateInvalidTarget(t *testing.T) {
	p := &TestPlan{}

	_, old, programF := generateComplexTestDependencyGraph(t, p)

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				DiffF: func(context.Context, plugin.DiffRequest) (plugin.DiffResult, error) {
					// All resources will change.
					return plugin.DiffResult{
						Changes: plugin.DiffSome,
					}, nil
				},

				UpdateF: func(_ context.Context, req plugin.UpdateRequest) (plugin.UpdateResponse, error) {
					outputs := req.OldOutputs.Copy()

					outputs["output_prop"] = resource.NewPropertyValue(42)
					return plugin.UpdateResponse{
						Properties: outputs,
						Status:     resource.StatusOK,
					}, nil
				},
			}, nil
		}),
	}

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{"foo"})
	t.Logf("Updating invalid targets: %v", p.Options.Targets)

	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: true,
	}}

	p.Run(t, old)
}

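// TestCreateDuringTargetedUpdate_CreateMentionedAsTarget checks that a
// brand-new resource can be created during a targeted update when it is itself
// listed as a target.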
func TestCreateDuringTargetedUpdate_CreateMentionedAsTarget(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	program1F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)
		return nil
	})
	host1F := deploytest.NewPluginHostF(nil, nil, program1F, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: host1F},
	}

	p.Steps = []TestStep{{Op: Update}}
	snap1 := p.Run(t, nil)

	// Now, create a resource resB. This shouldn't be a problem since resB is itself mentioned
	// as a target.
	program2F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true)
		assert.NoError(t, err)

		return nil
	})
	host2F := deploytest.NewPluginHostF(nil, nil, program2F, loaders...)

	resA := p.NewURN("pkgA:m:typA", "resA", "")
	resB := p.NewURN("pkgA:m:typA", "resB", "")
	p.Options.HostF = host2F
	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{resA, resB})
	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: false,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			for _, entry := range entries {
				if entry.Step.URN() == resA {
					assert.Equal(t, deploy.OpSame, entry.Step.Op())
				} else if entry.Step.URN() == resB {
					assert.Equal(t, deploy.OpCreate, entry.Step.Op())
				}
			}

			return err
		},
	}}
	p.Run(t, snap1)
}

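// TestCreateDuringTargetedUpdate_UntargetedCreateNotReferenced checks that an
// untargeted create is simply skipped when nothing in the target set references
// the new resource: every resulting step should be a same.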
func TestCreateDuringTargetedUpdate_UntargetedCreateNotReferenced(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	program1F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)
		return nil
	})
	host1F := deploytest.NewPluginHostF(nil, nil, program1F, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: host1F},
	}

	p.Steps = []TestStep{{Op: Update}}
	snap1 := p.Run(t, nil)

	// Now, create a resource resB. This shouldn't be a problem since resB isn't referenced by anything.
	program2F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true)
		assert.NoError(t, err)

		return nil
	})
	host2F := deploytest.NewPluginHostF(nil, nil, program2F, loaders...)

	resA := p.NewURN("pkgA:m:typA", "resA", "")

	p.Options.HostF = host2F
	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{resA})
	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: false,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			for _, entry := range entries {
				// Everything should be a same op here.
				assert.Equal(t, deploy.OpSame, entry.Step.Op())
			}

			return err
		},
	}}
	p.Run(t, snap1)
}

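// TestCreateDuringTargetedUpdate_UntargetedCreateReferencedByTarget checks that
// the update fails when a targeted resource depends on a new resource that was
// not itself targeted, since that dependency cannot be satisfied without
// stepping outside the target set.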
func TestCreateDuringTargetedUpdate_UntargetedCreateReferencedByTarget(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	program1F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)
		return nil
	})
	host1F := deploytest.NewPluginHostF(nil, nil, program1F, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: host1F},
	}

	p.Steps = []TestStep{{Op: Update}}
	p.Run(t, nil)

	resA := p.NewURN("pkgA:m:typA", "resA", "")
	resB := p.NewURN("pkgA:m:typA", "resB", "")

	// Now, create a resource resB, but reference it from A. This will cause a dependency we
	// can't satisfy.
	program2F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resB", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true,
			deploytest.ResourceOptions{
				Dependencies: []resource.URN{resB},
			})
		assert.NoError(t, err)

		return nil
	})
	host2F := deploytest.NewPluginHostF(nil, nil, program2F, loaders...)

	p.Options.HostF = host2F
	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{resA})
	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: true,
	}}
	p.Run(t, nil)
}

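// TestCreateDuringTargetedUpdate_UntargetedProviderReferencedByTarget checks
// that targeting a resource works even when its explicit provider is new and
// untargeted; the update is expected to succeed.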
func TestCreateDuringTargetedUpdate_UntargetedProviderReferencedByTarget(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	// Create a resource A with --target but don't target its explicit provider.

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		resp, err := monitor.RegisterResource(providers.MakeProviderType("pkgA"), "provA", true)
		assert.NoError(t, err)

		provID := resp.ID
		if provID == "" {
			provID = providers.UnknownID
		}

		provRef, err := providers.NewReference(resp.URN, provID)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{
			Provider: provRef.String(),
		})
		assert.NoError(t, err)
		return nil
	})
	host1F := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: host1F},
	}

	resA := p.NewURN("pkgA:m:typA", "resA", "")

	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{resA})
	p.Steps = []TestStep{{
		Op: Update,
	}}
	p.Run(t, nil)
}

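// TestCreateDuringTargetedUpdate_UntargetedCreateReferencedByUntargetedCreate
// checks that a dependency between two untargeted new resources is harmless:
// both creates are skipped and the targeted update succeeds.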
func TestCreateDuringTargetedUpdate_UntargetedCreateReferencedByUntargetedCreate(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	program1F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)
		return nil
	})
	host1F := deploytest.NewPluginHostF(nil, nil, program1F, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: host1F},
	}

	p.Steps = []TestStep{{Op: Update}}
	snap1 := p.Run(t, nil)

	resA := p.NewURN("pkgA:m:typA", "resA", "")
	resB := p.NewURN("pkgA:m:typA", "resB", "")

	// Now, create a resource resB and reference it from another new, untargeted resource resC.
	// Since neither resB nor resC is targeted, both creates are skipped and the update succeeds.
	program2F := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resB", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resC", true,
			deploytest.ResourceOptions{
				Dependencies: []resource.URN{resB},
			})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		return nil
	})
	host2F := deploytest.NewPluginHostF(nil, nil, program2F, loaders...)

	p.Options.HostF = host2F
	p.Options.Targets = deploy.NewUrnTargetsFromUrns([]resource.URN{resA})
	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: false,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			for _, entry := range entries {
				assert.Equal(t, deploy.OpSame, entry.Step.Op())
			}

			return err
		},
	}}
	p.Run(t, snap1)
}

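// TestReplaceSpecificTargets checks that replace targets force replacement of
// exactly the named resources even though the provider reports no diff for any
// resource.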
func TestReplaceSpecificTargets(t *testing.T) {
	t.Parallel()

	//             A
	//    _________|_________
	//    B        C        D
	//          ___|___  ___|___
	//          E  F  G  H  I  J
	//             |__|
	//             K  L

	p := &TestPlan{}

	urns, old, programF := generateComplexTestDependencyGraph(t, p)

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				DiffF: func(context.Context, plugin.DiffRequest) (plugin.DiffResult, error) {
					// No resources will change.
					return plugin.DiffResult{Changes: plugin.DiffNone}, nil
				},

				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
			}, nil
		}),
	}

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p.Options.T = t
	p.Options.SkipDisplayTests = true
	getURN := func(name string) resource.URN {
		return pickURN(t, urns, complexTestDependencyGraphNames, name)
	}

	p.Options.ReplaceTargets = deploy.NewUrnTargetsFromUrns([]resource.URN{
		getURN("F"),
		getURN("B"),
		getURN("G"),
	})

	p.Steps = []TestStep{{
		Op:            Update,
		ExpectFailure: false,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			replaced := make(map[resource.URN]bool)
			sames := make(map[resource.URN]bool)
			for _, entry := range entries {
				if entry.Step.Op() == deploy.OpReplace {
					replaced[entry.Step.URN()] = true
				} else if entry.Step.Op() == deploy.OpSame {
					sames[entry.Step.URN()] = true
				}
			}

			for _, target := range p.Options.ReplaceTargets.Literals() {
				assert.Contains(t, replaced, target)
			}

			for _, target := range p.Options.ReplaceTargets.Literals() {
				assert.NotContains(t, sames, target)
			}

			return err
		},
	}}

	p.Run(t, old)
}

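// componentBasedTestDependencyGraphNames are the resource names used by the
// parented (component-based) test dependency graph below.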
var componentBasedTestDependencyGraphNames = []string{
	"A", "B", "C", "D", "E", "F", "G", "H",
	"I", "J", "K", "L", "M", "N",
}

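// generateParentedTestDependencyGraph builds a snapshot and program for a graph
// that mixes parent-child (component) edges with dependency edges, so targeted
// operations can be exercised against both kinds of relationships.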
func generateParentedTestDependencyGraph(t *testing.T, p *TestPlan) (
	// Parent-child graph
	//      A                B
	//    __|__          ____|____
	//    D   I          E       F
	//  __|__          __|__   __|__
	//  G   H          J   K   L   M
	//
	// A has children D, I
	// D has children G, H
	// B has children E, F
	// E has children J, K
	// F has children L, M
	//
	// Dependency graph
	//  G        H
	//  |      __|__
	//  I      K   N
	//
	// I depends on G
	// K depends on H
	// N depends on H
	[]resource.URN, *deploy.Snapshot, deploytest.LanguageRuntimeFactory,
) {
	resTypeComponent := tokens.Type("pkgA:index:Component")
	resTypeResource := tokens.Type("pkgA:index:Resource")

	names := componentBasedTestDependencyGraphNames

	urnA := p.NewURN(resTypeComponent, names[0], "")
	urnB := p.NewURN(resTypeComponent, names[1], "")
	urnC := p.NewURN(resTypeResource, names[2], "")
	urnD := p.NewURN(resTypeComponent, names[3], urnA)
	urnE := p.NewURN(resTypeComponent, names[4], urnB)
	urnF := p.NewURN(resTypeComponent, names[5], urnB)
	urnG := p.NewURN(resTypeResource, names[6], urnD)
	urnH := p.NewURN(resTypeResource, names[7], urnD)
	urnI := p.NewURN(resTypeResource, names[8], urnA)
	urnJ := p.NewURN(resTypeResource, names[9], urnE)
	urnK := p.NewURN(resTypeResource, names[10], urnE)
	urnL := p.NewURN(resTypeResource, names[11], urnF)
	urnM := p.NewURN(resTypeResource, names[12], urnF)
	urnN := p.NewURN(resTypeResource, names[13], "")

	urns := []resource.URN{urnA, urnB, urnC, urnD, urnE, urnF, urnG, urnH, urnI, urnJ, urnK, urnL, urnM, urnN}

	newResource := func(urn, parent resource.URN, id resource.ID,
		dependencies []resource.URN, propertyDeps propertyDependencies,
	) *resource.State {
		return newResource(urn, parent, id, "", dependencies, propertyDeps,
			nil, urn.Type() != resTypeComponent)
	}

	old := &deploy.Snapshot{
		Resources: []*resource.State{
			newResource(urnA, "", "0", nil, nil),
			newResource(urnB, "", "1", nil, nil),
			newResource(urnC, "", "2", nil, nil),
			newResource(urnD, urnA, "3", nil, nil),
			newResource(urnE, urnB, "4", nil, nil),
			newResource(urnF, urnB, "5", nil, nil),
			newResource(urnG, urnD, "6", nil, nil),
			newResource(urnH, urnD, "7", nil, nil),
			newResource(urnI, urnA, "8", []resource.URN{urnG},
				propertyDependencies{"A": []resource.URN{urnG}}),
			newResource(urnJ, urnE, "9", nil, nil),
			newResource(urnK, urnE, "10", []resource.URN{urnH},
				propertyDependencies{"A": []resource.URN{urnH}}),
			newResource(urnL, urnF, "11", nil, nil),
			newResource(urnM, urnF, "12", nil, nil),
			newResource(urnN, "", "13", []resource.URN{urnH},
				propertyDependencies{"A": []resource.URN{urnH}}),
		},
	}

	programF := deploytest.NewLanguageRuntimeF(
		func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
			register := func(urn, parent resource.URN) resource.ID {
				resp, err := monitor.RegisterResource(
					urn.Type(),
					urn.Name(),
					urn.Type() != resTypeComponent,
					deploytest.ResourceOptions{
						Inputs: nil,
						Parent: parent,
					})
				assert.NoError(t, err)
				return resp.ID
			}

			register(urnA, "")
			register(urnB, "")
			register(urnC, "")
			register(urnD, urnA)
			register(urnE, urnB)
			register(urnF, urnB)
			register(urnG, urnD)
			register(urnH, urnD)
			register(urnI, urnA)
			register(urnJ, urnE)
			register(urnK, urnE)
			register(urnL, urnF)
			register(urnM, urnF)
			register(urnN, "")

			return nil
		})

	return urns, old, programF
}

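// TestDestroyTargetWithChildren exercises targeted destroys of component
// parents: children are deleted along with the targeted parent, while
// dependents outside the component tree require targetDependents.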
func TestDestroyTargetWithChildren(t *testing.T) {
	t.Parallel()

	// When deleting 'A' with targetDependents specified, we expect A, D, G, H, I, K and N to be deleted.
	destroySpecificTargetsWithChildren(
		t, []string{"A"}, true, /*targetDependents*/
		func(urns []resource.URN, deleted map[resource.URN]bool) {
			names := componentBasedTestDependencyGraphNames
			assert.Equal(t, map[resource.URN]bool{
				pickURN(t, urns, names, "A"): true,
				pickURN(t, urns, names, "D"): true,
				pickURN(t, urns, names, "G"): true,
				pickURN(t, urns, names, "H"): true,
				pickURN(t, urns, names, "I"): true,
				pickURN(t, urns, names, "K"): true,
				pickURN(t, urns, names, "N"): true,
			}, deleted)
		})

	// When deleting 'A' with targetDependents not specified, we expect an error.
	destroySpecificTargetsWithChildren(
		t, []string{"A"}, false, /*targetDependents*/
		func(urns []resource.URN, deleted map[resource.URN]bool) {})

	// When deleting 'B' we expect B, E, F, J, K, L, M to be deleted.
	destroySpecificTargetsWithChildren(
		t, []string{"B"}, false, /*targetDependents*/
		func(urns []resource.URN, deleted map[resource.URN]bool) {
			names := componentBasedTestDependencyGraphNames
			assert.Equal(t, map[resource.URN]bool{
				pickURN(t, urns, names, "B"): true,
				pickURN(t, urns, names, "E"): true,
				pickURN(t, urns, names, "F"): true,
				pickURN(t, urns, names, "J"): true,
				pickURN(t, urns, names, "K"): true,
				pickURN(t, urns, names, "L"): true,
				pickURN(t, urns, names, "M"): true,
			}, deleted)
		})
}

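// destroySpecificTargetsWithChildren mirrors destroySpecificTargets but runs
// against the parented dependency graph from
// generateParentedTestDependencyGraph.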
|
|
|
|
|
|
|
|
func destroySpecificTargetsWithChildren(
	t *testing.T, targets []string, targetDependents bool,
	validate func(urns []resource.URN, deleted map[resource.URN]bool),
) {
	p := &TestPlan{}

	urns, old, programF := generateParentedTestDependencyGraph(t, p)

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				DiffConfigF: func(
					_ context.Context,
					req plugin.DiffConfigRequest,
				) (plugin.DiffResult, error) {
					if !req.OldOutputs["A"].DeepEquals(req.NewInputs["A"]) {
						return plugin.DiffResult{
							ReplaceKeys:         []resource.PropertyKey{"A"},
							DeleteBeforeReplace: true,
						}, nil
					}
					return plugin.DiffResult{}, nil
				},
				DiffF: func(_ context.Context, req plugin.DiffRequest) (plugin.DiffResult, error) {
					if !req.OldOutputs["A"].DeepEquals(req.NewInputs["A"]) {
						return plugin.DiffResult{ReplaceKeys: []resource.PropertyKey{"A"}}, nil
					}
					return plugin.DiffResult{}, nil
				},
			}, nil
		}),
	}

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p.Options.TargetDependents = targetDependents

	destroyTargets := []resource.URN{}
	for _, target := range targets {
		destroyTargets = append(destroyTargets, pickURN(t, urns, componentBasedTestDependencyGraphNames, target))
	}

	p.Options.Targets = deploy.NewUrnTargetsFromUrns(destroyTargets)
	p.Options.T = t
	p.Options.SkipDisplayTests = true
	t.Logf("Destroying targets: %v", destroyTargets)

	// If we're not forcing the targets to be destroyed, then expect to get a failure here as
	// we'll have downstream resources to delete that weren't specified explicitly.
	p.Steps = []TestStep{{
		Op:            Destroy,
		ExpectFailure: !targetDependents,
		Validate: func(project workspace.Project, target deploy.Target, entries JournalEntries,
			evts []Event, err error,
		) error {
			assert.NoError(t, err)
			assert.True(t, len(entries) > 0)

			deleted := make(map[resource.URN]bool)
			for _, entry := range entries {
				assert.Equal(t, deploy.OpDelete, entry.Step.Op())
				deleted[entry.Step.URN()] = true
			}

			for _, target := range p.Options.Targets.Literals() {
				assert.Contains(t, deleted, target)
			}

			validate(urns, deleted)
			return err
		},
	}}

	p.RunWithName(t, old, strings.Join(targets, ","))
}

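// newResource constructs an in-memory resource.State for seeding test snapshots. Each key in propertyDeps
// gets a placeholder "foo" input so that every property dependency has a corresponding input.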
func newResource(urn, parent resource.URN, id resource.ID, provider string, dependencies []resource.URN,
	propertyDeps propertyDependencies, outputs resource.PropertyMap, custom bool,
) *resource.State {
	inputs := resource.PropertyMap{}
	for k := range propertyDeps {
		inputs[k] = resource.NewStringProperty("foo")
	}

	return &resource.State{
		Type:                 urn.Type(),
		URN:                  urn,
		Custom:               custom,
		Delete:               false,
		ID:                   id,
		Inputs:               inputs,
		Outputs:              outputs,
		Dependencies:         dependencies,
		PropertyDependencies: propertyDeps,
		Provider:             provider,
		Parent:               parent,
	}
}

// TestTargetedCreateDefaultProvider checks that an update that targets a resource still creates the default
// provider if not targeted.
func TestTargetedCreateDefaultProvider(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{}

	project := p.GetProject()

	// Check that update succeeds despite the default provider not being targeted.
	options := TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
		},
	}
	snap, err := TestOp(Update).Run(project, p.GetTarget(t, nil), options, false, p.BackendClient, nil)
	assert.NoError(t, err)

	// Check that the default provider was created.
	var foundDefaultProvider bool
	for _, res := range snap.Resources {
		if res.URN == "urn:pulumi:test::test::pulumi:providers:pkgA::default" {
			foundDefaultProvider = true
		}
	}
	assert.True(t, foundDefaultProvider)
}

// findResourceByURN returns the resource in rs with the matching URN, or nil if there is none.
func findResourceByURN(rs []*resource.State, urn resource.URN) *resource.State {
	for _, r := range rs {
		if r.URN == urn {
			return r
		}
	}
	return nil
}

// TestEnsureUntargetedSame checks that an untargeted resource retains the prior state after an update when the
// provider alters the inputs. This is a regression test for pulumi/pulumi#12964.
func TestEnsureUntargetedSame(t *testing.T) {
	t.Parallel()

	// Provider that alters inputs during Check.
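	// (If the engine re-ran Check for untargeted resources, the `__defaults` key injected below would make
	// their recorded inputs drift even though they were not targeted.)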
	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CheckF: func(
					_ context.Context,
					req plugin.CheckRequest,
				) (plugin.CheckResponse, error) {
					// Pulumi GCP provider alters inputs during Check.
					req.News["__defaults"] = resource.NewStringProperty("exists")
					return plugin.CheckResponse{Properties: req.News}, nil
				},
			}, nil
		}),
	}

	// Program that creates 2 resources.
	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test-test", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{
			Inputs: resource.PropertyMap{
				"foo": resource.NewStringProperty("foo"),
			},
		})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
			Inputs: resource.PropertyMap{
				"foo": resource.NewStringProperty("bar"),
			},
		})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	// Set up stack with initial two resources.
	options := TestUpdateOptions{T: t, HostF: hostF}
	origSnap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), options, false, p.BackendClient, nil, "0")
	require.NoError(t, err)

	// Target only `resA` and run a targeted update.
	options = TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
		},
	}
	finalSnap, err := TestOp(Update).RunStep(project, p.GetTarget(t, origSnap), options, false, p.BackendClient, nil, "1")
	require.NoError(t, err)

	// Check that `resB` (untargeted) is the same between the two snapshots.
	{
		initialState := findResourceByURN(origSnap.Resources, "urn:pulumi:test::test::pkgA:m:typA::resB")
		assert.NotNil(t, initialState, "initial `resB` state not found")

		finalState := findResourceByURN(finalSnap.Resources, "urn:pulumi:test::test::pkgA:m:typA::resB")
		assert.NotNil(t, finalState, "final `resB` state not found")

		assert.Equal(t, initialState, finalState)
	}
}

// TestReplaceSpecificTargetsPlan checks combinations of --target and --replace for expected behavior.
func TestReplaceSpecificTargetsPlan(t *testing.T) {
	t.Parallel()

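	// The three subtests below cover: `--target-replace a` (target and replace a), `--replace a` (an
	// untargeted update that forces a replacement of a), and `--replace a --target b` (a targeted update
	// where the replace of a is ignored because a is not targeted).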
	p := &TestPlan{}

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	// Initial state
	fooVal := "bar"

	// Don't try to create resB yet.
	createResB := false

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		resp, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test-test", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{
			Inputs: resource.PropertyMap{
				"foo": resource.NewStringProperty(fooVal),
			},
			ReplaceOnChanges: []string{"foo"},
		})
		assert.NoError(t, err)

		if createResB {
			// Now try to create resB, which is not targeted and should show up in the plan.
			_, err := monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
				Inputs: resource.PropertyMap{
					"foo": resource.NewStringProperty(fooVal),
				},
			})
			assert.NoError(t, err)
		}

		err = monitor.RegisterResourceOutputs(resp.URN, resource.PropertyMap{
			"foo": resource.NewStringProperty(fooVal),
		})

		assert.NoError(t, err)

		return nil
	})

	p.Options.HostF = deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	project := p.GetProject()

	old, err := TestOp(Update).Run(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: p.Options.HostF,
	}, false, p.BackendClient, nil)
	assert.NoError(t, err)

	// Configure next update.
	fooVal = "changed-from-bar" // This triggers a replace

	// Now try to create resB.
	createResB = true

	urnA := resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA")
	urnB := resource.URN("urn:pulumi:test::test::pkgA:m:typA::resB")

	// `--target-replace a`
	t.Run("EnsureUntargetedIsSame", func(t *testing.T) {
		t.Parallel()
		// Create the update plan with only targeted resources.
		plan, err := TestOp(Update).Plan(project, p.GetTarget(t, old), TestUpdateOptions{
			T:     t,
			HostF: p.Options.HostF,
			UpdateOptions: UpdateOptions{
				Experimental: true,
				GeneratePlan: true,

				// `--target-replace a` means ReplaceTargets and UpdateTargets are both set for a.
				Targets: deploy.NewUrnTargetsFromUrns([]resource.URN{
					urnA,
				}),
				ReplaceTargets: deploy.NewUrnTargetsFromUrns([]resource.URN{
					urnA,
				}),
			},
		}, p.BackendClient, nil)
		assert.NoError(t, err)
		assert.NotNil(t, plan)

		// Ensure resB is in the plan.
		foundResB := false
		for _, r := range plan.ResourcePlans {
			if r.Goal == nil {
				continue
			}
			switch r.Goal.Name {
			case "resB":
				foundResB = true
				// Ensure resB is created in the plan.
				assert.Equal(t, []display.StepOp{
					deploy.OpSame,
				}, r.Ops)
			}
		}
		assert.True(t, foundResB, "resB should be in the plan")
	})

	// `--replace a`
	t.Run("EnsureReplaceTargetIsReplacedAndNotTargeted", func(t *testing.T) {
		t.Parallel()
		// Create the update plan with only targeted resources.
		plan, err := TestOp(Update).Plan(project, p.GetTarget(t, old), TestUpdateOptions{
			T:     t,
			HostF: p.Options.HostF,
			UpdateOptions: UpdateOptions{
				Experimental: true,
				GeneratePlan: true,

				// `--replace a` means ReplaceTargets is set. It is not a targeted update.
				// Both a and b should be changed.
				ReplaceTargets: deploy.NewUrnTargetsFromUrns([]resource.URN{
					urnA,
				}),
			},
		}, p.BackendClient, nil)
		assert.NoError(t, err)
		assert.NotNil(t, plan)

		foundResA := false
		foundResB := false
		for _, r := range plan.ResourcePlans {
			if r.Goal == nil {
				continue
			}
			switch r.Goal.Name {
			case "resA":
				foundResA = true
				assert.Equal(t, []display.StepOp{
					deploy.OpCreateReplacement,
					deploy.OpReplace,
					deploy.OpDeleteReplaced,
				}, r.Ops)
			case "resB":
				foundResB = true
				assert.Equal(t, []display.StepOp{
					deploy.OpCreate,
				}, r.Ops)
			}
		}
		assert.True(t, foundResA, "resA should be in the plan")
		assert.True(t, foundResB, "resB should be in the plan")
	})

	// `--replace a --target b`
	// This is a targeted update where the `--replace a` is irrelevant as a is not targeted.
	t.Run("EnsureUntargetedReplaceTargetIsNotReplaced", func(t *testing.T) {
		t.Parallel()
		// Create the update plan with only targeted resources.
		plan, err := TestOp(Update).Plan(project, p.GetTarget(t, old), TestUpdateOptions{
			T:     t,
			HostF: p.Options.HostF,
			UpdateOptions: UpdateOptions{
				Experimental: true,
				GeneratePlan: true,

				Targets: deploy.NewUrnTargetsFromUrns([]resource.URN{
					urnB,
				}),
				ReplaceTargets: deploy.NewUrnTargetsFromUrns([]resource.URN{
					urnA,
				}),
			},
		}, p.BackendClient, nil)
		assert.NoError(t, err)
		assert.NotNil(t, plan)

		foundResA := false
		foundResB := false
		for _, r := range plan.ResourcePlans {
			if r.Goal == nil {
				continue
			}
			switch r.Goal.Name {
			case "resA":
				foundResA = true
				assert.Equal(t, []display.StepOp{
					deploy.OpSame,
				}, r.Ops)
			case "resB":
				foundResB = true
				assert.Equal(t, []display.StepOp{
					deploy.OpCreate,
				}, r.Ops)
			}
		}
		assert.True(t, foundResA, "resA should be in the plan")
		assert.True(t, foundResB, "resB should be in the plan")
	})
}

func TestTargetDependents(t *testing.T) {
	// Regression test for https://github.com/pulumi/pulumi/pull/13560. This test ensures that when
	// --target-dependents is set we don't start creating untargeted resources.
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	// Target only resA and check only A is created.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets:          deploy.NewUrnTargets([]string{"urn:pulumi:test::test::pkgA:m:typA::resA"}),
			TargetDependents: false,
		},
	}, false, p.BackendClient, nil, "1")
	require.NoError(t, err)
	// Check we only have three resources: stack, provider, and resA.
	require.Equal(t, 3, len(snap.Resources))

	// Run another fresh update (note we're starting from a nil snapshot again), targeting only resA as
	// before but with --target-dependents turned on, and check that still only A is created.
	snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets:          deploy.NewUrnTargets([]string{"urn:pulumi:test::test::pkgA:m:typA::resA"}),
			TargetDependents: true,
		},
	}, false, p.BackendClient, nil, "2")
	require.NoError(t, err)
	// Check we still only have three resources: stack, provider, and resA.
	require.Equal(t, 3, len(snap.Resources))
}

func TestTargetDependentsExplicitProvider(t *testing.T) {
	// Regression test for https://github.com/pulumi/pulumi/pull/13560. This test ensures that when
	// --target-dependents is set we still target explicit provider resources.
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		resp, err := monitor.RegisterResource(
			providers.MakeProviderType("pkgA"), "provider", true, deploytest.ResourceOptions{})
		assert.NoError(t, err)

		provID := resp.ID
		if provID == "" {
			provID = providers.UnknownID
		}

		provRef, err := providers.NewReference(resp.URN, provID)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resA", true, deploytest.ResourceOptions{
			Provider: provRef.String(),
		})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
			Provider: provRef.String(),
		})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	// Target only the explicit provider and check that only the provider is created.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets:          deploy.NewUrnTargets([]string{"urn:pulumi:test::test::pulumi:providers:pkgA::provider"}),
			TargetDependents: false,
		},
	}, false, p.BackendClient, nil, "0")
	require.NoError(t, err)
	// Check we only have two resources: the stack and the provider.
	require.Equal(t, 2, len(snap.Resources))

	// Run another fresh update (note we're starting from a nil snapshot again), targeting only the provider
	// but with --target-dependents turned on, and check the provider, A, and B are created.
	snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets:          deploy.NewUrnTargets([]string{"urn:pulumi:test::test::pulumi:providers:pkgA::provider"}),
			TargetDependents: true,
		},
	}, false, p.BackendClient, nil, "1")
	require.NoError(t, err)
	// Check we now have four resources: stack, provider, resA, and resB.
	require.Equal(t, 4, len(snap.Resources))
}

func TestTargetDependentsSiblingResources(t *testing.T) {
	// Regression test for https://github.com/pulumi/pulumi/pull/13591. This test ensures that when
	// --target-dependents is set we don't target sibling resources (that is, resources created by the same
	// provider as the one being targeted).
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		// We're creating 8 resources here (one of them the implicit default provider). First we create three
		// pkgA:m:typA resources called "implicitX", "implicitY", and "implicitZ" (which will trigger the
		// creation of the default provider for pkgA). Second we create an explicit provider for pkgA and then
		// create three resources using that ("explicitX", "explicitY", and "explicitZ"). We want to check
		// that if we target the X resources, the Y resources aren't created, but the providers are, and the Z
		// resources are if --target-dependents is on.
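		// Sketch of the graph registered below:
		//
		//   default pkgA provider -> implicitX, implicitY; implicitZ is a child of implicitX.
		//   explicit "provider"   -> explicitX, explicitY; explicitZ is a child of explicitX.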
		resp, err := monitor.RegisterResource("pkgA:m:typA", "implicitX", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "implicitY", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "implicitZ", true, deploytest.ResourceOptions{
			Parent: resp.URN,
		})
		assert.NoError(t, err)

		resp, err = monitor.RegisterResource(
			providers.MakeProviderType("pkgA"), "provider", true, deploytest.ResourceOptions{})
		assert.NoError(t, err)

		provID := resp.ID
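		// If the provider hasn't been created yet (e.g. during a preview) its ID is empty, so fall
		// back to the providers.UnknownID sentinel; NewReference below then builds the "<urn>::<id>"
		// string used to pass an explicit provider to a resource.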
		if provID == "" {
			provID = providers.UnknownID
		}

		provRef, err := providers.NewReference(resp.URN, provID)
		assert.NoError(t, err)

		resp, err = monitor.RegisterResource("pkgA:m:typA", "explicitX", true, deploytest.ResourceOptions{
			Provider: provRef.String(),
		})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "explicitY", true, deploytest.ResourceOptions{
			Provider: provRef.String(),
		})
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "explicitZ", true, deploytest.ResourceOptions{
			Parent: resp.URN,
		})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	// Target implicitX and explicitX and ensure that those, their children and the providers are created.
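	// The trailing "0"/"1" argument to RunStep names each step, seemingly so its display output can
	// be matched against recorded snapshots (per the display-testing work in #16050).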
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
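			// Targets are fully qualified URNs: urn:pulumi:<stack>::<project>::<type>::<name>.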
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::implicitX",
				"urn:pulumi:test::test::pkgA:m:typA::explicitX",
			}),
			TargetDependents: false,
		},
	}, false, p.BackendClient, nil, "0")
	require.NoError(t, err)
	// Check we only have the 5 resources expected: the stack, the two providers, and the two X resources.
	require.Equal(t, 5, len(snap.Resources))

	// Run another fresh update (note we're starting from a nil snapshot again) but turn on
	// --target-dependents and check we get 7 resources: the same set as above plus the two Z resources.
	snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::implicitX",
				"urn:pulumi:test::test::pkgA:m:typA::explicitX",
			}),
			TargetDependents: true,
		},
	}, false, p.BackendClient, nil, "1")
	require.NoError(t, err)
	require.Equal(t, 7, len(snap.Resources))
}

// Regression test for https://github.com/pulumi/pulumi/issues/14531. This test ensures that when
// --targets is set, creates of non-targeted parents trigger an error.
func TestTargetUntargetedParent(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	inputs := resource.PropertyMap{}

	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		resp, err := monitor.RegisterResource("component", "parent", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "child", true, deploytest.ResourceOptions{
			Parent: resp.URN,
			Inputs: inputs,
		})
		assert.NoError(t, err)

		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	//nolint:paralleltest // Requires serial access to TestPlan
	t.Run("target update", func(t *testing.T) {
		// Create all resources.
		snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
			T:     t,
			HostF: hostF,
		}, false, p.BackendClient, nil, "0")
		require.NoError(t, err)
		// Check we have 4 resources in the stack (stack, parent, provider, child).
		require.Equal(t, 4, len(snap.Resources))

		// Run an update to target the child. This works because we don't need to create the parent, so we
		// can just SameStep it using the data currently in state.
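		// (A "same" step is the engine's no-op step: it carries the resource's existing state
		// forward unchanged instead of re-creating or updating it.)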
		inputs = resource.PropertyMap{
			"foo": resource.NewStringProperty("bar"),
		}
		snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
			T:     t,
			HostF: hostF,
			UpdateOptions: UpdateOptions{
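				// URN targets may be globs: "**child**" matches any URN containing "child",
				// e.g. "urn:pulumi:test::test::component$pkgA:m:typA::child".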
				Targets: deploy.NewUrnTargets([]string{
					"**child**",
				}),
			},
		}, false, p.BackendClient, nil, "1")
		require.NoError(t, err)
		assert.Equal(t, 4, len(snap.Resources))
		parentURN := snap.Resources[1].URN
		assert.Equal(t, "parent", parentURN.Name())
		assert.Equal(t, parentURN, snap.Resources[3].Parent)
	})

	//nolint:paralleltest // Requires serial access to TestPlan
	t.Run("target create", func(t *testing.T) {
		// Create all resources from scratch (nil snapshot) but only target the child. This should error
		// because the parent needs to be created.
		snap, err := TestOp(Update).Run(project, p.GetTarget(t, nil), TestUpdateOptions{
			T:     t,
			HostF: hostF,
			UpdateOptions: UpdateOptions{
				Targets: deploy.NewUrnTargets([]string{
					"**child**",
				}),
			},
		}, false, p.BackendClient, nil)
		assert.ErrorContains(t, err, "untargeted create")
		// We should have two resources: the stack and the default provider we made for the child.
		assert.Equal(t, 2, len(snap.Resources))
		assert.Equal(t, tokens.Type("pulumi:pulumi:Stack"), snap.Resources[0].URN.Type())
		assert.Equal(t, tokens.Type("pulumi:providers:pkgA"), snap.Resources[1].URN.Type())
	})
}

// TestTargetDestroyDependencyErrors ensures we get an error when doing a targeted destroy of a resource that has a
// dependent, where the dependent isn't specified as a target and TargetDependents isn't set.
func TestTargetDestroyDependencyErrors(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
			}, nil
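		// WithoutGrpc presumably loads this fake provider in-process rather than wrapping it in a
		// gRPC server (an assumption from the option name; it keeps these test providers cheap).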
		}, deploytest.WithoutGrpc),
	}

	programF := deploytest.NewLanguageRuntimeF(func(info plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		resp, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
			Dependencies: []resource.URN{resp.URN},
		})
		assert.NoError(t, err)
		return nil
	})
	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: hostF},
	}

	project := p.GetProject()

	validateSnap := func(snap *deploy.Snapshot) {
		assert.NotNil(t, snap)
		assert.Nil(t, snap.VerifyIntegrity())
		assert.Len(t, snap.Resources, 3)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA"), snap.Resources[1].URN)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resB"), snap.Resources[2].URN)
	}

	// Run an update for initial state.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), p.Options, false, p.BackendClient, nil, "0")
	assert.NoError(t, err)
	validateSnap(snap)

	snap, err = TestOp(Destroy).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
		},
	}, false, p.BackendClient, nil, "1")
	assert.Error(t, err) // Expect error because we didn't specify the dependent as a target or TargetDependents
	validateSnap(snap)
}

// TestTargetDestroyChildErrors ensures we get an error when doing a targeted destroy of a resource that has a
// child, and the child isn't specified as a target and TargetDependents isn't set.
func TestTargetDestroyChildErrors(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
			}, nil
		}, deploytest.WithoutGrpc),
	}

	programF := deploytest.NewLanguageRuntimeF(func(info plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		resp, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
			Parent: resp.URN,
		})
		assert.NoError(t, err)
		return nil
	})
	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: hostF},
	}

	project := p.GetProject()

	validateSnap := func(snap *deploy.Snapshot) {
		assert.NotNil(t, snap)
		assert.Nil(t, snap.VerifyIntegrity())
		assert.Len(t, snap.Resources, 3)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA"), snap.Resources[1].URN)
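		// Note the child's URN type embeds its parent's type, joined by "$".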
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA$pkgA:m:typA::resB"), snap.Resources[2].URN)
	}

	// Run an update for initial state.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), p.Options, false, p.BackendClient, nil, "0")
	assert.NoError(t, err)
	validateSnap(snap)

	snap, err = TestOp(Destroy).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
		},
	}, false, p.BackendClient, nil, "1")
	assert.Error(t, err) // Expect error because we didn't specify the child as a target or TargetDependents
	validateSnap(snap)
}

// TestTargetDestroyDeleteFails ensures a resource that is part of a targeted destroy, and that fails to delete,
// still remains in the snapshot.
func TestTargetDestroyDeleteFails(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
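				// Fail every delete. Returning StatusUnknown signals the outcome of the operation
				// is indeterminate, so the engine must keep the resource in the snapshot.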
				DeleteF: func(_ context.Context, req plugin.DeleteRequest) (plugin.DeleteResponse, error) {
					return plugin.DeleteResponse{Status: resource.StatusUnknown}, errors.New("can't delete")
				},
			}, nil
		}, deploytest.WithoutGrpc),
	}

	programF := deploytest.NewLanguageRuntimeF(func(info plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)
		return nil
	})
	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: hostF},
	}

	project := p.GetProject()

	validateSnap := func(snap *deploy.Snapshot) {
		assert.NotNil(t, snap)
		assert.Nil(t, snap.VerifyIntegrity())
		assert.Len(t, snap.Resources, 2)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA"), snap.Resources[1].URN)
	}

	// Run an update for initial state.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), p.Options, false, p.BackendClient, nil, "0")
	assert.NoError(t, err)
	validateSnap(snap)

	// Now run the targeted destroy. We expect an error because resA errored on delete.
	// The state should still contain resA.
	snap, err = TestOp(Destroy).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
		},
	}, false, p.BackendClient, nil, "1")
	assert.Error(t, err)
	validateSnap(snap)
}

// TestTargetDestroyDependencyDeleteFails ensures a resource that is part of a targeted destroy, and that fails to
// delete, still remains in the snapshot.
func TestTargetDestroyDependencyDeleteFails(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
				DeleteF: func(_ context.Context, req plugin.DeleteRequest) (plugin.DeleteResponse, error) {
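					// Dependents are deleted before the resources they depend on, so resB's delete
					// runs (and fails) first; resA is then never deleted.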
assert.Equal(t, "urn:pulumi:test::test::pkgA:m:typA::resB", string(req.URN))
|
|
|
|
return plugin.DeleteResponse{Status: resource.StatusUnknown}, errors.New("can't delete")
|
2023-12-05 17:19:10 +00:00
|
|
|
},
|
|
|
|
}, nil
|
|
|
|
}, deploytest.WithoutGrpc),
|
|
|
|
}
|
|
|
|
|
|
|
|
programF := deploytest.NewLanguageRuntimeF(func(info plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
2024-04-19 11:08:56 +00:00
|
|
|
resp, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
|
2023-12-05 17:19:10 +00:00
|
|
|
assert.NoError(t, err)
|
|
|
|
|
2024-04-19 11:08:56 +00:00
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
|
|
|
|
Dependencies: []resource.URN{resp.URN},
|
2023-12-05 17:19:10 +00:00
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
|
|
|
|
|
|
|
|
p := &TestPlan{
|
Add display to the engine tests (#16050)
We want to add more test coverage to the display code. The best way to
do that is to add it to the engine tests, that already cover most of the
pulumi functionality.
It's probably not really possible to review all of the output, but at
least it gives us a baseline, which we can work with.
There's a couple of tests that are flaky for reasons I don't quite
understand yet. I marked them as to skip and we can look at them later.
I'd rather get in the baseline tests sooner, rather than spending a
bunch of time looking at that. The output differences also seem very
minor, so not super concerning.
The biggest remaining issue is that this doesn't interact well with the
Chdir we're doing in the engine. We could either pass the CWD through,
or just try to get rid of that Chdir. So this should only be merged
after https://github.com/pulumi/pulumi/pull/15607.
I've tried to split this into a few commits, separating out adding the
testdata, so it's hopefully a little easier to review, even though the
PR is still quite large.
One other thing to note is that we're comparing that the output has all
the same lines, and not that it is exactly the same. Because of how the
engine is implemented, there's a bunch of race conditions otherwise,
that would make us have to skip a bunch of tests, just because e.g.
resource A is sometimes deleted before resource B and sometimes it's the
other way around.
The biggest downside of that is that running with `PULUMI_ACCEPT` will
produce a diff even when there are no changes. Hopefully we won't have
to run that way too often though, so it might not be a huge issue?
---------
Co-authored-by: Fraser Waters <fraser@pulumi.com>
2024-05-13 07:18:25 +00:00
|
|
|
Options: TestUpdateOptions{T: t, HostF: hostF},
|
2023-12-05 17:19:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
validateSnap := func(snap *deploy.Snapshot) {
|
|
|
|
assert.NotNil(t, snap)
|
|
|
|
assert.Nil(t, snap.VerifyIntegrity())
|
|
|
|
assert.Len(t, snap.Resources, 3)
|
|
|
|
assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA"), snap.Resources[1].URN)
|
|
|
|
assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resB"), snap.Resources[2].URN)
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Run an update for initial state.
	originalSnap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), p.Options, false, p.BackendClient, nil, "0")
	assert.NoError(t, err)
	validateSnap(originalSnap)

	// Now run the targeted destroy specifying TargetDependents.
	// We expect an error because resB errored on delete.
	// The state should still contain resA and resB.
	snap, err := TestOp(Destroy).RunStep(project, p.GetTarget(t, originalSnap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
			TargetDependents: true,
		},
	}, false, p.BackendClient, nil, "1")
	assert.Error(t, err)
	validateSnap(snap)

	// Run the targeted destroy again against the original snapshot, this time explicitly specifying the targets.
	// We expect an error because resB errored on delete.
	// The state should still contain resA and resB.
	snap, err = TestOp(Destroy).RunStep(project, p.GetTarget(t, originalSnap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
				"urn:pulumi:test::test::pkgA:m:typA::resB",
			}),
		},
	}, false, p.BackendClient, nil, "2")
	assert.Error(t, err)
	validateSnap(snap)
}

// TestTargetDestroyChildDeleteFails ensures a resource that is part of a targeted destroy that fails to delete
// still remains in the snapshot.
func TestTargetDestroyChildDeleteFails(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{
				CreateF: func(_ context.Context, req plugin.CreateRequest) (plugin.CreateResponse, error) {
					return plugin.CreateResponse{
						ID:         "created-id",
						Properties: req.Properties,
						Status:     resource.StatusOK,
					}, nil
				},
				DeleteF: func(_ context.Context, req plugin.DeleteRequest) (plugin.DeleteResponse, error) {
					assert.Equal(t, "urn:pulumi:test::test::pkgA:m:typA$pkgA:m:typA::resB", string(req.URN))
					return plugin.DeleteResponse{Status: resource.StatusUnknown}, errors.New("can't delete")
				},
			}, nil
		}, deploytest.WithoutGrpc),
	}

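	// The test program registers resB as a child of resA, so a targeted destroy of
	// resA with TargetDependents also attempts to delete the child resB; the
	// provider's DeleteF above asserts that resB is the resource whose delete fails.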
	programF := deploytest.NewLanguageRuntimeF(func(info plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		resp, err := monitor.RegisterResource("pkgA:m:typA", "resA", true)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "resB", true, deploytest.ResourceOptions{
			Parent: resp.URN,
		})
		assert.NoError(t, err)

		return nil
	})
	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)

	p := &TestPlan{
		Options: TestUpdateOptions{T: t, HostF: hostF},
	}

	project := p.GetProject()

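	// resB's URN embeds its parent's type ("pkgA:m:typA$pkgA:m:typA") because it was
	// registered with resA as its Parent; the default provider again sits at index 0.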
	validateSnap := func(snap *deploy.Snapshot) {
		assert.NotNil(t, snap)
		assert.Nil(t, snap.VerifyIntegrity())
		assert.Len(t, snap.Resources, 3)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA::resA"), snap.Resources[1].URN)
		assert.Equal(t, resource.URN("urn:pulumi:test::test::pkgA:m:typA$pkgA:m:typA::resB"), snap.Resources[2].URN)
	}

	// Run an update for initial state.
	originalSnap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), p.Options, false, p.BackendClient, nil, "0")
	assert.NoError(t, err)
	validateSnap(originalSnap)

	// Now run the targeted destroy specifying TargetDependents.
	// We expect an error because resB errored on delete.
	// The state should still contain resA and resB.
	snap, err := TestOp(Destroy).RunStep(project, p.GetTarget(t, originalSnap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
			}),
			TargetDependents: true,
		},
	}, false, p.BackendClient, nil, "1")
	assert.Error(t, err)
	validateSnap(snap)

	// Run the targeted destroy again against the original snapshot, this time explicitly specifying the targets.
	// We expect an error because resB errored on delete.
	// The state should still contain resA and resB.
	snap, err = TestOp(Destroy).RunStep(project, p.GetTarget(t, originalSnap), TestUpdateOptions{
		T:     t,
		HostF: hostF,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"urn:pulumi:test::test::pkgA:m:typA::resA",
				"urn:pulumi:test::test::pkgA:m:typA$pkgA:m:typA::resB",
			}),
		},
	}, false, p.BackendClient, nil, "2")
	assert.Error(t, err)
	validateSnap(snap)
}

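// TestDependencyUnreleatedToTargetUpdatedSucceeds verifies that updating a targeted
// resource leaves an untargeted resource, and its recorded dependencies, unchanged.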
func TestDependencyUnreleatedToTargetUpdatedSucceeds(t *testing.T) {
	// This test is a regression test for https://github.com/pulumi/pulumi/issues/12096
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	inputs := resource.PropertyMap{}

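	// The first program registers the stack plus "target" and "unrelated", with no
	// dependency between them.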
	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "target", true, deploytest.ResourceOptions{
			Inputs: inputs,
		})
		assert.NoError(t, err)
		_, err = monitor.RegisterResource("pkgA:m:typA", "unrelated", true, deploytest.ResourceOptions{
			Inputs: inputs,
		})
		assert.NoError(t, err)

		return nil
	})

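	// The second program additionally registers "dep" and makes "unrelated" depend on
	// it. Neither is targeted below, so the engine must not record that new dependency
	// in the snapshot.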
	programF2 := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)
		_, err = monitor.RegisterResource("pkgA:m:typA", "target", true, deploytest.ResourceOptions{
			Inputs: inputs,
		})
		assert.NoError(t, err)
		resp, err := monitor.RegisterResource("pkgA:m:typA", "dep", true, deploytest.ResourceOptions{
			Inputs: inputs,
		})
		assert.NoError(t, err)
		_, err = monitor.RegisterResource("pkgA:m:typA", "unrelated", true, deploytest.ResourceOptions{
			Dependencies: []resource.URN{
				resp.URN,
			},
			Inputs: inputs,
		})
		assert.NoError(t, err)
		return nil
	})

	hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
	hostF2 := deploytest.NewPluginHostF(nil, nil, programF2, loaders...)
	p := &TestPlan{}

	project := p.GetProject()

	// Create all resources.
	snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
		T:     t,
		HostF: hostF,
	}, false, p.BackendClient, nil, "0")
	require.NoError(t, err)
	// Check we have 4 resources in the stack (stack, default provider, target, unrelated)
	require.Equal(t, 4, len(snap.Resources))

	// Run an update to target the target, and make sure the unrelated dependency isn't changed
	inputs = resource.PropertyMap{
		"foo": resource.NewStringProperty("bar"),
	}

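	// "**target**" is a glob-style URN target; it selects the "target" resource
	// registered above without spelling out its full URN.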
	snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
		T:     t,
		HostF: hostF2,
		UpdateOptions: UpdateOptions{
			Targets: deploy.NewUrnTargets([]string{
				"**target**",
			}),
		},
	}, false, p.BackendClient, nil, "1")
	require.NoError(t, err)

	assert.Equal(t, 4, len(snap.Resources))
	unrelatedURN := snap.Resources[3].URN
	assert.Equal(t, "unrelated", unrelatedURN.Name())
	assert.Equal(t, 0, len(snap.Resources[2].Dependencies))
}

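// TestTargetUntargetedParentWithUpdatedDependency checks that a targeted update leaves
// an untargeted component parent (and its child) in their previous state even when the
// program's dependencies have changed.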
func TestTargetUntargetedParentWithUpdatedDependency(t *testing.T) {
	t.Parallel()

	loaders := []*deploytest.ProviderLoader{
		deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
			return &deploytest.Provider{}, nil
		}),
	}

	inputs := resource.PropertyMap{}

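	// The first program registers the stack, newResource, and a "child" resource
	// parented to a "component" resource.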
	programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
		_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "newResource", true)
		assert.NoError(t, err)
		resp, err := monitor.RegisterResource("component", "parent", false)
		assert.NoError(t, err)

		_, err = monitor.RegisterResource("pkgA:m:typA", "child", true, deploytest.ResourceOptions{
			Parent: resp.URN,
			Inputs: inputs,
		})
		assert.NoError(t, err)

		return nil
	})

programF2 := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
2024-04-19 11:08:56 +00:00
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
Make sure non-targeted resources are not updated (#15476)
When the `--target` option is used, resources that already exist in the
snapshot, but aren't directly targeted should not be updated at all.
Internally in the engine, this is done by turning them into a
`SameStep`, meaning no updates will actually be preformed, and we will
make it look like the resource stayed the same.
However, we currently still write the "new" state of the resource (e.g.
updated dependencies, inputs, etc.) into the snapshot. This is mostly
fine as long as the new dependencies already exist. If a dependency on a
resource is that doesn't already exist is added however this breaks.
Since the resource that's being depended on doesn't exist in the
snapshot and isn't targeted, we won't create it. At the same time we're
adding a dependency on that virtually non-existing resource, which makes
the snapshot invalid.
Since we're in `--target` mode, we should do what we promised the user,
and only update the targeted resources, nothing else. Introduce a new
`NonTargetedSameStep` here, which does exactly that. It's essentially
the same as a `SameStep`, but we always use the *old* state instead of
the new one when writing it out. Since the resource is not targeted,
this leaves it in the same state as before.
Fixes #12096
Fixes #15382
2024-03-05 07:49:11 +00:00
|
|
|
assert.NoError(t, err)
|
|
|
|
|
2024-04-19 11:08:56 +00:00
|
|
|
resp, err := monitor.RegisterResource("pkgA:m:typA", "newResource", true)
|
Make sure non-targeted resources are not updated (#15476)
When the `--target` option is used, resources that already exist in the
snapshot, but aren't directly targeted should not be updated at all.
Internally in the engine, this is done by turning them into a
`SameStep`, meaning no updates will actually be preformed, and we will
make it look like the resource stayed the same.
However, we currently still write the "new" state of the resource (e.g.
updated dependencies, inputs, etc.) into the snapshot. This is mostly
fine as long as the new dependencies already exist. If a dependency on a
resource is that doesn't already exist is added however this breaks.
Since the resource that's being depended on doesn't exist in the
snapshot and isn't targeted, we won't create it. At the same time we're
adding a dependency on that virtually non-existing resource, which makes
the snapshot invalid.
Since we're in `--target` mode, we should do what we promised the user,
and only update the targeted resources, nothing else. Introduce a new
`NonTargetedSameStep` here, which does exactly that. It's essentially
the same as a `SameStep`, but we always use the *old* state instead of
the new one when writing it out. Since the resource is not targeted,
this leaves it in the same state as before.
Fixes #12096
Fixes #15382
2024-03-05 07:49:11 +00:00
|
|
|
assert.NoError(t, err)
|
|
|
|
|
2024-04-19 11:08:56 +00:00
|
|
|
respParent, err := monitor.RegisterResource("component", "parent", false, deploytest.ResourceOptions{
|
Make sure non-targeted resources are not updated (#15476)
When the `--target` option is used, resources that already exist in the
snapshot, but aren't directly targeted should not be updated at all.
Internally in the engine, this is done by turning them into a
`SameStep`, meaning no updates will actually be preformed, and we will
make it look like the resource stayed the same.
However, we currently still write the "new" state of the resource (e.g.
updated dependencies, inputs, etc.) into the snapshot. This is mostly
fine as long as the new dependencies already exist. If a dependency on a
resource is that doesn't already exist is added however this breaks.
Since the resource that's being depended on doesn't exist in the
snapshot and isn't targeted, we won't create it. At the same time we're
adding a dependency on that virtually non-existing resource, which makes
the snapshot invalid.
Since we're in `--target` mode, we should do what we promised the user,
and only update the targeted resources, nothing else. Introduce a new
`NonTargetedSameStep` here, which does exactly that. It's essentially
the same as a `SameStep`, but we always use the *old* state instead of
the new one when writing it out. Since the resource is not targeted,
this leaves it in the same state as before.
Fixes #12096
Fixes #15382
2024-03-05 07:49:11 +00:00
|
|
|
Dependencies: []resource.URN{
|
2024-04-19 11:08:56 +00:00
|
|
|
resp.URN,
|
Make sure non-targeted resources are not updated (#15476)
When the `--target` option is used, resources that already exist in the
snapshot, but aren't directly targeted should not be updated at all.
Internally in the engine, this is done by turning them into a
`SameStep`, meaning no updates will actually be preformed, and we will
make it look like the resource stayed the same.
However, we currently still write the "new" state of the resource (e.g.
updated dependencies, inputs, etc.) into the snapshot. This is mostly
fine as long as the new dependencies already exist. If a dependency on a
resource is that doesn't already exist is added however this breaks.
Since the resource that's being depended on doesn't exist in the
snapshot and isn't targeted, we won't create it. At the same time we're
adding a dependency on that virtually non-existing resource, which makes
the snapshot invalid.
Since we're in `--target` mode, we should do what we promised the user,
and only update the targeted resources, nothing else. Introduce a new
`NonTargetedSameStep` here, which does exactly that. It's essentially
the same as a `SameStep`, but we always use the *old* state instead of
the new one when writing it out. Since the resource is not targeted,
this leaves it in the same state as before.
Fixes #12096
Fixes #15382
2024-03-05 07:49:11 +00:00
|
|
|
},
|
|
|
|
Inputs: inputs,
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
2024-04-19 11:08:56 +00:00
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "child", true, deploytest.ResourceOptions{
|
|
|
|
Parent: respParent.URN,
|
Make sure non-targeted resources are not updated (#15476)
When the `--target` option is used, resources that already exist in the
snapshot, but aren't directly targeted should not be updated at all.
Internally in the engine, this is done by turning them into a
`SameStep`, meaning no updates will actually be preformed, and we will
make it look like the resource stayed the same.
However, we currently still write the "new" state of the resource (e.g.
updated dependencies, inputs, etc.) into the snapshot. This is mostly
fine as long as the new dependencies already exist. If a dependency on a
resource is that doesn't already exist is added however this breaks.
Since the resource that's being depended on doesn't exist in the
snapshot and isn't targeted, we won't create it. At the same time we're
adding a dependency on that virtually non-existing resource, which makes
the snapshot invalid.
Since we're in `--target` mode, we should do what we promised the user,
and only update the targeted resources, nothing else. Introduce a new
`NonTargetedSameStep` here, which does exactly that. It's essentially
the same as a `SameStep`, but we always use the *old* state instead of
the new one when writing it out. Since the resource is not targeted,
this leaves it in the same state as before.
Fixes #12096
Fixes #15382
2024-03-05 07:49:11 +00:00
|
|
|
Inputs: inputs,
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
    hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
    hostF2 := deploytest.NewPluginHostF(nil, nil, programF2, loaders...)

    p := &TestPlan{}
    project := p.GetProject()

    //nolint:paralleltest // Requires serial access to TestPlan
    t.Run("target update", func(t *testing.T) {
        // Create all resources.
        snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
            T:     t,
            HostF: hostF,
        }, false, p.BackendClient, nil, "0")
        require.NoError(t, err)

        // Check we have 5 resources in the stack (stack, newResource, parent, provider, child).
        require.Equal(t, 5, len(snap.Resources))

        // Run an update that targets only the child. This works because we don't need to create the parent,
        // so we can just SameStep it using the data currently in state.
        inputs = resource.PropertyMap{
            "foo": resource.NewStringProperty("bar"),
        }
        snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
            T:     t,
            HostF: hostF2,
            UpdateOptions: UpdateOptions{
                Targets: deploy.NewUrnTargets([]string{
                    "**child**",
                }),
            },
        }, false, p.BackendClient, nil, "1")
        require.NoError(t, err)

        assert.Equal(t, 5, len(snap.Resources))
        parentURN := snap.Resources[3].URN
        assert.Equal(t, "parent", parentURN.Name())
        assert.Equal(t, parentURN, snap.Resources[4].Parent)
        parentDeps := snap.Resources[3].Dependencies
        assert.Equal(t, 0, len(parentDeps))
    })

    //nolint:paralleltest // Requires serial access to TestPlan
    t.Run("target create", func(t *testing.T) {
        // Create all resources from scratch (nil snapshot) but only target the child. This should error
        // that the parent needs to be created.
        snap, err := TestOp(Update).Run(project, p.GetTarget(t, nil), TestUpdateOptions{
            T:     t,
            HostF: hostF,
            UpdateOptions: UpdateOptions{
                Targets: deploy.NewUrnTargets([]string{
                    "**child**",
                }),
            },
        }, false, p.BackendClient, nil)
        assert.ErrorContains(t, err, "untargeted create")

        // We should have two resources: the stack and the default provider we made for the child.
        assert.Equal(t, 2, len(snap.Resources))
        assert.Equal(t, tokens.Type("pulumi:pulumi:Stack"), snap.Resources[0].URN.Type())
        assert.Equal(t, tokens.Type("pulumi:providers:pkgA"), snap.Resources[1].URN.Type())
    })
}

func TestTargetChangeProviderVersion(t *testing.T) {
    // This test is a regression test for https://github.com/pulumi/pulumi/issues/15704
    t.Parallel()

    loaders := []*deploytest.ProviderLoader{
        deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
        deploytest.NewProviderLoader("pkgB", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
        deploytest.NewProviderLoader("pkgB", semver.MustParse("2.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
    }

    inputs := resource.PropertyMap{}

    providerVersion := "1.0.0"
    programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
        _, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
        assert.NoError(t, err)

        _, err = monitor.RegisterResource("pkgA:index:typA", "target", true, deploytest.ResourceOptions{
            Inputs: inputs,
        })
        assert.NoError(t, err)

        _, err = monitor.RegisterResource("pkgB:index:typA", "unrelated", true, deploytest.ResourceOptions{
            Inputs:  inputs,
            Version: providerVersion,
        })
        assert.NoError(t, err)

        return nil
    })

    hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
    options := TestUpdateOptions{T: t, HostF: hostF}
    p := &TestPlan{}
    project := p.GetProject()

    // Create all resources.
    snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), options, false, p.BackendClient, nil, "0")
    require.NoError(t, err)

    // Check we have 5 resources in the stack (stack, provider A, target, provider B, unrelated).
    require.Equal(t, 5, len(snap.Resources))

    // Run an update that targets "target" but also happens to change the provider version used by the
    // unrelated resource.
    providerVersion = "2.0.0"
    inputs = resource.PropertyMap{
        "foo": resource.NewStringProperty("bar"),
    }
    options.UpdateOptions = UpdateOptions{
        Targets: deploy.NewUrnTargets([]string{
            "**target**",
        }),
    }
    snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), options, false, p.BackendClient, nil, "1")
    assert.ErrorContains(t, err,
        "for resource urn:pulumi:test::test::pkgB:index:typA::unrelated has not been registered yet")

    // 6 because we have the stack, provider A, target, provider B, unrelated, and the new provider B.
    assert.Equal(t, 6, len(snap.Resources))
}

func TestTargetChangeAndSameProviderVersion(t *testing.T) {
    // This test is a regression test for https://github.com/pulumi/pulumi/issues/15704
    t.Parallel()

    loaders := []*deploytest.ProviderLoader{
        deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
        deploytest.NewProviderLoader("pkgB", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
        deploytest.NewProviderLoader("pkgB", semver.MustParse("2.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
    }

    inputs := resource.PropertyMap{}

    providerVersion := "1.0.0"
    programF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
        _, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
        assert.NoError(t, err)

        _, err = monitor.RegisterResource("pkgA:index:typA", "target", true, deploytest.ResourceOptions{
            Inputs: inputs,
        })
        assert.NoError(t, err)

        _, err = monitor.RegisterResource("pkgB:index:typA", "unrelated1", true, deploytest.ResourceOptions{
            Inputs:  inputs,
            Version: providerVersion,
        })
        assert.NoError(t, err)

        _, err = monitor.RegisterResource("pkgB:index:typA", "unrelated2", true, deploytest.ResourceOptions{
            Inputs: inputs,
            // This one always uses 1.0.0.
            Version: "1.0.0",
        })
        assert.NoError(t, err)

        return nil
    })

    hostF := deploytest.NewPluginHostF(nil, nil, programF, loaders...)
    options := TestUpdateOptions{T: t, HostF: hostF}
    p := &TestPlan{}
    project := p.GetProject()

    // Create all resources.
    snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), options, false, p.BackendClient, nil, "0")
    require.NoError(t, err)

    // Check we have 6 resources in the stack (stack, provider A, target, provider B, unrelated1, unrelated2).
    require.Equal(t, 6, len(snap.Resources))

    // Run an update that targets "target" but also happens to change the provider version used by
    // unrelated1.
    providerVersion = "2.0.0"
    inputs = resource.PropertyMap{
        "foo": resource.NewStringProperty("bar"),
    }
    options.UpdateOptions = UpdateOptions{
        Targets: deploy.NewUrnTargets([]string{
            "**target**",
        }),
    }
    snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), options, false, p.BackendClient, nil, "1")
    assert.ErrorContains(t, err,
        "for resource urn:pulumi:test::test::pkgB:index:typA::unrelated1 has not been registered yet")

    // Check we have 7 resources in the stack (stack, provider A, target, provider B, unrelated1, unrelated2,
    // new provider B).
    assert.Equal(t, 7, len(snap.Resources))
}
Propagate deleted dependencies of untargeted resources (#16247)
When using `--target` to target specific resources during an update, we
use the list of targets to decide which steps to generate given a set of
resource registrations. Specifically:
* If the registration event names a resource that is targeted, we
process it as usual.
* If the registration event names a resource that _is not_ targeted, we
emit a `SameStep` for it.
In the latter case, the emission of a `SameStep` means that the old
state for the resource will be copied across to the new state. This is
the desired behaviour -- the resource was not targeted and so the new
state should contain the resource exactly as it was prior to the update.
However, this presents a problem if the old state has references to
resources that either will not appear in the new state, or will appear
in the wrong place. Consider the following program in TypeScript-esque
pseudocode:
```typescript
const a = new Resource("a")
const b = new Resource("b", { dependency: a })
const c = new Resource("c")
```
Here, `b` depends on `a`, while `a` and `c` have no dependencies. We run
this program without specifying targets and obtain a state containing
`a`, `b` and `c`, with `a` appearing before `b` due to `b`'s dependency
on `a`. We now modify the program as follows:
```typescript
const b = new Resource("b")
const c = new Resource("c")
```
`a` has been removed from the program and consequently `b` no longer
depends on it. We once more run the program, this time with a `--target`
of `c`. That is to say, neither `a` nor `b` is targeted. The execution
proceeds as follows:
* `a` is not in the program, so no `RegisterResourceEvent` will be
emitted and processed for it.
* `b` is in the program, but it is not targeted. Its
`RegisterResourceEvent` will be turned into a `SameStep` and `b`'s _old
state will be copied as-is to the new state_.
* `c` is in the program and is targeted. It will be processed as normal.
At the end of execution when we come to write the snapshot, we take the
following actions:
* We first write the processed resources: `b`'s old state and `c`'s new
state.
* We then copy over any unprocessed resources from the base (previous)
snapshot. This includes `a` (which is again desirable since its deletion
should not be processed due to it not being targeted).
Our snapshot is now not topologically sorted and thus invalid: `b` has a
dependency on `a`, but `a` appears after it. Presently this bug will
manifest irrespective of the nature of the dependency: `.Dependencies`,
`.PropertyDependencies` and `.DeletedWith` are all affected.
This commit fixes this issue by traversing all untargeted resource
dependency relationships and ensuring that `SameStep`s (or better if
they have been targeted) are emitted before emitting the depending
resource's `SameStep`.
* Fixes #16052
* Fixes #15959
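A rough sketch of that traversal, under stated assumptions (the function and parameter names below are hypothetical; the engine's actual step generator is more involved). It covers `.Dependencies`, `.PropertyDependencies` and `.DeletedWith`, matching the three relationship kinds named above:

```go
import "github.com/pulumi/pulumi/sdk/v3/go/common/resource"

// emitSameStepWithDeps is a rough sketch (hypothetical names) of the fix:
// before an untargeted resource's SameStep is emitted, recursively emit
// steps for every resource its old state refers to, keeping the new
// snapshot topologically sorted.
func emitSameStepWithDeps(
	old *resource.State,
	byURN map[resource.URN]*resource.State, // old states from the base snapshot
	emitted map[resource.URN]bool, // resources that already have a step
	emit func(*resource.State), // appends a SameStep for the given state
) {
	if emitted[old.URN] {
		return
	}
	emitted[old.URN] = true

	// Gather every dependency-like reference held by the old state.
	deps := append([]resource.URN{}, old.Dependencies...)
	for _, urns := range old.PropertyDependencies {
		deps = append(deps, urns...)
	}
	if old.DeletedWith != "" {
		deps = append(deps, old.DeletedWith)
	}

	// Dependencies first, then the resource itself.
	for _, dep := range deps {
		if depState, ok := byURN[dep]; ok {
			emitSameStepWithDeps(depState, byURN, emitted, emit)
		}
	}
	emit(old)
}
```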
func TestUntargetedDependencyChainsArePreserved(t *testing.T) {
    t.Parallel()

    // Arrange.
    loaders := []*deploytest.ProviderLoader{
        deploytest.NewProviderLoader("pkgA", semver.MustParse("1.0.0"), func() (plugin.Provider, error) {
            return &deploytest.Provider{}, nil
        }),
    }

    targetName := "target"

    t.Run("dependencies", func(t *testing.T) {
        beforeF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
            _, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
            assert.NoError(t, err)

            a, err := monitor.RegisterResource("pkgA:m:typA", "a", true)
            assert.NoError(t, err)

            b, err := monitor.RegisterResource("pkgA:m:typA", "b", true, deploytest.ResourceOptions{
                Dependencies: []resource.URN{a.URN},
            })
            assert.NoError(t, err)

            _, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
                Dependencies: []resource.URN{b.URN},
            })
            assert.NoError(t, err)

            _, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
            assert.NoError(t, err)

            return nil
        })

        beforeHostF := deploytest.NewPluginHostF(nil, nil, beforeF, loaders...)

t.Run("deleting the bottom of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
b, err := monitor.RegisterResource("pkgA:m:typA", "b", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
|
|
|
|
Dependencies: []resource.URN{b.URN},
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("deleting the middle of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "a", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("deleting the entirety of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("property dependencies", func(t *testing.T) {
|
|
|
|
beforeF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
a, err := monitor.RegisterResource("pkgA:m:typA", "a", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
b, err := monitor.RegisterResource("pkgA:m:typA", "b", true, deploytest.ResourceOptions{
|
|
|
|
PropertyDeps: map[resource.PropertyKey][]resource.URN{
|
|
|
|
"prop": {a.URN},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
|
|
|
|
PropertyDeps: map[resource.PropertyKey][]resource.URN{
|
|
|
|
"prop": {b.URN},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
beforeHostF := deploytest.NewPluginHostF(nil, nil, beforeF, loaders...)
|
|
|
|
|
|
|
|
t.Run("deleting the bottom of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
b, err := monitor.RegisterResource("pkgA:m:typA", "b", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
|
|
|
|
PropertyDeps: map[resource.PropertyKey][]resource.URN{
|
|
|
|
"prop": {b.URN},
|
|
|
|
},
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("deleting the middle of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "a", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
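
		// "Deleting the entirety": the new program drops both "a" and "b",
		// leaving only a dependency-free "c" and the target resource.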
t.Run("deleting the entirety of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
})
|
|
|
|
|
|
|
|
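
	// The subtests below mirror the property-dependency scenarios, but express
	// the chain through DeletedWith options ("b" is deleted with "a", "c" with
	// "b") instead of property dependencies.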
t.Run("deleted with", func(t *testing.T) {
|
|
|
|
beforeF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
a, err := monitor.RegisterResource("pkgA:m:typA", "a", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
b, err := monitor.RegisterResource("pkgA:m:typA", "b", true, deploytest.ResourceOptions{
|
|
|
|
DeletedWith: a.URN,
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
|
|
|
|
DeletedWith: b.URN,
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
beforeHostF := deploytest.NewPluginHostF(nil, nil, beforeF, loaders...)
|
|
|
|
|
|
|
|
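
		// "Deleting the bottom": the new program drops "a" and re-registers "b"
		// without a DeletedWith option, leaving "c" deleted with "b" as before.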
t.Run("deleting the bottom of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
b, err := monitor.RegisterResource("pkgA:m:typA", "b", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true, deploytest.ResourceOptions{
|
|
|
|
DeletedWith: b.URN,
|
|
|
|
})
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
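
		// "Deleting the middle": the new program drops "b" and re-registers "c"
		// without a DeletedWith option, leaving "a" untouched.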
t.Run("deleting the middle of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "a", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
|
|
|
|
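
		// "Deleting the entirety": the new program drops both "a" and "b",
		// leaving only "c" (with no DeletedWith option) and the target resource.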
t.Run("deleting the entirety of a dependency chain", func(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// Arrange.
|
|
|
|
p := &TestPlan{}
|
|
|
|
project := p.GetProject()
|
|
|
|
|
|
|
|
snap, err := TestOp(Update).RunStep(project, p.GetTarget(t, nil), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: beforeHostF,
|
|
|
|
}, false, p.BackendClient, nil, "0")
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
afterF := deploytest.NewLanguageRuntimeF(func(_ plugin.RunInfo, monitor *deploytest.ResourceMonitor) error {
|
|
|
|
_, err := monitor.RegisterResource("pulumi:pulumi:Stack", "test", false)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", "c", true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
_, err = monitor.RegisterResource("pkgA:m:typA", targetName, true)
|
|
|
|
assert.NoError(t, err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
afterHostF := deploytest.NewPluginHostF(nil, nil, afterF, loaders...)
|
|
|
|
|
|
|
|
// Act.
|
|
|
|
snap, err = TestOp(Update).RunStep(project, p.GetTarget(t, snap), TestUpdateOptions{
|
|
|
|
T: t,
|
|
|
|
HostF: afterHostF,
|
|
|
|
UpdateOptions: UpdateOptions{
|
|
|
|
Targets: deploy.NewUrnTargets([]string{fmt.Sprintf("**%s**", targetName)}),
|
|
|
|
},
|
|
|
|
}, false, p.BackendClient, nil, "1")
|
|
|
|
|
|
|
|
// Assert.
|
|
|
|
assert.NoError(t, err)
|
|
|
|
assert.NoError(t, snap.VerifyIntegrity())
|
|
|
|
})
|
|
|
|
})
|
|
|
|
}
|