2022-05-23 19:13:21 +00:00
|
|
|
// Copyright 2016-2022, Pulumi Corporation.
|
2018-05-22 19:43:36 +00:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
2017-08-30 01:25:50 +00:00
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
package diy
|
2017-08-30 01:25:50 +00:00
|
|
|
|
|
|
|
import (
|
2019-04-25 03:55:39 +00:00
|
|
|
"context"
|
2023-02-10 12:24:28 +00:00
|
|
|
"errors"
|
2017-10-24 03:37:40 +00:00
|
|
|
"fmt"
|
2022-06-07 07:34:44 +00:00
|
|
|
"io"
|
2018-01-25 02:22:41 +00:00
|
|
|
"path"
|
2017-08-30 01:25:50 +00:00
|
|
|
"path/filepath"
|
2018-01-25 02:22:41 +00:00
|
|
|
"strings"
|
2017-10-24 03:37:40 +00:00
|
|
|
"time"
|
2017-08-30 01:25:50 +00:00
|
|
|
|
2023-10-18 10:52:54 +00:00
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/env"
|
2021-03-17 13:20:05 +00:00
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/util/retry"
|
2021-02-08 18:49:57 +00:00
|
|
|
|
2021-03-17 13:20:05 +00:00
|
|
|
"github.com/pulumi/pulumi/pkg/v3/engine"
|
2019-10-29 22:19:44 +00:00
|
|
|
|
2021-02-10 00:20:01 +00:00
|
|
|
"gocloud.dev/blob"
|
2019-08-14 18:50:03 +00:00
|
|
|
"gocloud.dev/gcerrors"
|
2018-02-28 18:02:02 +00:00
|
|
|
|
2021-03-17 13:20:05 +00:00
|
|
|
"github.com/pulumi/pulumi/pkg/v3/backend"
|
|
|
|
"github.com/pulumi/pulumi/pkg/v3/resource/deploy"
|
|
|
|
"github.com/pulumi/pulumi/pkg/v3/resource/stack"
|
|
|
|
"github.com/pulumi/pulumi/pkg/v3/secrets"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/apitype"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/encoding"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/resource/config"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
|
|
|
|
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
|
2017-08-30 01:25:50 +00:00
|
|
|
)
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 15:29:46 +00:00
|
|
|
// DisableIntegrityChecking can be set to true to disable checkpoint state integrity verification. This is not
// recommended, because it could mean proceeding even in the face of a corrupted checkpoint state file, but can
// be used as a last resort when a command absolutely must be run.
//
// NOTE(review): getSnapshot below consults backend.DisableIntegrityChecking rather than this package-level
// flag — confirm whether this variable is still read anywhere, or whether it is kept only for compatibility.
var DisableIntegrityChecking bool
|
2017-12-01 00:42:55 +00:00
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// diyQuery is an implementation of engine.QueryInfo backed by diy state
// (see newQuery, which constructs it from a backend.QueryOperation).
type diyQuery struct {
	// root is the project root directory for the query operation.
	root string
	// proj is the project the query runs against.
	proj *workspace.Project
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// GetRoot returns the project root directory for this query.
func (q *diyQuery) GetRoot() string {
	return q.root
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// GetProject returns the project this query runs against.
func (q *diyQuery) GetProject() *workspace.Project {
	return q.proj
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// update is an implementation of engine.Update backed by diy state.
type update struct {
	// root is the project root directory the update runs from.
	root string
	// proj is the project being updated.
	proj *workspace.Project
	// target is the deployment target (stack name, config, decrypter, and
	// snapshot) resolved by getTarget for this update.
	target *deploy.Target
	// backend is the diy backend that created this update.
	backend *diyBackend
}
|
2017-08-30 01:25:50 +00:00
|
|
|
|
2018-01-08 21:01:40 +00:00
|
|
|
// GetRoot returns the project root directory the update runs from.
func (u *update) GetRoot() string {
	return u.root
}
|
|
|
|
|
2018-02-14 21:56:16 +00:00
|
|
|
// GetProject returns the project being updated.
func (u *update) GetProject() *workspace.Project {
	return u.proj
}
|
|
|
|
|
2018-01-08 21:01:40 +00:00
|
|
|
// GetTarget returns the deployment target for this update.
func (u *update) GetTarget() *deploy.Target {
	return u.target
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) newQuery(
|
2022-07-18 13:36:31 +00:00
|
|
|
ctx context.Context,
|
2023-03-03 16:36:39 +00:00
|
|
|
op backend.QueryOperation,
|
|
|
|
) (engine.QueryInfo, error) {
|
2024-01-30 15:53:10 +00:00
|
|
|
return &diyQuery{root: op.Root, proj: op.Proj}, nil
|
2019-10-23 22:15:04 +00:00
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) newUpdate(
|
2022-07-18 13:36:31 +00:00
|
|
|
ctx context.Context,
|
2023-05-08 13:54:43 +00:00
|
|
|
secretsProvider secrets.Provider,
|
2024-01-30 15:53:10 +00:00
|
|
|
ref *diyBackendReference,
|
2023-03-03 16:36:39 +00:00
|
|
|
op backend.UpdateOperation,
|
|
|
|
) (*update, error) {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
2018-01-08 21:01:40 +00:00
|
|
|
|
|
|
|
// Construct the deployment target.
|
2023-05-08 13:54:43 +00:00
|
|
|
target, err := b.getTarget(ctx, secretsProvider, ref,
|
2022-07-18 13:36:31 +00:00
|
|
|
op.StackConfiguration.Config, op.StackConfiguration.Decrypter)
|
2018-01-08 21:01:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Construct and return a new update.
|
|
|
|
return &update{
|
2019-04-18 22:57:54 +00:00
|
|
|
root: op.Root,
|
|
|
|
proj: op.Proj,
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
target: target,
|
|
|
|
backend: b,
|
2018-01-08 21:01:40 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) getTarget(
|
2022-07-18 13:36:31 +00:00
|
|
|
ctx context.Context,
|
2023-05-08 13:54:43 +00:00
|
|
|
secretsProvider secrets.Provider,
|
2024-01-30 15:53:10 +00:00
|
|
|
ref *diyBackendReference,
|
2022-07-18 13:36:31 +00:00
|
|
|
cfg config.Map,
|
2023-03-03 16:36:39 +00:00
|
|
|
dec config.Decrypter,
|
|
|
|
) (*deploy.Target, error) {
|
2023-05-08 13:54:43 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
|
|
|
stack, err := b.GetStack(ctx, ref)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
snapshot, err := stack.Snapshot(ctx, secretsProvider)
|
2018-01-08 21:01:40 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &deploy.Target{
|
2023-05-08 13:54:43 +00:00
|
|
|
Name: ref.Name(),
|
2024-01-30 15:53:10 +00:00
|
|
|
Organization: "organization", // diy has no organizations really, but we just always say it's "organization"
|
2022-08-31 09:33:29 +00:00
|
|
|
Config: cfg,
|
|
|
|
Decrypter: dec,
|
|
|
|
Snapshot: snapshot,
|
2018-01-08 21:01:40 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2023-05-08 13:54:43 +00:00
|
|
|
// errCheckpointNotFound is the sentinel error returned by stackExists when the
// expected checkpoint file is absent from the bucket.
var errCheckpointNotFound = errors.New("checkpoint does not exist")
|
|
|
|
|
|
|
|
// stackExists simply does a check that the checkpoint file we expect for this stack exists.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) stackExists(
|
2022-07-18 13:36:31 +00:00
|
|
|
ctx context.Context,
|
2024-01-30 15:53:10 +00:00
|
|
|
ref *diyBackendReference,
|
2023-05-08 13:54:43 +00:00
|
|
|
) (string, error) {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
Make some stack-related CLI improvements (#947)
This change includes a handful of stack-related CLI formatting
improvements that I've been noodling on in the background for a while,
based on things that tend to trip up demos and the inner loop workflow.
This includes:
* If `pulumi stack select` is run by itself, use an interactive
CLI menu to let the user select an existing stack, or choose to
create a new one. This looks as follows
$ pulumi stack select
Please choose a stack, or choose to create a new one:
abcdef
babblabblabble
> currentlyselected
defcon
<create a new stack>
and is navigated in the usual way (key up, down, enter).
* If a stack name is passed that does not exist, prompt the user
to ask whether s/he wants to create one on-demand. This hooks
interesting moments in time, like `pulumi stack select foo`,
and cuts down on the need to run additional commands.
* If a current stack is required, but none is currently selected,
then pop the same interactive menu shown above to select one.
Depending on the command being run, we may or may not show the
option to create a new stack (e.g., that doesn't make much sense
when you're running `pulumi destroy`, but might when you're
running `pulumi stack`). This again lets you do with a single
command what would have otherwise entailed an error with multiple
commands to recover from it.
* If you run `pulumi stack init` without any additional arguments,
we interactively prompt for the stack name. Before, we would
error and you'd then need to run `pulumi stack init <name>`.
* Colorize some things nicely; for example, now all prompts will
by default become bright white.
2018-02-16 23:03:54 +00:00
|
|
|
|
2023-05-08 13:54:43 +00:00
|
|
|
chkpath := b.stackPath(ctx, ref)
|
|
|
|
exists, err := b.bucket.Exists(ctx, chkpath)
|
|
|
|
if err != nil {
|
|
|
|
return chkpath, fmt.Errorf("failed to load checkpoint: %w", err)
|
|
|
|
}
|
|
|
|
if !exists {
|
|
|
|
return chkpath, errCheckpointNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
return chkpath, nil
|
|
|
|
}
|
2017-08-30 01:25:50 +00:00
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) getSnapshot(ctx context.Context,
|
|
|
|
secretsProvider secrets.Provider, ref *diyBackendReference,
|
2023-05-08 13:54:43 +00:00
|
|
|
) (*deploy.Snapshot, error) {
|
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
|
|
|
|
|
|
|
checkpoint, err := b.getCheckpoint(ctx, ref)
|
2017-08-30 01:25:50 +00:00
|
|
|
if err != nil {
|
2023-05-08 13:54:43 +00:00
|
|
|
return nil, fmt.Errorf("failed to load checkpoint: %w", err)
|
2017-08-30 01:25:50 +00:00
|
|
|
}
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 15:29:46 +00:00
|
|
|
// Materialize an actual snapshot object.
|
2023-05-08 13:54:43 +00:00
|
|
|
snapshot, err := stack.DeserializeCheckpoint(ctx, secretsProvider, checkpoint)
|
2017-10-22 20:39:21 +00:00
|
|
|
if err != nil {
|
2023-05-08 13:54:43 +00:00
|
|
|
return nil, err
|
2017-10-22 20:39:21 +00:00
|
|
|
}
|
2017-10-18 22:37:18 +00:00
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 15:29:46 +00:00
|
|
|
// Ensure the snapshot passes verification before returning it, to catch bugs early.
|
2023-12-04 15:12:56 +00:00
|
|
|
if !backend.DisableIntegrityChecking {
|
|
|
|
if err := snapshot.VerifyIntegrity(); err != nil {
|
|
|
|
return nil, fmt.Errorf("snapshot integrity failure; refusing to use it: %w", err)
|
2017-12-01 00:42:55 +00:00
|
|
|
}
|
2017-11-30 19:13:18 +00:00
|
|
|
}
|
|
|
|
|
2023-05-08 13:54:43 +00:00
|
|
|
return snapshot, nil
|
2017-08-30 01:25:50 +00:00
|
|
|
}
|
|
|
|
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
// GetCheckpoint loads a checkpoint file for the given stack in this project, from the current project workspace.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) getCheckpoint(ctx context.Context, ref *diyBackendReference) (*apitype.CheckpointV3, error) {
|
2023-04-03 21:36:44 +00:00
|
|
|
chkpath := b.stackPath(ctx, ref)
|
|
|
|
bytes, err := b.bucket.ReadAll(ctx, chkpath)
|
2017-10-25 17:20:08 +00:00
|
|
|
if err != nil {
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
return nil, err
|
2017-10-25 17:20:08 +00:00
|
|
|
}
|
2022-05-23 19:13:21 +00:00
|
|
|
m := encoding.JSON
|
|
|
|
if encoding.IsCompressed(bytes) {
|
|
|
|
m = encoding.Gzip(m)
|
|
|
|
}
|
2017-08-30 01:25:50 +00:00
|
|
|
|
2022-05-23 19:13:21 +00:00
|
|
|
return stack.UnmarshalVersionedCheckpointToLatestCheckpoint(m, bytes)
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// saveCheckpoint marshals the given versioned checkpoint and writes it to the
// stack's state file in the bucket, backing up any pre-existing state file
// first. It returns the path of the backup file (if one was made) and the path
// of the checkpoint file that was written. On a write failure it retries the
// write with exponential backoff before giving up.
func (b *diyBackend) saveCheckpoint(
	ctx context.Context,
	ref *diyBackendReference,
	checkpoint *apitype.VersionedCheckpoint,
) (backupFile string, file string, _ error) {
	// Make a serializable stack and then use the encoder to encode it.
	file = b.stackPath(ctx, ref)
	// Detect the marshaler from the file extension, ignoring a trailing ".gz".
	m, ext := encoding.Detect(strings.TrimSuffix(file, ".gz"))
	if m == nil {
		return "", "", fmt.Errorf("resource serialization failed; illegal markup extension: '%v'", ext)
	}
	if filepath.Ext(file) == "" {
		file = file + ext
	}
	// Honor the backend's gzip setting: ensure the path carries (or lacks) the
	// ".gz" suffix and wrap the marshaler in gzip compression to match.
	if b.gzip {
		if filepath.Ext(file) != encoding.GZIPExt {
			file = file + ".gz"
		}
		m = encoding.Gzip(m)
	} else {
		file = strings.TrimSuffix(file, ".gz")
	}

	byts, err := m.Marshal(checkpoint)
	if err != nil {
		return "", "", fmt.Errorf("An IO error occurred while marshalling the checkpoint: %w", err)
	}

	// Back up the existing file if it already exists. Don't delete the original, the following WriteAll will
	// atomically replace it anyway and various other bits of the system depend on being able to find the
	// .json file to know the stack currently exists (see https://github.com/pulumi/pulumi/issues/9033 for
	// context).
	filePlain := strings.TrimSuffix(file, ".gz")
	fileGzip := filePlain + ".gz"
	// We need to make sure that an out of date state file doesn't exist so we
	// only keep the file of the type we are working with.
	bckGzip := backupTarget(ctx, b.bucket, fileGzip, b.gzip)
	bckPlain := backupTarget(ctx, b.bucket, filePlain, !b.gzip)
	if b.gzip {
		backupFile = bckGzip
	} else {
		backupFile = bckPlain
	}

	// And now write out the new snapshot file, overwriting that location.
	if err = b.bucket.WriteAll(ctx, file, byts, nil); err != nil {
		// The initial write failed; take the mutex so only one retry sequence
		// runs at a time, then retry with exponential backoff.
		b.mutex.Lock()
		defer b.mutex.Unlock()

		// FIXME: Would be nice to make these configurable
		delay, _ := time.ParseDuration("1s")
		maxDelay, _ := time.ParseDuration("30s")
		backoff := 1.2

		// Retry the write 10 times in case of upstream bucket errors
		_, _, err = retry.Until(ctx, retry.Acceptor{
			Delay:    &delay,
			MaxDelay: &maxDelay,
			Backoff:  &backoff,
			Accept: func(try int, nextRetryTime time.Duration) (bool, interface{}, error) {
				// And now write out the new snapshot file, overwriting that location.
				err := b.bucket.WriteAll(ctx, file, byts, nil)
				if err != nil {
					logging.V(7).Infof("Error while writing snapshot to: %s (attempt=%d, error=%s)", file, try, err)
					if try > 10 {
						// Exhausted the retry budget; surface the error.
						return false, nil, fmt.Errorf("An IO error occurred while writing the new snapshot file: %w", err)
					}
					// Not accepted yet: let retry.Until back off and try again.
					return false, nil, nil
				}
				return true, nil, nil
			},
		})
		if err != nil {
			return backupFile, "", err
		}
	}

	logging.V(7).Infof("Saved stack %s checkpoint to: %s (backup=%s)", ref.FullyQualifiedName(), file, backupFile)

	// And if we are retaining historical checkpoint information, write it out again
	if b.Env.GetBool(env.DIYBackendRetainCheckpoints) {
		if err = b.bucket.WriteAll(ctx, fmt.Sprintf("%v.%v", file, time.Now().UnixNano()), byts, nil); err != nil {
			return backupFile, "", fmt.Errorf("An IO error occurred while writing the new snapshot file: %w", err)
		}
	}

	return backupFile, file, nil
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) saveStack(
|
2023-04-03 21:36:44 +00:00
|
|
|
ctx context.Context,
|
2024-01-30 15:53:10 +00:00
|
|
|
ref *diyBackendReference, snap *deploy.Snapshot,
|
2023-02-10 12:24:28 +00:00
|
|
|
sm secrets.Manager,
|
|
|
|
) (string, error) {
|
|
|
|
contract.Requiref(ref != nil, "ref", "ref was nil")
|
2024-03-25 10:30:14 +00:00
|
|
|
chk, err := stack.SerializeCheckpoint(ref.FullyQualifiedName(), snap, false /* showSecrets */)
|
2023-01-11 11:24:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("serializaing checkpoint: %w", err)
|
|
|
|
}
|
|
|
|
|
2023-04-03 21:36:44 +00:00
|
|
|
backup, file, err := b.saveCheckpoint(ctx, ref, chk)
|
2023-01-11 11:24:10 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2023-12-04 15:12:56 +00:00
|
|
|
if !backend.DisableIntegrityChecking {
|
2017-12-01 00:42:55 +00:00
|
|
|
// Finally, *after* writing the checkpoint, check the integrity. This is done afterwards so that we write
|
|
|
|
// out the checkpoint file since it may contain resource state updates. But we will warn the user that the
|
|
|
|
// file is already written and might be bad.
|
|
|
|
if verifyerr := snap.VerifyIntegrity(); verifyerr != nil {
|
2021-11-13 02:37:17 +00:00
|
|
|
return "", fmt.Errorf(
|
|
|
|
"%s: snapshot integrity failure; it was already written, but is invalid (backup available at %s): %w",
|
2023-01-11 11:24:10 +00:00
|
|
|
file, backup, verifyerr)
|
2017-12-01 00:42:55 +00:00
|
|
|
}
|
2017-11-30 19:13:18 +00:00
|
|
|
}
|
|
|
|
|
Make some stack-related CLI improvements (#947)
This change includes a handful of stack-related CLI formatting
improvements that I've been noodling on in the background for a while,
based on things that tend to trip up demos and the inner loop workflow.
This includes:
* If `pulumi stack select` is run by itself, use an interactive
CLI menu to let the user select an existing stack, or choose to
create a new one. This looks as follows
$ pulumi stack select
Please choose a stack, or choose to create a new one:
abcdef
babblabblabble
> currentlyselected
defcon
<create a new stack>
and is navigated in the usual way (key up, down, enter).
* If a stack name is passed that does not exist, prompt the user
to ask whether s/he wants to create one on-demand. This hooks
interesting moments in time, like `pulumi stack select foo`,
and cuts down on the need to run additional commands.
* If a current stack is required, but none is currently selected,
then pop the same interactive menu shown above to select one.
Depending on the command being run, we may or may not show the
option to create a new stack (e.g., that doesn't make much sense
when you're running `pulumi destroy`, but might when you're
running `pulumi stack`). This again lets you do with a single
command what would have otherwise entailed an error with multiple
commands to recover from it.
* If you run `pulumi stack init` without any additional arguments,
we interactively prompt for the stack name. Before, we would
error and you'd then need to run `pulumi stack init <name>`.
* Colorize some things nicely; for example, now all prompts will
by default become bright white.
2018-02-16 23:03:54 +00:00
|
|
|
return file, nil
|
2017-08-30 01:25:50 +00:00
|
|
|
}
|
|
|
|
|
Improve the overall cloud CLI experience
This improves the overall cloud CLI experience workflow.
Now whether a stack is local or cloud is inherent to the stack
itself. If you interact with a cloud stack, we transparently talk
to the cloud; if you interact with a local stack, we just do the
right thing, and perform all operations locally. Aside from sometimes
seeing a cloud emoji pop-up ☁️, the experience is quite similar.
For example, to initialize a new cloud stack, simply:
$ pulumi login
Logging into Pulumi Cloud: https://pulumi.com/
Enter Pulumi access token: <enter your token>
$ pulumi stack init my-cloud-stack
Note that you may log into a specific cloud if you'd like. For
now, this is just for our own testing purposes, but someday when we
support custom clouds (e.g., Enterprise), you can just say:
$ pulumi login --cloud-url https://corp.acme.my-ppc.net:9873
The cloud is now the default. If you instead prefer a "fire and
forget" style of stack, you can skip the login and pass `--local`:
$ pulumi stack init my-faf-stack --local
If you are logged in and run `pulumi`, we tell you as much:
$ pulumi
Usage:
pulumi [command]
// as before...
Currently logged into the Pulumi Cloud ☁️
https://pulumi.com/
And if you list your stacks, we tell you which one is local or not:
$ pulumi stack ls
NAME LAST UPDATE RESOURCE COUNT CLOUD URL
my-cloud-stack 2017-12-01 ... 3 https://pulumi.com/
my-faf-stack n/a 0 n/a
And `pulumi stack` by itself prints information like your cloud org,
PPC name, and so on, in addition to the usuals.
I shall write up more details and make sure to document these changes.
This change also fairly significantly refactors the layout of cloud
versus local logic, so that the cmd/ package is resonsible for CLI
things, and the new pkg/backend/ package is responsible for the
backends. The following is the overall resulting package architecture:
* The backend.Backend interface can be implemented to substitute
a new backend. This has operations to get and list stacks,
perform updates, and so on.
* The backend.Stack struct is a wrapper around a stack that has
or is being manipulated by a Backend. It resembles our existing
Stack notions in the engine, but carries additional metadata
about its source. Notably, it offers functions that allow
operations like updating and deleting on the Backend from which
it came.
* There is very little else in the pkg/backend/ package.
* A new package, pkg/backend/local/, encapsulates all local state
management for "fire and forget" scenarios. It simply implements
the above logic and contains anything specific to the local
experience.
* A peer package, pkg/backend/cloud/, encapsulates all logic
required for the cloud experience. This includes its subpackage
apitype/ which contains JSON schema descriptions required for
REST calls against the cloud backend. It also contains handy
functions to list which clouds we have authenticated with.
* A subpackage here, pkg/backend/state/, is not a provider at all.
Instead, it contains all of the state management functions that
are currently shared between local and cloud backends. This
includes configuration logic -- including encryption -- as well
as logic pertaining to which stacks are known to the workspace.
This addresses pulumi/pulumi#629 and pulumi/pulumi#494.
2017-12-02 15:29:46 +00:00
|
|
|
// removeStack removes information about a stack from the current workspace.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) removeStack(ctx context.Context, ref *diyBackendReference) error {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
2017-10-25 17:20:08 +00:00
|
|
|
|
2017-08-30 01:25:50 +00:00
|
|
|
// Just make a backup of the file and don't write out anything new.
|
2023-04-03 21:36:44 +00:00
|
|
|
file := b.stackPath(ctx, ref)
|
|
|
|
backupTarget(ctx, b.bucket, file, false)
|
2018-01-25 02:22:41 +00:00
|
|
|
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
historyDir := ref.HistoryDir()
|
2023-04-03 21:36:44 +00:00
|
|
|
return removeAllByPrefix(ctx, b.bucket, historyDir)
|
2017-08-30 01:25:50 +00:00
|
|
|
}
|
|
|
|
|
2022-03-08 11:21:39 +00:00
|
|
|
// backupTarget makes a backup of an existing file, in preparation for writing a new one.
|
2023-04-03 21:36:44 +00:00
|
|
|
func backupTarget(ctx context.Context, bucket Bucket, file string, keepOriginal bool) string {
|
2023-02-16 20:36:43 +00:00
|
|
|
contract.Requiref(file != "", "file", "must not be empty")
|
2017-11-30 19:13:18 +00:00
|
|
|
bck := file + ".bak"
|
2022-03-08 11:21:39 +00:00
|
|
|
|
2023-04-03 21:36:44 +00:00
|
|
|
err := bucket.Copy(ctx, bck, file, nil)
|
2022-03-08 11:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
logging.V(5).Infof("error copying %s to %s: %s", file, bck, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !keepOriginal {
|
2023-04-03 21:36:44 +00:00
|
|
|
err = bucket.Delete(ctx, file)
|
2022-03-08 11:21:39 +00:00
|
|
|
if err != nil {
|
|
|
|
logging.V(5).Infof("error deleting source object after rename: %v (%v) skipping", file, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-30 01:25:50 +00:00
|
|
|
// IDEA: consider multiple backups (.bak.bak.bak...etc).
|
2017-11-30 19:13:18 +00:00
|
|
|
return bck
|
2017-08-30 01:25:50 +00:00
|
|
|
}
|
2018-01-25 02:22:41 +00:00
|
|
|
|
2018-02-21 05:05:57 +00:00
|
|
|
// backupStack copies the current Checkpoint file to ~/.pulumi/backups.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) backupStack(ctx context.Context, ref *diyBackendReference) error {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
2018-02-21 05:05:57 +00:00
|
|
|
|
|
|
|
// Exit early if backups are disabled.
|
2024-01-30 15:53:10 +00:00
|
|
|
if b.Env.GetBool(env.DIYBackendDisableCheckpointBackups) {
|
2018-02-21 05:05:57 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the current checkpoint file. (Assuming it aleady exists.)
|
2023-04-03 21:36:44 +00:00
|
|
|
stackPath := b.stackPath(ctx, ref)
|
|
|
|
byts, err := b.bucket.ReadAll(ctx, stackPath)
|
2018-02-21 05:05:57 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the backup directory.
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
backupDir := ref.BackupDir()
|
2018-02-21 05:05:57 +00:00
|
|
|
|
|
|
|
// Write out the new backup checkpoint file.
|
|
|
|
stackFile := filepath.Base(stackPath)
|
|
|
|
ext := filepath.Ext(stackFile)
|
|
|
|
base := strings.TrimSuffix(stackFile, ext)
|
2022-05-23 19:13:21 +00:00
|
|
|
if ext2 := filepath.Ext(base); ext2 != "" && ext == encoding.GZIPExt {
|
|
|
|
// base: stack-name.json, ext: .gz
|
|
|
|
// ->
|
|
|
|
// base: stack-name, ext: .json.gz
|
|
|
|
ext = ext2 + ext
|
|
|
|
base = strings.TrimSuffix(base, ext2)
|
|
|
|
}
|
2018-02-21 05:05:57 +00:00
|
|
|
backupFile := fmt.Sprintf("%s.%v%s", base, time.Now().UnixNano(), ext)
|
2023-04-03 21:36:44 +00:00
|
|
|
return b.bucket.WriteAll(ctx, filepath.Join(backupDir, backupFile), byts, nil)
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we belived the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somehere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certianly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) stackPath(ctx context.Context, ref *diyBackendReference) string {
|
2023-02-10 12:24:28 +00:00
|
|
|
if ref == nil {
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
return StacksDir
|
2022-06-07 20:17:39 +00:00
|
|
|
}
|
2022-06-07 07:34:44 +00:00
|
|
|
|
2022-06-07 20:17:39 +00:00
|
|
|
// We can't use listBucket here for as we need to do a partial prefix match on filename, while the
|
|
|
|
// "dir" option to listBucket is always suffixed with "/". Also means we don't need to save any
|
|
|
|
// results in a slice.
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
plainPath := filepath.ToSlash(ref.StackBasePath()) + ".json"
|
2022-06-07 20:17:39 +00:00
|
|
|
gzipedPath := plainPath + ".gz"
|
|
|
|
|
|
|
|
bucketIter := b.bucket.List(&blob.ListOptions{
|
|
|
|
Delimiter: "/",
|
|
|
|
Prefix: plainPath,
|
|
|
|
})
|
|
|
|
|
|
|
|
var plainObj *blob.ListObject
|
|
|
|
for {
|
|
|
|
file, err := bucketIter.Next(ctx)
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
// Error fetching the available ojects, assume .json
|
|
|
|
return plainPath
|
|
|
|
}
|
2022-06-07 07:34:44 +00:00
|
|
|
|
2022-06-07 20:17:39 +00:00
|
|
|
// plainObj will always come out first since allObjs is sorted by Key
|
|
|
|
if file.Key == plainPath {
|
|
|
|
plainObj = file
|
|
|
|
} else if file.Key == gzipedPath {
|
|
|
|
// We have a plain .json file and it was modified after this gzipped one so use it.
|
|
|
|
if plainObj != nil && plainObj.ModTime.After(file.ModTime) {
|
|
|
|
return plainPath
|
2022-05-23 19:13:21 +00:00
|
|
|
}
|
2022-06-07 20:17:39 +00:00
|
|
|
// else use the gzipped object
|
|
|
|
return gzipedPath
|
2022-05-23 19:13:21 +00:00
|
|
|
}
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we believed the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somewhere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certainly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
}
|
2022-06-07 20:17:39 +00:00
|
|
|
// Couldn't find any objects, assume nongzipped path?
|
|
|
|
return plainPath
|
Remove the need to `pulumi init` for the local backend
This change removes the need to `pulumi init` when targeting the local
backend. A fair amount of the change lays the foundation that the next
set of changes to stop having `pulumi init` be used for cloud stacks
as well.
Previously, `pulumi init` logically did two things:
1. It created the bookkeeping directory for local stacks, this was
stored in `<repository-root>/.pulumi`, where `<repository-root>` was
the path to what we believed the "root" of your project was. In the
case of git repositories, this was the directory that contained your
`.git` folder.
2. It recorded repository information in
`<repository-root>/.pulumi/repository.json`. This was used by the
cloud backend when computing what project to interact with on
Pulumi.com
The new identity model will remove the need for (2), since we only
need an owner and stack name to fully qualify a stack on
pulumi.com, so it's easy enough to stop creating a folder just for
that.
However, for the local backend, we need to continue to retain some
information about stacks (e.g. checkpoints, history, etc). In
addition, we need to store our workspace settings (which today just
contains the selected stack) somewhere.
For state stored by the local backend, we change the URL scheme from
`local://` to `local://<optional-root-path>`. When
`<optional-root-path>` is unset, it defaults to `$HOME`. We create our
`.pulumi` folder in that directory. This is important because stack
names now must be unique within the backend, but we have some tests
using local stacks which use fixed stack names, so each integration
test really wants its own "view" of the world.
For the workspace settings, we introduce a new `workspaces` directory
in `~/.pulumi`. In this folder we write the workspace settings file
for each project. The file name is the name of the project, combined
with the SHA1 of the path of the project file on disk, to ensure that
multiple pulumi programs with the same project name have different
workspace settings.
This does mean that moving a project's location on disk will cause the
CLI to "forget" what the selected stack was, which is unfortunate, but
not the end of the world. If this ends up being a big pain point, we
can certainly try to play games in the future (for example, if we saw
a .git folder in a parent folder, we could store data in there).
With respect to compatibility, we don't attempt to migrate older files
to their newer locations. For long lived stacks managed using the
local backend, we can provide information on where to move things
to. For all stacks (regardless of backend) we'll require the user to
`pulumi stack select` their stack again, but that seems like the
correct trade-off vs writing complicated upgrade code.
2018-04-16 23:15:10 +00:00
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
// getHistory returns stored update history. The first element of the result will be
|
2018-01-25 02:22:41 +00:00
|
|
|
// the most recent update record.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) getHistory(
|
2023-04-03 21:36:44 +00:00
|
|
|
ctx context.Context,
|
2024-01-30 15:53:10 +00:00
|
|
|
stack *diyBackendReference,
|
2023-04-03 21:36:44 +00:00
|
|
|
pageSize int, page int,
|
|
|
|
) ([]backend.UpdateInfo, error) {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(stack != nil, "stack", "must not be nil")
|
2018-01-25 02:22:41 +00:00
|
|
|
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
dir := stack.HistoryDir()
|
2021-02-10 00:20:01 +00:00
|
|
|
// TODO: we could consider optimizing the list operation using `page` and `pageSize`.
|
|
|
|
// Unfortunately, this is mildly invasive given the gocloud List API.
|
2023-04-03 21:36:44 +00:00
|
|
|
allFiles, err := listBucket(ctx, b.bucket, dir)
|
2018-01-25 02:22:41 +00:00
|
|
|
if err != nil {
|
|
|
|
// History doesn't exist until a stack has been updated.
|
2021-11-19 20:21:37 +00:00
|
|
|
if gcerrors.Code(err) == gcerrors.NotFound {
|
2018-01-25 02:22:41 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-10 00:20:01 +00:00
|
|
|
var historyEntries []*blob.ListObject
|
2021-02-08 18:49:57 +00:00
|
|
|
|
2021-02-10 00:20:01 +00:00
|
|
|
// filter down to just history entries, reversing list to be in most recent order.
|
2019-04-25 03:55:39 +00:00
|
|
|
// listBucket returns the array sorted by file name, but because of how we name files, older updates come before
|
2021-02-10 00:20:01 +00:00
|
|
|
// newer ones.
|
|
|
|
for i := len(allFiles) - 1; i >= 0; i-- {
|
2018-02-28 22:35:13 +00:00
|
|
|
file := allFiles[i]
|
2019-04-25 03:55:39 +00:00
|
|
|
filepath := file.Key
|
2018-01-25 02:22:41 +00:00
|
|
|
|
2021-02-10 00:20:01 +00:00
|
|
|
// ignore checkpoints
|
2022-05-23 19:13:21 +00:00
|
|
|
if !strings.HasSuffix(filepath, ".history.json") &&
|
|
|
|
!strings.HasSuffix(filepath, ".history.json.gz") {
|
2018-01-25 02:22:41 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2021-02-10 00:20:01 +00:00
|
|
|
historyEntries = append(historyEntries, file)
|
|
|
|
}
|
|
|
|
|
|
|
|
start := 0
|
|
|
|
end := len(historyEntries) - 1
|
|
|
|
if pageSize > 0 {
|
|
|
|
if page < 1 {
|
|
|
|
page = 1
|
|
|
|
}
|
|
|
|
start = (page - 1) * pageSize
|
|
|
|
end = start + pageSize - 1
|
|
|
|
if end > len(historyEntries)-1 {
|
|
|
|
end = len(historyEntries) - 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var updates []backend.UpdateInfo
|
|
|
|
|
|
|
|
for i := start; i <= end; i++ {
|
|
|
|
file := historyEntries[i]
|
|
|
|
filepath := file.Key
|
|
|
|
|
2018-01-25 02:22:41 +00:00
|
|
|
var update backend.UpdateInfo
|
2023-04-03 21:36:44 +00:00
|
|
|
b, err := b.bucket.ReadAll(ctx, filepath)
|
2018-01-25 02:22:41 +00:00
|
|
|
if err != nil {
|
2021-11-13 02:37:17 +00:00
|
|
|
return nil, fmt.Errorf("reading history file %s: %w", filepath, err)
|
2018-01-25 02:22:41 +00:00
|
|
|
}
|
2022-05-23 19:13:21 +00:00
|
|
|
m := encoding.JSON
|
|
|
|
if encoding.IsCompressed(b) {
|
|
|
|
m = encoding.Gzip(m)
|
|
|
|
}
|
|
|
|
err = m.Unmarshal(b, &update)
|
2018-01-25 02:22:41 +00:00
|
|
|
if err != nil {
|
2021-11-13 02:37:17 +00:00
|
|
|
return nil, fmt.Errorf("reading history file %s: %w", filepath, err)
|
2018-01-25 02:22:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
updates = append(updates, update)
|
|
|
|
}
|
|
|
|
|
|
|
|
return updates, nil
|
|
|
|
}
|
|
|
|
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) renameHistory(ctx context.Context, oldName, newName *diyBackendReference) error {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(oldName != nil, "oldName", "must not be nil")
|
|
|
|
contract.Requiref(newName != nil, "newName", "must not be nil")
|
2019-08-14 18:50:03 +00:00
|
|
|
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
oldHistory := oldName.HistoryDir()
|
|
|
|
newHistory := newName.HistoryDir()
|
2019-08-14 18:50:03 +00:00
|
|
|
|
2023-04-03 21:36:44 +00:00
|
|
|
allFiles, err := listBucket(ctx, b.bucket, oldHistory)
|
2019-08-14 18:50:03 +00:00
|
|
|
if err != nil {
|
|
|
|
// if there's nothing there, we don't really need to do a rename.
|
2021-11-19 20:21:37 +00:00
|
|
|
if gcerrors.Code(err) == gcerrors.NotFound {
|
2019-08-14 18:50:03 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, file := range allFiles {
|
|
|
|
fileName := objectName(file)
|
|
|
|
oldBlob := path.Join(oldHistory, fileName)
|
|
|
|
|
2022-05-25 08:10:36 +00:00
|
|
|
// The filename format is <stack-name>-<timestamp>.[checkpoint|history].json[.gz], we need to change
|
|
|
|
// the stack name part but retain the other parts. If we find files that don't match this format
|
|
|
|
// ignore them.
|
|
|
|
dashIndex := strings.LastIndex(fileName, "-")
|
2023-02-10 12:24:28 +00:00
|
|
|
if dashIndex == -1 || (fileName[:dashIndex] != oldName.name.String()) {
|
2022-05-25 08:10:36 +00:00
|
|
|
// No dash or the string up to the dash isn't the old name
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2023-02-10 12:24:28 +00:00
|
|
|
newFileName := newName.name.String() + fileName[dashIndex:]
|
2019-08-14 18:50:03 +00:00
|
|
|
newBlob := path.Join(newHistory, newFileName)
|
|
|
|
|
2023-04-03 21:36:44 +00:00
|
|
|
if err := b.bucket.Copy(ctx, newBlob, oldBlob, nil); err != nil {
|
2021-11-13 02:37:17 +00:00
|
|
|
return fmt.Errorf("copying history file: %w", err)
|
2019-08-14 18:50:03 +00:00
|
|
|
}
|
2023-04-03 21:36:44 +00:00
|
|
|
if err := b.bucket.Delete(ctx, oldBlob); err != nil {
|
2021-11-13 02:37:17 +00:00
|
|
|
return fmt.Errorf("deleting existing history file: %w", err)
|
2019-08-14 18:50:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-25 02:22:41 +00:00
|
|
|
// addToHistory saves the UpdateInfo and makes a copy of the current Checkpoint file.
|
2024-01-30 15:53:10 +00:00
|
|
|
func (b *diyBackend) addToHistory(ctx context.Context, ref *diyBackendReference, update backend.UpdateInfo) error {
|
2023-02-10 12:24:28 +00:00
|
|
|
contract.Requiref(ref != nil, "ref", "must not be nil")
|
2018-01-25 02:22:41 +00:00
|
|
|
|
filestate: Introduce referenceStore to control layout
Adds a referenceStore abstraction to control the layout of the storage,
and a legacyReferenceStore implementation based on the current layout
(that does not support projects).
This allows us to move code to determine file paths of stacks, their
histories, and their backups, all into a single component that we can
swap out for project support.
localBackendReferences keep track of the referenceStore that built them.
The primary reason for this is that when we add support for migrating a
stack state from legacy to project mode, `backend.store` will become
mutable.
For references created before the store for a backend was changed, we
still need to be able to access their original file paths, so we need to
hold onto the original referenceStore.
However, as a side-effect of this,
it's more convenient to acess paths from `ref.Foo()` rather than
`backend.foo(ref)` or `backend.store.Foo(ref)`.
In the future, we may also move stackPath to the store,
since right now the .json/.json.gz logic is duplicated in a couple
places.
Extracted from #12134
2023-03-14 00:49:05 +00:00
|
|
|
dir := ref.HistoryDir()
|
2018-01-25 02:22:41 +00:00
|
|
|
|
|
|
|
// Prefix for the update and checkpoint files.
|
2023-02-10 12:24:28 +00:00
|
|
|
pathPrefix := path.Join(dir, fmt.Sprintf("%s-%d", ref.name, time.Now().UnixNano()))
|
2018-01-25 02:22:41 +00:00
|
|
|
|
2022-05-23 19:13:21 +00:00
|
|
|
m, ext := encoding.JSON, "json"
|
|
|
|
if b.gzip {
|
|
|
|
m = encoding.Gzip(m)
|
|
|
|
ext += ".gz"
|
|
|
|
}
|
|
|
|
|
2018-01-25 02:22:41 +00:00
|
|
|
// Save the history file.
|
2022-05-23 19:13:21 +00:00
|
|
|
byts, err := m.Marshal(&update)
|
2018-01-25 02:22:41 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2022-05-23 19:13:21 +00:00
|
|
|
historyFile := fmt.Sprintf("%s.history.%s", pathPrefix, ext)
|
2023-04-03 21:36:44 +00:00
|
|
|
if err = b.bucket.WriteAll(ctx, historyFile, byts, nil); err != nil {
|
2018-01-25 02:22:41 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-04-25 03:55:39 +00:00
|
|
|
// Make a copy of the checkpoint file. (Assuming it already exists.)
|
2022-05-23 19:13:21 +00:00
|
|
|
checkpointFile := fmt.Sprintf("%s.checkpoint.%s", pathPrefix, ext)
|
2023-04-03 21:36:44 +00:00
|
|
|
return b.bucket.Copy(ctx, checkpointFile, b.stackPath(ctx, ref), nil)
|
2018-01-25 02:22:41 +00:00
|
|
|
}
|