diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 27038fc1a459..6b34dbb21111 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,6 +21,7 @@ on: env: BUILDX_VERSION: "latest" BUILDKIT_IMAGE: "moby/buildkit:latest" + SCOUT_VERSION: "1.11.0" REPO_SLUG: "docker/buildx-bin" DESTDIR: "./bin" TEST_CACHE_SCOPE: "test" @@ -214,6 +215,36 @@ jobs: name: test-reports-${{ env.TESTREPORTS_NAME }} path: ${{ env.TESTREPORTS_BASEDIR }} + govulncheck: + runs-on: ubuntu-24.04 + permissions: + # required to write sarif report + security-events: write + steps: + - + name: Checkout + uses: actions/checkout@v4 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + version: ${{ env.BUILDX_VERSION }} + driver-opts: image=${{ env.BUILDKIT_IMAGE }} + buildkitd-flags: --debug + - + name: Run + uses: docker/bake-action@v5 + with: + targets: govulncheck + env: + GOVULNCHECK_FORMAT: sarif + - + name: Upload SARIF report + if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }} + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: ${{ env.DESTDIR }}/govulncheck.out + prepare-binaries: runs-on: ubuntu-24.04 outputs: @@ -328,6 +359,38 @@ jobs: *.cache-from=type=gha,scope=bin-image *.cache-to=type=gha,scope=bin-image,mode=max + scout: + runs-on: ubuntu-24.04 + if: ${{ github.ref == 'refs/heads/master' && github.repository == 'docker/buildx' }} + permissions: + # required to write sarif report + security-events: write + needs: + - bin-image + steps: + - + name: Checkout + uses: actions/checkout@v4 + - + name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKERPUBLICBOT_USERNAME }} + password: ${{ secrets.DOCKERPUBLICBOT_WRITE_PAT }} + - + name: Scout + id: scout + uses: crazy-max/.github/.github/actions/docker-scout@ccae1c98f1237b5c19e4ef77ace44fa68b3bc7e4 + with: + version: ${{ env.SCOUT_VERSION }} + format: sarif + image: registry://${{ env.REPO_SLUG }}:master + - + name: Upload SARIF report + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: ${{ steps.scout.outputs.result-file }} + release: runs-on: ubuntu-24.04 needs: @@ -359,7 +422,7 @@ jobs: - name: GitHub Release if: startsWith(github.ref, 'refs/tags/v') - uses: softprops/action-gh-release@a74c6b72af54cfa997e81df42d94703d6313a2d0 # v2.0.6 + uses: softprops/action-gh-release@c062e08bd532815e2082a85e87e3ef29c3e6d191 # v2.0.8 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/docs-release.yml b/.github/workflows/docs-release.yml index 99bbf7cbecab..135282739db1 100644 --- a/.github/workflows/docs-release.yml +++ b/.github/workflows/docs-release.yml @@ -57,7 +57,7 @@ jobs: VENDOR_MODULE: github.com/docker/buildx@${{ env.RELEASE_NAME }} - name: Create PR on docs repo - uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0 + uses: peter-evans/create-pull-request@4320041ed380b20e97d388d56a7fb4f9b8c20e79 # v7.0.0 with: token: ${{ secrets.GHPAT_DOCS_DISPATCH }} push-to-fork: docker-tools-robot/docker.github.io diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 53700b0a4953..1b75e5342518 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -17,3 +17,5 @@ jobs: - name: Run uses: actions/labeler@v5 + with: + sync-labels: true diff --git a/.mailmap b/.mailmap index 4e45f1aa75ff..ac77e4b46ff5 100644 --- a/.mailmap +++ b/.mailmap @@ -1,11 +1,25 @@ # This file lists all individuals having 
contributed content to the repository. # For how it is generated, see hack/dockerfiles/authors.Dockerfile. +Batuhan Apaydın +Batuhan Apaydın CrazyMax CrazyMax <1951866+crazy-max@users.noreply.github.com> CrazyMax +David Karlsson +David Karlsson <35727626+dvdksn@users.noreply.github.com> +jaihwan104 +jaihwan104 <42341126+jaihwan104@users.noreply.github.com> +Kenyon Ralph +Kenyon Ralph Sebastiaan van Stijn Sebastiaan van Stijn +Shaun Thompson +Shaun Thompson +Silvin Lubecki +Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> +Talon Bowler +Talon Bowler Tibor Vass Tibor Vass Tõnis Tiigi diff --git a/AUTHORS b/AUTHORS index abf95bf70ae2..80297a513c45 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,45 +1,112 @@ # This file lists all individuals having contributed content to the repository. # For how it is generated, see hack/dockerfiles/authors.Dockerfile. +accetto <34798830+accetto@users.noreply.github.com> Akihiro Suda +Aleksa Sarai Alex Couture-Beil Andrew Haines +Andy Caldwell Andy MacKinlay Anthony Poschen +Arnold Sobanski Artur Klauser -Batuhan Apaydın +Avi Deitcher +Batuhan Apaydın +Ben Peachey +Bertrand Paquet Bin Du Brandon Philips Brian Goff +Bryce Lampe +Cameron Adams +Christian Dupuis +Cory Snider CrazyMax +David Gageot +David Karlsson +David Scott dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Devin Bayer Djordje Lukic +Dmitry Makovey Dmytro Makovey Donghui Wang <977675308@qq.com> +Doug Borg +Edgar Lee +Eli Treuherz +Eliott Wiener +Elran Shefer faust Felipe Santos +Felix de Souza Fernando Miguel gfrancesco gracenoah +Guillaume Lours <705411+glours@users.noreply.github.com> +guoguangwu Hollow Man +Ian King'ori +idnandre Ilya Dmitrichenko +Isaac Gaskin Jack Laxson +jaihwan104 Jean-Yves Gastaud +Jhan S. Álvarez <51450231+yastanotheruser@users.noreply.github.com> +Jonathan A. Sternberg +Jonathan Piché +Justin Chadwell +Kenyon Ralph khs1994 +Kijima Daigo +Kohei Tokunaga Kotaro Adachi +Kushagra Mansingh <12158241+kushmansingh@users.noreply.github.com> l00397676 +Laura Brehm +Laurent Goderre +Mark Hildreth <113933455+markhildreth-gravity@users.noreply.github.com> +Mayeul Blanzat Michal Augustyn +Milas Bowman +Mitsuru Kariya +Moleus +Nick Santos +Nick Sieger +Nicolas De Loof +Niklas Gehlen Patrick Van Stee +Paweł Gronowski +Phong Tran +Qasim Sarfraz +Rob Murray +robertlestak Saul Shanabrook +Sean P. Kane Sebastiaan van Stijn +Shaun Thompson SHIMA Tatsuya Silvin Lubecki +Simon A. 
Eugster Solomon Hykes +Sumner Warren Sune Keller +Talon Bowler +Tianon Gravi Tibor Vass +Tim Smith +Timofey Kirillov +Tyler Smith Tõnis Tiigi Ulysses Souza +Usual Coder <34403413+Usual-Coder@users.noreply.github.com> Wang Jinglei +Wei +Wojciech M Xiang Dai <764524258@qq.com> +Zachary Povey zelahi +Zero +zhyon404 +Zsolt diff --git a/Dockerfile b/Dockerfile index c01d56880e14..02826f9f9ae0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,8 @@ ARG GO_VERSION=1.22 ARG XX_VERSION=1.4.0 # for testing -ARG DOCKER_VERSION=27.0.3 +ARG DOCKER_VERSION=27.1.1 +ARG DOCKER_CLI_VERSION=${DOCKER_VERSION} ARG GOTESTSUM_VERSION=v1.9.0 ARG REGISTRY_VERSION=2.8.0 ARG BUILDKIT_VERSION=v0.14.1 @@ -13,7 +14,7 @@ ARG UNDOCK_VERSION=0.7.0 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest FROM moby/moby-bin:$DOCKER_VERSION AS docker-engine -FROM dockereng/cli-bin:$DOCKER_VERSION AS docker-cli +FROM dockereng/cli-bin:$DOCKER_CLI_VERSION AS docker-cli FROM registry:$REGISTRY_VERSION AS registry FROM moby/buildkit:$BUILDKIT_VERSION AS buildkit FROM crazymax/undock:$UNDOCK_VERSION AS undock diff --git a/README.md b/README.md index f02c3954c438..e4595caffac3 100644 --- a/README.md +++ b/README.md @@ -56,8 +56,7 @@ For more information on how to use Buildx, see Using `buildx` with Docker requires Docker engine 19.03 or newer. -> **Warning** -> +> [!WARNING] > Using an incompatible version of Docker may result in unexpected behavior, > and will likely cause issues, especially when using Buildx builders with more > recent versions of BuildKit. @@ -75,8 +74,7 @@ Docker Engine package repositories contain Docker Buildx packages when installed ## Manual download -> **Important** -> +> [!IMPORTANT] > This section is for unattended installation of the buildx component. These > instructions are mostly suitable for testing purposes. 
We do not recommend > installing buildx using manual download in production environments as they @@ -107,8 +105,7 @@ On Windows: * `C:\ProgramData\Docker\cli-plugins` * `C:\Program Files\Docker\cli-plugins` -> **Note** -> +> [!NOTE] > On Unix environments, it may also be necessary to make it executable with `chmod +x`: > ```shell > $ chmod +x ~/.docker/cli-plugins/docker-buildx diff --git a/bake/bake.go b/bake/bake.go index b150b17acdab..9ed1dfd8e159 100644 --- a/bake/bake.go +++ b/bake/bake.go @@ -25,6 +25,7 @@ import ( "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/session/auth/authprovider" + "github.com/moby/buildkit/util/entitlements" "github.com/pkg/errors" "github.com/tonistiigi/go-csvvalue" "github.com/zclconf/go-cty/cty" @@ -542,7 +543,7 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) o := t[kk[1]] switch keys[1] { - case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest": + case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network": if len(parts) == 2 { o.ArrValue = append(o.ArrValue, parts[1]) } @@ -703,11 +704,12 @@ type Target struct { Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"` Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"` NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"` - NetworkMode *string `json:"-" hcl:"-" cty:"-"` + NetworkMode *string `json:"network" hcl:"network" cty:"network"` NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"` ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"` Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"` Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"` + Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"` // IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md. // linked is a private field to mark a target used as a linked one @@ -732,6 +734,12 @@ func (t *Target) normalize() { t.NoCacheFilter = removeDupes(t.NoCacheFilter) t.Ulimits = removeDupes(t.Ulimits) + if t.NetworkMode != nil && *t.NetworkMode == "host" { + t.Entitlements = append(t.Entitlements, "network.host") + } + + t.Entitlements = removeDupes(t.Entitlements) + for k, v := range t.Contexts { if v == "" { delete(t.Contexts, k) @@ -831,6 +839,9 @@ func (t *Target) Merge(t2 *Target) { if t2.Description != "" { t.Description = t2.Description } + if t2.Entitlements != nil { // merge + t.Entitlements = append(t.Entitlements, t2.Entitlements...) + } t.Inherits = append(t.Inherits, t2.Inherits...) } @@ -885,6 +896,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error { t.Platforms = o.ArrValue case "output": t.Outputs = o.ArrValue + case "entitlements": + t.Entitlements = append(t.Entitlements, o.ArrValue...) case "annotations": t.Annotations = append(t.Annotations, o.ArrValue...) 
case "attest": @@ -901,6 +914,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error { t.ShmSize = &value case "ulimits": t.Ulimits = o.ArrValue + case "network": + t.NetworkMode = &value case "pull": pull, err := strconv.ParseBool(value) if err != nil { @@ -1313,7 +1328,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) { } if t.Call != nil { - bo.PrintFunc = &build.PrintFunc{ + bo.CallFunc = &build.CallFunc{ Name: *t.Call, } } @@ -1368,6 +1383,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) { } bo.Ulimits = ulimits + for _, ent := range t.Entitlements { + bo.Allow = append(bo.Allow, entitlements.Entitlement(ent)) + } + return bo, nil } diff --git a/bake/bake_test.go b/bake/bake_test.go index db9f7ae2e593..bb95499a76a8 100644 --- a/bake/bake_test.go +++ b/bake/bake_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/moby/buildkit/util/entitlements" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1726,3 +1727,132 @@ func TestAnnotations(t *testing.T) { require.Len(t, bo["app"].Exports, 1) require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"]) } + +func TestHCLEntitlements(t *testing.T) { + fp := File{ + Name: "docker-bake.hcl", + Data: []byte( + `target "app" { + entitlements = ["security.insecure", "network.host"] + }`), + } + ctx := context.TODO() + m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil) + require.NoError(t, err) + + bo, err := TargetsToBuildOpt(m, &Input{}) + require.NoError(t, err) + + require.Equal(t, 1, len(g)) + require.Equal(t, []string{"app"}, g["default"].Targets) + + require.Equal(t, 1, len(m)) + require.Contains(t, m, "app") + require.Len(t, m["app"].Entitlements, 2) + require.Equal(t, "security.insecure", m["app"].Entitlements[0]) + require.Equal(t, "network.host", m["app"].Entitlements[1]) + + require.Len(t, bo["app"].Allow, 2) + require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0]) + require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1]) +} + +func TestEntitlementsForNetHostCompose(t *testing.T) { + fp := File{ + Name: "docker-bake.hcl", + Data: []byte( + `target "app" { + dockerfile = "app.Dockerfile" + }`), + } + + fp2 := File{ + Name: "docker-compose.yml", + Data: []byte( + `services: + app: + build: + network: "host" +`), + } + + ctx := context.TODO() + m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"app"}, nil, nil) + require.NoError(t, err) + + bo, err := TargetsToBuildOpt(m, &Input{}) + require.NoError(t, err) + + require.Equal(t, 1, len(g)) + require.Equal(t, []string{"app"}, g["default"].Targets) + + require.Equal(t, 1, len(m)) + require.Contains(t, m, "app") + require.Len(t, m["app"].Entitlements, 1) + require.Equal(t, "network.host", m["app"].Entitlements[0]) + require.Equal(t, "host", *m["app"].NetworkMode) + + require.Len(t, bo["app"].Allow, 1) + require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0]) + require.Equal(t, "host", bo["app"].NetworkMode) +} + +func TestEntitlementsForNetHost(t *testing.T) { + fp := File{ + Name: "docker-bake.hcl", + Data: []byte( + `target "app" { + dockerfile = "app.Dockerfile" + network = "host" + }`), + } + + ctx := context.TODO() + m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil) + require.NoError(t, err) + + bo, err := TargetsToBuildOpt(m, &Input{}) + require.NoError(t, err) + + require.Equal(t, 1, len(g)) + require.Equal(t, []string{"app"}, g["default"].Targets) + + 
require.Equal(t, 1, len(m)) + require.Contains(t, m, "app") + require.Len(t, m["app"].Entitlements, 1) + require.Equal(t, "network.host", m["app"].Entitlements[0]) + require.Equal(t, "host", *m["app"].NetworkMode) + + require.Len(t, bo["app"].Allow, 1) + require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0]) + require.Equal(t, "host", bo["app"].NetworkMode) +} + +func TestNetNone(t *testing.T) { + fp := File{ + Name: "docker-bake.hcl", + Data: []byte( + `target "app" { + dockerfile = "app.Dockerfile" + network = "none" + }`), + } + + ctx := context.TODO() + m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil) + require.NoError(t, err) + + bo, err := TargetsToBuildOpt(m, &Input{}) + require.NoError(t, err) + + require.Equal(t, 1, len(g)) + require.Equal(t, []string{"app"}, g["default"].Targets) + + require.Equal(t, 1, len(m)) + require.Contains(t, m, "app") + require.Len(t, m["app"].Entitlements, 0) + require.Equal(t, "none", *m["app"].NetworkMode) + + require.Len(t, bo["app"].Allow, 0) + require.Equal(t, "none", bo["app"].NetworkMode) +} diff --git a/bake/entitlements.go b/bake/entitlements.go new file mode 100644 index 000000000000..030e812beda5 --- /dev/null +++ b/bake/entitlements.go @@ -0,0 +1,175 @@ +package bake + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "slices" + "strings" + + "github.com/containerd/console" + "github.com/docker/buildx/build" + "github.com/moby/buildkit/util/entitlements" + "github.com/pkg/errors" +) + +type EntitlementKey string + +const ( + EntitlementKeyNetworkHost EntitlementKey = "network.host" + EntitlementKeySecurityInsecure EntitlementKey = "security.insecure" + EntitlementKeyFSRead EntitlementKey = "fs.read" + EntitlementKeyFSWrite EntitlementKey = "fs.write" + EntitlementKeyFS EntitlementKey = "fs" + EntitlementKeyImagePush EntitlementKey = "image.push" + EntitlementKeyImageLoad EntitlementKey = "image.load" + EntitlementKeyImage EntitlementKey = "image" + EntitlementKeySSH EntitlementKey = "ssh" +) + +type EntitlementConf struct { + NetworkHost bool + SecurityInsecure bool + FSRead []string + FSWrite []string + ImagePush []string + ImageLoad []string + SSH bool +} + +func ParseEntitlements(in []string) (EntitlementConf, error) { + var conf EntitlementConf + for _, e := range in { + switch e { + case string(EntitlementKeyNetworkHost): + conf.NetworkHost = true + case string(EntitlementKeySecurityInsecure): + conf.SecurityInsecure = true + case string(EntitlementKeySSH): + conf.SSH = true + default: + k, v, _ := strings.Cut(e, "=") + switch k { + case string(EntitlementKeyFSRead): + conf.FSRead = append(conf.FSRead, v) + case string(EntitlementKeyFSWrite): + conf.FSWrite = append(conf.FSWrite, v) + case string(EntitlementKeyFS): + conf.FSRead = append(conf.FSRead, v) + conf.FSWrite = append(conf.FSWrite, v) + case string(EntitlementKeyImagePush): + conf.ImagePush = append(conf.ImagePush, v) + case string(EntitlementKeyImageLoad): + conf.ImageLoad = append(conf.ImageLoad, v) + case string(EntitlementKeyImage): + conf.ImagePush = append(conf.ImagePush, v) + conf.ImageLoad = append(conf.ImageLoad, v) + default: + return conf, errors.Errorf("unknown entitlement key %q", k) + } + + // TODO: dedupe slices and parent paths + } + } + return conf, nil +} + +func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf, error) { + var expected EntitlementConf + + for _, v := range m { + if err := c.check(v, &expected); err != nil { + return EntitlementConf{}, err + } + } + + return expected, nil +} +
+func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error { + for _, e := range bo.Allow { + switch e { + case entitlements.EntitlementNetworkHost: + if !c.NetworkHost { + expected.NetworkHost = true + } + case entitlements.EntitlementSecurityInsecure: + if !c.SecurityInsecure { + expected.SecurityInsecure = true + } + } + } + return nil +} + +func (c EntitlementConf) Prompt(ctx context.Context, out io.Writer) error { + var term bool + if _, err := console.ConsoleFromFile(os.Stdin); err == nil { + term = true + } + + var msgs []string + var flags []string + + if c.NetworkHost { + msgs = append(msgs, " - Running build containers that can access host network") + flags = append(flags, "network.host") + } + if c.SecurityInsecure { + msgs = append(msgs, " - Running privileged containers that can make system changes") + flags = append(flags, "security.insecure") + } + + if len(msgs) == 0 { + return nil + } + + fmt.Fprintf(out, "Your build is requesting privileges for following possibly insecure capabilities:\n\n") + for _, m := range msgs { + fmt.Fprintf(out, "%s\n", m) + } + + for i, f := range flags { + flags[i] = "--allow=" + f + } + + if term { + fmt.Fprintf(out, "\nIn order to not see this message in the future pass %q to grant requested privileges.\n", strings.Join(flags, " ")) + } else { + fmt.Fprintf(out, "\nPass %q to grant requested privileges.\n", strings.Join(flags, " ")) + } + + args := append([]string(nil), os.Args...) + if v, ok := os.LookupEnv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND"); ok && v != "" { + args[0] = v + } + idx := slices.Index(args, "bake") + + if idx != -1 { + fmt.Fprintf(out, "\nYour full command with requested privileges:\n\n") + fmt.Fprintf(out, "%s %s %s\n\n", strings.Join(args[:idx+1], " "), strings.Join(flags, " "), strings.Join(args[idx+1:], " ")) + } + + if term { + fmt.Fprintf(out, "Do you want to grant requested privileges and continue? [y/N] ") + reader := bufio.NewReader(os.Stdin) + answerCh := make(chan string, 1) + go func() { + answer, _, _ := reader.ReadLine() + answerCh <- string(answer) + close(answerCh) + }() + + select { + case <-ctx.Done(): + case answer := <-answerCh: + if strings.ToLower(string(answer)) == "y" { + return nil + } + } + } + + return errors.Errorf("additional privileges requested") +} diff --git a/bake/hclparser/hclparser.go b/bake/hclparser/hclparser.go index eee3a67017ce..fe7dc772dd78 100644 --- a/bake/hclparser/hclparser.go +++ b/bake/hclparser/hclparser.go @@ -75,7 +75,12 @@ type WithGetName interface { GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(hcl.Expression) hcl.Diagnostics) (string, error) } -var errUndefined = errors.New("undefined") +// errUndefined is returned when a variable or function is not defined. 
+type errUndefined struct{} + +func (errUndefined) Error() string { + return "undefined" +} func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map[string]struct{}, allowMissing bool) hcl.Diagnostics { fns, hcldiags := funcCalls(exp) @@ -85,7 +90,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map for _, fn := range fns { if err := p.resolveFunction(ectx, fn); err != nil { - if allowMissing && errors.Is(err, errUndefined) { + if allowMissing && errors.Is(err, errUndefined{}) { continue } return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr()) @@ -139,7 +144,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map } for _, block := range blocks { if err := p.resolveBlock(block, target); err != nil { - if allowMissing && errors.Is(err, errUndefined) { + if allowMissing && errors.Is(err, errUndefined{}) { continue } return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr()) @@ -147,7 +152,7 @@ func (p *parser) loadDeps(ectx *hcl.EvalContext, exp hcl.Expression, exclude map } } else { if err := p.resolveValue(ectx, v.RootName()); err != nil { - if allowMissing && errors.Is(err, errUndefined) { + if allowMissing && errors.Is(err, errUndefined{}) { continue } return wrapErrorDiagnostic("Invalid expression", err, exp.Range().Ptr(), exp.Range().Ptr()) @@ -169,7 +174,7 @@ func (p *parser) resolveFunction(ectx *hcl.EvalContext, name string) error { } f, ok := p.funcs[name] if !ok { - return errors.Wrapf(errUndefined, "function %q does not exist", name) + return errors.Wrapf(errUndefined{}, "function %q does not exist", name) } if _, ok := p.progressF[key(ectx, name)]; ok { return errors.Errorf("function cycle not allowed for %s", name) @@ -259,7 +264,7 @@ func (p *parser) resolveValue(ectx *hcl.EvalContext, name string) (err error) { if _, builtin := p.opt.Vars[name]; !ok && !builtin { vr, ok := p.vars[name] if !ok { - return errors.Wrapf(errUndefined, "variable %q does not exist", name) + return errors.Wrapf(errUndefined{}, "variable %q does not exist", name) } def = vr.Default ectx = p.ectx diff --git a/bake/hclparser/stdlib.go b/bake/hclparser/stdlib.go index e0cd9b5f6615..b0e094f6606d 100644 --- a/bake/hclparser/stdlib.go +++ b/bake/hclparser/stdlib.go @@ -1,6 +1,9 @@ package hclparser import ( + "errors" + "path" + "strings" "time" "github.com/hashicorp/go-cty-funcs/cidr" @@ -9,174 +12,251 @@ import ( "github.com/hashicorp/go-cty-funcs/uuid" "github.com/hashicorp/hcl/v2/ext/tryfunc" "github.com/hashicorp/hcl/v2/ext/typeexpr" - "github.com/pkg/errors" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/function" "github.com/zclconf/go-cty/cty/function/stdlib" ) -var stdlibFunctions = map[string]function.Function{ - "absolute": stdlib.AbsoluteFunc, - "add": stdlib.AddFunc, - "and": stdlib.AndFunc, - "base64decode": encoding.Base64DecodeFunc, - "base64encode": encoding.Base64EncodeFunc, - "bcrypt": crypto.BcryptFunc, - "byteslen": stdlib.BytesLenFunc, - "bytesslice": stdlib.BytesSliceFunc, - "can": tryfunc.CanFunc, - "ceil": stdlib.CeilFunc, - "chomp": stdlib.ChompFunc, - "chunklist": stdlib.ChunklistFunc, - "cidrhost": cidr.HostFunc, - "cidrnetmask": cidr.NetmaskFunc, - "cidrsubnet": cidr.SubnetFunc, - "cidrsubnets": cidr.SubnetsFunc, - "coalesce": stdlib.CoalesceFunc, - "coalescelist": stdlib.CoalesceListFunc, - "compact": stdlib.CompactFunc, - "concat": stdlib.ConcatFunc, - "contains": stdlib.ContainsFunc, - "convert": 
typeexpr.ConvertFunc, - "csvdecode": stdlib.CSVDecodeFunc, - "distinct": stdlib.DistinctFunc, - "divide": stdlib.DivideFunc, - "element": stdlib.ElementFunc, - "equal": stdlib.EqualFunc, - "flatten": stdlib.FlattenFunc, - "floor": stdlib.FloorFunc, - "format": stdlib.FormatFunc, - "formatdate": stdlib.FormatDateFunc, - "formatlist": stdlib.FormatListFunc, - "greaterthan": stdlib.GreaterThanFunc, - "greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc, - "hasindex": stdlib.HasIndexFunc, - "indent": stdlib.IndentFunc, - "index": stdlib.IndexFunc, - "indexof": indexOfFunc, - "int": stdlib.IntFunc, - "join": stdlib.JoinFunc, - "jsondecode": stdlib.JSONDecodeFunc, - "jsonencode": stdlib.JSONEncodeFunc, - "keys": stdlib.KeysFunc, - "length": stdlib.LengthFunc, - "lessthan": stdlib.LessThanFunc, - "lessthanorequalto": stdlib.LessThanOrEqualToFunc, - "log": stdlib.LogFunc, - "lookup": stdlib.LookupFunc, - "lower": stdlib.LowerFunc, - "max": stdlib.MaxFunc, - "md5": crypto.Md5Func, - "merge": stdlib.MergeFunc, - "min": stdlib.MinFunc, - "modulo": stdlib.ModuloFunc, - "multiply": stdlib.MultiplyFunc, - "negate": stdlib.NegateFunc, - "not": stdlib.NotFunc, - "notequal": stdlib.NotEqualFunc, - "or": stdlib.OrFunc, - "parseint": stdlib.ParseIntFunc, - "pow": stdlib.PowFunc, - "range": stdlib.RangeFunc, - "regex_replace": stdlib.RegexReplaceFunc, - "regex": stdlib.RegexFunc, - "regexall": stdlib.RegexAllFunc, - "replace": stdlib.ReplaceFunc, - "reverse": stdlib.ReverseFunc, - "reverselist": stdlib.ReverseListFunc, - "rsadecrypt": crypto.RsaDecryptFunc, - "sethaselement": stdlib.SetHasElementFunc, - "setintersection": stdlib.SetIntersectionFunc, - "setproduct": stdlib.SetProductFunc, - "setsubtract": stdlib.SetSubtractFunc, - "setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc, - "setunion": stdlib.SetUnionFunc, - "sha1": crypto.Sha1Func, - "sha256": crypto.Sha256Func, - "sha512": crypto.Sha512Func, - "signum": stdlib.SignumFunc, - "slice": stdlib.SliceFunc, - "sort": stdlib.SortFunc, - "split": stdlib.SplitFunc, - "strlen": stdlib.StrlenFunc, - "substr": stdlib.SubstrFunc, - "subtract": stdlib.SubtractFunc, - "timeadd": stdlib.TimeAddFunc, - "timestamp": timestampFunc, - "title": stdlib.TitleFunc, - "trim": stdlib.TrimFunc, - "trimprefix": stdlib.TrimPrefixFunc, - "trimspace": stdlib.TrimSpaceFunc, - "trimsuffix": stdlib.TrimSuffixFunc, - "try": tryfunc.TryFunc, - "upper": stdlib.UpperFunc, - "urlencode": encoding.URLEncodeFunc, - "uuidv4": uuid.V4Func, - "uuidv5": uuid.V5Func, - "values": stdlib.ValuesFunc, - "zipmap": stdlib.ZipmapFunc, +type funcDef struct { + name string + fn function.Function + factory func() function.Function +} + +var stdlibFunctions = []funcDef{ + {name: "absolute", fn: stdlib.AbsoluteFunc}, + {name: "add", fn: stdlib.AddFunc}, + {name: "and", fn: stdlib.AndFunc}, + {name: "base64decode", fn: encoding.Base64DecodeFunc}, + {name: "base64encode", fn: encoding.Base64EncodeFunc}, + {name: "basename", factory: basenameFunc}, + {name: "bcrypt", fn: crypto.BcryptFunc}, + {name: "byteslen", fn: stdlib.BytesLenFunc}, + {name: "bytesslice", fn: stdlib.BytesSliceFunc}, + {name: "can", fn: tryfunc.CanFunc}, + {name: "ceil", fn: stdlib.CeilFunc}, + {name: "chomp", fn: stdlib.ChompFunc}, + {name: "chunklist", fn: stdlib.ChunklistFunc}, + {name: "cidrhost", fn: cidr.HostFunc}, + {name: "cidrnetmask", fn: cidr.NetmaskFunc}, + {name: "cidrsubnet", fn: cidr.SubnetFunc}, + {name: "cidrsubnets", fn: cidr.SubnetsFunc}, + {name: "coalesce", fn: stdlib.CoalesceFunc}, + {name: "coalescelist", 
fn: stdlib.CoalesceListFunc}, + {name: "compact", fn: stdlib.CompactFunc}, + {name: "concat", fn: stdlib.ConcatFunc}, + {name: "contains", fn: stdlib.ContainsFunc}, + {name: "convert", fn: typeexpr.ConvertFunc}, + {name: "csvdecode", fn: stdlib.CSVDecodeFunc}, + {name: "dirname", factory: dirnameFunc}, + {name: "distinct", fn: stdlib.DistinctFunc}, + {name: "divide", fn: stdlib.DivideFunc}, + {name: "element", fn: stdlib.ElementFunc}, + {name: "equal", fn: stdlib.EqualFunc}, + {name: "flatten", fn: stdlib.FlattenFunc}, + {name: "floor", fn: stdlib.FloorFunc}, + {name: "format", fn: stdlib.FormatFunc}, + {name: "formatdate", fn: stdlib.FormatDateFunc}, + {name: "formatlist", fn: stdlib.FormatListFunc}, + {name: "greaterthan", fn: stdlib.GreaterThanFunc}, + {name: "greaterthanorequalto", fn: stdlib.GreaterThanOrEqualToFunc}, + {name: "hasindex", fn: stdlib.HasIndexFunc}, + {name: "indent", fn: stdlib.IndentFunc}, + {name: "index", fn: stdlib.IndexFunc}, + {name: "indexof", factory: indexOfFunc}, + {name: "int", fn: stdlib.IntFunc}, + {name: "join", fn: stdlib.JoinFunc}, + {name: "jsondecode", fn: stdlib.JSONDecodeFunc}, + {name: "jsonencode", fn: stdlib.JSONEncodeFunc}, + {name: "keys", fn: stdlib.KeysFunc}, + {name: "length", fn: stdlib.LengthFunc}, + {name: "lessthan", fn: stdlib.LessThanFunc}, + {name: "lessthanorequalto", fn: stdlib.LessThanOrEqualToFunc}, + {name: "log", fn: stdlib.LogFunc}, + {name: "lookup", fn: stdlib.LookupFunc}, + {name: "lower", fn: stdlib.LowerFunc}, + {name: "max", fn: stdlib.MaxFunc}, + {name: "md5", fn: crypto.Md5Func}, + {name: "merge", fn: stdlib.MergeFunc}, + {name: "min", fn: stdlib.MinFunc}, + {name: "modulo", fn: stdlib.ModuloFunc}, + {name: "multiply", fn: stdlib.MultiplyFunc}, + {name: "negate", fn: stdlib.NegateFunc}, + {name: "not", fn: stdlib.NotFunc}, + {name: "notequal", fn: stdlib.NotEqualFunc}, + {name: "or", fn: stdlib.OrFunc}, + {name: "parseint", fn: stdlib.ParseIntFunc}, + {name: "pow", fn: stdlib.PowFunc}, + {name: "range", fn: stdlib.RangeFunc}, + {name: "regex_replace", fn: stdlib.RegexReplaceFunc}, + {name: "regex", fn: stdlib.RegexFunc}, + {name: "regexall", fn: stdlib.RegexAllFunc}, + {name: "replace", fn: stdlib.ReplaceFunc}, + {name: "reverse", fn: stdlib.ReverseFunc}, + {name: "reverselist", fn: stdlib.ReverseListFunc}, + {name: "rsadecrypt", fn: crypto.RsaDecryptFunc}, + {name: "sanitize", factory: sanitizeFunc}, + {name: "sethaselement", fn: stdlib.SetHasElementFunc}, + {name: "setintersection", fn: stdlib.SetIntersectionFunc}, + {name: "setproduct", fn: stdlib.SetProductFunc}, + {name: "setsubtract", fn: stdlib.SetSubtractFunc}, + {name: "setsymmetricdifference", fn: stdlib.SetSymmetricDifferenceFunc}, + {name: "setunion", fn: stdlib.SetUnionFunc}, + {name: "sha1", fn: crypto.Sha1Func}, + {name: "sha256", fn: crypto.Sha256Func}, + {name: "sha512", fn: crypto.Sha512Func}, + {name: "signum", fn: stdlib.SignumFunc}, + {name: "slice", fn: stdlib.SliceFunc}, + {name: "sort", fn: stdlib.SortFunc}, + {name: "split", fn: stdlib.SplitFunc}, + {name: "strlen", fn: stdlib.StrlenFunc}, + {name: "substr", fn: stdlib.SubstrFunc}, + {name: "subtract", fn: stdlib.SubtractFunc}, + {name: "timeadd", fn: stdlib.TimeAddFunc}, + {name: "timestamp", factory: timestampFunc}, + {name: "title", fn: stdlib.TitleFunc}, + {name: "trim", fn: stdlib.TrimFunc}, + {name: "trimprefix", fn: stdlib.TrimPrefixFunc}, + {name: "trimspace", fn: stdlib.TrimSpaceFunc}, + {name: "trimsuffix", fn: stdlib.TrimSuffixFunc}, + {name: "try", fn: tryfunc.TryFunc}, + {name: 
"upper", fn: stdlib.UpperFunc}, + {name: "urlencode", fn: encoding.URLEncodeFunc}, + {name: "uuidv4", fn: uuid.V4Func}, + {name: "uuidv5", fn: uuid.V5Func}, + {name: "values", fn: stdlib.ValuesFunc}, + {name: "zipmap", fn: stdlib.ZipmapFunc}, } // indexOfFunc constructs a function that finds the element index for a given // value in a list. -var indexOfFunc = function.New(&function.Spec{ - Params: []function.Parameter{ - { - Name: "list", - Type: cty.DynamicPseudoType, +func indexOfFunc() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, }, - { - Name: "value", - Type: cty.DynamicPseudoType, - }, - }, - Type: function.StaticReturnType(cty.Number), - Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { - if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { - return cty.NilVal, errors.New("argument must be a list or tuple") - } - - if !args[0].IsKnown() { - return cty.UnknownVal(cty.Number), nil - } - - if args[0].LengthInt() == 0 { // Easy path - return cty.NilVal, errors.New("cannot search an empty list") - } - - for it := args[0].ElementIterator(); it.Next(); { - i, v := it.Element() - eq, err := stdlib.Equal(v, args[1]) - if err != nil { - return cty.NilVal, err + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") } - if !eq.IsKnown() { + + if !args[0].IsKnown() { return cty.UnknownVal(cty.Number), nil } - if eq.True() { - return i, nil + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") } - } - return cty.NilVal, errors.New("item not found") - }, -}) + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, + }) +} + +// basenameFunc constructs a function that returns the last element of a path. +func basenameFunc() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + return cty.StringVal(path.Base(in)), nil + }, + }) +} + +// dirnameFunc constructs a function that returns the directory of a path. +func dirnameFunc() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + return cty.StringVal(path.Dir(in)), nil + }, + }) +} + +// sanitizyFunc constructs a function that replaces all non-alphanumeric characters with a underscore, +// leaving only characters that are valid for a Bake target name. 
+func sanitizeFunc() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + in := args[0].AsString() + // only [a-zA-Z0-9_-]+ is allowed + var b strings.Builder + for _, r := range in { + if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '_' || r == '-' { + b.WriteRune(r) + } else { + b.WriteRune('_') + } + } + return cty.StringVal(b.String()), nil + }, + }) +} // timestampFunc constructs a function that returns a string representation of the current date and time. // // This function was imported from terraform's datetime utilities. -var timestampFunc = function.New(&function.Spec{ - Params: []function.Parameter{}, - Type: function.StaticReturnType(cty.String), - Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { - return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil - }, -}) +func timestampFunc() function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil + }, + }) +} func Stdlib() map[string]function.Function { funcs := make(map[string]function.Function, len(stdlibFunctions)) - for k, v := range stdlibFunctions { - funcs[k] = v + for _, v := range stdlibFunctions { + if v.factory != nil { + funcs[v.name] = v.factory() + } else { + funcs[v.name] = v.fn + } } return funcs } diff --git a/bake/hclparser/stdlib_test.go b/bake/hclparser/stdlib_test.go index df2336cee9de..e0d3dd29b9fc 100644 --- a/bake/hclparser/stdlib_test.go +++ b/bake/hclparser/stdlib_test.go @@ -3,6 +3,7 @@ package hclparser import ( "testing" + "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" ) @@ -34,16 +35,165 @@ func TestIndexOf(t *testing.T) { for name, test := range tests { name, test := name, test t.Run(name, func(t *testing.T) { - got, err := indexOfFunc.Call([]cty.Value{test.input, test.key}) - if err != nil { - if test.wantErr { - return - } - t.Fatalf("unexpected error: %s", err) + got, err := indexOfFunc().Call([]cty.Value{test.input, test.key}) + if test.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, test.want, got) } - if !got.RawEquals(test.want) { - t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want) + }) + } +} + +func TestBasename(t *testing.T) { + type testCase struct { + input cty.Value + want cty.Value + wantErr bool + } + tests := map[string]testCase{ + "empty": { + input: cty.StringVal(""), + want: cty.StringVal("."), + }, + "slash": { + input: cty.StringVal("/"), + want: cty.StringVal("/"), + }, + "simple": { + input: cty.StringVal("/foo/bar"), + want: cty.StringVal("bar"), + }, + "simple no slash": { + input: cty.StringVal("foo/bar"), + want: cty.StringVal("bar"), + }, + "dot": { + input: cty.StringVal("/foo/bar."), + want: cty.StringVal("bar."), + }, + "dotdot": { + input: cty.StringVal("/foo/bar.."), + want: cty.StringVal("bar.."), + }, + "dotdotdot": { + input: cty.StringVal("/foo/bar..."), + want: cty.StringVal("bar..."), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + got, err := basenameFunc().Call([]cty.Value{test.input}) + if test.wantErr { + require.Error(t, err) + } else { + 
require.NoError(t, err) + require.Equal(t, test.want, got) } }) } } + +func TestDirname(t *testing.T) { + type testCase struct { + input cty.Value + want cty.Value + wantErr bool + } + tests := map[string]testCase{ + "empty": { + input: cty.StringVal(""), + want: cty.StringVal("."), + }, + "slash": { + input: cty.StringVal("/"), + want: cty.StringVal("/"), + }, + "simple": { + input: cty.StringVal("/foo/bar"), + want: cty.StringVal("/foo"), + }, + "simple no slash": { + input: cty.StringVal("foo/bar"), + want: cty.StringVal("foo"), + }, + "dot": { + input: cty.StringVal("/foo/bar."), + want: cty.StringVal("/foo"), + }, + "dotdot": { + input: cty.StringVal("/foo/bar.."), + want: cty.StringVal("/foo"), + }, + "dotdotdot": { + input: cty.StringVal("/foo/bar..."), + want: cty.StringVal("/foo"), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + got, err := dirnameFunc().Call([]cty.Value{test.input}) + if test.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, test.want, got) + } + }) + } +} + +func TestSanitize(t *testing.T) { + type testCase struct { + input cty.Value + want cty.Value + } + tests := map[string]testCase{ + "empty": { + input: cty.StringVal(""), + want: cty.StringVal(""), + }, + "simple": { + input: cty.StringVal("foo/bar"), + want: cty.StringVal("foo_bar"), + }, + "simple no slash": { + input: cty.StringVal("foobar"), + want: cty.StringVal("foobar"), + }, + "dot": { + input: cty.StringVal("foo/bar."), + want: cty.StringVal("foo_bar_"), + }, + "dotdot": { + input: cty.StringVal("foo/bar.."), + want: cty.StringVal("foo_bar__"), + }, + "dotdotdot": { + input: cty.StringVal("foo/bar..."), + want: cty.StringVal("foo_bar___"), + }, + "utf8": { + input: cty.StringVal("foo/🍕bar"), + want: cty.StringVal("foo__bar"), + }, + "symbols": { + input: cty.StringVal("foo/bar!@(ba+z)"), + want: cty.StringVal("foo_bar___ba_z_"), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + got, err := sanitizeFunc().Call([]cty.Value{test.input}) + require.NoError(t, err) + require.Equal(t, test.want, got) + }) + } +} diff --git a/build/build.go b/build/build.go index 34a895d4c356..dadcdcfff6b1 100644 --- a/build/build.go +++ b/build/build.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "os" + "slices" "strconv" "strings" "sync" @@ -34,6 +35,7 @@ import ( gateway "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" spb "github.com/moby/buildkit/sourcepolicy/pb" @@ -44,15 +46,12 @@ import ( specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" ) -var ( - errStdinConflict = errors.New("invalid argument: can't use stdin for both build context and dockerfile") - errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles") -) - const ( printFallbackImage = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56" printLintFallbackImage = "docker.io/docker/dockerfile-upstream:1.8.1@sha256:e87caa74dcb7d46cd820352bfea12591f3dba3ddc4285e19c7dcd13359f7cefd" @@ -83,13 +82,13 @@ type Options struct { 
Session []session.Attachable Linked bool // Linked marks this target as exclusively linked (not requested by the user). - PrintFunc *PrintFunc + CallFunc *CallFunc ProvenanceResponseMode confutil.MetadataProvenanceMode SourcePolicy *spb.Policy GroupRef string } -type PrintFunc struct { +type CallFunc struct { Name string Format string IgnoreStatus bool @@ -98,7 +97,7 @@ type PrintFunc struct { type Inputs struct { ContextPath string DockerfilePath string - InStream io.Reader + InStream *SyncMultiReader ContextState *llb.State DockerfileInline string NamedContexts map[string]NamedContext @@ -170,7 +169,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s } } - if noMobyDriver != nil && !noDefaultLoad() && noPrintFunc(opt) { + if noMobyDriver != nil && !noDefaultLoad() && noCallFunc(opt) { var noOutputTargets []string for name, opt := range opt { if noMobyDriver.Features(ctx)[driver.DefaultLoad] { @@ -213,7 +212,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s for k, opt := range opt { multiDriver := len(drivers[k]) > 1 hasMobyDriver := false - gitattrs, addVCSLocalDir, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath) + addGitAttrs, err := getGitAttributes(ctx, opt.Inputs.ContextPath, opt.Inputs.DockerfilePath) if err != nil { logrus.WithError(err).Warn("current commit information was not captured by the build") } @@ -230,16 +229,14 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s if err != nil { return nil, err } - so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, addVCSLocalDir, w, docker) + so, release, err := toSolveOpt(ctx, np.Node(), multiDriver, opt, gatewayOpts, configDir, w, docker) if err != nil { return nil, err } if err := saveLocalState(so, k, opt, np.Node(), configDir); err != nil { return nil, err } - for k, v := range gitattrs { - so.FrontendAttrs[k] = v - } + addGitAttrs(so) defers = append(defers, release) reqn = append(reqn, &reqForNode{ resolvedNode: np, @@ -298,6 +295,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s } } + sharedSessions, err := detectSharedMounts(ctx, reqForNodes) + if err != nil { + return nil, err + } + sharedSessionsWG := map[string]*sync.WaitGroup{} + resp = map[string]*client.SolveResponse{} var respMu sync.Mutex results := waitmap.New() @@ -306,7 +309,7 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s childTargets := calculateChildTargets(reqForNodes, opt) for k, opt := range opt { - err := func(k string) error { + err := func(k string) (err error) { opt := opt dps := drivers[k] multiDriver := len(drivers[k]) > 1 @@ -318,6 +321,12 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s } baseCtx := ctx + if multiTarget { + defer func() { + err = errors.Wrapf(err, "target %s", k) + }() + } + res := make([]*client.SolveResponse, len(dps)) eg2, ctx := errgroup.WithContext(ctx) @@ -362,7 +371,37 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s if err != nil { return err } + + var done func() + if sessions, ok := sharedSessions[node.Name]; ok { + wg, ok := sharedSessionsWG[node.Name] + if ok { + wg.Add(1) + } else { + wg = &sync.WaitGroup{} + wg.Add(1) + sharedSessionsWG[node.Name] = wg + for _, s := range sessions { + s := s + eg.Go(func() error { + return s.Run(baseCtx, c.Dialer()) + }) + } + go func() { + wg.Wait() + for _, s := range sessions { 
+ s.Close() + } + }() + } + done = wg.Done + } + eg2.Go(func() error { + if done != nil { + defer done() + } + pw = progress.ResetTime(pw) if err := waitContextDeps(ctx, dp.driverIndex, results, so); err != nil { @@ -393,15 +432,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s defer func() { <-done }() cc := c - var printRes map[string][]byte + var callRes map[string][]byte buildFunc := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) { - if opt.PrintFunc != nil { + if opt.CallFunc != nil { if _, ok := req.FrontendOpt["frontend.caps"]; !ok { req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward" } else { req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward" } - req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name + req.FrontendOpt["requestid"] = "frontend." + opt.CallFunc.Name } res, err := c.Solve(ctx, req) @@ -417,8 +456,8 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s return nil, err } } - if opt.PrintFunc != nil { - printRes = res.Metadata + if opt.CallFunc != nil { + callRes = res.Metadata } rKey := resultKey(dp.driverIndex, k) @@ -474,13 +513,15 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s if rr.ExporterResponse == nil { rr.ExporterResponse = map[string]string{} } - for k, v := range printRes { + for k, v := range callRes { rr.ExporterResponse[k] = string(v) } - rr.ExporterResponse["buildx.build.ref"] = buildRef - if node.Driver.HistoryAPISupported(ctx) { - if err := setRecordProvenance(ctx, c, rr, so.Ref, opt.ProvenanceResponseMode, pw); err != nil { - return err + if opt.CallFunc == nil { + rr.ExporterResponse["buildx.build.ref"] = buildRef + if node.Driver.HistoryAPISupported(ctx) { + if err := setRecordProvenance(ctx, c, rr, so.Ref, opt.ProvenanceResponseMode, pw); err != nil { + return err + } } } @@ -530,6 +571,13 @@ func BuildWithResultHandler(ctx context.Context, nodes []builder.Node, opt map[s tracing.FinishWithError(span, err) } }() + + if multiTarget { + defer func() { + err = errors.Wrapf(err, "target %s", k) + }() + } + pw := progress.WithPrefix(w, "default", false) if err := eg2.Wait(); err != nil { return err @@ -793,6 +841,124 @@ func resultKey(index int, name string) string { return fmt.Sprintf("%d-%s", index, name) } +// detectSharedMounts looks for same local mounts used by multiple requests to the same node +// and creates a separate session that will be used by all detected requests. 
+func detectSharedMounts(ctx context.Context, reqs map[string][]*reqForNode) (_ map[string][]*session.Session, err error) { + type fsTracker struct { + fs fsutil.FS + so []*client.SolveOpt + } + type fsKey struct { + name string + dir string + } + + m := map[string]map[fsKey]*fsTracker{} + for _, reqs := range reqs { + for _, req := range reqs { + nodeName := req.resolvedNode.Node().Name + if _, ok := m[nodeName]; !ok { + m[nodeName] = map[fsKey]*fsTracker{} + } + fsMap := m[nodeName] + for name, m := range req.so.LocalMounts { + fs, ok := m.(*fs) + if !ok { + continue + } + key := fsKey{name: name, dir: fs.dir} + if _, ok := fsMap[key]; !ok { + fsMap[key] = &fsTracker{fs: fs.FS} + } + fsMap[key].so = append(fsMap[key].so, req.so) + } + } + } + + type sharedSession struct { + *session.Session + fsMap map[string]fsutil.FS + } + + sessionMap := map[string][]*sharedSession{} + + defer func() { + if err != nil { + for _, sessions := range sessionMap { + for _, s := range sessions { + s.Close() + } + } + } + }() + + for node, fsMap := range m { + for key, fs := range fsMap { + if len(fs.so) <= 1 { + continue + } + + sessions := sessionMap[node] + + // find session that doesn't have the fs name reserved + idx := slices.IndexFunc(sessions, func(s *sharedSession) bool { + _, ok := s.fsMap[key.name] + return !ok + }) + + var ss *sharedSession + if idx == -1 { + s, err := session.NewSession(ctx, fs.so[0].SharedKey) + if err != nil { + return nil, err + } + ss = &sharedSession{Session: s, fsMap: map[string]fsutil.FS{}} + sessions = append(sessions, ss) + sessionMap[node] = sessions + } else { + ss = sessions[idx] + } + + ss.fsMap[key.name] = fs.fs + for _, so := range fs.so { + if so.FrontendAttrs == nil { + so.FrontendAttrs = map[string]string{} + } + so.FrontendAttrs["local-sessionid:"+key.name] = ss.ID() + } + } + } + + resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult { + st.Uid = 0 + st.Gid = 0 + return fsutil.MapResultKeep + } + + // convert back to regular sessions + sessions := map[string][]*session.Session{} + for n, ss := range sessionMap { + arr := make([]*session.Session, 0, len(ss)) + for _, s := range ss { + arr = append(arr, s.Session) + + src := make(filesync.StaticDirSource, len(s.fsMap)) + for name, fs := range s.fsMap { + fs, err := fsutil.NewFilterFS(fs, &fsutil.FilterOpt{ + Map: resetUIDAndGID, + }) + if err != nil { + return nil, err + } + src[name] = fs + } + s.Allow(filesync.NewFSSyncProvider(src)) + } + sessions[n] = arr + } + return sessions, nil +} + // calculateChildTargets returns all the targets that depend on current target for reverse index func calculateChildTargets(reqs map[string][]*reqForNode, opt map[string]Options) map[string][]string { out := make(map[string][]string) @@ -940,9 +1106,9 @@ func fallbackPrintError(err error, req gateway.SolveRequest) (gateway.SolveReque return req, false } -func noPrintFunc(opt map[string]Options) bool { +func noCallFunc(opt map[string]Options) bool { for _, v := range opt { - if v.PrintFunc != nil { + if v.CallFunc != nil { return false } } diff --git a/build/git.go b/build/git.go index 8ee1875ce0ba..0b7aba10407d 100644 --- a/build/git.go +++ b/build/git.go @@ -17,10 +17,19 @@ import ( const DockerfileLabel = "com.docker.image.source.entrypoint" -func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath string) (map[string]string, func(key, dir string, so *client.SolveOpt), error) { - res := make(map[string]string) +type gitAttrsAppendFunc func(so *client.SolveOpt) + +func gitAppendNoneFunc(_ 
*client.SolveOpt) {} + +func getGitAttributes(ctx context.Context, contextPath, dockerfilePath string) (f gitAttrsAppendFunc, err error) { + defer func() { + if f == nil { + f = gitAppendNoneFunc + } + }() + if contextPath == "" { - return nil, nil, nil + return nil, nil } setGitLabels := false @@ -39,7 +48,7 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st } if !setGitLabels && !setGitInfo { - return nil, nil, nil + return nil, nil } // figure out in which directory the git command needs to run in @@ -54,25 +63,27 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st gitc, err := gitutil.New(gitutil.WithContext(ctx), gitutil.WithWorkingDir(wd)) if err != nil { if st, err1 := os.Stat(path.Join(wd, ".git")); err1 == nil && st.IsDir() { - return res, nil, errors.Wrap(err, "git was not found in the system") + return nil, errors.Wrap(err, "git was not found in the system") } - return nil, nil, nil + return nil, nil } if !gitc.IsInsideWorkTree() { if st, err := os.Stat(path.Join(wd, ".git")); err == nil && st.IsDir() { - return res, nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree") + return nil, errors.New("failed to read current commit information with git rev-parse --is-inside-work-tree") } - return nil, nil, nil + return nil, nil } root, err := gitc.RootDir() if err != nil { - return res, nil, errors.Wrap(err, "failed to get git root dir") + return nil, errors.Wrap(err, "failed to get git root dir") } + res := make(map[string]string) + if sha, err := gitc.FullCommit(); err != nil && !gitutil.IsUnknownRevision(err) { - return res, nil, errors.Wrap(err, "failed to get git commit") + return nil, errors.Wrap(err, "failed to get git commit") } else if sha != "" { checkDirty := false if v, ok := os.LookupEnv("BUILDX_GIT_CHECK_DIRTY"); ok { @@ -112,20 +123,38 @@ func getGitAttributes(ctx context.Context, contextPath string, dockerfilePath st } } - return res, func(key, dir string, so *client.SolveOpt) { - if !setGitInfo || root == "" { - return + return func(so *client.SolveOpt) { + if so.FrontendAttrs == nil { + so.FrontendAttrs = make(map[string]string) } - dir, err := filepath.Abs(dir) - if err != nil { - return + for k, v := range res { + so.FrontendAttrs[k] = v } - if lp, err := osutil.GetLongPathName(dir); err == nil { - dir = lp + + if !setGitInfo || root == "" { + return } - dir = osutil.SanitizePath(dir) - if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") { - so.FrontendAttrs["vcs:localdir:"+key] = r + + for key, mount := range so.LocalMounts { + fs, ok := mount.(*fs) + if !ok { + continue + } + dir, err := filepath.EvalSymlinks(fs.dir) // keep same behavior as fsutil.NewFS + if err != nil { + continue + } + dir, err = filepath.Abs(dir) + if err != nil { + continue + } + if lp, err := osutil.GetLongPathName(dir); err == nil { + dir = lp + } + dir = osutil.SanitizePath(dir) + if r, err := filepath.Rel(root, dir); err == nil && !strings.HasPrefix(r, "..") { + so.FrontendAttrs["vcs:localdir:"+key] = r + } } }, nil } diff --git a/build/git_test.go b/build/git_test.go index d8698507d953..ee4e9403cb9d 100644 --- a/build/git_test.go +++ b/build/git_test.go @@ -31,7 +31,7 @@ func setupTest(tb testing.TB) { } func TestGetGitAttributesNotGitRepo(t *testing.T) { - _, _, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile") + _, err := getGitAttributes(context.Background(), t.TempDir(), "Dockerfile") assert.NoError(t, err) } @@ -39,16 
+39,18 @@ func TestGetGitAttributesBadGitRepo(t *testing.T) { tmp := t.TempDir() require.NoError(t, os.MkdirAll(path.Join(tmp, ".git"), 0755)) - _, _, err := getGitAttributes(context.Background(), tmp, "Dockerfile") + _, err := getGitAttributes(context.Background(), tmp, "Dockerfile") assert.Error(t, err) } func TestGetGitAttributesNoContext(t *testing.T) { setupTest(t) - gitattrs, _, err := getGitAttributes(context.Background(), "", "Dockerfile") + addGitAttrs, err := getGitAttributes(context.Background(), "", "Dockerfile") assert.NoError(t, err) - assert.Empty(t, gitattrs) + var so client.SolveOpt + addGitAttrs(&so) + assert.Empty(t, so.FrontendAttrs) } func TestGetGitAttributes(t *testing.T) { @@ -115,15 +117,17 @@ func TestGetGitAttributes(t *testing.T) { if tt.envGitInfo != "" { t.Setenv("BUILDX_GIT_INFO", tt.envGitInfo) } - gitattrs, _, err := getGitAttributes(context.Background(), ".", "Dockerfile") + addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile") require.NoError(t, err) + var so client.SolveOpt + addGitAttrs(&so) for _, e := range tt.expected { - assert.Contains(t, gitattrs, e) - assert.NotEmpty(t, gitattrs[e]) + assert.Contains(t, so.FrontendAttrs, e) + assert.NotEmpty(t, so.FrontendAttrs[e]) if e == "label:"+DockerfileLabel { - assert.Equal(t, "Dockerfile", gitattrs[e]) + assert.Equal(t, "Dockerfile", so.FrontendAttrs[e]) } else if e == "label:"+specs.AnnotationSource || e == "vcs:source" { - assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs[e]) + assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs[e]) } } }) @@ -140,20 +144,25 @@ func TestGetGitAttributesDirty(t *testing.T) { require.NoError(t, os.WriteFile(filepath.Join("dir", "Dockerfile"), df, 0644)) t.Setenv("BUILDX_GIT_LABELS", "true") - gitattrs, _, _ := getGitAttributes(context.Background(), ".", "Dockerfile") - assert.Equal(t, 5, len(gitattrs)) - - assert.Contains(t, gitattrs, "label:"+DockerfileLabel) - assert.Equal(t, "Dockerfile", gitattrs["label:"+DockerfileLabel]) - assert.Contains(t, gitattrs, "label:"+specs.AnnotationSource) - assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["label:"+specs.AnnotationSource]) - assert.Contains(t, gitattrs, "label:"+specs.AnnotationRevision) - assert.True(t, strings.HasSuffix(gitattrs["label:"+specs.AnnotationRevision], "-dirty")) - - assert.Contains(t, gitattrs, "vcs:source") - assert.Equal(t, "git@github.com:docker/buildx.git", gitattrs["vcs:source"]) - assert.Contains(t, gitattrs, "vcs:revision") - assert.True(t, strings.HasSuffix(gitattrs["vcs:revision"], "-dirty")) + addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile") + require.NoError(t, err) + + var so client.SolveOpt + addGitAttrs(&so) + + assert.Equal(t, 5, len(so.FrontendAttrs)) + + assert.Contains(t, so.FrontendAttrs, "label:"+DockerfileLabel) + assert.Equal(t, "Dockerfile", so.FrontendAttrs["label:"+DockerfileLabel]) + assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationSource) + assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["label:"+specs.AnnotationSource]) + assert.Contains(t, so.FrontendAttrs, "label:"+specs.AnnotationRevision) + assert.True(t, strings.HasSuffix(so.FrontendAttrs["label:"+specs.AnnotationRevision], "-dirty")) + + assert.Contains(t, so.FrontendAttrs, "vcs:source") + assert.Equal(t, "git@github.com:docker/buildx.git", so.FrontendAttrs["vcs:source"]) + assert.Contains(t, so.FrontendAttrs, "vcs:revision") + assert.True(t, strings.HasSuffix(so.FrontendAttrs["vcs:revision"], 
"-dirty")) } func TestLocalDirs(t *testing.T) { @@ -163,15 +172,17 @@ func TestLocalDirs(t *testing.T) { FrontendAttrs: map[string]string{}, } - _, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "Dockerfile") + addGitAttrs, err := getGitAttributes(context.Background(), ".", "Dockerfile") require.NoError(t, err) - require.NotNil(t, addVCSLocalDir) - require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir)) + require.NoError(t, setLocalMount("context", ".", so)) + require.NoError(t, setLocalMount("dockerfile", ".", so)) + + addGitAttrs(so) + require.Contains(t, so.FrontendAttrs, "vcs:localdir:context") assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"]) - require.NoError(t, setLocalMount("dockerfile", ".", so, addVCSLocalDir)) require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile") assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:dockerfile"]) } @@ -194,16 +205,17 @@ func TestLocalDirsSub(t *testing.T) { so := &client.SolveOpt{ FrontendAttrs: map[string]string{}, } + require.NoError(t, setLocalMount("context", ".", so)) + require.NoError(t, setLocalMount("dockerfile", "app", so)) - _, addVCSLocalDir, err := getGitAttributes(context.Background(), ".", "app/Dockerfile") + addGitAttrs, err := getGitAttributes(context.Background(), ".", "app/Dockerfile") require.NoError(t, err) - require.NotNil(t, addVCSLocalDir) - require.NoError(t, setLocalMount("context", ".", so, addVCSLocalDir)) + addGitAttrs(so) + require.Contains(t, so.FrontendAttrs, "vcs:localdir:context") assert.Equal(t, ".", so.FrontendAttrs["vcs:localdir:context"]) - require.NoError(t, setLocalMount("dockerfile", "app", so, addVCSLocalDir)) require.Contains(t, so.FrontendAttrs, "vcs:localdir:dockerfile") assert.Equal(t, "app", so.FrontendAttrs["vcs:localdir:dockerfile"]) } diff --git a/build/opt.go b/build/opt.go index 0d6dfbeff3e5..c8d52c9172a0 100644 --- a/build/opt.go +++ b/build/opt.go @@ -1,11 +1,12 @@ package build import ( - "bufio" + "bytes" "context" "io" "os" "path/filepath" + "slices" "strconv" "strings" "syscall" @@ -34,7 +35,7 @@ import ( "github.com/tonistiigi/fsutil" ) -func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) { +func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, docker *dockerutil.Client) (_ *client.SolveOpt, release func(), err error) { nodeDriver := node.Driver defers := make([]func(), 0, 2) releaseF := func() { @@ -157,7 +158,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op case 1: // valid case 0: - if !noDefaultLoad() && opt.PrintFunc == nil { + if !noDefaultLoad() && opt.CallFunc == nil { if nodeDriver.IsMobyDriver() { // backwards compat for docker driver only: // this ensures the build results in a docker image. 
@@ -260,9 +261,9 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op } so.Exports = opt.Exports - so.Session = opt.Session + so.Session = slices.Clone(opt.Session) - releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, addVCSLocalDir, pw, &so) + releaseLoad, err := loadInputs(ctx, nodeDriver, opt.Inputs, pw, &so) if err != nil { return nil, nil, err } @@ -347,15 +348,15 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt Op so.FrontendAttrs["ulimit"] = ulimits } - // mark info request as internal - if opt.PrintFunc != nil { + // mark call request as internal + if opt.CallFunc != nil { so.Internal = true } return &so, releaseF, nil } -func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSLocalDir func(key, dir string, so *client.SolveOpt), pw progress.Writer, target *client.SolveOpt) (func(), error) { +func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, pw progress.Writer, target *client.SolveOpt) (func(), error) { if inp.ContextPath == "" { return nil, errors.New("please specify build context (e.g. \".\" for the current directory)") } @@ -364,7 +365,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL var ( err error - dockerfileReader io.Reader + dockerfileReader io.ReadCloser dockerfileDir string dockerfileName = inp.DockerfilePath toRemove []string @@ -379,11 +380,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL target.FrontendInputs["dockerfile"] = *inp.ContextState case inp.ContextPath == "-": if inp.DockerfilePath == "-" { - return nil, errStdinConflict + return nil, errors.Errorf("invalid argument: can't use stdin for both build context and dockerfile") } - buf := bufio.NewReader(inp.InStream) - magic, err := buf.Peek(archiveHeaderSize * 2) + rc := inp.InStream.NewReadCloser() + magic, err := inp.InStream.Peek(archiveHeaderSize * 2) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to peek context header from STDIN") } @@ -391,23 +392,23 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL if isArchive(magic) { // stdin is context up := uploadprovider.New() - target.FrontendAttrs["context"] = up.Add(buf) + target.FrontendAttrs["context"] = up.Add(rc) target.Session = append(target.Session, up) } else { if inp.DockerfilePath != "" { - return nil, errDockerfileConflict + return nil, errors.Errorf("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles") } // stdin is dockerfile - dockerfileReader = buf + dockerfileReader = rc inp.ContextPath, _ = os.MkdirTemp("", "empty-dir") toRemove = append(toRemove, inp.ContextPath) - if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil { + if err := setLocalMount("context", inp.ContextPath, target); err != nil { return nil, err } } } case osutil.IsLocalDir(inp.ContextPath): - if err := setLocalMount("context", inp.ContextPath, target, addVCSLocalDir); err != nil { + if err := setLocalMount("context", inp.ContextPath, target); err != nil { return nil, err } sharedKey := inp.ContextPath @@ -417,7 +418,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL target.SharedKey = sharedKey switch inp.DockerfilePath { case "-": - dockerfileReader = inp.InStream + dockerfileReader = inp.InStream.NewReadCloser() case "": dockerfileDir = inp.ContextPath default: @@ -426,7 +427,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, 
inp Inputs, addVCSL
 		}
 	case IsRemoteURL(inp.ContextPath):
 		if inp.DockerfilePath == "-" {
-			dockerfileReader = inp.InStream
+			dockerfileReader = inp.InStream.NewReadCloser()
 		} else if filepath.IsAbs(inp.DockerfilePath) {
 			dockerfileDir = filepath.Dir(inp.DockerfilePath)
 			dockerfileName = filepath.Base(inp.DockerfilePath)
@@ -438,11 +439,11 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
 	}
 
 	if inp.DockerfileInline != "" {
-		dockerfileReader = strings.NewReader(inp.DockerfileInline)
+		dockerfileReader = io.NopCloser(strings.NewReader(inp.DockerfileInline))
 	}
 
 	if dockerfileReader != nil {
-		dockerfileDir, err = createTempDockerfile(dockerfileReader)
+		dockerfileDir, err = createTempDockerfile(dockerfileReader, inp.InStream)
 		if err != nil {
 			return nil, err
 		}
@@ -466,7 +467,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
 	}
 
 	if dockerfileDir != "" {
-		if err := setLocalMount("dockerfile", dockerfileDir, target, addVCSLocalDir); err != nil {
+		if err := setLocalMount("dockerfile", dockerfileDir, target); err != nil {
 			return nil, err
 		}
 		dockerfileName = handleLowercaseDockerfile(dockerfileDir, dockerfileName)
@@ -528,7 +529,7 @@ func loadInputs(ctx context.Context, d *driver.DriverHandle, inp Inputs, addVCSL
 		if k == "context" || k == "dockerfile" {
 			localName = "_" + k // underscore to avoid collisions
 		}
-		if err := setLocalMount(localName, v.Path, target, addVCSLocalDir); err != nil {
+		if err := setLocalMount(localName, v.Path, target); err != nil {
 			return nil, err
 		}
 		target.FrontendAttrs["context:"+k] = "local:" + localName
@@ -570,26 +571,19 @@ func resolveDigest(localPath, tag string) (dig string, _ error) {
 	return dig, nil
 }
 
-func setLocalMount(name, root string, so *client.SolveOpt, addVCSLocalDir func(key, dir string, so *client.SolveOpt)) error {
-	lm, err := fsutil.NewFS(root)
-	if err != nil {
-		return err
-	}
-	root, err = filepath.EvalSymlinks(root) // keep same behavior as fsutil.NewFS
+func setLocalMount(name, dir string, so *client.SolveOpt) error {
+	lm, err := fsutil.NewFS(dir)
 	if err != nil {
 		return err
 	}
 	if so.LocalMounts == nil {
 		so.LocalMounts = map[string]fsutil.FS{}
 	}
-	so.LocalMounts[name] = lm
-	if addVCSLocalDir != nil {
-		addVCSLocalDir(name, root, so)
-	}
+	so.LocalMounts[name] = &fs{FS: lm, dir: dir}
 	return nil
 }
 
-func createTempDockerfile(r io.Reader) (string, error) {
+func createTempDockerfile(r io.Reader, multiReader *SyncMultiReader) (string, error) {
 	dir, err := os.MkdirTemp("", "dockerfile")
 	if err != nil {
 		return "", err
@@ -599,6 +593,16 @@ func createTempDockerfile(r io.Reader) (string, error) {
 		return "", err
 	}
 	defer f.Close()
+
+	if multiReader != nil {
+		dt, err := io.ReadAll(r)
+		if err != nil {
+			return "", err
+		}
+		multiReader.Reset(dt)
+		r = bytes.NewReader(dt)
+	}
+
 	if _, err := io.Copy(f, r); err != nil {
 		return "", err
 	}
@@ -635,3 +639,10 @@ func handleLowercaseDockerfile(dir, p string) string {
 	}
 	return p
 }
+
+type fs struct {
+	fsutil.FS
+	dir string
+}
+
+var _ fsutil.FS = &fs{}
diff --git a/build/replicatedstream.go b/build/replicatedstream.go
new file mode 100644
index 000000000000..cdf2c7241f38
--- /dev/null
+++ b/build/replicatedstream.go
@@ -0,0 +1,164 @@
+package build
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"sync"
+)
+
+type SyncMultiReader struct {
+	source  *bufio.Reader
+	buffer  []byte
+	static  []byte
+	mu      sync.Mutex
+	cond    *sync.Cond
+	readers []*syncReader
+	err     error
+	offset  int
+}
+
+type syncReader struct {
+	mr     *SyncMultiReader
+	offset int
+	closed bool
+}
+
+func NewSyncMultiReader(source io.Reader) *SyncMultiReader {
+	mr := &SyncMultiReader{
+		source: bufio.NewReader(source),
+		buffer: make([]byte, 0, 32*1024),
+	}
+	mr.cond = sync.NewCond(&mr.mu)
+	return mr
+}
+
+func (mr *SyncMultiReader) Peek(n int) ([]byte, error) {
+	mr.mu.Lock()
+	defer mr.mu.Unlock()
+
+	if mr.static != nil {
+		return mr.static[min(n, len(mr.static)):], nil
+	}
+
+	return mr.source.Peek(n)
+}
+
+func (mr *SyncMultiReader) Reset(dt []byte) {
+	mr.mu.Lock()
+	defer mr.mu.Unlock()
+
+	mr.static = dt
+}
+
+func (mr *SyncMultiReader) NewReadCloser() io.ReadCloser {
+	mr.mu.Lock()
+	defer mr.mu.Unlock()
+
+	if mr.static != nil {
+		return io.NopCloser(bytes.NewReader(mr.static))
+	}
+
+	reader := &syncReader{
+		mr: mr,
+	}
+	mr.readers = append(mr.readers, reader)
+	return reader
+}
+
+func (sr *syncReader) Read(p []byte) (int, error) {
+	sr.mr.mu.Lock()
+	defer sr.mr.mu.Unlock()
+
+	return sr.read(p)
+}
+
+func (sr *syncReader) read(p []byte) (int, error) {
+	end := sr.mr.offset + len(sr.mr.buffer)
+
+loop0:
+	for {
+		if sr.closed {
+			return 0, io.EOF
+		}
+
+		end := sr.mr.offset + len(sr.mr.buffer)
+
+		if sr.mr.err != nil && sr.offset == end {
+			return 0, sr.mr.err
+		}
+
+		start := sr.offset - sr.mr.offset
+
+		dt := sr.mr.buffer[start:]
+
+		if len(dt) > 0 {
+			n := copy(p, dt)
+			sr.offset += n
+			sr.mr.cond.Broadcast()
+			return n, nil
+		}
+
+		// check for readers that have not caught up
+		hasOpen := false
+		for _, r := range sr.mr.readers {
+			if !r.closed {
+				hasOpen = true
+			} else {
+				continue
+			}
+			if r.offset < end {
+				sr.mr.cond.Wait()
+				continue loop0
+			}
+		}
+
+		if !hasOpen {
+			return 0, io.EOF
+		}
+		break
+	}
+
+	last := sr.mr.offset + len(sr.mr.buffer)
+	// another reader has already updated the buffer
+	if last > end || sr.mr.err != nil {
+		return sr.read(p)
+	}
+
+	sr.mr.offset += len(sr.mr.buffer)
+
+	sr.mr.buffer = sr.mr.buffer[:cap(sr.mr.buffer)]
+	n, err := sr.mr.source.Read(sr.mr.buffer)
+	if n >= 0 {
+		sr.mr.buffer = sr.mr.buffer[:n]
+	} else {
+		sr.mr.buffer = sr.mr.buffer[:0]
+	}
+
+	sr.mr.cond.Broadcast()
+
+	if err != nil {
+		sr.mr.err = err
+		return 0, err
+	}
+
+	nn := copy(p, sr.mr.buffer)
+	sr.offset += nn
+
+	return nn, nil
+}
+
+func (sr *syncReader) Close() error {
+	sr.mr.mu.Lock()
+	defer sr.mr.mu.Unlock()
+
+	if sr.closed {
+		return nil
+	}
+
+	sr.closed = true
+
+	sr.mr.cond.Broadcast()
+
+	return nil
+}
diff --git a/build/replicatedstream_test.go b/build/replicatedstream_test.go
new file mode 100644
index 000000000000..985303b40069
--- /dev/null
+++ b/build/replicatedstream_test.go
@@ -0,0 +1,77 @@
+package build
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+	mathrand "math/rand"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func generateRandomData(size int) []byte {
+	data := make([]byte, size)
+	rand.Read(data)
+	return data
+}
+func TestSyncMultiReaderParallel(t *testing.T) {
+	data := generateRandomData(1024 * 1024)
+	source := bytes.NewReader(data)
+	mr := NewSyncMultiReader(source)
+
+	var wg sync.WaitGroup
+	numReaders := 10
+	bufferSize := 4096 * 4
+
+	readers := make([]io.ReadCloser, numReaders)
+
+	for i := 0; i < numReaders; i++ {
+		readers[i] = mr.NewReadCloser()
+	}
+
+	for i := 0; i < numReaders; i++ {
+		wg.Add(1)
+		go func(readerId int) {
+			defer wg.Done()
+			reader := readers[readerId]
+			defer reader.Close()
+
+			totalRead := 0
+			buf := make([]byte, bufferSize)
+			for totalRead < len(data) {
+				// Simulate random read sizes
+				readSize := mathrand.Intn(bufferSize) //nolint:gosec
+				n, err := reader.Read(buf[:readSize])
+
+				if n > 0 {
+					assert.Equal(t, data[totalRead:totalRead+n], buf[:n], "Reader %d mismatch", readerId)
+					totalRead += n
+				}
+
+				if err == io.EOF {
+					assert.Equal(t, len(data), totalRead, "Reader %d EOF mismatch", readerId)
+					return
+				}
+
+				require.NoError(t, err, "Reader %d error", readerId)
+
+				if mathrand.Intn(1000) == 0 { //nolint:gosec
+					t.Logf("Reader %d closing", readerId)
+					// Simulate random close
+					return
+				}
+
+				// Simulate random timing between reads
+				time.Sleep(time.Millisecond * time.Duration(mathrand.Intn(5))) //nolint:gosec
+			}
+
+			assert.Equal(t, len(data), totalRead, "Reader %d total read mismatch", readerId)
+		}(i)
+	}
+
+	wg.Wait()
+}
diff --git a/builder/node.go b/builder/node.go
index 271b488b8672..e5a8e66d4631 100644
--- a/builder/node.go
+++ b/builder/node.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/containerd/platforms"
 	"github.com/docker/buildx/driver"
-	ctxkube "github.com/docker/buildx/driver/kubernetes/context"
 	"github.com/docker/buildx/store"
 	"github.com/docker/buildx/store/storeutil"
 	"github.com/docker/buildx/util/dockerutil"
@@ -18,7 +17,6 @@ import (
 	"github.com/moby/buildkit/util/grpcerrors"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 	"google.golang.org/grpc/codes"
 )
@@ -119,37 +117,19 @@ func (b *Builder) LoadNodes(ctx context.Context, opts ...LoadNodesOption) (_ []N
 					return nil
 				}
 
-				contextStore := b.opts.dockerCli.ContextStore()
-
-				var kcc driver.KubeClientConfig
-				kcc, err = ctxkube.ConfigFromEndpoint(n.Endpoint, contextStore)
-				if err != nil {
-					// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
-					// try again with name="default".
-					// FIXME(@AkihiroSuda): n should retain real context name.
- kcc, err = ctxkube.ConfigFromEndpoint("default", contextStore) - if err != nil { - logrus.Error(err) - } - } - - tryToUseKubeConfigInCluster := false - if kcc == nil { - tryToUseKubeConfigInCluster = true - } else { - if _, err := kcc.ClientConfig(); err != nil { - tryToUseKubeConfigInCluster = true - } - } - if tryToUseKubeConfigInCluster { - kccInCluster := driver.KubeClientConfigInCluster{} - if _, err := kccInCluster.ClientConfig(); err == nil { - logrus.Debug("using kube config in cluster") - kcc = kccInCluster - } - } - - d, err := driver.GetDriver(ctx, driver.BuilderName(n.Name), factory, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.BuildkitdFlags, n.Files, n.DriverOpts, n.Platforms, b.opts.contextPathHash, lno.dialMeta) + d, err := driver.GetDriver(ctx, factory, driver.InitConfig{ + Name: driver.BuilderName(n.Name), + EndpointAddr: n.Endpoint, + DockerAPI: dockerapi, + ContextStore: b.opts.dockerCli.ContextStore(), + BuildkitdFlags: n.BuildkitdFlags, + Files: n.Files, + DriverOpts: n.DriverOpts, + Auth: imageopt.Auth, + Platforms: n.Platforms, + ContextPathHash: b.opts.contextPathHash, + DialMeta: lno.dialMeta, + }) if err != nil { node.Err = err return nil diff --git a/commands/bake.go b/commands/bake.go index a7d73ed9f9dd..bec1cadda07a 100644 --- a/commands/bake.go +++ b/commands/bake.go @@ -4,12 +4,16 @@ import ( "bytes" "cmp" "context" + "crypto/sha256" + "encoding/hex" "encoding/json" "fmt" "io" "os" "slices" + "sort" "strings" + "sync" "text/tabwriter" "github.com/containerd/console" @@ -26,14 +30,15 @@ import ( "github.com/docker/buildx/util/confutil" "github.com/docker/buildx/util/desktop" "github.com/docker/buildx/util/dockerutil" + "github.com/docker/buildx/util/osutil" "github.com/docker/buildx/util/progress" "github.com/docker/buildx/util/tracing" "github.com/docker/cli/cli/command" - "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/progress/progressui" "github.com/pkg/errors" "github.com/spf13/cobra" + "go.opentelemetry.io/otel/attribute" ) type bakeOptions struct { @@ -44,6 +49,7 @@ type bakeOptions struct { listVars bool sbom string provenance string + allow []string builder string metadataFile string @@ -53,6 +59,8 @@ type bakeOptions struct { } func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in bakeOptions, cFlags commonFlags) (err error) { + mp := dockerCli.MeterProvider() + ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake") if err != nil { return err @@ -61,27 +69,12 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba end(err) }() - var url string - cmdContext := "cwd://" - - if len(targets) > 0 { - if build.IsRemoteURL(targets[0]) { - url = targets[0] - targets = targets[1:] - if len(targets) > 0 { - if build.IsRemoteURL(targets[0]) { - cmdContext = targets[0] - targets = targets[1:] - } - } - } - } - + url, cmdContext, targets := bakeArgs(targets) if len(targets) == 0 { targets = []string{"default"} } - callFunc, err := buildflags.ParsePrintFunc(in.callFunc) + callFunc, err := buildflags.ParseCallFunc(in.callFunc) if err != nil { return err } @@ -110,6 +103,11 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba } contextPathHash, _ := os.Getwd() + ent, err := bake.ParseEntitlements(in.allow) + if err != nil { + return err + } + ctx2, cancel := context.WithCancel(context.TODO()) defer cancel() @@ -117,6 +115,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba var 
progressConsoleDesc, progressTextDesc string // instance only needed for reading remote bake files or building + var driverType string if url != "" || !in.printOnly { b, err := builder.New(dockerCli, builder.WithName(in.builder), @@ -134,53 +133,33 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba } progressConsoleDesc = fmt.Sprintf("%s:%s", b.Driver, b.Name) progressTextDesc = fmt.Sprintf("building with %q instance using %s driver", b.Name, b.Driver) + driverType = b.Driver } var term bool if _, err := console.ConsoleFromFile(os.Stderr); err == nil { term = true } + attributes := bakeMetricAttributes(dockerCli, driverType, url, cmdContext, targets, &in) progressMode := progressui.DisplayMode(cFlags.progress) var printer *progress.Printer - printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode, - progress.WithDesc(progressTextDesc, progressConsoleDesc), - progress.WithOnClose(func() { - if p := printer; p != nil { - printWarnings(os.Stderr, p.Warnings(), progressMode) - } - }), - ) - if err != nil { + + makePrinter := func() error { + var err error + printer, err = progress.NewPrinter(ctx2, os.Stderr, progressMode, + progress.WithDesc(progressTextDesc, progressConsoleDesc), + progress.WithMetrics(mp, attributes), + progress.WithOnClose(func() { + printWarnings(os.Stderr, printer.Warnings(), progressMode) + }), + ) return err } - var resp map[string]*client.SolveResponse - - defer func() { - if printer != nil { - err1 := printer.Wait() - if err == nil { - err = err1 - } - if err != nil { - return - } - if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode { - desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term) - } - if resp != nil && len(in.metadataFile) > 0 { - dt := make(map[string]interface{}) - for t, r := range resp { - dt[t] = decodeExporterResponse(r.ExporterResponse) - } - if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() { - dt["buildx.build.warnings"] = warnings - } - err = writeMetadataFile(in.metadataFile, dt) - } - } - }() + if err := makePrinter(); err != nil { + return err + } files, inp, err := readBakeFiles(ctx, nodes, url, in.files, dockerCli.In(), printer) if err != nil { @@ -203,10 +182,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba if err != nil { return err } - - err = printer.Wait() - printer = nil - if err != nil { + if err = printer.Wait(); err != nil { return err } if in.listTargets { @@ -249,68 +225,79 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba } if in.printOnly { - dt, err := json.MarshalIndent(def, "", " ") - if err != nil { + if err = printer.Wait(); err != nil { return err } - err = printer.Wait() - printer = nil + dtdef, err := json.MarshalIndent(def, "", " ") if err != nil { return err } - fmt.Fprintln(dockerCli.Out(), string(dt)) - return nil + _, err = fmt.Fprintln(dockerCli.Out(), string(dtdef)) + return err } for _, opt := range bo { - if opt.PrintFunc != nil { - cf, err := buildflags.ParsePrintFunc(opt.PrintFunc.Name) + if opt.CallFunc != nil { + cf, err := buildflags.ParseCallFunc(opt.CallFunc.Name) if err != nil { return err } - opt.PrintFunc.Name = cf.Name + opt.CallFunc.Name = cf.Name } } - prm := confutil.MetadataProvenance() - if len(in.metadataFile) == 0 { - prm = confutil.MetadataProvenanceModeDisabled - } - - groupRef := identity.NewID() - var refs []string - for k, b := range bo { - b.Ref = identity.NewID() - b.GroupRef = groupRef - 
b.ProvenanceResponseMode = prm - refs = append(refs, b.Ref) - bo[k] = b - } - dt, err := json.Marshal(def) + exp, err := ent.Validate(bo) if err != nil { return err } - if err := saveLocalStateGroup(dockerCli, groupRef, localstate.StateGroup{ - Definition: dt, - Targets: targets, - Inputs: overrides, - Refs: refs, - }); err != nil { + if err := exp.Prompt(ctx, &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil { return err } + if printer.IsDone() { + // init new printer as old one was stopped to show the prompt + if err := makePrinter(); err != nil { + return err + } + } - resp, err = build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer) - if err != nil { - return wrapBuildError(err, true) + if err := saveLocalStateGroup(dockerCli, in, targets, bo, overrides, def); err != nil { + return err + } + + done := timeBuildCommand(mp, attributes) + resp, retErr := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer) + if err := printer.Wait(); retErr == nil { + retErr = err } + if retErr != nil { + err = wrapBuildError(retErr, true) + } + done(err) - err = printer.Wait() if err != nil { return err } + if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode { + desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term) + } + if len(in.metadataFile) > 0 { + dt := make(map[string]interface{}) + for t, r := range resp { + dt[t] = decodeExporterResponse(r.ExporterResponse) + } + if callFunc == nil { + if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() { + dt["buildx.build.warnings"] = warnings + } + } + if err := writeMetadataFile(in.metadataFile, dt); err != nil { + return err + } + } + var callFormatJSON bool - var jsonResults = map[string]map[string]any{} + jsonResults := map[string]map[string]any{} if callFunc != nil { callFormatJSON = callFunc.Format == "json" } @@ -325,14 +312,14 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba for _, name := range names { req := bo[name] - if req.PrintFunc == nil { + if req.CallFunc == nil { continue } - pf := &pb.PrintFunc{ - Name: req.PrintFunc.Name, - Format: req.PrintFunc.Format, - IgnoreStatus: req.PrintFunc.IgnoreStatus, + pf := &pb.CallFunc{ + Name: req.CallFunc.Name, + Format: req.CallFunc.Format, + IgnoreStatus: req.CallFunc.IgnoreStatus, } if callFunc != nil { @@ -396,7 +383,7 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba "build": def, } if res, ok := jsonResults[name]; ok { - printName := bo[name].PrintFunc.Name + printName := bo[name].CallFunc.Name if printName == "lint" { printName = "check" } @@ -451,6 +438,7 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command { flags.StringVar(&options.provenance, "provenance", "", `Shorthand for "--set=*.attest=type=provenance"`) flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`) flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`) + flags.StringArrayVar(&options.allow, "allow", nil, "Allow build to access specified resources") flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`) flags.Lookup("check").NoOptDefVal = "true" @@ -468,12 +456,49 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command { return cmd } -func saveLocalStateGroup(dockerCli 
command.Cli, ref string, lsg localstate.StateGroup) error { +func saveLocalStateGroup(dockerCli command.Cli, in bakeOptions, targets []string, bo map[string]build.Options, overrides []string, def any) error { + prm := confutil.MetadataProvenance() + if len(in.metadataFile) == 0 { + prm = confutil.MetadataProvenanceModeDisabled + } + groupRef := identity.NewID() + refs := make([]string, 0, len(bo)) + for k, b := range bo { + b.Ref = identity.NewID() + b.GroupRef = groupRef + b.ProvenanceResponseMode = prm + refs = append(refs, b.Ref) + bo[k] = b + } l, err := localstate.New(confutil.ConfigDir(dockerCli)) if err != nil { return err } - return l.SaveGroup(ref, lsg) + dtdef, err := json.MarshalIndent(def, "", " ") + if err != nil { + return err + } + return l.SaveGroup(groupRef, localstate.StateGroup{ + Definition: dtdef, + Targets: targets, + Inputs: overrides, + Refs: refs, + }) +} + +// bakeArgs will retrieve the remote url, command context, and targets +// from the command line arguments. +func bakeArgs(args []string) (url, cmdContext string, targets []string) { + cmdContext, targets = "cwd://", args + if len(targets) == 0 || !build.IsRemoteURL(targets[0]) { + return url, cmdContext, targets + } + url, targets = targets[0], targets[1:] + if len(targets) == 0 || !build.IsRemoteURL(targets[0]) { + return url, cmdContext, targets + } + cmdContext, targets = targets[0], targets[1:] + return url, cmdContext, targets } func readBakeFiles(ctx context.Context, nodes []builder.Node, url string, names []string, stdin io.Reader, pw progress.Writer) (files []bake.File, inp *bake.Input, err error) { @@ -590,3 +615,85 @@ func printTargetList(w io.Writer, cfg *bake.Config) error { return nil } + +func bakeMetricAttributes(dockerCli command.Cli, driverType, url, cmdContext string, targets []string, options *bakeOptions) attribute.Set { + return attribute.NewSet( + commandNameAttribute.String("bake"), + attribute.Stringer(string(commandOptionsHash), &bakeOptionsHash{ + bakeOptions: options, + configDir: confutil.ConfigDir(dockerCli), + url: url, + cmdContext: cmdContext, + targets: targets, + }), + driverNameAttribute.String(options.builder), + driverTypeAttribute.String(driverType), + ) +} + +type bakeOptionsHash struct { + *bakeOptions + configDir string + url string + cmdContext string + targets []string + result string + resultOnce sync.Once +} + +func (o *bakeOptionsHash) String() string { + o.resultOnce.Do(func() { + url := o.url + cmdContext := o.cmdContext + if cmdContext == "cwd://" { + // Resolve the directory if the cmdContext is the current working directory. + cmdContext = osutil.GetWd() + } + + // Sort the inputs for files and targets since the ordering + // doesn't matter, but avoid modifying the original slice. + files := immutableSort(o.files) + targets := immutableSort(o.targets) + + joinedFiles := strings.Join(files, ",") + joinedTargets := strings.Join(targets, ",") + salt := confutil.TryNodeIdentifier(o.configDir) + + h := sha256.New() + for _, s := range []string{url, cmdContext, joinedFiles, joinedTargets, salt} { + _, _ = io.WriteString(h, s) + h.Write([]byte{0}) + } + o.result = hex.EncodeToString(h.Sum(nil)) + }) + return o.result +} + +// immutableSort will sort the entries in s without modifying the original slice. 
+func immutableSort(s []string) []string { + if !sort.StringsAreSorted(s) { + cpy := make([]string, len(s)) + copy(cpy, s) + sort.Strings(cpy) + return cpy + } + return s +} + +type syncWriter struct { + w io.Writer + once sync.Once + wait func() error +} + +func (w *syncWriter) Write(p []byte) (n int, err error) { + w.once.Do(func() { + if w.wait != nil { + err = w.wait() + } + }) + if err != nil { + return 0, err + } + return w.w.Write(p) +} diff --git a/commands/build.go b/commands/build.go index 7359a19477e0..274209fd3fe4 100644 --- a/commands/build.go +++ b/commands/build.go @@ -79,7 +79,7 @@ type buildOptions struct { noCacheFilter []string outputs []string platforms []string - printFunc string + callFunc string secrets []string shmSize dockeropts.MemBytes ssh []string @@ -199,13 +199,13 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error return nil, err } - opts.PrintFunc, err = buildflags.ParsePrintFunc(o.printFunc) + opts.CallFunc, err = buildflags.ParseCallFunc(o.callFunc) if err != nil { return nil, err } prm := confutil.MetadataProvenance() - if opts.PrintFunc != nil || len(o.metadataFile) == 0 { + if opts.CallFunc != nil || len(o.metadataFile) == 0 { prm = confutil.MetadataProvenanceModeDisabled } opts.ProvenanceResponseMode = string(prm) @@ -224,15 +224,22 @@ func (o *buildOptions) toDisplayMode() (progressui.DisplayMode, error) { return progress, nil } -func buildMetricAttributes(dockerCli command.Cli, b *builder.Builder, options *buildOptions) attribute.Set { +const ( + commandNameAttribute = attribute.Key("command.name") + commandOptionsHash = attribute.Key("command.options.hash") + driverNameAttribute = attribute.Key("driver.name") + driverTypeAttribute = attribute.Key("driver.type") +) + +func buildMetricAttributes(dockerCli command.Cli, driverType string, options *buildOptions) attribute.Set { return attribute.NewSet( - attribute.String("command.name", "build"), - attribute.Stringer("command.options.hash", &buildOptionsHash{ + commandNameAttribute.String("build"), + attribute.Stringer(string(commandOptionsHash), &buildOptionsHash{ buildOptions: options, configDir: confutil.ConfigDir(dockerCli), }), - attribute.String("driver.name", options.builder), - attribute.String("driver.type", b.Driver), + driverNameAttribute.String(options.builder), + driverTypeAttribute.String(driverType), ) } @@ -308,12 +315,13 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) if err != nil { return err } + driverType := b.Driver var term bool if _, err := console.ConsoleFromFile(os.Stderr); err == nil { term = true } - attributes := buildMetricAttributes(dockerCli, b, &options) + attributes := buildMetricAttributes(dockerCli, driverType, &options) ctx2, cancel := context.WithCancel(context.TODO()) defer cancel() @@ -367,21 +375,24 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions) return errors.Wrap(err, "writing image ID file") } } - if opts.PrintFunc != nil { - if exitcode, err := printResult(dockerCli.Out(), opts.PrintFunc, resp.ExporterResponse); err != nil { - return err - } else if exitcode != 0 { - os.Exit(exitcode) - } - } else if options.metadataFile != "" { + if options.metadataFile != "" { dt := decodeExporterResponse(resp.ExporterResponse) - if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() { - dt["buildx.build.warnings"] = warnings + if opts.CallFunc == nil { + if warnings := printer.Warnings(); len(warnings) > 0 && 
confutil.MetadataWarningsEnabled() { + dt["buildx.build.warnings"] = warnings + } } if err := writeMetadataFile(options.metadataFile, dt); err != nil { return err } } + if opts.CallFunc != nil { + if exitcode, err := printResult(dockerCli.Out(), opts.CallFunc, resp.ExporterResponse); err != nil { + return err + } else if exitcode != 0 { + os.Exit(exitcode) + } + } return nil } @@ -634,8 +645,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D cobrautil.MarkFlagsExperimental(flags, "root", "detach", "server-config") } - flags.StringVar(&options.printFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`) - flags.VarPF(callAlias(&options.printFunc, "check"), "check", "", `Shorthand for "--call=check"`) + flags.StringVar(&options.callFunc, "call", "build", `Set method for evaluating build ("check", "outline", "targets")`) + flags.VarPF(callAlias(&options.callFunc, "check"), "check", "", `Shorthand for "--call=check"`) flags.Lookup("check").NoOptDefVal = "true" // hidden flags @@ -644,7 +655,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D var ignoreBool bool var ignoreInt int64 - flags.StringVar(&options.printFunc, "print", "", "Print result of information request (e.g., outline, targets)") + flags.StringVar(&options.callFunc, "print", "", "Print result of information request (e.g., outline, targets)") cobrautil.MarkFlagsExperimental(flags, "print") flags.MarkHidden("print") @@ -731,9 +742,17 @@ func writeMetadataFile(filename string, dt interface{}) error { } func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} { + decFunc := func(k, v string) ([]byte, error) { + if k == "result.json" { + // result.json is part of metadata response for subrequests which + // is already a JSON object: https://github.com/moby/buildkit/blob/f6eb72f2f5db07ddab89ac5e2bd3939a6444f4be/frontend/dockerui/requests.go#L100-L102 + return []byte(v), nil + } + return base64.StdEncoding.DecodeString(v) + } out := make(map[string]interface{}) for k, v := range exporterResponse { - dt, err := base64.StdEncoding.DecodeString(v) + dt, err := decFunc(k, v) if err != nil { out[k] = v continue @@ -863,7 +882,7 @@ func printWarnings(w io.Writer, warnings []client.VertexWarning, mode progressui } } -func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string) (int, error) { +func printResult(w io.Writer, f *controllerapi.CallFunc, res map[string]string) (int, error) { switch f.Name { case "outline": return 0, printValue(w, outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res) @@ -872,17 +891,29 @@ func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string) case "subrequests.describe": return 0, printValue(w, subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res) case "lint": - err := printValue(w, lint.PrintLintViolations, lint.SubrequestLintDefinition.Version, f.Format, res) - if err != nil { - return 0, err - } - lintResults := lint.LintResults{} if result, ok := res["result.json"]; ok { if err := json.Unmarshal([]byte(result), &lintResults); err != nil { return 0, err } } + + warningCount := len(lintResults.Warnings) + if f.Format != "json" && warningCount > 0 { + var warningCountMsg string + if warningCount == 1 { + warningCountMsg = "1 warning has been found!" 
+ } else if warningCount > 1 { + warningCountMsg = fmt.Sprintf("%d warnings have been found!", warningCount) + } + fmt.Fprintf(w, "Check complete, %s\n", warningCountMsg) + } + + err := printValue(w, printLintViolationsWrapper, lint.SubrequestLintDefinition.Version, f.Format, res) + if err != nil { + return 0, err + } + if lintResults.Error != nil { // Print the error message and the source // Normally, we would use `errdefs.WithSource` to attach the source to the @@ -921,9 +952,9 @@ func printResult(w io.Writer, f *controllerapi.PrintFunc, res map[string]string) return 0, nil } -type printFunc func([]byte, io.Writer) error +type callFunc func([]byte, io.Writer) error -func printValue(w io.Writer, printer printFunc, version string, format string, res map[string]string) error { +func printValue(w io.Writer, printer callFunc, version string, format string, res map[string]string) error { if format == "json" { fmt.Fprintln(w, res["result.json"]) return nil @@ -937,6 +968,11 @@ func printValue(w io.Writer, printer printFunc, version string, format string, r return printer([]byte(res["result.json"]), w) } +// FIXME: remove once https://github.com/docker/buildx/pull/2672 is sorted +func printLintViolationsWrapper(dt []byte, w io.Writer) error { + return lint.PrintLintViolations(dt, w, nil) +} + type invokeConfig struct { controllerapi.InvokeConfig onFlag string diff --git a/commands/root.go b/commands/root.go index 8c0d86274272..360b40ab8848 100644 --- a/commands/root.go +++ b/commands/root.go @@ -21,6 +21,7 @@ import ( ) func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command { + var opt rootOptions cmd := &cobra.Command{ Short: "Docker Buildx", Long: `Extended build capabilities with BuildKit`, @@ -32,6 +33,10 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman HiddenDefaultCmd: true, }, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if opt.debug { + debug.Enable() + } + cmd.SetContext(appcontext.Context()) if !isPlugin { return nil @@ -47,11 +52,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman cmd.TraverseChildren = true cmd.DisableFlagsInUseLine = true cli.DisableFlagsInUseLine(cmd) - - // DEBUG=1 should perform the same as --debug at the docker root level - if debug.IsEnabled() { - debug.Enable() - } } logrus.SetFormatter(&logutil.Formatter{}) @@ -68,16 +68,16 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman cmd.SetHelpTemplate(cmd.HelpTemplate() + "\nExperimental commands and flags are hidden. 
Set BUILDX_EXPERIMENTAL=1 to show them.\n") } - addCommands(cmd, dockerCli) + addCommands(cmd, &opt, dockerCli) return cmd } type rootOptions struct { builder string + debug bool } -func addCommands(cmd *cobra.Command, dockerCli command.Cli) { - opts := &rootOptions{} +func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) { rootFlags(opts, cmd.PersistentFlags()) cmd.AddCommand( @@ -112,4 +112,5 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) { func rootFlags(options *rootOptions, flags *pflag.FlagSet) { flags.StringVar(&options.builder, "builder", os.Getenv("BUILDX_BUILDER"), "Override the configured builder instance") + flags.BoolVarP(&options.debug, "debug", "D", debug.IsEnabled(), "Enable debug logging") } diff --git a/controller/build/build.go b/controller/build/build.go index 4fb54e355b3b..1f53ee03c326 100644 --- a/controller/build/build.go +++ b/controller/build/build.go @@ -3,7 +3,6 @@ package build import ( "context" "io" - "os" "path/filepath" "strings" "sync" @@ -19,7 +18,6 @@ import ( "github.com/docker/buildx/util/platformutil" "github.com/docker/buildx/util/progress" "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/config" dockeropts "github.com/docker/cli/opts" "github.com/docker/docker/api/types/container" "github.com/moby/buildkit/client" @@ -50,7 +48,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build Inputs: build.Inputs{ ContextPath: in.ContextPath, DockerfilePath: in.DockerfileName, - InStream: inStream, + InStream: build.NewSyncMultiReader(inStream), NamedContexts: contexts, }, Ref: in.Ref, @@ -76,7 +74,7 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build } opts.Platforms = platforms - dockerConfig := config.LoadDefaultConfigFile(os.Stderr) + dockerConfig := dockerCli.ConfigFile() opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil)) secrets, err := controllerapi.CreateSecrets(in.Secrets) @@ -160,11 +158,11 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in controllerapi.Build } opts.Allow = allow - if in.PrintFunc != nil { - opts.PrintFunc = &build.PrintFunc{ - Name: in.PrintFunc.Name, - Format: in.PrintFunc.Format, - IgnoreStatus: in.PrintFunc.IgnoreStatus, + if in.CallFunc != nil { + opts.CallFunc = &build.CallFunc{ + Name: in.CallFunc.Name, + Format: in.CallFunc.Format, + IgnoreStatus: in.CallFunc.IgnoreStatus, } } diff --git a/controller/pb/controller.pb.go b/controller/pb/controller.pb.go index 6dedbe4c4abd..5a66806d662c 100644 --- a/controller/pb/controller.pb.go +++ b/controller/pb/controller.pb.go @@ -273,7 +273,7 @@ func (m *BuildRequest) GetOptions() *BuildOptions { type BuildOptions struct { ContextPath string `protobuf:"bytes,1,opt,name=ContextPath,proto3" json:"ContextPath,omitempty"` DockerfileName string `protobuf:"bytes,2,opt,name=DockerfileName,proto3" json:"DockerfileName,omitempty"` - PrintFunc *PrintFunc `protobuf:"bytes,3,opt,name=PrintFunc,proto3" json:"PrintFunc,omitempty"` + CallFunc *CallFunc `protobuf:"bytes,3,opt,name=CallFunc,proto3" json:"CallFunc,omitempty"` NamedContexts map[string]string `protobuf:"bytes,4,rep,name=NamedContexts,proto3" json:"NamedContexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Allow []string `protobuf:"bytes,5,rep,name=Allow,proto3" json:"Allow,omitempty"` Attests []*Attest `protobuf:"bytes,6,rep,name=Attests,proto3" json:"Attests,omitempty"` @@ -346,9 +346,9 @@ func (m *BuildOptions) 
GetDockerfileName() string { return "" } -func (m *BuildOptions) GetPrintFunc() *PrintFunc { +func (m *BuildOptions) GetCallFunc() *CallFunc { if m != nil { - return m.PrintFunc + return m.CallFunc } return nil } @@ -810,7 +810,7 @@ func (m *Secret) GetEnv() string { return "" } -type PrintFunc struct { +type CallFunc struct { Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"` IgnoreStatus bool `protobuf:"varint,3,opt,name=IgnoreStatus,proto3" json:"IgnoreStatus,omitempty"` @@ -819,45 +819,45 @@ type PrintFunc struct { XXX_sizecache int32 `json:"-"` } -func (m *PrintFunc) Reset() { *m = PrintFunc{} } -func (m *PrintFunc) String() string { return proto.CompactTextString(m) } -func (*PrintFunc) ProtoMessage() {} -func (*PrintFunc) Descriptor() ([]byte, []int) { +func (m *CallFunc) Reset() { *m = CallFunc{} } +func (m *CallFunc) String() string { return proto.CompactTextString(m) } +func (*CallFunc) ProtoMessage() {} +func (*CallFunc) Descriptor() ([]byte, []int) { return fileDescriptor_ed7f10298fa1d90f, []int{12} } -func (m *PrintFunc) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PrintFunc.Unmarshal(m, b) +func (m *CallFunc) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CallFunc.Unmarshal(m, b) } -func (m *PrintFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PrintFunc.Marshal(b, m, deterministic) +func (m *CallFunc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CallFunc.Marshal(b, m, deterministic) } -func (m *PrintFunc) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrintFunc.Merge(m, src) +func (m *CallFunc) XXX_Merge(src proto.Message) { + xxx_messageInfo_CallFunc.Merge(m, src) } -func (m *PrintFunc) XXX_Size() int { - return xxx_messageInfo_PrintFunc.Size(m) +func (m *CallFunc) XXX_Size() int { + return xxx_messageInfo_CallFunc.Size(m) } -func (m *PrintFunc) XXX_DiscardUnknown() { - xxx_messageInfo_PrintFunc.DiscardUnknown(m) +func (m *CallFunc) XXX_DiscardUnknown() { + xxx_messageInfo_CallFunc.DiscardUnknown(m) } -var xxx_messageInfo_PrintFunc proto.InternalMessageInfo +var xxx_messageInfo_CallFunc proto.InternalMessageInfo -func (m *PrintFunc) GetName() string { +func (m *CallFunc) GetName() string { if m != nil { return m.Name } return "" } -func (m *PrintFunc) GetFormat() string { +func (m *CallFunc) GetFormat() string { if m != nil { return m.Format } return "" } -func (m *PrintFunc) GetIgnoreStatus() bool { +func (m *CallFunc) GetIgnoreStatus() bool { if m != nil { return m.IgnoreStatus } @@ -2062,7 +2062,7 @@ func init() { proto.RegisterType((*Attest)(nil), "buildx.controller.v1.Attest") proto.RegisterType((*SSH)(nil), "buildx.controller.v1.SSH") proto.RegisterType((*Secret)(nil), "buildx.controller.v1.Secret") - proto.RegisterType((*PrintFunc)(nil), "buildx.controller.v1.PrintFunc") + proto.RegisterType((*CallFunc)(nil), "buildx.controller.v1.CallFunc") proto.RegisterType((*InspectRequest)(nil), "buildx.controller.v1.InspectRequest") proto.RegisterType((*InspectResponse)(nil), "buildx.controller.v1.InspectResponse") proto.RegisterType((*UlimitOpt)(nil), "buildx.controller.v1.UlimitOpt") @@ -2094,130 +2094,130 @@ func init() { func init() { proto.RegisterFile("controller.proto", fileDescriptor_ed7f10298fa1d90f) } var fileDescriptor_ed7f10298fa1d90f = []byte{ - // 1957 bytes of a gzipped FileDescriptorProto + // 1961 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x5f, 0x73, 0x1b, 0xb7, 0x11, 0xef, 0x91, 0x14, 0xff, 0x2c, 0x45, 0xd9, 0x46, 0x6d, 0x17, 0x3e, 0x3b, 0xb6, 0x7c, 0xb6, - 0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x99, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6, - 0xa4, 0x01, 0x65, 0x67, 0xda, 0xcc, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00, - 0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xad, 0x1f, - 0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3e, 0x11, 0xbb, 0xf8, 0xed, + 0x53, 0x4e, 0xdd, 0xa1, 0x12, 0xa5, 0x8e, 0xe3, 0x38, 0x9d, 0xa9, 0x44, 0x89, 0x95, 0x32, 0xb6, + 0xa4, 0x01, 0x65, 0x67, 0x9a, 0xce, 0x34, 0x73, 0x22, 0x21, 0xea, 0x46, 0xa7, 0x03, 0x7b, 0x00, + 0xf5, 0xa7, 0x4f, 0x7d, 0x68, 0xdf, 0x3a, 0xfd, 0x1e, 0x9d, 0x7e, 0x84, 0x3e, 0xf5, 0xa1, 0xdf, + 0xa7, 0x1f, 0xa1, 0x83, 0x05, 0xee, 0x78, 0x14, 0x79, 0x94, 0xd4, 0x3c, 0x11, 0xbb, 0xf8, 0xed, 0x2e, 0x76, 0x6f, 0xb1, 0xbb, 0x20, 0xdc, 0xee, 0x89, 0x48, 0xc5, 0x22, 0x0c, 0x79, 0xdc, 0x1a, 0xc6, 0x42, 0x09, 0x72, 0xf7, 0x60, 0x14, 0x84, 0xfd, 0xf3, 0x56, 0x66, 0xe3, 0xf4, 0x73, 0xf7, 0xed, 0x20, 0x50, 0x47, 0xa3, 0x83, 0x56, 0x4f, 0x9c, 0xac, 0x9c, 0x88, 0x83, 0x8b, 0x15, 0x44, 0x1d, 0x07, 0x6a, 0xc5, 0x1f, 0x06, 0x2b, 0x92, 0xc7, 0xa7, 0x41, 0x8f, 0xcb, 0x15, 0x2b, 0x94, 0xfc, 0x1a, 0x95, 0xee, 0xab, 0x5c, 0x61, 0x29, 0x46, 0x71, 0x8f, 0x0f, 0x45, 0x18, 0xf4, 0x2e, 0x56, 0x86, 0x07, 0x2b, 0x66, 0x65, 0xc4, 0xbc, 0x26, 0xdc, 0x7d, 0x17, 0x48, 0xb5, 0x17, 0x8b, - 0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x30, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a, + 0x1e, 0x97, 0x92, 0x4b, 0xc6, 0xff, 0x38, 0xe2, 0x52, 0x91, 0xdb, 0x50, 0x64, 0xfc, 0x90, 0x3a, 0xcb, 0x4e, 0xb3, 0xc6, 0xf4, 0xd2, 0xdb, 0x83, 0x7b, 0x97, 0x90, 0x72, 0x28, 0x22, 0xc9, 0xc9, 0x6b, 0x58, 0xd8, 0x8e, 0x0e, 0x85, 0xa4, 0xce, 0x72, 0xb1, 0x59, 0x5f, 0x7d, 0xda, 0x9a, 0xe5, 0x5c, 0xcb, 0xca, 0x69, 0x24, 0x33, 0x78, 0x4f, 0x42, 0x3d, 0xc3, 0x25, 0x8f, 0xa0, 0x96, 0x90, 0x1b, 0xd6, 0xf0, 0x98, 0x41, 0x3a, 0xb0, 0xb8, 0x1d, 0x9d, 0x8a, 0x63, 0xde, 0x16, 0xd1, 0x61, 0x30, 0xa0, 0x85, 0x65, 0xa7, 0x59, 0x5f, 0xf5, 0x66, 0x1b, 0xcb, 0x22, 0xd9, 0x84, 0x9c, 0xf7, - 0x1d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a, - 0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x1e, 0x16, 0xd7, 0xf5, + 0x2d, 0xd0, 0x8d, 0x40, 0xf6, 0x44, 0x14, 0xf1, 0x5e, 0xe2, 0x4c, 0xae, 0xd3, 0x93, 0x67, 0x2a, + 0x5c, 0x3a, 0x93, 0xf7, 0x10, 0x1e, 0xcc, 0xd0, 0x65, 0xc2, 0xe2, 0xfd, 0x01, 0x16, 0xd7, 0xf5, 0xd9, 0xf2, 0x95, 0x7f, 0x03, 0x95, 0xdd, 0xa1, 0x0a, 0x44, 0x24, 0xe7, 0x7b, 0x83, 0x6a, 0x2c, - 0x92, 0x25, 0x22, 0xde, 0xbf, 0x17, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1, + 0x92, 0x25, 0x22, 0xde, 0x7f, 0x16, 0xad, 0x01, 0xcb, 0x20, 0xcb, 0x50, 0x6f, 0x8b, 0x48, 0xf1, 0x73, 0xb5, 0xe7, 0xab, 0x23, 0x6b, 0x28, 0xcb, 0x22, 0x9f, 0xc2, 0xd2, 0x86, 0xe8, 0x1d, 0xf3, - 0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x5b, 0xed, 0x75, 0x10, - 0xa9, 0xce, 0x28, 0xea, 0xd1, 0x22, 0x1e, 0xed, 0x49, 0xde, 0x57, 0xb5, 0x30, 0x36, 0x96, 0x20, - 0x3f, 0x40, 0x43, 0xab, 0xe9, 0x5b, 0xd3, 0x92, 0x96, 0x30, 0x31, 0x5e, 0x5d, 0xed, 0x5d, 0x6b, - 0x42, 0x6e, 0x33, 0x52, 0xf1, 0x05, 0x9b, 0xd4, 0x45, 0xee, 0xc2, 0xc2, 0x5a, 0x18, 0x8a, 0x33, - 0xba, 0xb0, 0x5c, 0x6c, 0xd6, 0x98, 0x21, 0xc8, 0x97, 0x50, 0x59, 0x53, 0x8a, 0x4b, 0x25, 0x69, - 0x19, 0x8d, 0x3d, 0x9a, 0x6d, 0xcc, 0x80, 0x58, 0x02, 0x26, 0xbb, 
0x50, 0x43, 0xfb, 0x6b, 0xf1, - 0x40, 0xd2, 0x0a, 0x4a, 0x7e, 0x7e, 0x8d, 0x63, 0xa6, 0x32, 0xe6, 0x88, 0x63, 0x1d, 0x64, 0x13, - 0x6a, 0x6d, 0xbf, 0x77, 0xc4, 0x3b, 0xb1, 0x38, 0xa1, 0x55, 0x54, 0xf8, 0xf3, 0xd9, 0x0a, 0x11, - 0x66, 0x15, 0x5a, 0x35, 0xa9, 0x24, 0x59, 0x83, 0x0a, 0x12, 0xfb, 0x82, 0xd6, 0x6e, 0xa6, 0x24, - 0x91, 0x23, 0x1e, 0x2c, 0xb6, 0x07, 0xb1, 0x18, 0x0d, 0xf7, 0xfc, 0x98, 0x47, 0x8a, 0x02, 0x7e, - 0xea, 0x09, 0x1e, 0x79, 0x0b, 0x95, 0xcd, 0xf3, 0xa1, 0x88, 0x95, 0xa4, 0xf5, 0x79, 0x97, 0xd7, - 0x80, 0xac, 0x01, 0x2b, 0x41, 0x1e, 0x03, 0x6c, 0x9e, 0xab, 0xd8, 0xdf, 0x12, 0x3a, 0xec, 0x8b, - 0xf8, 0x39, 0x32, 0x1c, 0xd2, 0x81, 0xf2, 0x3b, 0xff, 0x80, 0x87, 0x92, 0x36, 0x50, 0x77, 0xeb, - 0x1a, 0x81, 0x35, 0x02, 0xc6, 0x90, 0x95, 0xd6, 0x79, 0xbd, 0xc3, 0xd5, 0x99, 0x88, 0x8f, 0xdf, - 0x8b, 0x3e, 0xa7, 0x4b, 0x26, 0xaf, 0x33, 0x2c, 0xf2, 0x1c, 0x1a, 0x3b, 0xc2, 0x04, 0x2f, 0x08, - 0x15, 0x8f, 0xe9, 0x2d, 0x3c, 0xcc, 0x24, 0x13, 0xef, 0x72, 0xe8, 0xab, 0x43, 0x11, 0x9f, 0x48, - 0x7a, 0x1b, 0x11, 0x63, 0x86, 0xce, 0xa0, 0x2e, 0xef, 0xc5, 0x5c, 0x49, 0x7a, 0x67, 0x5e, 0x06, - 0x19, 0x10, 0x4b, 0xc0, 0x84, 0x42, 0xa5, 0x7b, 0x74, 0xd2, 0x0d, 0xfe, 0xc8, 0x29, 0x59, 0x76, - 0x9a, 0x45, 0x96, 0x90, 0xe4, 0x25, 0x14, 0xbb, 0xdd, 0x2d, 0xfa, 0x53, 0xd4, 0xf6, 0x20, 0x47, - 0x5b, 0x77, 0x8b, 0x69, 0x14, 0x21, 0x50, 0xda, 0xf7, 0x07, 0x92, 0xde, 0xc5, 0x73, 0xe1, 0x9a, - 0xdc, 0x87, 0xf2, 0xbe, 0x1f, 0x0f, 0xb8, 0xa2, 0xf7, 0xd0, 0x67, 0x4b, 0x91, 0x37, 0x50, 0xf9, - 0x10, 0x06, 0x27, 0x81, 0x92, 0xf4, 0xfe, 0xbc, 0xcb, 0x69, 0x40, 0xbb, 0x43, 0xc5, 0x12, 0xbc, - 0x3e, 0x2d, 0xc6, 0x9b, 0xc7, 0xf4, 0x67, 0xa8, 0x33, 0x21, 0xf5, 0x8e, 0x0d, 0x17, 0xa5, 0xcb, - 0x4e, 0xb3, 0xca, 0x12, 0x52, 0x1f, 0x6d, 0x6f, 0x14, 0x86, 0xf4, 0x01, 0xb2, 0x71, 0x6d, 0xbe, - 0xbd, 0x4e, 0x83, 0xbd, 0x91, 0x3c, 0xa2, 0x2e, 0xee, 0x64, 0x38, 0xe3, 0xfd, 0x77, 0xc2, 0xef, - 0xd3, 0x87, 0xd9, 0x7d, 0xcd, 0x21, 0xdb, 0xb0, 0xd8, 0xc5, 0xb6, 0xb4, 0x87, 0xcd, 0x88, 0x3e, - 0x42, 0x3f, 0x5e, 0xb4, 0x74, 0xe7, 0x6a, 0x25, 0x9d, 0x4b, 0xfb, 0x90, 0x6d, 0x5e, 0x2d, 0x03, - 0x66, 0x13, 0xa2, 0x49, 0x5d, 0xfd, 0x64, 0x5c, 0x57, 0x5d, 0xa8, 0xfe, 0x46, 0x27, 0xb9, 0x66, - 0x3f, 0x46, 0x76, 0x4a, 0xeb, 0x64, 0x5a, 0x8b, 0x22, 0xa1, 0x7c, 0x53, 0x77, 0x9f, 0x60, 0xb8, - 0xb3, 0x2c, 0xf2, 0x25, 0xdc, 0xdf, 0x8b, 0xc5, 0x29, 0x8f, 0xfc, 0xa8, 0xc7, 0x93, 0x6a, 0x8e, - 0x99, 0xb7, 0x8c, 0xba, 0x72, 0x76, 0xdd, 0x5f, 0x03, 0x99, 0xae, 0x5e, 0xfa, 0x74, 0xc7, 0xfc, - 0x22, 0xa9, 0xfa, 0xc7, 0xfc, 0x42, 0x17, 0xb0, 0x53, 0x3f, 0x1c, 0x25, 0xb5, 0xd7, 0x10, 0x5f, - 0x17, 0xbe, 0x72, 0xdc, 0x6f, 0x60, 0x69, 0xb2, 0xb0, 0xdc, 0x48, 0xfa, 0x0d, 0xd4, 0x33, 0xb7, - 0xe7, 0x26, 0xa2, 0xde, 0xbf, 0x1c, 0xa8, 0x67, 0xae, 0x38, 0x26, 0xe3, 0xc5, 0x90, 0x5b, 0x61, - 0x5c, 0x93, 0x75, 0x58, 0x58, 0x53, 0x2a, 0xd6, 0xad, 0x4a, 0xe7, 0xf3, 0x2f, 0xaf, 0x2c, 0x14, - 0x2d, 0x84, 0x9b, 0xab, 0x6c, 0x44, 0x75, 0xf0, 0x37, 0xb8, 0x54, 0x41, 0x84, 0xa1, 0xc6, 0xce, - 0x52, 0x63, 0x59, 0x96, 0xfb, 0x15, 0xc0, 0x58, 0xec, 0x46, 0x3e, 0xfc, 0xc3, 0x81, 0x3b, 0x53, - 0xd5, 0x70, 0xa6, 0x27, 0x5b, 0x93, 0x9e, 0xac, 0x5e, 0xb3, 0xb2, 0x4e, 0xfb, 0xf3, 0x7f, 0x9c, - 0x76, 0x07, 0xca, 0xa6, 0x05, 0xcd, 0x3c, 0xa1, 0x0b, 0xd5, 0x8d, 0x40, 0xfa, 0x07, 0x21, 0xef, - 0xa3, 0x68, 0x95, 0xa5, 0x34, 0xf6, 0x3f, 0x3c, 0xbd, 0x89, 0x9e, 0x21, 0x3c, 0x53, 0x6b, 0xc8, - 0x12, 0x14, 0xd2, 0xd9, 0xa9, 0xb0, 0xbd, 0xa1, 0xc1, 0xba, 0xf1, 0x1b, 0x57, 0x6b, 0xcc, 0x10, - 0x5e, 0x07, 0xca, 0xa6, 0x7a, 0x4d, 0xe1, 0x5d, 0xa8, 0x76, 0x82, 0x90, 0xe3, 0xfc, 0x60, 
0xce, - 0x9c, 0xd2, 0xda, 0xbd, 0xcd, 0xe8, 0xd4, 0x9a, 0xd5, 0x4b, 0xef, 0x87, 0xcc, 0x98, 0xa0, 0xfd, - 0xc0, 0x89, 0xc2, 0xfa, 0x81, 0x73, 0xc4, 0x7d, 0x28, 0x77, 0x44, 0x7c, 0xe2, 0x2b, 0xab, 0xcc, - 0x52, 0xba, 0x35, 0x6d, 0x0f, 0x22, 0x11, 0xf3, 0xae, 0xf2, 0xd5, 0xc8, 0xb8, 0x52, 0x65, 0x13, - 0x3c, 0xcf, 0x83, 0xa5, 0xed, 0x48, 0x0e, 0x79, 0x4f, 0xe5, 0x8f, 0xa4, 0xbb, 0x70, 0x2b, 0xc5, - 0xd8, 0x61, 0x34, 0x33, 0x53, 0x39, 0x37, 0x9f, 0xa9, 0xfe, 0xee, 0x40, 0x2d, 0xad, 0x9a, 0xa4, - 0x0d, 0x65, 0xfc, 0x62, 0xc9, 0x64, 0xfb, 0xf2, 0x8a, 0x32, 0xdb, 0xfa, 0x88, 0x68, 0xdb, 0xbd, - 0x8c, 0xa8, 0xfb, 0x3d, 0xd4, 0x33, 0xec, 0x19, 0x49, 0xb2, 0x9a, 0x4d, 0x92, 0xdc, 0xb6, 0x63, - 0x8c, 0x64, 0x53, 0x68, 0x03, 0xca, 0x86, 0x39, 0x33, 0xf4, 0x04, 0x4a, 0x5b, 0x7e, 0x6c, 0xd2, - 0xa7, 0xc8, 0x70, 0xad, 0x79, 0x5d, 0x71, 0xa8, 0x30, 0xdc, 0x45, 0x86, 0x6b, 0xef, 0x9f, 0x0e, - 0x34, 0xec, 0x98, 0x6a, 0x23, 0xc8, 0xe1, 0xb6, 0xb9, 0xc5, 0x3c, 0x4e, 0x78, 0xd6, 0xff, 0x37, - 0x73, 0x42, 0x99, 0x40, 0x5b, 0x97, 0x65, 0x4d, 0x34, 0xa6, 0x54, 0xba, 0x6d, 0xb8, 0x37, 0x13, - 0x7a, 0xa3, 0x6b, 0xf4, 0x02, 0xee, 0x8c, 0x07, 0xf0, 0xfc, 0x3c, 0xb9, 0x0b, 0x24, 0x0b, 0xb3, - 0x03, 0xfa, 0x13, 0xa8, 0xeb, 0x07, 0x4d, 0xbe, 0x98, 0x07, 0x8b, 0x06, 0x60, 0x23, 0x43, 0xa0, - 0x74, 0xcc, 0x2f, 0x4c, 0x36, 0xd4, 0x18, 0xae, 0xbd, 0xbf, 0x39, 0xfa, 0x5d, 0x32, 0x1c, 0xa9, - 0xf7, 0x5c, 0x4a, 0x7f, 0xa0, 0x13, 0xb0, 0xb4, 0x1d, 0x05, 0xca, 0x66, 0xdf, 0xa7, 0x79, 0xef, - 0x93, 0xe1, 0x48, 0x69, 0x98, 0x95, 0xda, 0xfa, 0x09, 0x43, 0x29, 0xf2, 0x1a, 0x4a, 0x1b, 0xbe, - 0xf2, 0x6d, 0x2e, 0xe4, 0x4c, 0x63, 0x1a, 0x91, 0x11, 0xd4, 0xe4, 0x7a, 0x45, 0x3f, 0xc2, 0x86, - 0x23, 0xe5, 0x3d, 0x87, 0xdb, 0x97, 0xb5, 0xcf, 0x70, 0xed, 0x0b, 0xa8, 0x67, 0xb4, 0xe0, 0xdd, - 0xde, 0xed, 0x20, 0xa0, 0xca, 0xf4, 0x52, 0xfb, 0x9a, 0x1e, 0x64, 0xd1, 0xd8, 0xf0, 0x6e, 0x41, - 0x03, 0x55, 0xa7, 0x11, 0xfc, 0x53, 0x01, 0x2a, 0x89, 0x8a, 0xd7, 0x13, 0x7e, 0x3f, 0xcd, 0xf3, - 0x7b, 0xda, 0xe5, 0x57, 0x50, 0xd2, 0x35, 0xc6, 0xba, 0x9c, 0x33, 0xca, 0x74, 0xfa, 0x19, 0x31, - 0x0d, 0x27, 0xdf, 0x42, 0x99, 0x71, 0xa9, 0xc7, 0x2e, 0xf3, 0x40, 0x79, 0x36, 0x5b, 0xd0, 0x60, - 0xc6, 0xc2, 0x56, 0x48, 0x8b, 0x77, 0x83, 0x41, 0xe4, 0x87, 0xb4, 0x34, 0x4f, 0xdc, 0x60, 0x32, - 0xe2, 0x86, 0x31, 0x0e, 0xf7, 0x5f, 0x1c, 0xa8, 0xcf, 0x0d, 0xf5, 0xfc, 0x27, 0xe4, 0xd4, 0xb3, - 0xb6, 0xf8, 0x3f, 0x3e, 0x6b, 0xff, 0x5c, 0x98, 0x54, 0x84, 0x13, 0x98, 0xbe, 0x4f, 0x43, 0x11, - 0x44, 0xca, 0xa6, 0x6c, 0x86, 0xa3, 0x0f, 0xda, 0x3e, 0xe9, 0xdb, 0xc6, 0xa0, 0x97, 0xfa, 0x9a, - 0xed, 0x08, 0xcd, 0xab, 0x63, 0x1a, 0x18, 0x62, 0x5c, 0xf6, 0x8b, 0xb6, 0xec, 0xeb, 0xd4, 0xf8, - 0x20, 0x79, 0x8c, 0x81, 0xab, 0x31, 0x5c, 0xeb, 0x4a, 0xbf, 0x23, 0x90, 0xbb, 0x80, 0xc2, 0x96, - 0x42, 0x2b, 0x67, 0x7d, 0x5a, 0x36, 0xe1, 0x68, 0x9f, 0x25, 0x56, 0xce, 0xfa, 0xb4, 0x92, 0x5a, - 0x39, 0x43, 0x2b, 0xfb, 0xea, 0x82, 0x56, 0x4d, 0x02, 0xee, 0xab, 0x0b, 0xdd, 0x8a, 0x98, 0x08, - 0xc3, 0x03, 0xbf, 0x77, 0x4c, 0x6b, 0xa6, 0x07, 0x26, 0xb4, 0x9e, 0x55, 0x75, 0xcc, 0x03, 0x3f, - 0xc4, 0x57, 0x4d, 0x95, 0x25, 0xa4, 0xb7, 0x06, 0xb5, 0x34, 0x55, 0x74, 0x77, 0xeb, 0xf4, 0xf1, - 0x53, 0x34, 0x58, 0xa1, 0xd3, 0x4f, 0xb2, 0xbc, 0x30, 0x9d, 0xe5, 0xc5, 0x4c, 0x96, 0xbf, 0x86, - 0xc6, 0x44, 0xd2, 0x68, 0x10, 0x13, 0x67, 0xd2, 0x2a, 0xc2, 0xb5, 0xe6, 0xb5, 0x45, 0x68, 0xde, - 0xed, 0x0d, 0x86, 0x6b, 0xef, 0x19, 0x34, 0x26, 0xd2, 0x65, 0x56, 0x5d, 0xf6, 0x9e, 0x42, 0xc3, - 0x34, 0xb8, 0xfc, 0xb2, 0xf3, 0x1f, 0x07, 0x96, 0x12, 0x8c, 0xad, 0x3c, 0xbf, 0x82, 0xea, 0x29, - 0x8f, 0x15, 0x3f, 
0x4f, 0x7b, 0x11, 0x9d, 0x1e, 0x95, 0x3f, 0x22, 0x82, 0xa5, 0x48, 0xf2, 0x35, - 0x54, 0x25, 0xea, 0xe1, 0xc9, 0xac, 0xf3, 0x38, 0x4f, 0xca, 0xda, 0x4b, 0xf1, 0x64, 0x05, 0x4a, - 0xa1, 0x18, 0x48, 0xfc, 0xee, 0xf5, 0xd5, 0x87, 0x79, 0x72, 0xef, 0xc4, 0x80, 0x21, 0x90, 0xbc, - 0x85, 0xea, 0x99, 0x1f, 0x47, 0x41, 0x34, 0x48, 0xde, 0xfb, 0x4f, 0xf2, 0x84, 0xbe, 0x37, 0x38, - 0x96, 0x0a, 0x78, 0x0d, 0x7d, 0x89, 0x0e, 0x85, 0x8d, 0x89, 0xf7, 0x5b, 0x9d, 0xcb, 0x9a, 0xb4, - 0xee, 0x6f, 0x43, 0xc3, 0xdc, 0x87, 0x8f, 0x3c, 0x96, 0x7a, 0x72, 0x74, 0xe6, 0xdd, 0xd9, 0xf5, - 0x2c, 0x94, 0x4d, 0x4a, 0x7a, 0x3f, 0xda, 0x76, 0x97, 0x30, 0x74, 0x2e, 0x0d, 0xfd, 0xde, 0xb1, - 0x3f, 0x48, 0xbe, 0x53, 0x42, 0xea, 0x9d, 0x53, 0x6b, 0xcf, 0x5c, 0xdb, 0x84, 0xd4, 0xb9, 0x19, - 0xf3, 0xd3, 0x40, 0x8e, 0x87, 0xd8, 0x94, 0x5e, 0xfd, 0x6b, 0x05, 0xa0, 0x9d, 0x9e, 0x87, 0xec, - 0xc1, 0x02, 0xda, 0x23, 0xde, 0xdc, 0xe6, 0x89, 0x7e, 0xbb, 0xcf, 0xae, 0xd1, 0x60, 0xc9, 0x47, - 0x9d, 0xfc, 0x38, 0xf4, 0x90, 0xe7, 0x79, 0x65, 0x22, 0x3b, 0x37, 0xb9, 0x2f, 0xae, 0x40, 0x59, - 0xbd, 0x1f, 0xa0, 0x6c, 0xb2, 0x80, 0xe4, 0xd5, 0xc2, 0x6c, 0xde, 0xba, 0xcf, 0xe7, 0x83, 0x8c, - 0xd2, 0xcf, 0x1c, 0xc2, 0x6c, 0xa5, 0x24, 0xde, 0x9c, 0x56, 0x68, 0x6f, 0x4c, 0x5e, 0x00, 0x26, - 0xba, 0x4e, 0xd3, 0x21, 0xdf, 0x41, 0xd9, 0xd4, 0x3a, 0xf2, 0xc9, 0x6c, 0x81, 0x44, 0xdf, 0xfc, - 0xed, 0xa6, 0xf3, 0x99, 0x43, 0xde, 0x43, 0x49, 0x37, 0x79, 0x92, 0xd3, 0xb1, 0x32, 0x13, 0x82, - 0xeb, 0xcd, 0x83, 0xd8, 0x28, 0xfe, 0x08, 0x30, 0x1e, 0x35, 0x48, 0xce, 0xbf, 0x36, 0x53, 0x33, - 0x8b, 0xdb, 0xbc, 0x1a, 0x68, 0x0d, 0xbc, 0xd7, 0x7d, 0xf6, 0x50, 0x90, 0xdc, 0x0e, 0x9b, 0x5e, - 0x23, 0xd7, 0x9b, 0x07, 0xb1, 0xea, 0x8e, 0xa0, 0x31, 0xf1, 0xaf, 0x2e, 0xf9, 0x45, 0xbe, 0x93, - 0x97, 0xff, 0x24, 0x76, 0x5f, 0x5e, 0x0b, 0x6b, 0x2d, 0xa9, 0xec, 0xac, 0x66, 0xb7, 0x49, 0xeb, - 0x2a, 0xbf, 0x27, 0xff, 0xa1, 0x75, 0x57, 0xae, 0x8d, 0x37, 0x56, 0xd7, 0x4b, 0xbf, 0x2b, 0x0c, - 0x0f, 0x0e, 0xca, 0xf8, 0x67, 0xf7, 0x17, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x32, 0x20, - 0xaa, 0x8a, 0x17, 0x00, 0x00, + 0xf8, 0x30, 0x08, 0xf9, 0x8e, 0x7f, 0xc2, 0xad, 0x4b, 0x97, 0xb8, 0xe4, 0x6b, 0xa8, 0xb6, 0xfd, + 0x30, 0xec, 0x8c, 0xa2, 0x1e, 0x2d, 0xe2, 0xc9, 0x1e, 0xcf, 0x3e, 0x59, 0x82, 0x62, 0x29, 0x9e, + 0xfc, 0x1e, 0x1a, 0x5a, 0x47, 0xdf, 0xda, 0x95, 0xb4, 0x84, 0x59, 0xf1, 0xea, 0x6a, 0xd7, 0x5a, + 0x13, 0x72, 0x9b, 0x91, 0x8a, 0x2f, 0xd8, 0xa4, 0x2e, 0x72, 0x17, 0x16, 0xd6, 0xc2, 0x50, 0x9c, + 0xd1, 0x85, 0xe5, 0x62, 0xb3, 0xc6, 0x0c, 0x41, 0xbe, 0x84, 0xca, 0x9a, 0x52, 0x5c, 0x2a, 0x49, + 0xcb, 0x68, 0xec, 0xd1, 0x6c, 0x63, 0x06, 0xc4, 0x12, 0x30, 0xd9, 0x85, 0x1a, 0xda, 0x5f, 0x8b, + 0x07, 0x92, 0x56, 0x50, 0xf2, 0xf3, 0x6b, 0x1c, 0x33, 0x95, 0x31, 0x47, 0x1c, 0xeb, 0x20, 0x9b, + 0x50, 0x6b, 0xfb, 0xbd, 0x23, 0xde, 0x89, 0xc5, 0x09, 0xad, 0xa2, 0xc2, 0x9f, 0xe7, 0x05, 0xae, + 0x77, 0xc4, 0xad, 0x42, 0xab, 0x26, 0x95, 0x24, 0x6b, 0x50, 0x41, 0x62, 0x5f, 0xd0, 0xda, 0xcd, + 0x94, 0x24, 0x72, 0xc4, 0x83, 0xc5, 0xf6, 0x20, 0x16, 0xa3, 0xe1, 0x9e, 0x1f, 0xf3, 0x48, 0x51, + 0xc0, 0xef, 0x3c, 0xc1, 0x23, 0x6f, 0xa1, 0xb2, 0x79, 0x3e, 0x14, 0xb1, 0x92, 0xb4, 0x3e, 0xef, + 0xe6, 0x1a, 0x90, 0x35, 0x60, 0x25, 0xc8, 0x63, 0x80, 0xcd, 0x73, 0x15, 0xfb, 0x5b, 0x42, 0x87, + 0x7d, 0x11, 0x3f, 0x47, 0x86, 0x43, 0x3a, 0x50, 0x7e, 0xe7, 0x1f, 0xf0, 0x50, 0xd2, 0x06, 0xea, + 0x6e, 0x5d, 0x23, 0xb0, 0x46, 0xc0, 0x18, 0xb2, 0xd2, 0x3a, 0xa9, 0x77, 0xb8, 0x3a, 0x13, 0xf1, + 0xf1, 0x7b, 0xd1, 0xe7, 0x74, 0xc9, 0x24, 0x75, 0x86, 0x45, 0x9e, 0x43, 0x63, 0x47, 0x98, 0xe0, + 0x05, 0xa1, 
0xe2, 0x31, 0xbd, 0x85, 0x87, 0x99, 0x64, 0xe2, 0x45, 0x0e, 0x7d, 0x75, 0x28, 0xe2, + 0x13, 0x49, 0x6f, 0x23, 0x62, 0xcc, 0xd0, 0x19, 0xd4, 0xe5, 0xbd, 0x98, 0x2b, 0x49, 0xef, 0xcc, + 0xcb, 0x20, 0x03, 0x62, 0x09, 0x98, 0x50, 0xa8, 0x74, 0x8f, 0x4e, 0xba, 0xc1, 0x9f, 0x38, 0x25, + 0xcb, 0x4e, 0xb3, 0xc8, 0x12, 0x92, 0xbc, 0x84, 0x62, 0xb7, 0xbb, 0x45, 0x7f, 0x8a, 0xda, 0x1e, + 0xe4, 0x68, 0xeb, 0x6e, 0x31, 0x8d, 0x22, 0x04, 0x4a, 0xfb, 0xfe, 0x40, 0xd2, 0xbb, 0x78, 0x2e, + 0x5c, 0x93, 0xfb, 0x50, 0xde, 0xf7, 0xe3, 0x01, 0x57, 0xf4, 0x1e, 0xfa, 0x6c, 0x29, 0xf2, 0x06, + 0x2a, 0x1f, 0xc2, 0xe0, 0x24, 0x50, 0x92, 0xde, 0xc7, 0xab, 0xf9, 0x64, 0xb6, 0x72, 0x03, 0xda, + 0x1d, 0x2a, 0x96, 0xe0, 0xf5, 0x69, 0x31, 0xde, 0x3c, 0xa6, 0x3f, 0x43, 0x9d, 0x09, 0xa9, 0x77, + 0x6c, 0xb8, 0x28, 0x5d, 0x76, 0x9a, 0x55, 0x96, 0x90, 0xfa, 0x68, 0x7b, 0xa3, 0x30, 0xa4, 0x0f, + 0x90, 0x8d, 0x6b, 0xf3, 0xed, 0x75, 0x1a, 0xec, 0x8d, 0xe4, 0x11, 0x75, 0x71, 0x27, 0xc3, 0x19, + 0xef, 0xbf, 0x13, 0x7e, 0x9f, 0x3e, 0xcc, 0xee, 0x6b, 0x0e, 0xd9, 0x86, 0xc5, 0x2e, 0xf6, 0xa4, + 0x3d, 0xec, 0x44, 0xf4, 0x11, 0xfa, 0xf1, 0xa2, 0xa5, 0xdb, 0x56, 0x2b, 0x69, 0x5b, 0xda, 0x87, + 0x6c, 0xe7, 0x6a, 0x19, 0x30, 0x9b, 0x10, 0x4d, 0x8a, 0xea, 0x27, 0xe3, 0xa2, 0xea, 0x42, 0xf5, + 0xb7, 0x3a, 0xc9, 0x35, 0xfb, 0x31, 0xb2, 0x53, 0x5a, 0x27, 0xd3, 0x5a, 0x14, 0x09, 0xe5, 0x9b, + 0xa2, 0xfb, 0x04, 0xc3, 0x9d, 0x65, 0x91, 0x2f, 0xe1, 0xfe, 0x5e, 0x2c, 0x4e, 0x79, 0xe4, 0x47, + 0x3d, 0x9e, 0x94, 0x72, 0xcc, 0xbc, 0x65, 0xd4, 0x95, 0xb3, 0xeb, 0xfe, 0x06, 0xc8, 0x74, 0xf5, + 0xd2, 0xa7, 0x3b, 0xe6, 0x17, 0x49, 0xc9, 0x3f, 0xe6, 0x17, 0xba, 0x80, 0x9d, 0xfa, 0xe1, 0x28, + 0x29, 0xbc, 0x86, 0xf8, 0xba, 0xf0, 0x95, 0xe3, 0x7e, 0x03, 0x4b, 0x93, 0x85, 0xe5, 0x46, 0xd2, + 0x6f, 0xa0, 0x9e, 0xb9, 0x3d, 0x37, 0x11, 0xf5, 0xfe, 0xed, 0x40, 0x3d, 0x73, 0xc5, 0x31, 0x19, + 0x2f, 0x86, 0xdc, 0x0a, 0xe3, 0x9a, 0xac, 0xc3, 0xc2, 0x9a, 0x52, 0xb1, 0xee, 0x53, 0x3a, 0x9f, + 0x7f, 0x79, 0x65, 0xa1, 0x68, 0x21, 0xdc, 0x5c, 0x65, 0x23, 0xaa, 0x83, 0xbf, 0xc1, 0xa5, 0x0a, + 0x22, 0x0c, 0x35, 0xf6, 0x95, 0x1a, 0xcb, 0xb2, 0xdc, 0xaf, 0x00, 0xc6, 0x62, 0x37, 0xf2, 0xe1, + 0x9f, 0x0e, 0xdc, 0x99, 0xaa, 0x86, 0x33, 0x3d, 0xd9, 0x9a, 0xf4, 0x64, 0xf5, 0x9a, 0x95, 0x75, + 0xda, 0x9f, 0x1f, 0x71, 0xda, 0x1d, 0x28, 0x9b, 0x16, 0x34, 0xf3, 0x84, 0x2e, 0x54, 0x37, 0x02, + 0xe9, 0x1f, 0x84, 0xbc, 0x8f, 0xa2, 0x55, 0x96, 0xd2, 0xd8, 0xff, 0xf0, 0xf4, 0x26, 0x7a, 0x86, + 0xf0, 0x4c, 0xad, 0x21, 0x4b, 0x50, 0x48, 0x07, 0xa7, 0xc2, 0xf6, 0x86, 0x06, 0xeb, 0xae, 0x6f, + 0x5c, 0xad, 0x31, 0x43, 0x78, 0x1d, 0x28, 0x9b, 0xea, 0x35, 0x85, 0x77, 0xa1, 0xda, 0x09, 0x42, + 0x8e, 0xc3, 0x83, 0x39, 0x73, 0x4a, 0x6b, 0xf7, 0x36, 0xa3, 0x53, 0x6b, 0x56, 0x2f, 0xbd, 0xef, + 0xc7, 0x33, 0x82, 0x76, 0x03, 0xa7, 0x09, 0xeb, 0x06, 0xce, 0x10, 0xf7, 0xa1, 0xdc, 0x11, 0xf1, + 0x89, 0xaf, 0xac, 0x2e, 0x4b, 0xe9, 0xce, 0xb4, 0x3d, 0x88, 0x44, 0xcc, 0xbb, 0xca, 0x57, 0x23, + 0xe3, 0x49, 0x95, 0x4d, 0xf0, 0x3c, 0x0f, 0x96, 0xb6, 0x23, 0x39, 0xe4, 0x3d, 0x95, 0x3f, 0x8e, + 0xee, 0xc2, 0xad, 0x14, 0x63, 0x07, 0xd1, 0xcc, 0x3c, 0xe5, 0xdc, 0x7c, 0x9e, 0xfa, 0x87, 0x03, + 0xb5, 0xb4, 0x68, 0x92, 0x36, 0x94, 0xf1, 0x83, 0x25, 0x53, 0xed, 0xcb, 0x2b, 0xaa, 0x6c, 0xeb, + 0x23, 0xa2, 0x6d, 0xf3, 0x32, 0xa2, 0xee, 0x77, 0x50, 0xcf, 0xb0, 0x67, 0xe4, 0xc8, 0x6a, 0x36, + 0x47, 0x72, 0xbb, 0x8e, 0x31, 0x92, 0xcd, 0xa0, 0x0d, 0x28, 0x1b, 0xe6, 0xcc, 0xd0, 0x13, 0x28, + 0x6d, 0xf9, 0xb1, 0xc9, 0x9e, 0x22, 0xc3, 0xb5, 0xe6, 0x75, 0xc5, 0xa1, 0xc2, 0x70, 0x17, 0x19, + 0xae, 0xbd, 0x7f, 0x39, 0xd0, 0xb0, 
0x23, 0xaa, 0x8d, 0x20, 0x87, 0xdb, 0xe6, 0x12, 0xf3, 0x38, + 0xe1, 0x59, 0xff, 0xdf, 0xcc, 0x09, 0x65, 0x02, 0x6d, 0x5d, 0x96, 0x35, 0xd1, 0x98, 0x52, 0xe9, + 0xb6, 0xe1, 0xde, 0x4c, 0xe8, 0x8d, 0x6e, 0xd1, 0x0b, 0xb8, 0x33, 0x1e, 0xbe, 0xf3, 0xf3, 0xe4, + 0x2e, 0x90, 0x2c, 0xcc, 0x0e, 0xe7, 0x4f, 0xa0, 0xae, 0x1f, 0x33, 0xf9, 0x62, 0x1e, 0x2c, 0x1a, + 0x80, 0x8d, 0x0c, 0x81, 0xd2, 0x31, 0xbf, 0x30, 0xd9, 0x50, 0x63, 0xb8, 0xf6, 0xfe, 0xee, 0xe8, + 0x37, 0xc9, 0x70, 0xa4, 0xde, 0x73, 0x29, 0xfd, 0x81, 0x4e, 0xc0, 0xd2, 0x76, 0x14, 0x28, 0x9b, + 0x7d, 0x9f, 0xe6, 0xbd, 0x4d, 0x86, 0x23, 0xa5, 0x61, 0x56, 0x6a, 0xeb, 0x27, 0x0c, 0xa5, 0xc8, + 0x6b, 0x28, 0x6d, 0xf8, 0xca, 0xb7, 0xb9, 0x90, 0x33, 0x8c, 0x69, 0x44, 0x46, 0x50, 0x93, 0xeb, + 0x15, 0xfd, 0x00, 0x1b, 0x8e, 0x94, 0xf7, 0x1c, 0x6e, 0x5f, 0xd6, 0x3e, 0xc3, 0xb5, 0x2f, 0xa0, + 0x9e, 0xd1, 0x82, 0x57, 0x7b, 0xb7, 0x83, 0x80, 0x2a, 0xd3, 0x4b, 0xed, 0x6b, 0x7a, 0x90, 0x45, + 0x63, 0xc3, 0xbb, 0x05, 0x0d, 0x54, 0x9d, 0x46, 0xf0, 0xcf, 0x05, 0xa8, 0x24, 0x2a, 0x5e, 0x4f, + 0xf8, 0xfd, 0x34, 0xcf, 0xef, 0x69, 0x97, 0x5f, 0x41, 0x49, 0x97, 0x18, 0xeb, 0x72, 0xce, 0x24, + 0xd3, 0xe9, 0x67, 0xc4, 0x34, 0x9c, 0xfc, 0x1a, 0xca, 0x8c, 0x4b, 0x3d, 0x75, 0x99, 0xd7, 0xc9, + 0xb3, 0xd9, 0x82, 0x06, 0x33, 0x16, 0xb6, 0x42, 0x5a, 0xbc, 0x1b, 0x0c, 0x22, 0x3f, 0xa4, 0xa5, + 0x79, 0xe2, 0x06, 0x93, 0x11, 0x37, 0x8c, 0x71, 0xb8, 0xff, 0xea, 0x40, 0x7d, 0x6e, 0xa8, 0xe7, + 0x3f, 0x1f, 0xa7, 0x9e, 0xb4, 0xc5, 0xff, 0xf3, 0x49, 0xfb, 0x97, 0xc2, 0xa4, 0x22, 0x1c, 0xc0, + 0xf4, 0x7d, 0x1a, 0x8a, 0x20, 0x52, 0x36, 0x65, 0x33, 0x1c, 0x7d, 0xd0, 0xf6, 0x49, 0xdf, 0xf6, + 0x05, 0xbd, 0xd4, 0xd7, 0x6c, 0x47, 0x68, 0x5e, 0x1d, 0xd3, 0xc0, 0x10, 0xe3, 0xaa, 0x5f, 0xb4, + 0x55, 0x5f, 0xa7, 0xc6, 0x07, 0xc9, 0x63, 0x0c, 0x5c, 0x8d, 0xe1, 0x5a, 0x57, 0xfa, 0x1d, 0x81, + 0xdc, 0x05, 0x14, 0xb6, 0x14, 0x5a, 0x39, 0xeb, 0xd3, 0xb2, 0x09, 0x47, 0xfb, 0x2c, 0xb1, 0x72, + 0xd6, 0xa7, 0x95, 0xd4, 0xca, 0x19, 0x5a, 0xd9, 0x57, 0x17, 0xb4, 0x6a, 0x12, 0x70, 0x5f, 0x5d, + 0xe8, 0x4e, 0xc4, 0x44, 0x18, 0x1e, 0xf8, 0xbd, 0x63, 0x5a, 0x33, 0x2d, 0x30, 0xa1, 0xf5, 0xa8, + 0xaa, 0x63, 0x1e, 0xf8, 0x21, 0x3e, 0x6a, 0xaa, 0x2c, 0x21, 0xbd, 0x35, 0xa8, 0xa5, 0xa9, 0xa2, + 0x9b, 0x5b, 0xa7, 0x8f, 0x9f, 0xa2, 0xc1, 0x0a, 0x9d, 0x7e, 0x92, 0xe5, 0x85, 0xe9, 0x2c, 0x2f, + 0x66, 0xb2, 0xfc, 0x35, 0x34, 0x26, 0x92, 0x46, 0x83, 0x98, 0x38, 0x93, 0x56, 0x11, 0xae, 0x35, + 0xaf, 0x2d, 0x42, 0xf3, 0x66, 0x6f, 0x30, 0x5c, 0x7b, 0xcf, 0xa0, 0x31, 0x91, 0x2e, 0xb3, 0xea, + 0xb2, 0xf7, 0x14, 0x1a, 0xa6, 0xc1, 0xe5, 0x97, 0x9d, 0xff, 0x3a, 0xb0, 0x94, 0x60, 0x6c, 0xe5, + 0xf9, 0x15, 0x54, 0x4f, 0x79, 0xac, 0xf8, 0x79, 0xda, 0x8b, 0xe8, 0xf4, 0xa4, 0xfc, 0x11, 0x11, + 0x2c, 0x45, 0xea, 0x27, 0xbc, 0x44, 0x3d, 0x3c, 0x19, 0x75, 0x1e, 0xe7, 0x49, 0x59, 0x7b, 0x29, + 0x9e, 0xac, 0x40, 0x29, 0x14, 0x03, 0x89, 0xdf, 0xbd, 0xbe, 0xfa, 0x30, 0x4f, 0xee, 0x9d, 0x18, + 0x30, 0x04, 0x92, 0xb7, 0x50, 0x3d, 0xf3, 0xe3, 0x28, 0x88, 0x06, 0xc9, 0x73, 0xff, 0x49, 0x9e, + 0xd0, 0x77, 0x06, 0xc7, 0x52, 0x01, 0xaf, 0xa1, 0x2f, 0xd1, 0xa1, 0xb0, 0x31, 0xf1, 0x7e, 0xa7, + 0x73, 0x59, 0x93, 0xd6, 0xfd, 0x6d, 0x68, 0x98, 0xfb, 0xf0, 0x91, 0xc7, 0x52, 0x0f, 0x8e, 0xce, + 0xbc, 0x3b, 0xbb, 0x9e, 0x85, 0xb2, 0x49, 0x49, 0xef, 0x07, 0xdb, 0xee, 0x12, 0x86, 0xce, 0xa5, + 0xa1, 0xdf, 0x3b, 0xf6, 0x07, 0xc9, 0x77, 0x4a, 0x48, 0xbd, 0x73, 0x6a, 0xed, 0x99, 0x6b, 0x9b, + 0x90, 0x3a, 0x37, 0x63, 0x7e, 0x1a, 0xc8, 0xf1, 0x0c, 0x9b, 0xd2, 0xab, 0x7f, 0xab, 0x00, 0xb4, + 0xd3, 0xf3, 0x90, 0x3d, 0x58, 0x40, 0x7b, 0xc4, 0x9b, 0xdb, 
0x3c, 0xd1, 0x6f, 0xf7, 0xd9, 0x35, + 0x1a, 0x2c, 0xf9, 0xa8, 0x93, 0x1f, 0x87, 0x1e, 0xf2, 0x3c, 0xaf, 0x4c, 0x64, 0xe7, 0x26, 0xf7, + 0xc5, 0x15, 0x28, 0xab, 0xf7, 0x03, 0x94, 0x4d, 0x16, 0x90, 0xbc, 0x5a, 0x98, 0xcd, 0x5b, 0xf7, + 0xf9, 0x7c, 0x90, 0x51, 0xfa, 0x99, 0x43, 0x98, 0xad, 0x94, 0xc4, 0x9b, 0xd3, 0x0a, 0xed, 0x8d, + 0xc9, 0x0b, 0xc0, 0x44, 0xd7, 0x69, 0x3a, 0xe4, 0x5b, 0x28, 0x9b, 0x5a, 0x47, 0x3e, 0x99, 0x2d, + 0x90, 0xe8, 0x9b, 0xbf, 0xdd, 0x74, 0x3e, 0x73, 0xc8, 0x7b, 0x28, 0xe9, 0x26, 0x4f, 0x72, 0x3a, + 0x56, 0x66, 0x42, 0x70, 0xbd, 0x79, 0x10, 0x1b, 0xc5, 0x1f, 0x00, 0xc6, 0xa3, 0x06, 0xc9, 0xf9, + 0xd3, 0x66, 0x6a, 0x66, 0x71, 0x9b, 0x57, 0x03, 0xad, 0x81, 0xf7, 0xba, 0xcf, 0x1e, 0x0a, 0x92, + 0xdb, 0x61, 0xd3, 0x6b, 0xe4, 0x7a, 0xf3, 0x20, 0x56, 0xdd, 0x11, 0x34, 0x26, 0xfe, 0xd1, 0x25, + 0xbf, 0xc8, 0x77, 0xf2, 0xf2, 0x1f, 0xc4, 0xee, 0xcb, 0x6b, 0x61, 0xad, 0x25, 0x95, 0x9d, 0xd5, + 0xec, 0x36, 0x69, 0x5d, 0xe5, 0xf7, 0xe4, 0xbf, 0xb3, 0xee, 0xca, 0xb5, 0xf1, 0xc6, 0xea, 0x7a, + 0xe9, 0xfb, 0xc2, 0xf0, 0xe0, 0xa0, 0x8c, 0x7f, 0x74, 0x7f, 0xf1, 0xbf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0xc9, 0xe6, 0x4b, 0xb6, 0x86, 0x17, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/controller/pb/controller.proto b/controller/pb/controller.proto index ca68bdd67aed..b9d81dd4d197 100644 --- a/controller/pb/controller.proto +++ b/controller/pb/controller.proto @@ -49,7 +49,7 @@ message BuildRequest { message BuildOptions { string ContextPath = 1; string DockerfileName = 2; - PrintFunc PrintFunc = 3; + CallFunc CallFunc = 3; map NamedContexts = 4; repeated string Allow = 5; @@ -111,7 +111,7 @@ message Secret { string Env = 3; } -message PrintFunc { +message CallFunc { string Name = 1; string Format = 2; bool IgnoreStatus = 3; diff --git a/controller/remote/io.go b/controller/remote/io.go index 6a8c59c31afe..6b8f7646687a 100644 --- a/controller/remote/io.go +++ b/controller/remote/io.go @@ -207,6 +207,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage if cfg.signal != nil { eg.Go(func() error { + names := signalNames() for { var sig syscall.Signal select { @@ -216,7 +217,7 @@ func attachIO(ctx context.Context, stream msgStream, initMessage *pb.InitMessage case <-ctx.Done(): return nil } - name := sigToName[sig] + name := names[sig] if name == "" { continue } @@ -380,12 +381,12 @@ func copyToStream(fd uint32, snd msgStream, r io.Reader) error { }) } -var sigToName = map[syscall.Signal]string{} - -func init() { +func signalNames() map[syscall.Signal]string { + m := make(map[syscall.Signal]string, len(signal.SignalMap)) for name, value := range signal.SignalMap { - sigToName[value] = name + m[value] = name } + return m } type debugStream struct { diff --git a/docker-bake.hcl b/docker-bake.hcl index b13663d1875e..fe6a78a51633 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -217,3 +217,18 @@ target "integration-test" { inherits = ["integration-test-base"] target = "integration-test" } + +variable "GOVULNCHECK_FORMAT" { + default = null +} + +target "govulncheck" { + inherits = ["_common"] + dockerfile = "./hack/dockerfiles/govulncheck.Dockerfile" + target = "output" + args = { + FORMAT = GOVULNCHECK_FORMAT + } + no-cache-filter = ["run"] + output = ["${DESTDIR}"] +} diff --git a/docs/bake-reference.md b/docs/bake-reference.md index 0c4126be3044..542c50a589cd 100644 --- a/docs/bake-reference.md +++ b/docs/bake-reference.md @@ -443,8 +443,7 @@ COPY --from=src . . 
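The `govulncheck` bake target added above wires the `GOVULNCHECK_FORMAT` variable through to the Dockerfile as the `FORMAT` build argument and writes its report under `${DESTDIR}`. As a minimal sketch (the `govulncheck-sarif` target name is hypothetical and not part of this change), a local bake file could pin the format without exporting the environment variable:

```hcl
# Hypothetical helper target: reuse the govulncheck target from docker-bake.hcl
# but hard-code SARIF output instead of relying on GOVULNCHECK_FORMAT.
target "govulncheck-sarif" {
  inherits = ["govulncheck"]
  args = {
    FORMAT = "sarif"
  }
}
```

Running `docker buildx bake govulncheck-sarif` should then behave like the plain `govulncheck` target invoked with `GOVULNCHECK_FORMAT=sarif` set in the environment.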
#### Use another target as base -> **Note** -> +> [!NOTE] > You should prefer to use regular multi-stage builds over this option. You can > Use this feature when you have multiple Dockerfiles that can't be easily > merged into one. @@ -506,6 +505,25 @@ $ docker buildx bake --print -f - <<< 'target "default" {}' } ``` +### `target.entitlements` + +Entitlements are permissions that the build process requires to run. + +Currently supported entitlements are: + +- `network.host`: Allows the build to use commands that access the host network. In Dockerfile, use [`RUN --network=host`](https://docs.docker.com/reference/dockerfile/#run---networkhost) to run a command with host network enabled. + +- `security.insecure`: Allows the build to run commands in privileged containers that are not limited by the default security sandbox. Such container may potentially access and modify system resources. In Dockerfile, use [`RUN --security=insecure`](https://docs.docker.com/reference/dockerfile/#run---security) to run a command in a privileged container. + +```hcl +target "integration-tests" { + # this target requires privileged containers to run nested containers + entitlements = ["security.insecure"] +} +``` + +Entitlements are enabled with a two-step process. First, a target must declare the entitlements it requires. Secondly, when invoking the `bake` command, the user must grant the entitlements by passing the `--allow` flag or confirming the entitlements when prompted in an interactive terminal. This is to ensure that the user is aware of the possibly insecure permissions they are granting to the build process. + ### `target.inherits` A target can inherit attributes from other targets. @@ -750,6 +768,27 @@ target "app" { } ``` +### `target.network` + +Specify the network mode for the whole build request. This will override the default network mode +for all the `RUN` instructions in the Dockerfile. Accepted values are `default`, `host`, and `none`. + +Usually, a better approach to set the network mode for your build steps is to instead use `RUN --network=` +in your Dockerfile. This way, you can set the network mode for individual build steps and everyone building +the Dockerfile gets consistent behavior without needing to pass additional flags to the build command. + +If you set network mode to `host` in your Bake file, you must also grant `network.host` entitlement when +invoking the `bake` command. This is because `host` network mode requires elevated privileges and can be a security risk. +You can pass `--allow=network.host` to the `docker buildx bake` command to grant the entitlement, or you can +confirm the entitlement when prompted if you are using an interactive terminal. + +```hcl +target "app" { + # make sure this build does not access internet + network = "none" +} +``` + ### `target.no-cache-filter` Don't use build cache for the specified stages. @@ -853,8 +892,7 @@ target "default" { } ``` -> **Note** -> +> [!NOTE] > In most cases, it is recommended to let the builder automatically determine > the appropriate configurations. Manual adjustments should only be considered > when specific performance tuning is required for complex build scenarios. @@ -919,14 +957,12 @@ target "app" { } ``` -> **Note** -> +> [!NOTE] > If you do not provide a `hard limit`, the `soft limit` is used > for both values. If no `ulimits` are set, they are inherited from > the default `ulimits` set on the daemon. 
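Tying the new `target.entitlements` and `target.network` sections together: a target that opts into host networking must also declare the matching entitlement, which the user then grants at invocation time. A minimal sketch (the `e2e` target name is hypothetical):

```hcl
# Hypothetical target whose RUN steps need host networking.
# Declaring the entitlement here is only step one; the user still has to grant
# it, for example with `docker buildx bake --allow=network.host e2e`,
# or by confirming the prompt in an interactive terminal.
target "e2e" {
  network      = "host"
  entitlements = ["network.host"]
}
```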
-> **Note** -> +> [!NOTE] > In most cases, it is recommended to let the builder automatically determine > the appropriate configurations. Manual adjustments should only be considered > when specific performance tuning is required for complex build scenarios. @@ -1114,8 +1150,7 @@ target "webapp-dev" { } ``` -> **Note** -> +> [!NOTE] > See [User defined HCL functions][hcl-funcs] page for more details. diff --git a/docs/debugging.md b/docs/debugging.md index d066d2a8705e..a964294caae9 100644 --- a/docs/debugging.md +++ b/docs/debugging.md @@ -4,8 +4,7 @@ To assist with creating and debugging complex builds, Buildx provides a debugger to help you step through the build process and easily inspect the state of the build environment at any point. -> **Note** -> +> [!NOTE] > The debug monitor is a new experimental feature in recent versions of Buildx. > There are rough edges, known bugs, and missing features. Please try it out > and let us know what you think! diff --git a/docs/reference/buildx.md b/docs/reference/buildx.md index 2dd0b74177a3..783a136ec4aa 100644 --- a/docs/reference/buildx.md +++ b/docs/reference/buildx.md @@ -32,6 +32,7 @@ Extended build capabilities with BuildKit | Name | Type | Default | Description | |:------------------------|:---------|:--------|:-----------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | diff --git a/docs/reference/buildx_bake.md b/docs/reference/buildx_bake.md index 2d21b5d3fb26..3e02b859b09e 100644 --- a/docs/reference/buildx_bake.md +++ b/docs/reference/buildx_bake.md @@ -15,9 +15,11 @@ Build from a file | Name | Type | Default | Description | |:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------| +| `--allow` | `stringArray` | | Allow build to access specified resources | | [`--builder`](#builder) | `string` | | Override the configured builder instance | | [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) | | [`--check`](#check) | `bool` | | Shorthand for `--call=check` | +| `-D`, `--debug` | `bool` | | Enable debug logging | | [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file | | `--load` | `bool` | | Shorthand for `--set=*.output=type=docker` | | [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to a file | @@ -41,8 +43,7 @@ as part of the build. Read [High-level build options with Bake](https://docs.docker.com/build/bake/) guide for introduction to writing bake files. -> **Note** -> +> [!NOTE] > `buildx bake` command may receive backwards incompatible features in the future > if needed. We are looking for feedback on improving the command and extending > the functionality further. @@ -163,17 +164,15 @@ $ cat metadata.json } ``` -> **Note** -> -> Build record [provenance](https://docs.docker.com/build/attestations/slsa-provenance/#provenance-attestation-example) +> [!NOTE] +> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example) > (`buildx.build.provenance`) includes minimal provenance by default. Set the > `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior: > * `min` sets minimal provenance (default). > * `max` sets full provenance. > * `disabled`, `false` or `0` does not set any provenance. 
-> **Note** -> +> [!NOTE] > Build warnings (`buildx.build.warnings`) are not included by default. Set the > `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to > include them. diff --git a/docs/reference/buildx_build.md b/docs/reference/buildx_build.md index ebe410c38079..1a3cdabb32f4 100644 --- a/docs/reference/buildx_build.md +++ b/docs/reference/buildx_build.md @@ -27,6 +27,7 @@ Start a build | [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) | | [`--cgroup-parent`](#cgroup-parent) | `string` | | Set the parent cgroup for the `RUN` instructions during build | | [`--check`](#check) | `bool` | | Shorthand for `--call=check` | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) | | [`-f`](#file), [`--file`](#file) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) | | `--iidfile` | `string` | | Write the image ID to a file | @@ -144,7 +145,7 @@ For more information about annotations, see --attest=type=provenance,... ``` -Create [image attestations](https://docs.docker.com/build/attestations/). +Create [image attestations](https://docs.docker.com/build/metadata/attestations/). BuildKit currently supports: - `sbom` - Software Bill of Materials. @@ -152,7 +153,7 @@ BuildKit currently supports: Use `--attest=type=sbom` to generate an SBOM for an image at build-time. Alternatively, you can use the [`--sbom` shorthand](#sbom). - For more information, see [here](https://docs.docker.com/build/attestations/sbom/). + For more information, see [here](https://docs.docker.com/build/metadata/attestations/sbom/). - `provenance` - SLSA Provenance @@ -162,7 +163,7 @@ BuildKit currently supports: By default, a minimal provenance attestation will be created for the build result, which will only be attached for images pushed to registries. - For more information, see [here](https://docs.docker.com/build/attestations/slsa-provenance/). + For more information, see [here](https://docs.docker.com/build/metadata/attestations/slsa-provenance/). ### Allow extra privileged entitlement (--allow) @@ -581,9 +582,8 @@ $ cat metadata.json } ``` -> **Note** -> -> Build record [provenance](https://docs.docker.com/build/attestations/slsa-provenance/#provenance-attestation-example) +> [!NOTE] +> Build record [provenance](https://docs.docker.com/build/metadata/attestations/slsa-provenance/#provenance-attestation-example) > (`buildx.build.provenance`) includes minimal provenance by default. Set the > `BUILDX_METADATA_PROVENANCE` environment variable to customize this behavior: > @@ -591,6 +591,11 @@ $ cat metadata.json > - `max` sets full provenance. > - `disabled`, `false` or `0` doesn't set any provenance. +> [!NOTE] +> Build warnings (`buildx.build.warnings`) are not included by default. Set the +> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to +> include them. + ### Set the networking mode for the RUN instructions during build (--network) Available options for the networking mode are: @@ -601,12 +606,6 @@ Available options for the networking mode are: Find more details in the [Dockerfile reference](https://docs.docker.com/reference/dockerfile/#run---network). -> **Note** -> -> Build warnings (`buildx.build.warnings`) are not included by default. Set the -> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to -> include them. 
- ### Ignore build cache for specific stages (--no-cache-filter) The `--no-cache-filter` lets you specify one or more stages of a multi-stage @@ -669,7 +668,7 @@ The arguments for the `--no-cache-filter` flag must be names of stages. ``` Sets the export action for the build result. The default output, when using the -`docker` [build driver](https://docs.docker.com/build/drivers/), is a container +`docker` [build driver](https://docs.docker.com/build/builders/drivers/), is a container image exported to the local image store. The `--output` flag makes this step configurable allows export of results directly to the client's filesystem, an OCI image tarball, a registry, and more. @@ -832,8 +831,7 @@ $ docker buildx build --platform=darwin . Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container output (default `auto`). -> **Note** -> +> [!NOTE] > You can also use the `BUILDKIT_PROGRESS` environment variable to set its value. The following example uses `plain` output during the build: @@ -851,8 +849,7 @@ $ docker buildx build --load --progress=plain . ... ``` -> **Note** -> +> [!NOTE] > Check also the [`BUILDKIT_COLORS`](https://docs.docker.com/build/building/variables/#buildkit_colors) > environment variable for modifying the colors of the terminal output. @@ -877,7 +874,7 @@ to a registry if you use the default image store. Alternatively, you can switch to using the containerd image store. For more information about provenance attestations, see -[here](https://docs.docker.com/build/attestations/slsa-provenance/). +[here](https://docs.docker.com/build/metadata/attestations/slsa-provenance/). ### Push the build result to a registry (--push) @@ -899,7 +896,7 @@ attestations. Provenance attestations only persist for images pushed directly to a registry if you use the default image store. Alternatively, you can switch to using the containerd image store. -For more information, see [here](https://docs.docker.com/build/attestations/sbom/). +For more information, see [here](https://docs.docker.com/build/metadata/attestations/sbom/). ### Secret to expose to the build (--secret) @@ -967,8 +964,7 @@ The format is ``. `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. -> **Note** -> +> [!NOTE] > In most cases, it is recommended to let the builder automatically determine > the appropriate configurations. Manual adjustments should only be considered > when specific performance tuning is required for complex build scenarios. @@ -1054,14 +1050,12 @@ instructions and are specified with a soft and hard limit as such: $ docker buildx build --ulimit nofile=1024:1024 . ``` -> **Note** -> +> [!NOTE] > If you don't provide a `hard limit`, the `soft limit` is used > for both values. If no `ulimits` are set, they're inherited from > the default `ulimits` set on the daemon. -> **Note** -> +> [!NOTE] > In most cases, it is recommended to let the builder automatically determine > the appropriate configurations. Manual adjustments should only be considered > when specific performance tuning is required for complex build scenarios. 
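The ulimit settings shown above, and the similarly size-formatted shared-memory option, can also be expressed in a bake file through the corresponding `target.ulimits` and `target.shm-size` attributes covered in docs/bake-reference.md. A minimal sketch with illustrative values (the `app` target name and the `128m` size are hypothetical):

```hcl
# Hypothetical bake equivalent of:
#   docker buildx build --shm-size=128m --ulimit nofile=1024:1024 .
target "app" {
  shm-size = "128m"
  ulimits  = ["nofile=1024:1024"]
}
```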
diff --git a/docs/reference/buildx_create.md b/docs/reference/buildx_create.md index 554e2db7915e..96d71b29afce 100644 --- a/docs/reference/buildx_create.md +++ b/docs/reference/buildx_create.md @@ -15,6 +15,7 @@ Create a new builder instance | `--bootstrap` | `bool` | | Boot builder after creation | | [`--buildkitd-config`](#buildkitd-config) | `string` | | BuildKit daemon config file | | [`--buildkitd-flags`](#buildkitd-flags) | `string` | | BuildKit daemon flags | +| `-D`, `--debug` | `bool` | | Enable debug logging | | [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) | | [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver | | [`--leave`](#leave) | `bool` | | Remove a node from builder instead of changing it | @@ -101,8 +102,7 @@ value is `auto` and can be one of `bridge`, `cni`, `host`: --buildkitd-flags '--oci-worker-net bridge' ``` -> **Note** -> +> [!NOTE] > Network mode "bridge" is supported since BuildKit v0.13 and will become the > default in next v0.14. @@ -120,7 +120,7 @@ backend. Buildx supports the following drivers: * `kubernetes` * `remote` -For more information about build drivers, see [here](https://docs.docker.com/build/drivers/). +For more information about build drivers, see [here](https://docs.docker.com/build/builders/drivers/). #### `docker` driver @@ -167,10 +167,10 @@ Passes additional driver-specific options. For information about available driver options, refer to the detailed documentation for the specific driver: -* [`docker` driver](https://docs.docker.com/build/drivers/docker/) -* [`docker-container` driver](https://docs.docker.com/build/drivers/docker-container/) -* [`kubernetes` driver](https://docs.docker.com/build/drivers/kubernetes/) -* [`remote` driver](https://docs.docker.com/build/drivers/remote/) +* [`docker` driver](https://docs.docker.com/build/builders/drivers/docker/) +* [`docker-container` driver](https://docs.docker.com/build/builders/drivers/docker-container/) +* [`kubernetes` driver](https://docs.docker.com/build/builders/drivers/kubernetes/) +* [`remote` driver](https://docs.docker.com/build/builders/drivers/remote/) ### Remove a node from a builder (--leave) diff --git a/docs/reference/buildx_debug.md b/docs/reference/buildx_debug.md index a07385c9f8ac..6f72b5ccec18 100644 --- a/docs/reference/buildx_debug.md +++ b/docs/reference/buildx_debug.md @@ -15,6 +15,7 @@ Start debugger (EXPERIMENTAL) | Name | Type | Default | Description | |:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------| | `--builder` | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--detach` | `bool` | `true` | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) | | `--invoke` | `string` | | Launch a monitor with executing specified command (EXPERIMENTAL) | | `--on` | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) | diff --git a/docs/reference/buildx_debug_build.md b/docs/reference/buildx_debug_build.md index a7dd373a1746..be0499c2d23a 100644 --- a/docs/reference/buildx_debug_build.md +++ b/docs/reference/buildx_debug_build.md @@ -23,6 +23,7 @@ Start a build | `--call` | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) | | `--cgroup-parent` | `string` | | Set the parent cgroup for the `RUN` instructions during build | | `--check` | 
`bool` | | Shorthand for `--call=check` | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--detach` | `bool` | | Detach buildx server (supported only on linux) (EXPERIMENTAL) | | `-f`, `--file` | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) | | `--iidfile` | `string` | | Write the image ID to a file | diff --git a/docs/reference/buildx_dial-stdio.md b/docs/reference/buildx_dial-stdio.md index d78f8b309f12..7fdc84ec0fe4 100644 --- a/docs/reference/buildx_dial-stdio.md +++ b/docs/reference/buildx_dial-stdio.md @@ -5,11 +5,12 @@ Proxy current stdio streams to builder instance ### Options -| Name | Type | Default | Description | -|:-------------|:---------|:--------|:----------------------------------------------------------------------------------------------------| -| `--builder` | `string` | | Override the configured builder instance | -| `--platform` | `string` | | Target platform: this is used for node selection | -| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output | +| Name | Type | Default | Description | +|:----------------|:---------|:--------|:----------------------------------------------------------------------------------------------------| +| `--builder` | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | +| `--platform` | `string` | | Target platform: this is used for node selection | +| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output | diff --git a/docs/reference/buildx_du.md b/docs/reference/buildx_du.md index 3ac8d9504d5d..684f717a2e09 100644 --- a/docs/reference/buildx_du.md +++ b/docs/reference/buildx_du.md @@ -12,6 +12,7 @@ Disk usage | Name | Type | Default | Description | |:------------------------|:---------|:--------|:-----------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--filter` | `filter` | | Provide filter values | | [`--verbose`](#verbose) | `bool` | | Provide a more verbose output | diff --git a/docs/reference/buildx_imagetools.md b/docs/reference/buildx_imagetools.md index 6e4348b5c2aa..42e191ddf219 100644 --- a/docs/reference/buildx_imagetools.md +++ b/docs/reference/buildx_imagetools.md @@ -20,6 +20,7 @@ Commands to work on images in registry | Name | Type | Default | Description | |:------------------------|:---------|:--------|:-----------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | diff --git a/docs/reference/buildx_imagetools_create.md b/docs/reference/buildx_imagetools_create.md index 7fb1159b0c5a..d937d5d7c475 100644 --- a/docs/reference/buildx_imagetools_create.md +++ b/docs/reference/buildx_imagetools_create.md @@ -14,6 +14,7 @@ Create a new image based on source images | [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image | | [`--append`](#append) | `bool` | | Append to existing manifest | | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | [`--dry-run`](#dry-run) | `bool` | | Show final image instead of pushing | | [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file | | `--prefer-index` | 
`bool` | `true` | When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy | @@ -52,8 +53,7 @@ $ docker buildx imagetools create \ foo/bar:alpha foo/bar:beta foo/bar:gamma ``` -> **Note** -> +> [!NOTE] > The `imagetools create` command supports adding annotations to the image > index and descriptor, using the following type prefixes: > diff --git a/docs/reference/buildx_imagetools_inspect.md b/docs/reference/buildx_imagetools_inspect.md index 69a396b60bf3..ed98215b143a 100644 --- a/docs/reference/buildx_imagetools_inspect.md +++ b/docs/reference/buildx_imagetools_inspect.md @@ -12,6 +12,7 @@ Show details of an image in the registry | Name | Type | Default | Description | |:------------------------|:---------|:----------------|:----------------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template | | [`--raw`](#raw) | `bool` | | Show original, unformatted JSON manifest | diff --git a/docs/reference/buildx_inspect.md b/docs/reference/buildx_inspect.md index 066b9881b360..02847e8755b1 100644 --- a/docs/reference/buildx_inspect.md +++ b/docs/reference/buildx_inspect.md @@ -13,6 +13,7 @@ Inspect current builder instance |:----------------------------|:---------|:--------|:--------------------------------------------| | [`--bootstrap`](#bootstrap) | `bool` | | Ensure builder has booted before inspecting | | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | @@ -43,8 +44,7 @@ name of the builder to inspect to get information about that builder. The following example shows information about a builder instance named `elated_tesla`: -> **Note** -> +> [!NOTE] > The asterisk (`*`) next to node build platform(s) indicate they have been > manually set during `buildx create`. Otherwise the platforms were > automatically detected. 
diff --git a/docs/reference/buildx_ls.md b/docs/reference/buildx_ls.md index 988d1144930c..dba478c30632 100644 --- a/docs/reference/buildx_ls.md +++ b/docs/reference/buildx_ls.md @@ -9,9 +9,10 @@ List builder instances ### Options -| Name | Type | Default | Description | -|:----------------------|:---------|:--------|:------------------| -| [`--format`](#format) | `string` | `table` | Format the output | +| Name | Type | Default | Description | +|:----------------------|:---------|:--------|:---------------------| +| `-D`, `--debug` | `bool` | | Enable debug logging | +| [`--format`](#format) | `string` | `table` | Format the output | diff --git a/docs/reference/buildx_prune.md b/docs/reference/buildx_prune.md index df739831ec36..f8d4537567cb 100644 --- a/docs/reference/buildx_prune.md +++ b/docs/reference/buildx_prune.md @@ -13,6 +13,7 @@ Remove build cache |:------------------------|:---------|:--------|:------------------------------------------| | `-a`, `--all` | `bool` | | Include internal/frontend images | | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) | | `-f`, `--force` | `bool` | | Do not prompt for confirmation | | `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache | diff --git a/docs/reference/buildx_rm.md b/docs/reference/buildx_rm.md index a9e7cca29913..bcb856e7bdf3 100644 --- a/docs/reference/buildx_rm.md +++ b/docs/reference/buildx_rm.md @@ -13,6 +13,7 @@ Remove one or more builder instances |:------------------------------------|:---------|:--------|:-----------------------------------------| | [`--all-inactive`](#all-inactive) | `bool` | | Remove all inactive builders | | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | [`-f`](#force), [`--force`](#force) | `bool` | | Do not prompt for confirmation | | [`--keep-daemon`](#keep-daemon) | `bool` | | Keep the BuildKit daemon running | | [`--keep-state`](#keep-state) | `bool` | | Keep BuildKit state | diff --git a/docs/reference/buildx_stop.md b/docs/reference/buildx_stop.md index 19aa8fbb9520..dcf0430a9f3e 100644 --- a/docs/reference/buildx_stop.md +++ b/docs/reference/buildx_stop.md @@ -12,6 +12,7 @@ Stop builder instance | Name | Type | Default | Description | |:------------------------|:---------|:--------|:-----------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | diff --git a/docs/reference/buildx_use.md b/docs/reference/buildx_use.md index c52cde32a7a7..6cd1652f7049 100644 --- a/docs/reference/buildx_use.md +++ b/docs/reference/buildx_use.md @@ -12,6 +12,7 @@ Set the current builder instance | Name | Type | Default | Description | |:------------------------|:---------|:--------|:-------------------------------------------| | [`--builder`](#builder) | `string` | | Override the configured builder instance | +| `-D`, `--debug` | `bool` | | Enable debug logging | | `--default` | `bool` | | Set builder as default for current context | | `--global` | `bool` | | Builder persists context changes | diff --git a/docs/reference/buildx_version.md b/docs/reference/buildx_version.md index 83dde2c8ca97..e186812f12a9 100644 --- a/docs/reference/buildx_version.md +++ b/docs/reference/buildx_version.md @@ -7,6 +7,12 @@ docker buildx version Show 
buildx version information +### Options + +| Name | Type | Default | Description | +|:----------------|:-------|:--------|:---------------------| +| `-D`, `--debug` | `bool` | | Enable debug logging | + diff --git a/driver/docker/driver.go b/driver/docker/driver.go index 2b1481f6e60e..d9d278e7efa3 100644 --- a/driver/docker/driver.go +++ b/driver/docker/driver.go @@ -29,7 +29,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error { func (d *Driver) Info(ctx context.Context) (*driver.Info, error) { _, err := d.DockerAPI.ServerVersion(ctx) if err != nil { - return nil, errors.Wrapf(driver.ErrNotConnecting, err.Error()) + return nil, errors.Wrapf(driver.ErrNotConnecting{}, err.Error()) } return &driver.Info{ Status: driver.Running, @@ -39,7 +39,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) { func (d *Driver) Version(ctx context.Context) (string, error) { v, err := d.DockerAPI.ServerVersion(ctx) if err != nil { - return "", errors.Wrapf(driver.ErrNotConnecting, err.Error()) + return "", errors.Wrapf(driver.ErrNotConnecting{}, err.Error()) } if bkversion, _ := resolveBuildKitVersion(v.Version); bkversion != "" { return bkversion, nil diff --git a/driver/driver.go b/driver/driver.go index ec88f1a4eab8..c82852cd8fc1 100644 --- a/driver/driver.go +++ b/driver/driver.go @@ -14,8 +14,17 @@ import ( "github.com/pkg/errors" ) -var ErrNotRunning = errors.Errorf("driver not running") -var ErrNotConnecting = errors.Errorf("driver not connecting") +type ErrNotRunning struct{} + +func (ErrNotRunning) Error() string { + return "driver not running" +} + +type ErrNotConnecting struct{} + +func (ErrNotConnecting) Error() string { + return "driver not connecting" +} type Status int @@ -105,7 +114,7 @@ func Boot(ctx, clientContext context.Context, d *DriverHandle, pw progress.Write c, err := d.Client(clientContext) if err != nil { - if errors.Cause(err) == ErrNotRunning && try <= 2 { + if errors.Is(err, ErrNotRunning{}) && try <= 2 { continue } return nil, err diff --git a/driver/kubernetes/driver.go b/driver/kubernetes/driver.go index de03c04efb21..289f17cdc071 100644 --- a/driver/kubernetes/driver.go +++ b/driver/kubernetes/driver.go @@ -38,7 +38,8 @@ const ( type Driver struct { driver.InitConfig - factory driver.Factory + factory driver.Factory + clientConfig ClientConfig // if you add fields, remember to update docs: // https://github.com/docker/docs/blob/main/content/build/drivers/kubernetes.md @@ -198,7 +199,7 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error { func (d *Driver) Dial(ctx context.Context) (net.Conn, error) { restClient := d.clientset.CoreV1().RESTClient() - restClientConfig, err := d.KubeClientConfig.ClientConfig() + restClientConfig, err := d.clientConfig.ClientConfig() if err != nil { return nil, err } diff --git a/driver/kubernetes/factory.go b/driver/kubernetes/factory.go index c75e45447755..b25abb899b68 100644 --- a/driver/kubernetes/factory.go +++ b/driver/kubernetes/factory.go @@ -2,19 +2,22 @@ package kubernetes import ( "context" + "os" "strconv" "strings" "time" - corev1 "k8s.io/api/core/v1" - "github.com/docker/buildx/driver" "github.com/docker/buildx/driver/bkimage" + ctxkube "github.com/docker/buildx/driver/kubernetes/context" "github.com/docker/buildx/driver/kubernetes/manifest" "github.com/docker/buildx/driver/kubernetes/podchooser" dockerclient "github.com/docker/docker/client" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" + 
"k8s.io/client-go/rest" ) const ( @@ -23,11 +26,31 @@ const ( defaultTimeout = 120 * time.Second ) +type ClientConfig interface { + ClientConfig() (*rest.Config, error) + Namespace() (string, bool, error) +} + +type ClientConfigInCluster struct{} + +func (k ClientConfigInCluster) ClientConfig() (*rest.Config, error) { + return rest.InClusterConfig() +} + +func (k ClientConfigInCluster) Namespace() (string, bool, error) { + namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "", false, err + } + return strings.TrimSpace(string(namespace)), true, nil +} + func init() { driver.Register(&factory{}) } type factory struct { + cc ClientConfig // used for testing } func (*factory) Name() string { @@ -46,18 +69,50 @@ func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient. } func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) { - if cfg.KubeClientConfig == nil { - return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName) + var err error + var cc ClientConfig + if f.cc != nil { + cc = f.cc + } else { + cc, err = ctxkube.ConfigFromEndpoint(cfg.EndpointAddr, cfg.ContextStore) + if err != nil { + // err is returned if cfg.EndpointAddr is non-context name like "unix:///var/run/docker.sock". + // try again with name="default". + // FIXME(@AkihiroSuda): cfg should retain real context name. + cc, err = ctxkube.ConfigFromEndpoint("default", cfg.ContextStore) + if err != nil { + logrus.Error(err) + } + } + tryToUseConfigInCluster := false + if cc == nil { + tryToUseConfigInCluster = true + } else { + if _, err := cc.ClientConfig(); err != nil { + tryToUseConfigInCluster = true + } + } + if tryToUseConfigInCluster { + ccInCluster := ClientConfigInCluster{} + if _, err := ccInCluster.ClientConfig(); err == nil { + logrus.Debug("using kube config in cluster") + cc = ccInCluster + } + } + if cc == nil { + return nil, errors.Errorf("%s driver requires kubernetes API access", DriverName) + } } + deploymentName, err := buildxNameToDeploymentName(cfg.Name) if err != nil { return nil, err } - namespace, _, err := cfg.KubeClientConfig.Namespace() + namespace, _, err := cc.Namespace() if err != nil { return nil, errors.Wrap(err, "cannot determine Kubernetes namespace, specify manually") } - restClientConfig, err := cfg.KubeClientConfig.ClientConfig() + restClientConfig, err := cc.ClientConfig() if err != nil { return nil, err } @@ -67,9 +122,10 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver } d := &Driver{ - factory: f, - InitConfig: cfg, - clientset: clientset, + factory: f, + clientConfig: cc, + InitConfig: cfg, + clientset: clientset, } deploymentOpt, loadbalance, namespace, defaultLoad, timeout, err := f.processDriverOpts(deploymentName, namespace, cfg) diff --git a/driver/kubernetes/factory_test.go b/driver/kubernetes/factory_test.go index 3224b250e491..bae2932704b4 100644 --- a/driver/kubernetes/factory_test.go +++ b/driver/kubernetes/factory_test.go @@ -11,29 +11,28 @@ import ( "k8s.io/client-go/rest" ) -type mockKubeClientConfig struct { +type mockClientConfig struct { clientConfig *rest.Config namespace string } -func (r *mockKubeClientConfig) ClientConfig() (*rest.Config, error) { +func (r *mockClientConfig) ClientConfig() (*rest.Config, error) { return r.clientConfig, nil } -func (r *mockKubeClientConfig) Namespace() (string, bool, error) { +func (r *mockClientConfig) Namespace() (string, bool, error) { return r.namespace, 
true, nil } func TestFactory_processDriverOpts(t *testing.T) { - kcc := mockKubeClientConfig{ - clientConfig: &rest.Config{}, - } - cfg := driver.InitConfig{ - Name: driver.BuilderName("test"), - KubeClientConfig: &kcc, + Name: driver.BuilderName("test"), + } + f := factory{ + cc: &mockClientConfig{ + clientConfig: &rest.Config{}, + }, } - f := factory{} t.Run( "ValidOptions", func(t *testing.T) { diff --git a/driver/kubernetes/manifest/manifest.go b/driver/kubernetes/manifest/manifest.go index 7424d88edc78..1137badb7f2b 100644 --- a/driver/kubernetes/manifest/manifest.go +++ b/driver/kubernetes/manifest/manifest.go @@ -7,7 +7,6 @@ import ( "github.com/docker/buildx/util/platformutil" v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -53,10 +52,17 @@ const ( LabelApp = "app" ) -var ( - ErrReservedAnnotationPlatform = errors.Errorf("the annotation \"%s\" is reserved and cannot be customized", AnnotationPlatform) - ErrReservedLabelApp = errors.Errorf("the label \"%s\" is reserved and cannot be customized", LabelApp) -) +type ErrReservedAnnotationPlatform struct{} + +func (ErrReservedAnnotationPlatform) Error() string { + return fmt.Sprintf("the annotation %q is reserved and cannot be customized", AnnotationPlatform) +} + +type ErrReservedLabelApp struct{} + +func (ErrReservedLabelApp) Error() string { + return fmt.Sprintf("the label %q is reserved and cannot be customized", LabelApp) +} func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) { labels := map[string]string{ @@ -73,14 +79,14 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config for k, v := range opt.CustomAnnotations { if k == AnnotationPlatform { - return nil, nil, ErrReservedAnnotationPlatform + return nil, nil, ErrReservedAnnotationPlatform{} } annotations[k] = v } for k, v := range opt.CustomLabels { if k == LabelApp { - return nil, nil, ErrReservedLabelApp + return nil, nil, ErrReservedLabelApp{} } labels[k] = v } diff --git a/driver/manager.go b/driver/manager.go index 509db1ca7114..400a0cdd72c0 100644 --- a/driver/manager.go +++ b/driver/manager.go @@ -2,17 +2,15 @@ package driver import ( "context" - "os" "sort" - "strings" "sync" + "github.com/docker/cli/cli/context/store" dockerclient "github.com/docker/docker/client" "github.com/moby/buildkit/client" "github.com/moby/buildkit/util/tracing/delegated" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "k8s.io/client-go/rest" ) type Factory interface { @@ -28,38 +26,18 @@ type BuildkitConfig struct { // Rootless bool } -type KubeClientConfig interface { - ClientConfig() (*rest.Config, error) - Namespace() (string, bool, error) -} - -type KubeClientConfigInCluster struct{} - -func (k KubeClientConfigInCluster) ClientConfig() (*rest.Config, error) { - return rest.InClusterConfig() -} - -func (k KubeClientConfigInCluster) Namespace() (string, bool, error) { - namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return "", false, err - } - return strings.TrimSpace(string(namespace)), true, nil -} - type InitConfig struct { - // This object needs updates to be generic for different drivers - Name string - EndpointAddr string - DockerAPI dockerclient.APIClient - KubeClientConfig KubeClientConfig - BuildkitdFlags []string - Files map[string][]byte - DriverOpts map[string]string - Auth Auth - 
Platforms []specs.Platform - ContextPathHash string // can be used for determining pods in the driver instance - DialMeta map[string][]string + Name string + EndpointAddr string + DockerAPI dockerclient.APIClient + ContextStore store.Reader + BuildkitdFlags []string + Files map[string][]byte + DriverOpts map[string]string + Auth Auth + Platforms []specs.Platform + ContextPathHash string + DialMeta map[string][]string } var drivers map[string]Factory @@ -104,28 +82,15 @@ func GetFactory(name string, instanceRequired bool) (Factory, error) { return nil, errors.Errorf("failed to find driver %q", name) } -func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, buildkitdFlags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string, dialMeta map[string][]string) (*DriverHandle, error) { - ic := InitConfig{ - EndpointAddr: endpointAddr, - DockerAPI: api, - KubeClientConfig: kcc, - Name: name, - BuildkitdFlags: buildkitdFlags, - DriverOpts: do, - Auth: auth, - Platforms: platforms, - ContextPathHash: contextPathHash, - DialMeta: dialMeta, - Files: files, - } +func GetDriver(ctx context.Context, f Factory, cfg InitConfig) (*DriverHandle, error) { if f == nil { var err error - f, err = GetDefaultFactory(ctx, endpointAddr, api, false, dialMeta) + f, err = GetDefaultFactory(ctx, cfg.EndpointAddr, cfg.DockerAPI, false, cfg.DialMeta) if err != nil { return nil, err } } - d, err := f.New(ctx, ic) + d, err := f.New(ctx, cfg) if err != nil { return nil, err } diff --git a/driver/remote/factory.go b/driver/remote/factory.go index 1c30baad6163..c07c08f2c7fa 100644 --- a/driver/remote/factory.go +++ b/driver/remote/factory.go @@ -7,15 +7,15 @@ import ( "strconv" "strings" - // import connhelpers for special url schemes - _ "github.com/moby/buildkit/client/connhelper/dockercontainer" - _ "github.com/moby/buildkit/client/connhelper/kubepod" - _ "github.com/moby/buildkit/client/connhelper/ssh" - "github.com/docker/buildx/driver" util "github.com/docker/buildx/driver/remote/util" dockerclient "github.com/docker/docker/client" "github.com/pkg/errors" + + // import connhelpers for special url schemes + _ "github.com/moby/buildkit/client/connhelper/dockercontainer" + _ "github.com/moby/buildkit/client/connhelper/kubepod" + _ "github.com/moby/buildkit/client/connhelper/ssh" ) const prioritySupported = 20 diff --git a/driver/remote/util/dialer_unix.go b/driver/remote/util/dialer_unix.go index dd3c95dfe590..307663179f9c 100644 --- a/driver/remote/util/dialer_unix.go +++ b/driver/remote/util/dialer_unix.go @@ -1,7 +1,7 @@ //go:build !windows // +build !windows -package remote +package remoteutil import ( "context" diff --git a/driver/remote/util/dialer_windows.go b/driver/remote/util/dialer_windows.go index c48c8f5501a5..f5d4b9218b39 100644 --- a/driver/remote/util/dialer_windows.go +++ b/driver/remote/util/dialer_windows.go @@ -1,4 +1,4 @@ -package remote +package remoteutil import ( "context" diff --git a/driver/remote/util/endpoint.go b/driver/remote/util/endpoint.go index 53a6510d0bfa..c8a72c56f6a2 100644 --- a/driver/remote/util/endpoint.go +++ b/driver/remote/util/endpoint.go @@ -1,18 +1,19 @@ -package remote +package remoteutil import ( "net/url" + "slices" "github.com/pkg/errors" ) -var schemes = map[string]struct{}{ - "tcp": {}, - "unix": {}, - "ssh": {}, - "docker-container": {}, - "kube-pod": {}, - "npipe": {}, +var schemes = []string{ + "docker-container", + "kube-pod", + 
"npipe", + "ssh", + "tcp", + "unix", } func IsValidEndpoint(ep string) error { @@ -20,7 +21,7 @@ func IsValidEndpoint(ep string) error { if err != nil { return errors.Wrapf(err, "failed to parse endpoint %s", ep) } - if _, ok := schemes[endpoint.Scheme]; !ok { + if _, ok := slices.BinarySearch(schemes, endpoint.Scheme); !ok { return errors.Errorf("unrecognized url scheme %s", endpoint.Scheme) } return nil diff --git a/driver/remote/util/endpoint_test.go b/driver/remote/util/endpoint_test.go new file mode 100644 index 000000000000..0360e05011cd --- /dev/null +++ b/driver/remote/util/endpoint_test.go @@ -0,0 +1,12 @@ +package remoteutil + +import ( + "slices" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSchemes(t *testing.T) { + require.True(t, slices.IsSorted(schemes)) +} diff --git a/go.mod b/go.mod index a8d485f8165b..c19ab4199451 100644 --- a/go.mod +++ b/go.mod @@ -6,21 +6,21 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/Microsoft/go-winio v0.6.2 github.com/aws/aws-sdk-go-v2/config v1.26.6 - github.com/compose-spec/compose-go/v2 v2.1.4 + github.com/compose-spec/compose-go/v2 v2.1.6 github.com/containerd/console v1.0.4 - github.com/containerd/containerd v1.7.19 + github.com/containerd/containerd v1.7.20 github.com/containerd/continuity v0.4.3 github.com/containerd/errdefs v0.1.0 github.com/containerd/log v0.1.0 github.com/containerd/platforms v0.2.1 - github.com/containerd/typeurl/v2 v2.1.1 + github.com/containerd/typeurl/v2 v2.2.0 github.com/creack/pty v1.1.21 github.com/distribution/reference v0.6.0 - github.com/docker/cli v27.0.3+incompatible + github.com/docker/cli v27.2.0+incompatible github.com/docker/cli-docs-tool v0.8.0 - github.com/docker/docker v27.0.3+incompatible + github.com/docker/docker v27.2.0+incompatible github.com/docker/go-units v0.5.0 - github.com/gofrs/flock v0.12.0 + github.com/gofrs/flock v0.12.1 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.4 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -29,9 +29,9 @@ require ( github.com/hashicorp/hcl/v2 v2.20.1 github.com/in-toto/in-toto-golang v0.5.0 github.com/mitchellh/hashstructure/v2 v2.0.2 - github.com/moby/buildkit v0.15.0 - github.com/moby/sys/mountinfo v0.7.1 - github.com/moby/sys/signal v0.7.0 + github.com/moby/buildkit v0.16.0-rc1 + github.com/moby/sys/mountinfo v0.7.2 + github.com/moby/sys/signal v0.7.1 github.com/morikuni/aec v1.0.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 @@ -51,10 +51,10 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 golang.org/x/mod v0.17.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.22.0 golang.org/x/term v0.20.0 golang.org/x/text v0.15.0 - google.golang.org/grpc v1.59.0 + google.golang.org/grpc v1.60.1 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.2 k8s.io/apimachinery v0.29.2 @@ -105,7 +105,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -128,7 +128,8 @@ require ( github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect - github.com/moby/sys/user v0.1.0 // indirect + github.com/moby/sys/user v0.3.0 // 
indirect + github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -152,9 +153,8 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect @@ -163,13 +163,13 @@ require ( golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.11.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.17.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index b2c109b7daaf..4c129d3a4fcb 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= -cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= -cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= @@ -84,14 +84,14 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 
v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/compose-spec/compose-go/v2 v2.1.4 h1:+1UKMvbBJo22Bpulgb9KAeZwRT99hANf3tDQVeG6ZJo= -github.com/compose-spec/compose-go/v2 v2.1.4/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc= +github.com/compose-spec/compose-go/v2 v2.1.6 h1:d0Cs0DffmOwmSzs0YPHwKCskknGq2jfGg4uGowlEpps= +github.com/compose-spec/compose-go/v2 v2.1.6/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/containerd/containerd v1.7.19 h1:/xQ4XRJ0tamDkdzrrBAUy/LE5nCcxFKdBm4EcPrSMEE= -github.com/containerd/containerd v1.7.19/go.mod h1:h4FtNYUUMB4Phr6v+xG89RYKj9XccvbNSCKjdufCrkc= +github.com/containerd/containerd v1.7.20 h1:Sl6jQYk3TRavaU83h66QMbI2Nqg9Jm6qzwX57Vsn1SQ= +github.com/containerd/containerd v1.7.20/go.mod h1:52GsS5CwquuqPuLncsXwG0t2CiUce+KsNHJZQJvAgR0= github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA= github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig= github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= @@ -102,8 +102,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk= -github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE= +github.com/containerd/nydus-snapshotter v0.14.0 h1:6/eAi6d7MjaeLLuMO8Udfe5GVsDudmrDNO4SGETMBco= +github.com/containerd/nydus-snapshotter v0.14.0/go.mod h1:TT4jv2SnIDxEBu4H2YOvWQHPOap031ydTaHTuvc5VQk= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= github.com/containerd/stargz-snapshotter v0.15.1 h1:fpsP4kf/Z4n2EYnU0WT8ZCE3eiKDwikDhL6VwxIlgeA= @@ -111,8 +111,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= -github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= -github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= +github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso= +github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ 
-124,15 +124,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= -github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.2.0+incompatible h1:yHD1QEB1/0vr5eBNpu8tncu8gWxg8EydFPOSKHzXSMM= +github.com/docker/cli v27.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli-docs-tool v0.8.0 h1:YcDWl7rQJC3lJ7WVZRwSs3bc9nka97QLWfyJQli8yJU= github.com/docker/cli-docs-tool v0.8.0/go.mod h1:8TQQ3E7mOXoYUs811LiPdUnAhXrcVsBIrW21a5pUbdk= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= -github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4= +github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0= @@ -184,8 +184,8 @@ github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc= github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/gofrs/flock v0.12.0 h1:xHW8t8GPAiGtqz7KxiSqfOEXwpOaqhpYZrTE2MQBgXY= -github.com/gofrs/flock v0.12.0/go.mod h1:FirDy1Ing0mI2+kB6wk+vyyAH+e6xiE+EYA0jnzV9jc= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -203,6 +203,7 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI= @@ -226,8 +227,8 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -306,8 +307,8 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/buildkit v0.15.0 h1:vnZLThPr9JU6SvItctKoa6NfgPZ8oUApg/TCOaa/SVs= -github.com/moby/buildkit v0.15.0/go.mod h1:oN9S+8I7wF26vrqn9NuAF6dFSyGTfXvtiu9o1NlnnH4= +github.com/moby/buildkit v0.16.0-rc1 h1:G6KBYr6T4B1ylpinYIjcNLcVkPUkuddw3daqaM9yg9A= +github.com/moby/buildkit v0.16.0-rc1/go.mod h1:WLr3pMBXsAoSuZIGdGww1JPz8S8Qp+OTf0dlrPnzbDg= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -316,14 +317,16 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g= -github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= +github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= -github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= +github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0= +github.com/moby/sys/signal v0.7.1/go.mod 
h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8= +github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= +github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -394,8 +397,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= @@ -460,6 +463,7 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.4.0/go.mod h1:nHzOclRkoj++EU9ZjSrZvRG0BXIWt8c7loYc0qXAFGQ= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= @@ -475,12 +479,10 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0= 
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0 h1:jd0+5t/YynESZqsSyPz+7PAFdEop0dlN0+PkyHYo8oI= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0/go.mod h1:U707O40ee1FpQGyhvqnzmCJm1Wh6OX6GGBVn0E6Uyyk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0 h1:bflGWrfYyuulcdxf14V6n9+CoQcu5SAAdHmDPAJnlps= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0/go.mod h1:qcTO4xHAxZLaLxPd60TdE88rxtItPHgHWqOhOGRr0as= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= @@ -507,32 +509,36 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod 
h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -544,19 +550,25 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -565,6 +577,7 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -572,19 +585,19 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3 h1:1hfbdAfFbkmpg41000wDVqr7jUpK/Yo+LPnIxxGzmkg= +google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.59.0 
h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= +google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/hack/dockerfiles/authors.Dockerfile b/hack/dockerfiles/authors.Dockerfile index 12fd12068042..13f13bf1bce8 100644 --- a/hack/dockerfiles/authors.Dockerfile +++ b/hack/dockerfiles/authors.Dockerfile @@ -1,20 +1,21 @@ # syntax=docker/dockerfile:1 -FROM alpine:3.14 AS gen +ARG ALPINE_VERSION=3.20 + +FROM alpine:${ALPINE_VERSION} AS gen RUN apk add --no-cache git WORKDIR /src RUN --mount=type=bind,target=. < /out/AUTHORS -cat /out/AUTHORS + set -e + mkdir /out + # see also ".mailmap" for how email addresses and names are deduplicated + { + echo "# This file lists all individuals having contributed content to the repository." + echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile." + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf + } > /out/AUTHORS + cat /out/AUTHORS EOT FROM scratch AS update @@ -22,12 +23,12 @@ COPY --from=gen /out / FROM gen AS validate RUN --mount=type=bind,target=.,rw <&2 'ERROR: Authors result differs. Please update with "make authors"' - git status --porcelain -- AUTHORS - exit 1 -fi + set -e + git add -A + cp -rf /out/* . + if [ -n "$(git status --porcelain -- AUTHORS)" ]; then + echo >&2 'ERROR: Authors result differs. Please update with "make authors"' + git status --porcelain -- AUTHORS + exit 1 + fi EOT diff --git a/hack/dockerfiles/govulncheck.Dockerfile b/hack/dockerfiles/govulncheck.Dockerfile new file mode 100644 index 000000000000..df2cc8768b61 --- /dev/null +++ b/hack/dockerfiles/govulncheck.Dockerfile @@ -0,0 +1,30 @@ +# syntax=docker/dockerfile:1 + +ARG GO_VERSION="1.22" +ARG GOVULNCHECK_VERSION="v1.1.3" +ARG FORMAT="text" + +FROM golang:${GO_VERSION}-alpine AS base +WORKDIR /go/src/github.com/docker/buildx +RUN apk add --no-cache jq moreutils +ARG GOVULNCHECK_VERSION +RUN --mount=type=cache,target=/root/.cache \ + --mount=type=cache,target=/go/pkg/mod \ + go install golang.org/x/vuln/cmd/govulncheck@$GOVULNCHECK_VERSION + +FROM base AS run +ARG FORMAT +RUN --mount=type=bind,target=. 
<(sponge /out/govulncheck.out) + fi +EOT + +FROM scratch AS output +COPY --from=run /out / diff --git a/tests/bake.go b/tests/bake.go index 364b697dd2ee..bafd4ba6f055 100644 --- a/tests/bake.go +++ b/tests/bake.go @@ -13,6 +13,7 @@ import ( "github.com/docker/buildx/bake" "github.com/docker/buildx/util/gitutil" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/frontend/subrequests/lint" "github.com/moby/buildkit/identity" provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types" "github.com/moby/buildkit/util/contentutil" @@ -56,6 +57,9 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){ testListVariables, testBakeCallCheck, testBakeCallCheckFlag, + testBakeCallMetadata, + testBakeMultiPlatform, + testBakeCheckCallOutput, } func testBakePrint(t *testing.T, sb integration.Sandbox) { @@ -887,6 +891,56 @@ target "def" { require.Len(t, md.BuildWarnings, 3, string(dt)) } +func testBakeMultiPlatform(t *testing.T, sb integration.Sandbox) { + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + target := registry + "/buildx/registry:latest" + + dockerfile := []byte(` + FROM --platform=$BUILDPLATFORM busybox:latest AS base + COPY foo /etc/foo + RUN cp /etc/foo /etc/bar + + FROM scratch + COPY --from=base /etc/bar /bar + `) + bakefile := []byte(` + target "default" { + platforms = ["linux/amd64", "linux/arm64"] + } + `) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("foo", []byte("foo"), 0600), + ) + + cmd := buildxCmd(sb, withDir(dir), withArgs("bake"), withArgs("--set", fmt.Sprintf("*.output=type=image,name=%s,push=true", target))) + out, err := cmd.CombinedOutput() + + if !isMobyWorker(sb) { + require.NoError(t, err, string(out)) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + + img := imgs.Find("linux/amd64") + require.NotNil(t, img) + img = imgs.Find("linux/arm64") + require.NotNil(t, img) + + } else { + require.Error(t, err, string(out)) + require.Contains(t, string(out), "Multi-platform build is not supported") + } +} + func testBakeMultiExporters(t *testing.T, sb integration.Sandbox) { if !isDockerContainerWorker(sb) { t.Skip("only testing with docker-container worker") @@ -1163,3 +1217,185 @@ target "another" { require.Len(t, warnings, 1) } + +func testBakeCallMetadata(t *testing.T, sb integration.Sandbox) { + dockerfile := []byte(` +frOM busybox as base +cOpy Dockerfile . 
+from scratch +COPy --from=base \ + /Dockerfile \ + / + `) + bakefile := []byte(` +target "default" {} +`) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd( + sb, + withDir(dir), + withArgs("bake", "--call", "check,format=json", "--metadata-file", filepath.Join(dir, "md.json")), + ) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + + var res map[string]any + require.NoError(t, json.Unmarshal(stdout.Bytes(), &res), stdout.String()) + targets, ok := res["target"].(map[string]any) + require.True(t, ok) + def, ok := targets["default"].(map[string]any) + require.True(t, ok) + _, ok = def["build"] + require.True(t, ok) + check, ok := def["check"].(map[string]any) + require.True(t, ok) + warnings, ok := check["warnings"].([]any) + require.True(t, ok) + require.Len(t, warnings, 3) + + dt, err := os.ReadFile(filepath.Join(dir, "md.json")) + require.NoError(t, err) + + type mdT struct { + Default struct { + BuildRef string `json:"buildx.build.ref"` + ResultJSON lint.LintResults `json:"result.json"` + } `json:"default"` + } + var md mdT + require.NoError(t, json.Unmarshal(dt, &md), dt) + require.Empty(t, md.Default.BuildRef) + require.Len(t, md.Default.ResultJSON.Warnings, 3) +} + +func testBakeCheckCallOutput(t *testing.T, sb integration.Sandbox) { + t.Run("check for warning count msg in check without warnings", func(t *testing.T) { + dockerfile := []byte(` +FROM busybox +COPY Dockerfile . + `) + bakefile := []byte(` + target "default" {} + `) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd( + sb, + withDir(dir), + withArgs("bake", "--call", "check"), + ) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.NoError(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, no warnings found.") + }) + t.Run("check for warning count msg in check with single warning", func(t *testing.T) { + dockerfile := []byte(` +FROM busybox +copy Dockerfile . + `) + bakefile := []byte(` + target "default" {} + `) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd( + sb, + withDir(dir), + withArgs("bake", "--call", "check"), + ) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!") + }) + t.Run("check for warning count msg in check with multiple warnings", func(t *testing.T) { + dockerfile := []byte(` +FROM busybox +copy Dockerfile . + +FROM busybox as base +COPY Dockerfile . 
+ `) + bakefile := []byte(` + target "default" {} + `) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd( + sb, + withDir(dir), + withArgs("bake", "--call", "check"), + ) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!") + }) + t.Run("check for warnings with multiple build targets", func(t *testing.T) { + dockerfile1 := []byte(` +FROM busybox +copy Dockerfile . + `) + dockerfile2 := []byte(` +FROM busybox +copy Dockerfile . + +FROM busybox as base +COPY Dockerfile . + `) + bakefile := []byte(` +target "first" { + dockerfile = "Dockerfile.first" +} +target "second" { + dockerfile = "Dockerfile.second" +} + `) + dir := tmpdir( + t, + fstest.CreateFile("docker-bake.hcl", bakefile, 0600), + fstest.CreateFile("Dockerfile.first", dockerfile1, 0600), + fstest.CreateFile("Dockerfile.second", dockerfile2, 0600), + ) + + cmd := buildxCmd( + sb, + withDir(dir), + withArgs("bake", "--call", "check", "first", "second"), + ) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!") + require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!") + }) +} diff --git a/tests/build.go b/tests/build.go index 9c946369dc86..3517c9265edb 100644 --- a/tests/build.go +++ b/tests/build.go @@ -62,7 +62,7 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){ testBuildLabelNoKey, testBuildCacheExportNotSupported, testBuildOCIExportNotSupported, - testBuildMultiPlatformNotSupported, + testBuildMultiPlatform, testDockerHostGateway, testBuildNetworkModeBridge, testBuildShmSize, @@ -74,6 +74,7 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){ testBuildSecret, testBuildDefaultLoad, testBuildCall, + testCheckCallOutput, } func testBuild(t *testing.T, sb integration.Sandbox) { @@ -616,16 +617,47 @@ func testBuildOCIExportNotSupported(t *testing.T, sb integration.Sandbox) { require.Contains(t, string(out), "OCI exporter is not supported") } -func testBuildMultiPlatformNotSupported(t *testing.T, sb integration.Sandbox) { - if !isMobyWorker(sb) { - t.Skip("only testing with docker worker") +func testBuildMultiPlatform(t *testing.T, sb integration.Sandbox) { + dockerfile := []byte(` + FROM --platform=$BUILDPLATFORM busybox:latest AS base + COPY foo /etc/foo + RUN cp /etc/foo /etc/bar + + FROM scratch + COPY --from=base /etc/bar /bar + `) + dir := tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("foo", []byte("foo"), 0600), + ) + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) } + require.NoError(t, err) + target := registry + "/buildx/registry:latest" - dir := createTestProject(t) - cmd := buildxCmd(sb, withArgs("build", "--platform=linux/amd64,linux/arm64", dir)) + cmd := buildxCmd(sb, withArgs("build", "--platform=linux/amd64,linux/arm64", fmt.Sprintf("--output=type=image,name=%s,push=true", target), dir)) out, err := cmd.CombinedOutput() - require.Error(t, err, string(out)) - require.Contains(t, string(out), "Multi-platform build is not supported") + + if !isMobyWorker(sb) { + require.NoError(t, 
err, string(out)) + + desc, provider, err := contentutil.ProviderFromRef(target) + require.NoError(t, err) + imgs, err := testutil.ReadImages(sb.Context(), provider, desc) + require.NoError(t, err) + + img := imgs.Find("linux/amd64") + require.NotNil(t, img) + img = imgs.Find("linux/arm64") + require.NotNil(t, img) + + } else { + require.Error(t, err, string(out)) + require.Contains(t, string(out), "Multi-platform build is not supported") + } } func testDockerHostGateway(t *testing.T, sb integration.Sandbox) { @@ -1162,6 +1194,103 @@ FROM second AS binary require.Equal(t, 1, len(res.Sources)) }) + + t.Run("check metadata", func(t *testing.T) { + dockerfile := []byte(` +frOM busybox as base +cOpy Dockerfile . +from scratch +COPy --from=base \ + /Dockerfile \ + / + `) + dir := tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd(sb, withArgs("build", "--call=check,format=json", "--metadata-file", filepath.Join(dir, "md.json"), dir)) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + + var res lint.LintResults + require.NoError(t, json.Unmarshal(stdout.Bytes(), &res), stdout.String()) + require.Len(t, res.Warnings, 3) + + dt, err := os.ReadFile(filepath.Join(dir, "md.json")) + require.NoError(t, err) + + type mdT struct { + BuildRef string `json:"buildx.build.ref"` + ResultJSON lint.LintResults `json:"result.json"` + } + var md mdT + require.NoError(t, json.Unmarshal(dt, &md), dt) + require.Empty(t, md.BuildRef) + require.Len(t, md.ResultJSON.Warnings, 3) + }) +} + +func testCheckCallOutput(t *testing.T, sb integration.Sandbox) { + t.Run("check for warning count msg in check without warnings", func(t *testing.T) { + dockerfile := []byte(` +FROM busybox AS base +COPY Dockerfile . + `) + dir := tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd(sb, withArgs("build", "--call=check", dir)) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.NoError(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, no warnings found.") + }) + + t.Run("check for warning count msg in check with single warning", func(t *testing.T) { + dockerfile := []byte(` +FROM busybox as base +COPY Dockerfile . + `) + dir := tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd(sb, withArgs("build", "--call=check", dir)) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, 1 warning has been found!") + }) + + t.Run("check for warning count msg in check with multiple warnings", func(t *testing.T) { + dockerfile := []byte(` +frOM busybox as base +cOpy Dockerfile . 
+ `) + dir := tmpdir( + t, + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + + cmd := buildxCmd(sb, withArgs("build", "--call=check", dir)) + stdout := bytes.Buffer{} + stderr := bytes.Buffer{} + cmd.Stdout = &stdout + cmd.Stderr = &stderr + require.Error(t, cmd.Run(), stdout.String(), stderr.String()) + require.Contains(t, stdout.String(), "Check complete, 2 warnings have been found!") + }) } func createTestProject(t *testing.T) string { diff --git a/util/buildflags/printfunc.go b/util/buildflags/callfunc.go similarity index 86% rename from util/buildflags/printfunc.go rename to util/buildflags/callfunc.go index c6281069a3d8..7ee1a85f67a4 100644 --- a/util/buildflags/printfunc.go +++ b/util/buildflags/callfunc.go @@ -9,9 +9,9 @@ import ( "github.com/tonistiigi/go-csvvalue" ) -const defaultPrintFunc = "build" +const defaultCallFunc = "build" -func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) { +func ParseCallFunc(str string) (*controllerapi.CallFunc, error) { if str == "" { return nil, nil } @@ -20,7 +20,7 @@ func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) { if err != nil { return nil, err } - f := &controllerapi.PrintFunc{} + f := &controllerapi.CallFunc{} for _, field := range fields { parts := strings.SplitN(field, "=", 2) if len(parts) == 2 { @@ -51,7 +51,7 @@ func ParsePrintFunc(str string) (*controllerapi.PrintFunc, error) { f.Name = "lint" } - if f.Name == defaultPrintFunc { + if f.Name == defaultCallFunc { return nil, nil } diff --git a/util/progress/metricwriter.go b/util/progress/metricwriter.go index 23d0fc50e762..84d2f60bd20e 100644 --- a/util/progress/metricwriter.go +++ b/util/progress/metricwriter.go @@ -17,9 +17,35 @@ import ( "golang.org/x/text/language" ) +type rePatterns struct { + LocalSourceType *regexp.Regexp + ImageSourceType *regexp.Regexp + ExecType *regexp.Regexp + ExportImageType *regexp.Regexp + LintMessage *regexp.Regexp +} + +var re = sync.OnceValue(func() *rePatterns { + return &rePatterns{ + LocalSourceType: regexp.MustCompile( + strings.Join([]string{ + `(?P\[internal] load build context)`, + `(?Pload build definition)`, + `(?Pload \.dockerignore)`, + `(?P\[context .+] load from client)`, + }, "|"), + ), + ImageSourceType: regexp.MustCompile(`^\[.*] FROM `), + ExecType: regexp.MustCompile(`^\[.*] RUN `), + ExportImageType: regexp.MustCompile(`^exporting to (image|(?P\w+) image format)$`), + LintMessage: regexp.MustCompile(`^https://docs\.docker\.com/go/dockerfile/rule/([\w|-]+)/`), + } +}) + type metricWriter struct { recorders []metricRecorder attrs attribute.Set + mu sync.Mutex } func newMetrics(mp metric.MeterProvider, attrs attribute.Set) *metricWriter { @@ -38,6 +64,9 @@ func newMetrics(mp metric.MeterProvider, attrs attribute.Set) *metricWriter { } func (mw *metricWriter) Write(ss *client.SolveStatus) { + mw.mu.Lock() + defer mw.mu.Unlock() + for _, recorder := range mw.recorders { recorder.Record(ss) } @@ -128,22 +157,13 @@ func (mr *localSourceTransferMetricRecorder) Record(ss *client.SolveStatus) { } } -var reLocalSourceType = regexp.MustCompile( - strings.Join([]string{ - `(?P\[internal] load build context)`, - `(?Pload build definition)`, - `(?Pload \.dockerignore)`, - `(?P\[context .+] load from client)`, - }, "|"), -) - func detectLocalSourceType(vertexName string) attribute.KeyValue { - match := reLocalSourceType.FindStringSubmatch(vertexName) + match := re().LocalSourceType.FindStringSubmatch(vertexName) if match == nil { return attribute.KeyValue{} } - for i, source := range 
reLocalSourceType.SubexpNames() { + for i, source := range re().LocalSourceType.SubexpNames() { if len(source) == 0 { // Not a subexpression. continue @@ -241,10 +261,8 @@ func (mr *imageSourceMetricRecorder) Record(ss *client.SolveStatus) { } } -var reImageSourceType = regexp.MustCompile(`^\[.*] FROM `) - func detectImageSourceType(vertexName string) bool { - return reImageSourceType.MatchString(vertexName) + return re().ImageSourceType.MatchString(vertexName) } type ( @@ -278,10 +296,8 @@ func (mr *execMetricRecorder) Record(ss *client.SolveStatus) { } } -var reExecType = regexp.MustCompile(`^\[.*] RUN `) - func detectExecType(vertexName string) bool { - return reExecType.MatchString(vertexName) + return re().ExecType.MatchString(vertexName) } type ( @@ -325,10 +341,8 @@ func (mr *exportImageMetricRecorder) Record(ss *client.SolveStatus) { } } -var reExportImageType = regexp.MustCompile(`^exporting to (image|(?P\w+) image format)$`) - func detectExportImageType(vertexName string) string { - m := reExportImageType.FindStringSubmatch(vertexName) + m := re().ExportImageType.FindStringSubmatch(vertexName) if m == nil { return "" } @@ -456,7 +470,10 @@ func kebabToCamel(s string) string { return strings.Join(words, "") } +var lintRuleNameProperty = attribute.Key("lint.rule.name") + func (mr *lintMetricRecorder) Record(ss *client.SolveStatus) { + reLintMessage := re().LintMessage for _, warning := range ss.Warnings { m := reLintMessage.FindSubmatch([]byte(warning.URL)) if len(m) < 2 { @@ -472,8 +489,3 @@ func (mr *lintMetricRecorder) Record(ss *client.SolveStatus) { ) } } - -var ( - reLintMessage = regexp.MustCompile(`^https://docs\.docker\.com/go/dockerfile/rule/([\w|-]+)/`) - lintRuleNameProperty = attribute.Key("lint.rule.name") -) diff --git a/util/progress/printer.go b/util/progress/printer.go index 052019337a82..95fb680d0087 100644 --- a/util/progress/printer.go +++ b/util/progress/printer.go @@ -44,6 +44,15 @@ func (p *Printer) Wait() error { return p.err } +func (p *Printer) IsDone() bool { + select { + case <-p.done: + return true + default: + return false + } +} + func (p *Printer) Pause() error { p.paused = make(chan struct{}) return p.Wait() diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go index 21730f101724..150a4625e312 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go @@ -22,11 +22,8 @@ import ( "path/filepath" "github.com/compose-spec/compose-go/v2/consts" - "github.com/compose-spec/compose-go/v2/interpolation" "github.com/compose-spec/compose-go/v2/override" "github.com/compose-spec/compose-go/v2/paths" - "github.com/compose-spec/compose-go/v2/template" - "github.com/compose-spec/compose-go/v2/transform" "github.com/compose-spec/compose-go/v2/types" ) @@ -71,22 +68,10 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a ) switch v := extends.(type) { case map[string]any: - if opts.Interpolate != nil { - v, err = interpolation.Interpolate(v, *opts.Interpolate) - if err != nil { - return nil, err - } - } ref = v["service"].(string) file = v["file"] opts.ProcessEvent("extends", v) case string: - if opts.Interpolate != nil { - v, err = opts.Interpolate.Substitute(v, template.Mapping(opts.Interpolate.LookupValue)) - if err != nil { - return nil, err - } - } ref = v opts.ProcessEvent("extends", map[string]any{"service": ref}) } @@ -190,12 +175,6 @@ func 
getExtendsBaseFromFile( ) } - // Attempt to make a canonical model so ResolveRelativePaths can operate on source:target short syntaxes - source, err = transform.Canonical(source, true) - if err != nil { - return nil, nil, err - } - var remotes []paths.RemoteResource for _, loader := range opts.RemoteResourceLoaders() { remotes = append(remotes, loader.Accept) diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go index 924bb654f724..823c2f7ac5f9 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go @@ -30,7 +30,7 @@ import ( ) // loadIncludeConfig parse the required config from raw yaml -func loadIncludeConfig(source any, options *Options) ([]types.IncludeConfig, error) { +func loadIncludeConfig(source any) ([]types.IncludeConfig, error) { if source == nil { return nil, nil } @@ -45,23 +45,13 @@ func loadIncludeConfig(source any, options *Options) ([]types.IncludeConfig, err } } } - if options.Interpolate != nil { - for i, config := range configs { - interpolated, err := interp.Interpolate(config.(map[string]any), *options.Interpolate) - if err != nil { - return nil, err - } - configs[i] = interpolated - } - } - var requires []types.IncludeConfig err := Transform(source, &requires) return requires, err } func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string) error { - includeConfig, err := loadIncludeConfig(model["include"], options) + includeConfig, err := loadIncludeConfig(model["include"]) if err != nil { return err } diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go index fc2315989692..8a0adbc3b6fe 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go @@ -367,23 +367,6 @@ func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Option } } - if opts.Interpolate != nil && !opts.SkipInterpolation { - dict, err = interp.Interpolate(dict, *opts.Interpolate) - if err != nil { - return nil, err - } - } - - dict, err = transform.Canonical(dict, opts.SkipInterpolation) - if err != nil { - return nil, err - } - - dict, err = override.EnforceUnicity(dict) - if err != nil { - return nil, err - } - if !opts.SkipDefaultValues { dict, err = transform.SetDefaultValues(dict) if err != nil { @@ -432,6 +415,13 @@ func loadYamlFile(ctx context.Context, file types.ConfigFile, opts *Options, wor return errors.New("Top-level object must be a mapping") } + if opts.Interpolate != nil && !opts.SkipInterpolation { + cfg, err = interp.Interpolate(cfg, *opts.Interpolate) + if err != nil { + return err + } + } + fixEmptyNotNull(cfg) if !opts.SkipExtends { @@ -460,6 +450,11 @@ func loadYamlFile(ctx context.Context, file types.ConfigFile, opts *Options, wor return err } + dict, err = override.EnforceUnicity(dict) + if err != nil { + return err + } + if !opts.SkipValidation { if err := schema.Validate(dict); err != nil { return fmt.Errorf("validating %s: %w", file.Filename, err) @@ -469,7 +464,15 @@ func loadYamlFile(ctx context.Context, file types.ConfigFile, opts *Options, wor delete(dict, "version") } } - return nil + + dict, err = transform.Canonical(dict, opts.SkipInterpolation) + if err != nil { + return err + } + + // Canonical transformation can reveal 
duplicates, typically as ports can be a range and conflict with an override + dict, err = override.EnforceUnicity(dict) + return err } var processor PostProcessor diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go index 39adc0bcb7f0..8a468dde9368 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go +++ b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go @@ -237,20 +237,33 @@ func mergeIPAMConfig(c any, o any, path tree.Path) (any, error) { return ipamConfigs, nil } -func convertIntoMapping(a any, defaultValue any) map[string]any { +func convertIntoMapping(a any, defaultValue map[string]any) map[string]any { switch v := a.(type) { case map[string]any: return v case []any: converted := map[string]any{} for _, s := range v { - converted[s.(string)] = defaultValue + if defaultValue == nil { + converted[s.(string)] = nil + } else { + // Create a new map for each key + converted[s.(string)] = copyMap(defaultValue) + } } return converted } return nil } +func copyMap(m map[string]any) map[string]any { + c := make(map[string]any) + for k, v := range m { + c[k] = v + } + return c +} + func override(_ any, other any, _ tree.Path) (any, error) { return other, nil } diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go index 3bb97f8c1ea9..303f39e208d4 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go @@ -116,24 +116,30 @@ func (r *relativePathsResolver) absPath(value any) (any, error) { } return v, nil } + return nil, fmt.Errorf("unexpected type %T", value) } func (r *relativePathsResolver) absVolumeMount(a any) (any, error) { - vol := a.(map[string]any) - if vol["type"] != types.VolumeTypeBind { + switch vol := a.(type) { + case map[string]any: + if vol["type"] != types.VolumeTypeBind { + return vol, nil + } + src, ok := vol["source"] + if !ok { + return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`) + } + abs, err := r.maybeUnixPath(src.(string)) + if err != nil { + return nil, err + } + vol["source"] = abs return vol, nil + default: + // not using canonical format, skip + return a, nil } - src, ok := vol["source"] - if !ok { - return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`) - } - abs, err := r.maybeUnixPath(src.(string)) - if err != nil { - return nil, err - } - vol["source"] = abs - return vol, nil } func (r *relativePathsResolver) volumeDriverOpts(a any) (any, error) { diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json index 11a733b4efb3..335cbe096e22 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json @@ -155,8 +155,8 @@ }, "additionalProperties": false }, - "cap_add": {"type": "array", "items": {"type": "string"}}, - "cap_drop": {"type": "array", "items": {"type": "string"}}, + "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, + "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "cgroup": {"type": "string", "enum": ["host", "private"]}, "cgroup_parent": {"type": "string"}, "command": {"$ref": "#/definitions/command"}, @@ -216,9 +216,9 @@ ] }, 
"device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"}, - "devices": {"type": "array", "items": {"type": "string"}}, + "devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "dns": {"$ref": "#/definitions/string_or_list"}, - "dns_opt": {"type": "array","items": {"type": "string"}}, + "dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true}, "dns_search": {"$ref": "#/definitions/string_or_list"}, "domainname": {"type": "string"}, "entrypoint": {"$ref": "#/definitions/command"}, @@ -230,7 +230,8 @@ "items": { "type": ["string", "number"], "format": "expose" - } + }, + "uniqueItems": true }, "extends": { "oneOf": [ @@ -247,13 +248,14 @@ } ] }, - "external_links": {"type": "array", "items": {"type": "string"}}, + "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "extra_hosts": {"$ref": "#/definitions/list_or_dict"}, "group_add": { "type": "array", "items": { "type": ["string", "number"] - } + }, + "uniqueItems": true }, "healthcheck": {"$ref": "#/definitions/healthcheck"}, "hostname": {"type": "string"}, @@ -262,7 +264,7 @@ "ipc": {"type": "string"}, "isolation": {"type": "string"}, "labels": {"$ref": "#/definitions/list_or_dict"}, - "links": {"type": "array", "items": {"type": "string"}}, + "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "logging": { "type": "object", @@ -348,7 +350,8 @@ "patternProperties": {"^x-": {}} } ] - } + }, + "uniqueItems": true }, "privileged": {"type": ["boolean", "string"]}, "profiles": {"$ref": "#/definitions/list_of_strings"}, @@ -363,7 +366,7 @@ "scale": { "type": ["integer", "string"] }, - "security_opt": {"type": "array", "items": {"type": "string"}}, + "security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, "shm_size": {"type": ["number", "string"]}, "secrets": {"$ref": "#/definitions/service_config_or_secret"}, "sysctls": {"$ref": "#/definitions/list_or_dict"}, @@ -429,11 +432,13 @@ "patternProperties": {"^x-": {}} } ] - } + }, + "uniqueItems": true }, "volumes_from": { "type": "array", - "items": {"type": "string"} + "items": {"type": "string"}, + "uniqueItems": true }, "working_dir": {"type": "string"} }, @@ -475,7 +480,9 @@ "path": {"type": "string"}, "action": {"type": "string", "enum": ["rebuild", "sync", "sync+restart"]}, "target": {"type": "string"} - } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} }, "additionalProperties": false, "patternProperties": {"^x-": {}} @@ -828,7 +835,8 @@ "list_of_strings": { "type": "array", - "items": {"type": "string"} + "items": {"type": "string"}, + "uniqueItems": true }, "list_or_dict": { @@ -842,7 +850,7 @@ }, "additionalProperties": false }, - {"type": "array", "items": {"type": "string"}} + {"type": "array", "items": {"type": "string"}, "uniqueItems": true} ] }, diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go index dd1ea8a6f63c..c78b17c8099e 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go @@ -1814,6 +1814,12 @@ func deriveDeepCopy_40(dst, src *Trigger) { } copy(dst.Ignore, src.Ignore) } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } } // deriveDeepCopy_41 recursively copies the contents of src into dst. 
diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/develop.go b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go index 6eeef7a7af1d..24e142c3aeb1 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/develop.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go @@ -31,8 +31,9 @@ const ( ) type Trigger struct { - Path string `yaml:"path" json:"path"` - Action WatchAction `yaml:"action" json:"action"` - Target string `yaml:"target,omitempty" json:"target,omitempty"` - Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"` + Path string `yaml:"path" json:"path"` + Action WatchAction `yaml:"action" json:"action"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` } diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go deleted file mode 100644 index 6656465efbca..000000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_linux.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package userns - -import ( - "bufio" - "fmt" - "os" - "sync" -) - -var ( - inUserNS bool - nsOnce sync.Once -) - -// RunningInUserNS detects whether we are currently running in a user namespace. -// Originally copied from github.com/lxc/lxd/shared/util.go -func RunningInUserNS() bool { - nsOnce.Do(func() { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return - } - inUserNS = true - }) - return inUserNS -} diff --git a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go deleted file mode 100644 index c67f773d0a1e..000000000000 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package userns - -// RunningInUserNS is a stub for non-Linux systems -// Always returns false -func RunningInUserNS() bool { - return false -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index d660c72e2fc6..d9fdca56654c 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.7.19+unknown" + Version = "1.7.20+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/typeurl/v2/README.md b/vendor/github.com/containerd/typeurl/v2/README.md index e3d0742f456c..8d86600a40b3 100644 --- a/vendor/github.com/containerd/typeurl/v2/README.md +++ b/vendor/github.com/containerd/typeurl/v2/README.md @@ -2,7 +2,7 @@ [![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) [![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) +[![codecov](https://codecov.io/gh/containerd/typeurl/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) A Go package for managing the registration, marshaling, and unmarshaling of encoded types. @@ -13,8 +13,8 @@ This package helps when types are sent over a ttrpc/GRPC API and marshaled as a **typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/v2/types.go b/vendor/github.com/containerd/typeurl/v2/types.go index 8d6665bb5ba7..78817b701e3a 100644 --- a/vendor/github.com/containerd/typeurl/v2/types.go +++ b/vendor/github.com/containerd/typeurl/v2/types.go @@ -27,6 +27,7 @@ import ( gogoproto "github.com/gogo/protobuf/proto" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/anypb" ) var ( @@ -122,6 +123,9 @@ func TypeURL(v interface{}) (string, error) { // Is returns true if the type of the Any is the same as v. 
func Is(any Any, v interface{}) bool { + if any == nil { + return false + } // call to check that v is a pointer tryDereference(v) url, err := TypeURL(v) @@ -193,6 +197,31 @@ func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { return err } +// MarshalProto converts typeurl.Any to google.golang.org/protobuf/types/known/anypb.Any. +func MarshalProto(from Any) *anypb.Any { + if from == nil { + return nil + } + + if pbany, ok := from.(*anypb.Any); ok { + return pbany + } + + return &anypb.Any{ + TypeUrl: from.GetTypeUrl(), + Value: from.GetValue(), + } +} + +// MarshalAnyToProto converts an arbitrary interface to google.golang.org/protobuf/types/known/anypb.Any. +func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { + anyType, err := MarshalAny(from) + if err != nil { + return nil, err + } + return MarshalProto(anyType), nil +} + func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { t, err := getTypeByUrl(typeURL) if err != nil { diff --git a/vendor/github.com/docker/cli/cli-plugins/socket/socket.go b/vendor/github.com/docker/cli/cli-plugins/socket/socket.go index 7096b42b2e94..9a76f7a7805c 100644 --- a/vendor/github.com/docker/cli/cli-plugins/socket/socket.go +++ b/vendor/github.com/docker/cli/cli-plugins/socket/socket.go @@ -95,6 +95,9 @@ func (pl *PluginServer) Addr() net.Addr { // // The error value is that of the underlying [net.Listner.Close] call. func (pl *PluginServer) Close() error { + if pl == nil { + return nil + } logrus.Trace("Closing plugin server") // Close connections first to ensure the connections get io.EOF instead // of a connection reset. diff --git a/vendor/github.com/docker/cli/cli/command/cli.go b/vendor/github.com/docker/cli/cli/command/cli.go index 06478c9c776e..8d01abeb8c99 100644 --- a/vendor/github.com/docker/cli/cli/command/cli.go +++ b/vendor/github.com/docker/cli/cli/command/cli.go @@ -324,7 +324,7 @@ func newAPIClientFromEndpoint(ep docker.Endpoint, configFile *configfile.ConfigF if len(configFile.HTTPHeaders) > 0 { opts = append(opts, client.WithHTTPHeaders(configFile.HTTPHeaders)) } - opts = append(opts, client.WithUserAgent(UserAgent())) + opts = append(opts, withCustomHeadersFromEnv(), client.WithUserAgent(UserAgent())) return client.NewClientWithOpts(opts...) } diff --git a/vendor/github.com/docker/cli/cli/command/cli_options.go b/vendor/github.com/docker/cli/cli/command/cli_options.go index eb2458768ce3..84b121f34abb 100644 --- a/vendor/github.com/docker/cli/cli/command/cli_options.go +++ b/vendor/github.com/docker/cli/cli/command/cli_options.go @@ -2,13 +2,18 @@ package command import ( "context" + "encoding/csv" "io" + "net/http" "os" "strconv" + "strings" "github.com/docker/cli/cli/streams" "github.com/docker/docker/client" + "github.com/docker/docker/errdefs" "github.com/moby/term" + "github.com/pkg/errors" ) // CLIOption is a functional argument to apply options to a [DockerCli]. These @@ -108,3 +113,107 @@ func WithAPIClient(c client.APIClient) CLIOption { return nil } } + +// envOverrideHTTPHeaders is the name of the environment-variable that can be +// used to set custom HTTP headers to be sent by the client. This environment +// variable is the equivalent to the HttpHeaders field in the configuration +// file. +// +// WARNING: If both config and environment-variable are set, the environment +// variable currently overrides all headers set in the configuration file. 
+// This behavior may change in a future update, as we are considering the +// environment variable to be appending to existing headers (and to only +// override headers with the same name). +// +// While this env-var allows for custom headers to be set, it does not allow +// for built-in headers (such as "User-Agent", if set) to be overridden. +// Also see [client.WithHTTPHeaders] and [client.WithUserAgent]. +// +// This environment variable can be used in situations where headers must be +// set for a specific invocation of the CLI, but should not be set by default, +// and therefore cannot be set in the config-file. +// +// envOverrideHTTPHeaders accepts a comma-separated (CSV) list of key=value pairs, +// where key must be a non-empty, valid MIME header format. Whitespaces surrounding +// the key are trimmed, and the key is normalised. Whitespaces in values are +// preserved, but "key=value" pairs with an empty value (e.g. "key=") are ignored. +// Tuples without a "=" produce an error. +// +// It follows CSV rules for escaping, allowing "key=value" pairs to be quoted +// if they must contain commas, which allows for multiple values for a single +// header to be set. If a key is repeated in the list, later values override +// prior values. +// +// For example, the following value: +// +// one=one-value,"two=two,value","three= a value with whitespace ",four=,five=five=one,five=five-two +// +// Produces four headers (four is omitted as it has an empty value set): +// +// - one (value is "one-value") +// - two (value is "two,value") +// - three (value is " a value with whitespace ") +// - five (value is "five-two", the later value has overridden the prior value) +const envOverrideHTTPHeaders = "DOCKER_CUSTOM_HEADERS" + +// withCustomHeadersFromEnv overriding custom HTTP headers to be sent by the +// client through the [envOverrideHTTPHeaders] environment-variable. This +// environment variable is the equivalent to the HttpHeaders field in the +// configuration file. +// +// WARNING: If both config and environment-variable are set, the environment- +// variable currently overrides all headers set in the configuration file. +// This behavior may change in a future update, as we are considering the +// environment-variable to be appending to existing headers (and to only +// override headers with the same name). +// +// TODO(thaJeztah): this is a client Option, and should be moved to the client. It is non-exported for that reason. +func withCustomHeadersFromEnv() client.Opt { + return func(apiClient *client.Client) error { + value := os.Getenv(envOverrideHTTPHeaders) + if value == "" { + return nil + } + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return errdefs.InvalidParameter(errors.Errorf("failed to parse custom headers from %s environment variable: value must be formatted as comma-separated key=value pairs", envOverrideHTTPHeaders)) + } + if len(fields) == 0 { + return nil + } + + env := map[string]string{} + for _, kv := range fields { + k, v, hasValue := strings.Cut(kv, "=") + + // Only strip whitespace in keys; preserve whitespace in values. + k = strings.TrimSpace(k) + + if k == "" { + return errdefs.InvalidParameter(errors.Errorf(`failed to set custom headers from %s environment variable: value contains a key=value pair with an empty key: '%s'`, envOverrideHTTPHeaders, kv)) + } + + // We don't currently allow empty key=value pairs, and produce an error. + // This is something we could allow in future (e.g. 
to read value + // from an environment variable with the same name). In the meantime, + // produce an error to prevent users from depending on this. + if !hasValue { + return errdefs.InvalidParameter(errors.Errorf(`failed to set custom headers from %s environment variable: missing "=" in key=value pair: '%s'`, envOverrideHTTPHeaders, kv)) + } + + env[http.CanonicalHeaderKey(k)] = v + } + + if len(env) == 0 { + // We should probably not hit this case, as we don't skip values + // (only return errors), but we don't want to discard existing + // headers with an empty set. + return nil + } + + // TODO(thaJeztah): add a client.WithExtraHTTPHeaders() function to allow these headers to be _added_ to existing ones, instead of _replacing_ + // see https://github.com/docker/cli/pull/5098#issuecomment-2147403871 (when updating, also update the WARNING in the function and env-var GoDoc) + return client.WithHTTPHeaders(env)(apiClient) + } +} diff --git a/vendor/github.com/docker/cli/cli/command/formatter/container.go b/vendor/github.com/docker/cli/cli/command/formatter/container.go index aeaa03a6d3b1..63c1a73b19da 100644 --- a/vendor/github.com/docker/cli/cli/command/formatter/container.go +++ b/vendor/github.com/docker/cli/cli/command/formatter/container.go @@ -5,6 +5,7 @@ package formatter import ( "fmt" + "net" "sort" "strconv" "strings" @@ -331,7 +332,8 @@ func DisplayablePorts(ports []types.Port) string { portKey := port.Type if port.IP != "" { if port.PublicPort != current { - hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + hAddrPort := net.JoinHostPort(port.IP, strconv.Itoa(int(port.PublicPort))) + hostMappings = append(hostMappings, fmt.Sprintf("%s->%d/%s", hAddrPort, port.PrivatePort, port.Type)) continue } portKey = port.IP + "/" + port.Type diff --git a/vendor/github.com/docker/cli/cli/command/registry.go b/vendor/github.com/docker/cli/cli/command/registry.go index ba97861a6b49..dab062bbe4ba 100644 --- a/vendor/github.com/docker/cli/cli/command/registry.go +++ b/vendor/github.com/docker/cli/cli/command/registry.go @@ -1,10 +1,8 @@ package command import ( - "bufio" "context" "fmt" - "io" "os" "runtime" "strings" @@ -18,7 +16,6 @@ import ( "github.com/docker/docker/api/types" registrytypes "github.com/docker/docker/api/types/registry" "github.com/docker/docker/registry" - "github.com/moby/term" "github.com/pkg/errors" ) @@ -44,7 +41,7 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf default: } - err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry) + authConfig, err = PromptUserForCredentials(ctx, cli, "", "", authConfig.Username, indexServer) if err != nil { return "", err } @@ -89,8 +86,32 @@ func GetDefaultAuthConfig(cfg *configfile.ConfigFile, checkCredStore bool, serve return registrytypes.AuthConfig(authconfig), nil } -// ConfigureAuth handles prompting of user's username and password if needed -func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes.AuthConfig, isDefaultRegistry bool) error { +// ConfigureAuth handles prompting of user's username and password if needed. +// Deprecated: use PromptUserForCredentials instead. 
+func ConfigureAuth(ctx context.Context, cli Cli, flUser, flPassword string, authConfig *registrytypes.AuthConfig, _ bool) error { + defaultUsername := authConfig.Username + serverAddress := authConfig.ServerAddress + + newAuthConfig, err := PromptUserForCredentials(ctx, cli, flUser, flPassword, defaultUsername, serverAddress) + if err != nil { + return err + } + + authConfig.Username = newAuthConfig.Username + authConfig.Password = newAuthConfig.Password + return nil +} + +// PromptUserForCredentials handles the CLI prompt for the user to input +// credentials. +// If argUser is not empty, then the user is only prompted for their password. +// If argPassword is not empty, then the user is only prompted for their username +// If neither argUser nor argPassword are empty, then the user is not prompted and +// an AuthConfig is returned with those values. +// If defaultUsername is not empty, the username prompt includes that username +// and the user can hit enter without inputting a username to use that default +// username. +func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword, defaultUsername, serverAddress string) (authConfig registrytypes.AuthConfig, err error) { // On Windows, force the use of the regular OS stdin stream. // // See: @@ -110,13 +131,14 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes // Linux will hit this if you attempt `cat | docker login`, and Windows // will hit this if you attempt docker login from mintty where stdin // is a pipe, not a character based console. - if flPassword == "" && !cli.In().IsTerminal() { - return errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") + if argPassword == "" && !cli.In().IsTerminal() { + return authConfig, errors.Errorf("Error: Cannot perform an interactive login from a non TTY device") } - authconfig.Username = strings.TrimSpace(authconfig.Username) + isDefaultRegistry := serverAddress == registry.IndexServer + defaultUsername = strings.TrimSpace(defaultUsername) - if flUser = strings.TrimSpace(flUser); flUser == "" { + if argUser = strings.TrimSpace(argUser); argUser == "" { if isDefaultRegistry { // if this is a default registry (docker hub), then display the following message. fmt.Fprintln(cli.Out(), "Log in with your Docker ID or email address to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com/ to create one.") @@ -125,62 +147,45 @@ func ConfigureAuth(cli Cli, flUser, flPassword string, authconfig *registrytypes fmt.Fprintln(cli.Out()) } } - promptWithDefault(cli.Out(), "Username", authconfig.Username) - var err error - flUser, err = readInput(cli.In()) + + var prompt string + if defaultUsername == "" { + prompt = "Username: " + } else { + prompt = fmt.Sprintf("Username (%s): ", defaultUsername) + } + argUser, err = PromptForInput(ctx, cli.In(), cli.Out(), prompt) if err != nil { - return err + return authConfig, err } - if flUser == "" { - flUser = authconfig.Username + if argUser == "" { + argUser = defaultUsername } } - if flUser == "" { - return errors.Errorf("Error: Non-null Username Required") + if argUser == "" { + return authConfig, errors.Errorf("Error: Non-null Username Required") } - if flPassword == "" { - oldState, err := term.SaveState(cli.In().FD()) + if argPassword == "" { + restoreInput, err := DisableInputEcho(cli.In()) if err != nil { - return err + return authConfig, err } - fmt.Fprintf(cli.Out(), "Password: ") - _ = term.DisableEcho(cli.In().FD(), oldState) - defer func() { - _ = term.RestoreTerminal(cli.In().FD(), oldState) - }() - flPassword, err = readInput(cli.In()) + defer restoreInput() + + argPassword, err = PromptForInput(ctx, cli.In(), cli.Out(), "Password: ") if err != nil { - return err + return authConfig, err } fmt.Fprint(cli.Out(), "\n") - if flPassword == "" { - return errors.Errorf("Error: Password Required") + if argPassword == "" { + return authConfig, errors.Errorf("Error: Password Required") } } - authconfig.Username = flUser - authconfig.Password = flPassword - - return nil -} - -// readInput reads, and returns user input from in. It tries to return a -// single line, not including the end-of-line bytes, and trims leading -// and trailing whitespace. -func readInput(in io.Reader) (string, error) { - line, _, err := bufio.NewReader(in).ReadLine() - if err != nil { - return "", errors.Wrap(err, "error while reading input") - } - return strings.TrimSpace(string(line)), nil -} - -func promptWithDefault(out io.Writer, prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(out, "%s: ", prompt) - } else { - fmt.Fprintf(out, "%s (%s): ", prompt, configDefault) - } + authConfig.Username = argUser + authConfig.Password = argPassword + authConfig.ServerAddress = serverAddress + return authConfig, nil } // RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete diff --git a/vendor/github.com/docker/cli/cli/command/utils.go b/vendor/github.com/docker/cli/cli/command/utils.go index 48d2c4250ace..1cf4d199de02 100644 --- a/vendor/github.com/docker/cli/cli/command/utils.go +++ b/vendor/github.com/docker/cli/cli/command/utils.go @@ -19,6 +19,7 @@ import ( "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" "github.com/moby/sys/sequential" + "github.com/moby/term" "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -76,6 +77,48 @@ func PrettyPrint(i any) string { var ErrPromptTerminated = errdefs.Cancelled(errors.New("prompt terminated")) +// DisableInputEcho disables input echo on the provided streams.In. +// This is useful when the user provides sensitive information like passwords. +// The function returns a restore function that should be called to restore the +// terminal state. 
+func DisableInputEcho(ins *streams.In) (restore func() error, err error) { + oldState, err := term.SaveState(ins.FD()) + if err != nil { + return nil, err + } + restore = func() error { + return term.RestoreTerminal(ins.FD(), oldState) + } + return restore, term.DisableEcho(ins.FD(), oldState) +} + +// PromptForInput requests input from the user. +// +// If the user terminates the CLI with SIGINT or SIGTERM while the prompt is +// active, the prompt will return an empty string ("") with an ErrPromptTerminated error. +// When the prompt returns an error, the caller should propagate the error up +// the stack and close the io.Reader used for the prompt which will prevent the +// background goroutine from blocking indefinitely. +func PromptForInput(ctx context.Context, in io.Reader, out io.Writer, message string) (string, error) { + _, _ = fmt.Fprint(out, message) + + result := make(chan string) + go func() { + scanner := bufio.NewScanner(in) + if scanner.Scan() { + result <- strings.TrimSpace(scanner.Text()) + } + }() + + select { + case <-ctx.Done(): + _, _ = fmt.Fprintln(out, "") + return "", ErrPromptTerminated + case r := <-result: + return r, nil + } +} + // PromptForConfirmation requests and checks confirmation from the user. // This will display the provided message followed by ' [y/N] '. If the user // input 'y' or 'Y' it returns true otherwise false. If no message is provided, @@ -179,7 +222,7 @@ func ValidateOutputPath(path string) error { } if err := ValidateOutputPathFileMode(fileInfo.Mode()); err != nil { - return errors.Wrapf(err, fmt.Sprintf("invalid output path: %q must be a directory or a regular file", path)) + return errors.Wrapf(err, "invalid output path: %q must be a directory or a regular file", path) } } return nil diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go index ba9bc9d1d0db..ae9dcb3370c7 100644 --- a/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -303,6 +303,7 @@ func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, for registryHostname := range configFile.CredentialHelpers { newAuth, err := configFile.GetAuthConfig(registryHostname) if err != nil { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). logrus.WithError(err).Warnf("Failed to get credentials for registry: %s", registryHostname) continue } diff --git a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go index 1797abaed4c7..ab83ee2920a1 100644 --- a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go +++ b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go @@ -52,6 +52,7 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper args = append(args, "--host", "unix://"+sp.Path) } sshFlags = addSSHTimeout(sshFlags) + sshFlags = disablePseudoTerminalAllocation(sshFlags) args = append(args, "system", "dial-stdio") return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args(args...)...)...) 
}, @@ -79,3 +80,14 @@ func addSSHTimeout(sshFlags []string) []string { } return sshFlags } + +// disablePseudoTerminalAllocation disables pseudo-terminal allocation to +// prevent SSH from executing as a login shell +func disablePseudoTerminalAllocation(sshFlags []string) []string { + for _, flag := range sshFlags { + if flag == "-T" { + return sshFlags + } + } + return append(sshFlags, "-T") +} diff --git a/vendor/github.com/docker/cli/cli/context/store/store.go b/vendor/github.com/docker/cli/cli/context/store/store.go index 44e9477fb014..066b5769d728 100644 --- a/vendor/github.com/docker/cli/cli/context/store/store.go +++ b/vendor/github.com/docker/cli/cli/context/store/store.go @@ -124,6 +124,9 @@ func (s *ContextStore) List() ([]Metadata, error) { // Names return Metadata names for a Lister func Names(s Lister) ([]string, error) { + if s == nil { + return nil, errors.New("nil lister") + } list, err := s.List() if err != nil { return nil, err diff --git a/vendor/github.com/docker/cli/cli/required.go b/vendor/github.com/docker/cli/cli/required.go index 454e24761359..bad16248565c 100644 --- a/vendor/github.com/docker/cli/cli/required.go +++ b/vendor/github.com/docker/cli/cli/required.go @@ -27,16 +27,16 @@ func NoArgs(cmd *cobra.Command, args []string) error { } // RequiresMinArgs returns an error if there is not at least min args -func RequiresMinArgs(min int) cobra.PositionalArgs { +func RequiresMinArgs(minArgs int) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { - if len(args) >= min { + if len(args) >= minArgs { return nil } return errors.Errorf( "%q requires at least %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", cmd.CommandPath(), - min, - pluralize("argument", min), + minArgs, + pluralize("argument", minArgs), cmd.CommandPath(), cmd.UseLine(), cmd.Short, @@ -45,16 +45,16 @@ func RequiresMinArgs(min int) cobra.PositionalArgs { } // RequiresMaxArgs returns an error if there is not at most max args -func RequiresMaxArgs(max int) cobra.PositionalArgs { +func RequiresMaxArgs(maxArgs int) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { - if len(args) <= max { + if len(args) <= maxArgs { return nil } return errors.Errorf( "%q requires at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", cmd.CommandPath(), - max, - pluralize("argument", max), + maxArgs, + pluralize("argument", maxArgs), cmd.CommandPath(), cmd.UseLine(), cmd.Short, @@ -63,17 +63,17 @@ func RequiresMaxArgs(max int) cobra.PositionalArgs { } // RequiresRangeArgs returns an error if there is not at least min args and at most max args -func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { +func RequiresRangeArgs(minArgs int, maxArgs int) cobra.PositionalArgs { return func(cmd *cobra.Command, args []string) error { - if len(args) >= min && len(args) <= max { + if len(args) >= minArgs && len(args) <= maxArgs { return nil } return errors.Errorf( "%q requires at least %d and at most %d %s.\nSee '%s --help'.\n\nUsage: %s\n\n%s", cmd.CommandPath(), - min, - max, - pluralize("argument", max), + minArgs, + maxArgs, + pluralize("argument", maxArgs), cmd.CommandPath(), cmd.UseLine(), cmd.Short, diff --git a/vendor/github.com/docker/cli/opts/port.go b/vendor/github.com/docker/cli/opts/port.go index 2f2aa329c8ee..099aae3534d5 100644 --- a/vendor/github.com/docker/cli/opts/port.go +++ b/vendor/github.com/docker/cli/opts/port.go @@ -149,6 +149,7 @@ func ConvertPortToPortConfig( for _, binding := range portBindings[port] { if p := net.ParseIP(binding.HostIP); p != nil && 
!p.IsUnspecified() { + // TODO(thaJeztah): use context-logger, so that this output can be suppressed (in tests). logrus.Warnf("ignoring IP-address (%s:%s) service will listen on '0.0.0.0'", net.JoinHostPort(binding.HostIP, binding.HostPort), port) } diff --git a/vendor/github.com/docker/cli/opts/throttledevice.go b/vendor/github.com/docker/cli/opts/throttledevice.go index bdf454eb27da..8bf128804794 100644 --- a/vendor/github.com/docker/cli/opts/throttledevice.go +++ b/vendor/github.com/docker/cli/opts/throttledevice.go @@ -94,7 +94,7 @@ func (opt *ThrottledeviceOpt) String() string { // GetList returns a slice of pointers to ThrottleDevices. func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - out := make([]*blkiodev.ThrottleDevice, 0, len(opt.values)) + out := make([]*blkiodev.ThrottleDevice, len(opt.values)) copy(out, opt.values) return out } diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index f831735f840e..93d64cd8d5ff 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.46" + DefaultVersion = "1.47" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index cc754bf1fd12..86b01839b98a 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.46" +basePath: "/v1.47" info: title: "Docker Engine API" - version: "1.46" + version: "1.47" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.46) is used. - For example, calling `/info` is the same as calling `/v1.46/info`. Using the + If you omit the version-prefix, the current version of the API (v1.47) is used. + For example, calling `/info` is the same as calling `/v1.47/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -2265,6 +2265,19 @@ definitions: x-nullable: false type: "integer" example: 2 + Manifests: + description: | + Manifests is a list of manifests available in this image. + It provides a more detailed view of the platform-specific image manifests + or other image-attached data like build attestations. + + WARNING: This is experimental and may change at any time without any backward + compatibility. + type: "array" + x-nullable: false + x-omitempty: true + items: + $ref: "#/definitions/ImageManifestSummary" AuthConfig: type: "object" @@ -5318,7 +5331,7 @@ definitions: description: | The default (and highest) API version that is supported by the daemon type: "string" - example: "1.46" + example: "1.47" MinAPIVersion: description: | The minimum API version that is supported by the daemon @@ -5334,7 +5347,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. 
type: "string" - example: "go1.21.11" + example: "go1.21.13" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -5830,13 +5843,13 @@ definitions: - "/var/run/cdi" Containerd: $ref: "#/definitions/ContainerdInfo" - x-nullable: true ContainerdInfo: description: | Information for connecting to the containerd instance that is used by the daemon. This is included for debugging purposes only. type: "object" + x-nullable: true properties: Address: description: "The address of the containerd socket." @@ -6644,6 +6657,120 @@ definitions: additionalProperties: type: "string" + ImageManifestSummary: + x-go-name: "ManifestSummary" + description: | + ImageManifestSummary represents a summary of an image manifest. + type: "object" + required: ["ID", "Descriptor", "Available", "Size", "Kind"] + properties: + ID: + description: | + ID is the content-addressable ID of an image and is the same as the + digest of the image manifest. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Available: + description: Indicates whether all the child content (image config, layers) is fully available locally. + type: "boolean" + example: true + Size: + type: "object" + x-nullable: false + required: ["Content", "Total"] + properties: + Total: + type: "integer" + format: "int64" + example: 8213251 + description: | + Total is the total size (in bytes) of all the locally present + data (both distributable and non-distributable) that's related to + this manifest and its children. + This equal to the sum of [Content] size AND all the sizes in the + [Size] struct present in the Kind-specific data struct. + For example, for an image kind (Kind == "image") + this would include the size of the image content and unpacked + image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Content: + description: | + Content is the size (in bytes) of all the locally present + content in the content store (e.g. image config, layers) + referenced by this manifest and its children. + This only includes blobs in the content store. + type: "integer" + format: "int64" + example: 3987495 + Kind: + type: "string" + example: "image" + enum: + - "image" + - "attestation" + - "unknown" + description: | + The kind of the manifest. + + kind | description + -------------|----------------------------------------------------------- + image | Image manifest that can be used to start a container. + attestation | Attestation manifest produced by the Buildkit builder for a specific image manifest. + ImageData: + description: | + The image data for the image manifest. + This field is only populated when Kind is "image". + type: "object" + x-nullable: true + x-omitempty: true + required: ["Platform", "Containers", "Size", "UnpackedSize"] + properties: + Platform: + $ref: "#/definitions/OCIPlatform" + description: | + OCI platform of the image. This will be the platform specified in the + manifest descriptor from the index/manifest list. + If it's not available, it will be obtained from the image config. + Containers: + description: | + The IDs of the containers that are using this image. 
+ type: "array" + items: + type: "string" + example: ["ede54ee1fda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c7430", "abadbce344c096744d8d6071a90d474d28af8f1034b5ea9fb03c3f4bfc6d005e"] + Size: + type: "object" + x-nullable: false + required: ["Unpacked"] + properties: + Unpacked: + type: "integer" + format: "int64" + example: 3987495 + description: | + Unpacked is the size (in bytes) of the locally unpacked + (uncompressed) image content that's directly usable by the containers + running this image. + It's independent of the distributable content - e.g. + the image might still have an unpacked data that's still used by + some container even when the distributable/compressed content is + already gone. + AttestationData: + description: | + The image data for the attestation manifest. + This field is only populated when Kind is "attestation". + type: "object" + x-nullable: true + x-omitempty: true + required: ["For"] + properties: + For: + description: | + The digest of the image manifest that this attestation is for. + type: "string" + example: "sha256:95869fbcf224d947ace8d61d0e931d49e31bb7fc67fffbbe9c3198c33aa8e93f" + paths: /containers/json: get: @@ -8622,6 +8749,11 @@ paths: description: "Show digest information as a `RepoDigests` field on each image." type: "boolean" default: false + - name: "manifests" + in: "query" + description: "Include `Manifests` in the image summary." + type: "boolean" + default: false tags: ["Image"] /build: post: @@ -9563,7 +9695,7 @@ paths: Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - Images report these events: `create, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` diff --git a/vendor/github.com/docker/docker/api/types/image/manifest.go b/vendor/github.com/docker/docker/api/types/image/manifest.go new file mode 100644 index 000000000000..db8a00830e70 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/manifest.go @@ -0,0 +1,99 @@ +package image + +import ( + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type ManifestKind string + +const ( + ManifestKindImage ManifestKind = "image" + ManifestKindAttestation ManifestKind = "attestation" + ManifestKindUnknown ManifestKind = "unknown" +) + +type ManifestSummary struct { + // ID is the content-addressable ID of an image and is the same as the + // digest of the image manifest. + // + // Required: true + ID string `json:"ID"` + + // Descriptor is the OCI descriptor of the image. + // + // Required: true + Descriptor ocispec.Descriptor `json:"Descriptor"` + + // Indicates whether all the child content (image config, layers) is + // fully available locally + // + // Required: true + Available bool `json:"Available"` + + // Size is the size information of the content related to this manifest. + // Note: These sizes only take the locally available content into account. + // + // Required: true + Size struct { + // Content is the size (in bytes) of all the locally present + // content in the content store (e.g. 
image config, layers) + // referenced by this manifest and its children. + // This only includes blobs in the content store. + Content int64 `json:"Content"` + + // Total is the total size (in bytes) of all the locally present + // data (both distributable and non-distributable) that's related to + // this manifest and its children. + // This equal to the sum of [Content] size AND all the sizes in the + // [Size] struct present in the Kind-specific data struct. + // For example, for an image kind (Kind == ManifestKindImage), + // this would include the size of the image content and unpacked + // image snapshots ([Size.Content] + [ImageData.Size.Unpacked]). + Total int64 `json:"Total"` + } `json:"Size"` + + // Kind is the kind of the image manifest. + // + // Required: true + Kind ManifestKind `json:"Kind"` + + // Fields below are specific to the kind of the image manifest. + + // Present only if Kind == ManifestKindImage. + ImageData *ImageProperties `json:"ImageData,omitempty"` + + // Present only if Kind == ManifestKindAttestation. + AttestationData *AttestationProperties `json:"AttestationData,omitempty"` +} + +type ImageProperties struct { + // Platform is the OCI platform object describing the platform of the image. + // + // Required: true + Platform ocispec.Platform `json:"Platform"` + + Size struct { + // Unpacked is the size (in bytes) of the locally unpacked + // (uncompressed) image content that's directly usable by the containers + // running this image. + // It's independent of the distributable content - e.g. + // the image might still have an unpacked data that's still used by + // some container even when the distributable/compressed content is + // already gone. + // + // Required: true + Unpacked int64 `json:"Unpacked"` + } + + // Containers is an array containing the IDs of the containers that are + // using this image. + // + // Required: true + Containers []string `json:"Containers"` +} + +type AttestationProperties struct { + // For is the digest of the image manifest that this attestation is for. + For digest.Digest `json:"For"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index 8e32c9af8689..923ebe5a06a0 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -76,6 +76,9 @@ type ListOptions struct { // ContainerCount indicates whether container count should be computed. ContainerCount bool + + // Manifests indicates whether the image manifests should be returned. + Manifests bool } // RemoveOptions holds parameters to remove images. diff --git a/vendor/github.com/docker/docker/api/types/image/summary.go b/vendor/github.com/docker/docker/api/types/image/summary.go index f1e3e2ef018f..c7168fe62eab 100644 --- a/vendor/github.com/docker/docker/api/types/image/summary.go +++ b/vendor/github.com/docker/docker/api/types/image/summary.go @@ -1,10 +1,5 @@ package image -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Summary summary -// swagger:model Summary type Summary struct { // Number of containers using this image. Includes both stopped and running @@ -47,6 +42,14 @@ type Summary struct { // Required: true ParentID string `json:"ParentId"` + // Manifests is a list of image manifests available in this image. 
It + // provides a more detailed view of the platform-specific image manifests or + // other image-attached data like build attestations. + // + // WARNING: This is experimental and may change at any time without any backward + // compatibility. + Manifests []ManifestSummary `json:"Manifests,omitempty"` + // List of content-addressable digests of locally available image manifests // that the image is referenced from. Multiple manifests can refer to the // same image. diff --git a/vendor/github.com/docker/docker/api/types/registry/authconfig.go b/vendor/github.com/docker/docker/api/types/registry/authconfig.go index 97a924e37477..8e383f6e60cb 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authconfig.go +++ b/vendor/github.com/docker/docker/api/types/registry/authconfig.go @@ -34,10 +34,9 @@ type AuthConfig struct { } // EncodeAuthConfig serializes the auth configuration as a base64url encoded -// RFC4648, section 5) JSON string for sending through the X-Registry-Auth header. +// ([RFC4648, section 5]) JSON string for sending through the X-Registry-Auth header. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func EncodeAuthConfig(authConfig AuthConfig) (string, error) { buf, err := json.Marshal(authConfig) if err != nil { @@ -46,15 +45,14 @@ func EncodeAuthConfig(authConfig AuthConfig) (string, error) { return base64.URLEncoding.EncodeToString(buf), nil } -// DecodeAuthConfig decodes base64url encoded (RFC4648, section 5) JSON +// DecodeAuthConfig decodes base64url encoded ([RFC4648, section 5]) JSON // authentication information as sent through the X-Registry-Auth header. // -// This function always returns an AuthConfig, even if an error occurs. It is up +// This function always returns an [AuthConfig], even if an error occurs. It is up // to the caller to decide if authentication is required, and if the error can // be ignored. // -// For details on base64url encoding, see: -// - RFC4648, section 5: https://tools.ietf.org/html/rfc4648#section-5 +// [RFC4648, section 5]: https://tools.ietf.org/html/rfc4648#section-5 func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { if authEncoded == "" { return &AuthConfig{}, nil @@ -69,7 +67,7 @@ func DecodeAuthConfig(authEncoded string) (*AuthConfig, error) { // clients and API versions. Current clients and API versions expect authentication // to be provided through the X-Registry-Auth header. // -// Like DecodeAuthConfig, this function always returns an AuthConfig, even if an +// Like [DecodeAuthConfig], this function always returns an [AuthConfig], even if an // error occurs. It is up to the caller to decide if authentication is required, // and if the error can be ignored. func DecodeAuthConfigBody(rdr io.ReadCloser) (*AuthConfig, error) { diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 6791cf3284c4..c66a2afb8bbe 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -77,9 +77,6 @@ type Info struct { Containerd *ContainerdInfo `json:",omitempty"` - // Legacy API fields for older API versions. - legacyFields - // Warnings contains a slice of warnings that occurred while collecting // system information. 
These warnings are intended to be informational // messages for the user, and are not intended to be parsed / used for @@ -124,10 +121,6 @@ type ContainerdNamespaces struct { Plugins string } -type legacyFields struct { - ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. -} - // PluginsInfo is a temp struct holding Plugins name // registered with docker daemon. It is used by [Info] struct type PluginsInfo struct { diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a9cc1e21e5dd..bef679431dce 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -11,6 +11,11 @@ import ( ) // ImageList returns a list of images in the docker host. +// +// Experimental: Setting the [options.Manifest] will populate +// [image.Summary.Manifests] with information about image manifests. +// This is experimental and might change in the future without any backward +// compatibility. func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) { var images []image.Summary @@ -47,6 +52,9 @@ func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([] if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { query.Set("shared-size", "1") } + if options.Manifests && versions.GreaterThanOrEqualTo(cli.version, "1.47") { + query.Set("manifests", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go index 45ac2aa6ce48..b9d2a538ab01 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -6,8 +6,8 @@ import ( "path/filepath" "strings" - "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/system" + "github.com/moby/sys/userns" "github.com/pkg/errors" "golang.org/x/sys/unix" ) diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md index a3479dba57f9..f7ca0dd9c241 100644 --- a/vendor/github.com/gofrs/flock/README.md +++ b/vendor/github.com/gofrs/flock/README.md @@ -1,7 +1,7 @@ # flock [![Go Reference](https://pkg.go.dev/badge/github.com/gofrs/flock.svg)](https://pkg.go.dev/github.com/gofrs/flock) -[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/main/LICENSE) [![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) `flock` implements a thread-safe file lock. 
diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go index 4c9955159c93..ff942b228a66 100644 --- a/vendor/github.com/gofrs/flock/flock.go +++ b/vendor/github.com/gofrs/flock/flock.go @@ -163,9 +163,9 @@ func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Durati } } -func (f *Flock) setFh() error { +func (f *Flock) setFh(flag int) error { // open a new os.File instance - fh, err := os.OpenFile(f.path, f.flag, f.perm) + fh, err := os.OpenFile(f.path, flag, f.perm) if err != nil { return err } @@ -176,9 +176,11 @@ func (f *Flock) setFh() error { return nil } -// ensure the file handle is closed if no lock is held. -func (f *Flock) ensureFhState() { - if f.l || f.r || f.fh == nil { +// resetFh resets file handle: +// - tries to close the file (ignore errors) +// - sets fh to nil. +func (f *Flock) resetFh() { + if f.fh == nil { return } @@ -187,11 +189,18 @@ func (f *Flock) ensureFhState() { f.fh = nil } +// ensure the file handle is closed if no lock is held. +func (f *Flock) ensureFhState() { + if f.l || f.r || f.fh == nil { + return + } + + f.resetFh() +} + func (f *Flock) reset() { f.l = false f.r = false - _ = f.fh.Close() - - f.fh = nil + f.resetFh() } diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go index 1c9d079329b5..cf8919c7addb 100644 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -50,7 +50,7 @@ func (f *Flock) lock(locked *bool, flag int) error { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return err } @@ -147,7 +147,7 @@ func (f *Flock) try(locked *bool, flag int) (bool, error) { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return false, err } @@ -183,6 +183,7 @@ retry: // This comes from `util-linux/sys-utils/flock.c`: // > Since Linux 3.4 (commit 55725513) // > Probably NFSv4 where flock() is emulated by fcntl(). 
+// > https://github.com/util-linux/util-linux/blob/198e920aa24743ef6ace4e07cf6237de527f9261/sys-utils/flock.c#L374-L390 func (f *Flock) reopenFDOnError(err error) (bool, error) { if !errors.Is(err, unix.EIO) && !errors.Is(err, unix.EBADF) { return false, nil @@ -197,16 +198,13 @@ func (f *Flock) reopenFDOnError(err error) (bool, error) { return false, nil } - _ = f.fh.Close() - f.fh = nil + f.resetFh() // reopen in read-write mode and set the file handle - fh, err := os.OpenFile(f.path, f.flag, f.perm) + err = f.setFh(f.flag | os.O_RDWR) if err != nil { return false, err } - f.fh = fh - return true, nil } diff --git a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go index e88251611c94..ea007b47d9af 100644 --- a/vendor/github.com/gofrs/flock/flock_unix_fcntl.go +++ b/vendor/github.com/gofrs/flock/flock_unix_fcntl.go @@ -112,7 +112,7 @@ func (f *Flock) lock(locked *bool, flag lockType) error { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return err } @@ -359,7 +359,7 @@ func (f *Flock) try(locked *bool, flag lockType) (bool, error) { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return false, err } diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go index 4df90e29841a..dfd31e15f509 100644 --- a/vendor/github.com/gofrs/flock/flock_windows.go +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -56,7 +56,7 @@ func (f *Flock) lock(locked *bool, flag uint32) error { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return err } @@ -136,7 +136,7 @@ func (f *Flock) try(locked *bool, flag uint32) (bool, error) { } if f.fh == nil { - if err := f.setFh(); err != nil { + if err := f.setFh(f.flag); err != nil { return false, err } diff --git a/vendor/github.com/gorilla/mux/.editorconfig b/vendor/github.com/gorilla/mux/.editorconfig new file mode 100644 index 000000000000..c6b74c3e0d0c --- /dev/null +++ b/vendor/github.com/gorilla/mux/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/.gitignore b/vendor/github.com/gorilla/mux/.gitignore new file mode 100644 index 000000000000..84039fec6877 --- /dev/null +++ b/vendor/github.com/gorilla/mux/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee592..000000000000 --- a/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE index 6903df6386e9..bb9d80bc9b6b 100644 --- a/vendor/github.com/gorilla/mux/LICENSE +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. 
+Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gorilla/mux/Makefile b/vendor/github.com/gorilla/mux/Makefile new file mode 100644 index 000000000000..98f5ab75f9d7 --- /dev/null +++ b/vendor/github.com/gorilla/mux/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... \ No newline at end of file diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 35eea9f10635..382513d57c4c 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,12 +1,12 @@ # gorilla/mux -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) +![testing](https://github.com/gorilla/mux/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/mux/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/mux) +[![godoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) -https://www.gorillatoolkit.org/pkg/mux +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to their respective handler. @@ -247,32 +247,25 @@ type spaHandler struct { // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) + // Join internally call path.Clean to prevent directory traversal + path := filepath.Join(h.staticPath, r.URL.Path) - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html + // check whether a file exists or is a directory at the given path + fi, err := os.Stat(path) + if os.IsNotExist(err) || fi.IsDir() { + // file does not exist or path is a directory, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop + } + + if err != nil { + // if we got an error (that wasn't that the file doesn't exist) stating the + // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) - return + return } - // otherwise, use http.FileServer to serve the static dir + // otherwise, use http.FileServer to serve the static file http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } @@ -375,6 +368,19 @@ url, err := r.Get("article").URL("subdomain", "news", "id", "42") ``` +To find all the required variables for a given route when calling `URL()`, the method `GetVarNames()` is available: +```go +r := mux.NewRouter() +r.Host("{domain}"). + Path("/{group}/{item_id}"). + Queries("some_data1", "{some_data1}"). + Queries("some_data2", "{some_data2}"). + Name("article") + +// Will print [domain group item_id some_data1 some_data2] +fmt.Println(r.Get("article").GetVarNames()) + +``` ### Walking Routes The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, @@ -572,7 +578,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler r := mux.NewRouter() r.HandleFunc("/", handler) -amw := authenticationMiddleware{} +amw := authenticationMiddleware{tokenUsers: make(map[string]string)} amw.Populate() r.Use(amw.Middleware) @@ -758,7 +764,8 @@ func TestMetricsHandler(t *testing.T) { rr := httptest.NewRecorder() - // Need to create a router that we can pass the request through so that the vars will be added to the context + // To add the vars to the context, + // we need to create a router through which we can pass the request. router := mux.NewRouter() router.HandleFunc("/metrics/{type}", MetricsHandler) router.ServeHTTP(rr, req) diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index bd5a38b55d82..80601351fd07 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -10,18 +10,18 @@ http.ServeMux, mux.Router matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. 
- * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. + - Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + - URL hosts, paths and query values can have variables with an optional + regular expression. + - Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + - Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + - It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. Let's start registering a couple of URL paths and handlers: @@ -301,6 +301,5 @@ A more complex authentication middleware, which maps session token to users, cou r.Use(amw.Middleware) Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - */ package mux diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 782a34b22a60..1e089906fad5 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -31,24 +31,26 @@ func NewRouter() *Router { // It implements the http.Handler interface, so it can be registered to serve // requests: // -// var router = mux.NewRouter() +// var router = mux.NewRouter() // -// func main() { -// http.Handle("/", router) -// } +// func main() { +// http.Handle("/", router) +// } // // Or, for Google App Engine, register it in a init() function: // -// func init() { -// http.Handle("/", router) -// } +// func init() { +// http.Handle("/", router) +// } // // This will send all incoming requests to the router. type Router struct { // Configurable Handler to be used when no route matches. + // This can be used to render your own 404 Not Found errors. NotFoundHandler http.Handler // Configurable Handler to be used when the request method does not match the route. + // This can be used to render your own 405 Method Not Allowed errors. MethodNotAllowedHandler http.Handler // Routes to be matched, in order. 
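The mux.go hunk above adds doc comments noting that Router.NotFoundHandler and Router.MethodNotAllowedHandler can be used to render custom 404 and 405 responses. As an illustration only (not part of the vendored patch, and assuming nothing beyond the fields documented in that hunk), a caller might wire them up like this:

```go
// Illustrative sketch only; it relies solely on the Router fields documented
// in the hunk above (NotFoundHandler and MethodNotAllowedHandler are plain
// http.Handler values).
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodGet)

	// Custom 404: returned when no route matches the request path.
	r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "route not found", http.StatusNotFound)
	})

	// Custom 405: returned when the path matches but the HTTP method does not.
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})

	_ = http.ListenAndServe(":8080", r)
}
```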
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 0144842bb23e..5d05cfa0e9ea 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -22,10 +22,10 @@ type routeRegexpOptions struct { type regexpType int const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 + regexpTypePath regexpType = iota + regexpTypeHost + regexpTypePrefix + regexpTypeQuery ) // newRouteRegexp parses a route template and returns a routeRegexp, @@ -195,7 +195,7 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { // url builds a URL part using the given values. func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + urlValues := make([]interface{}, len(r.varsN)) for k, v := range r.varsN { value, ok := values[v] if !ok { diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 750afe570d05..e8f11df221f0 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -64,8 +64,18 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool { match.MatchErr = nil } - matchErr = nil + matchErr = nil // nolint:ineffassign return false + } else { + // Multiple routes may share the same path but use different HTTP methods. For instance: + // Route 1: POST "/users/{id}". + // Route 2: GET "/users/{id}", parameters: "id": "[0-9]+". + // + // The router must handle these cases correctly. For a GET request to "/users/abc" with "id" as "-2", + // The router should return a "Not Found" error as no route fully matches this request. + if match.MatchErr == ErrMethodMismatch { + match.MatchErr = nil + } } } @@ -230,9 +240,9 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // Headers adds a matcher for request header values. // It accepts a sequence of key/value pairs to be matched. For example: // -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. // If the value is an empty string, it will match any value if the key is set. @@ -255,9 +265,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { // HeadersRegexp accepts a sequence of key/value pairs, where the value has regex // support. For example: // -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") +// r := mux.NewRouter().NewRoute() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both the request header matches both regular expressions. // If the value is an empty string, it will match any value if the key is set. 
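The Route.Match change at the top of the route.go diff below clears a stale ErrMethodMismatch when another route matches the method but fails its path pattern. A hedged, self-contained sketch of the scenario described in the added comment (the handlers and the expected status are illustrative, not taken from the patch):

```go
// Sketch of the scenario from the Route.Match comment: two routes share a
// path template but differ in method and pattern. With the fix, a request
// that fully matches neither route is expected to yield 404, not 405.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// Route 1: POST /users/{id}
	r.HandleFunc("/users/{id}", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusCreated)
	}).Methods(http.MethodPost)
	// Route 2: GET /users/{id:[0-9]+}
	r.HandleFunc("/users/{id:[0-9]+}", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodGet)

	// GET /users/abc matches neither route completely: Route 1 rejects the
	// method, Route 2 rejects the non-numeric id.
	req := httptest.NewRequest(http.MethodGet, "/users/abc", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)

	fmt.Println(rec.Code) // expected: 404 (http.StatusNotFound) after the fix
}
```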
@@ -283,10 +293,10 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") +// r := mux.NewRouter().NewRoute() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -342,11 +352,11 @@ func (r *Route) Methods(methods ...string) *Route { // // For example: // -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) +// r := mux.NewRouter().NewRoute() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) // // Variable names must be unique in a given route. They can be retrieved // calling mux.Vars(request). @@ -377,8 +387,8 @@ func (r *Route) PathPrefix(tpl string) *Route { // It accepts a sequence of key/value pairs. Values may define variables. // For example: // -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// r := mux.NewRouter().NewRoute() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") // // The above route will only match if the URL contains the defined queries // values, e.g.: ?foo=bar&id=42. @@ -473,11 +483,11 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { // // It will test the inner routes only if the parent route matched. For example: // -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +// r := mux.NewRouter().NewRoute() +// s := r.Host("www.example.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) // // Here, the routes registered in the subrouter won't be tested if the host // doesn't match. @@ -497,36 +507,36 @@ func (r *Route) Subrouter() *Router { // It accepts a sequence of key/value pairs for the route variables. For // example, given this route: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") // // ...a URL for it can be built using: // -// url, err := r.Get("article").URL("category", "technology", "id", "42") +// url, err := r.Get("article").URL("category", "technology", "id", "42") // // ...which will return an url.URL with the following path: // -// "/articles/technology/42" +// "/articles/technology/42" // // This also works for host variables: // -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Host("{subdomain}.domain.com"). 
+// Name("article") // -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") // // The scheme of the resulting url will be the first argument that was passed to Schemes: // -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() +// // url.String() will be "https://example.com" +// r := mux.NewRouter().NewRoute() +// url, err := r.Host("example.com") +// .Schemes("https", "http").URL() // // All variables defined in the route are required, and their values must // conform to the corresponding patterns. @@ -718,6 +728,25 @@ func (r *Route) GetHostTemplate() (string, error) { return r.regexp.host.template, nil } +// GetVarNames returns the names of all variables added by regexp matchers +// These can be used to know which route variables should be passed into r.URL() +func (r *Route) GetVarNames() ([]string, error) { + if r.err != nil { + return nil, r.err + } + var varNames []string + if r.regexp.host != nil { + varNames = append(varNames, r.regexp.host.varsN...) + } + if r.regexp.path != nil { + varNames = append(varNames, r.regexp.path.varsN...) + } + for _, regx := range r.regexp.queries { + varNames = append(varNames, regx.varsN...) + } + return varNames, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go index 41af8b70d159..acc90314bb68 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -1520,6 +1520,7 @@ type BuildHistoryRecord struct { NumTotalSteps int32 `protobuf:"varint,16,opt,name=numTotalSteps,proto3" json:"numTotalSteps,omitempty"` NumCompletedSteps int32 `protobuf:"varint,17,opt,name=numCompletedSteps,proto3" json:"numCompletedSteps,omitempty"` ExternalError *Descriptor `protobuf:"bytes,18,opt,name=externalError,proto3" json:"externalError,omitempty"` + NumWarnings int32 `protobuf:"varint,19,opt,name=numWarnings,proto3" json:"numWarnings,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1684,6 +1685,13 @@ func (m *BuildHistoryRecord) GetExternalError() *Descriptor { return nil } +func (m *BuildHistoryRecord) GetNumWarnings() int32 { + if m != nil { + return m.NumWarnings + } + return 0 +} + type UpdateBuildHistoryRequest struct { Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"` @@ -2025,154 +2033,155 @@ func init() { func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } var fileDescriptor_0c5120591600887d = []byte{ - // 2340 bytes of a gzipped FileDescriptorProto + // 2354 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x19, 0x4d, 0x73, 0x1b, 0x59, 0x31, 0x23, 0xc9, 0xb2, 0xd4, 0x92, 0x1c, 0xf9, 0x25, 0x1b, 0x86, 0x21, 0x6b, 0x3b, 
0xb3, 0x09, 0xb8, 0x42, 0x32, 0xf2, 0x0a, 0x42, 0xb2, 0x0e, 0x84, 0x58, 0x96, 0xd8, 0x38, 0xc4, 0x15, 0xef, 0xb3, 0xb3, 0xa1, 0xb6, 0x6a, 0xa1, 0xc6, 0xd2, 0xb3, 0x32, 0xe5, 0xd1, 0xcc, 0xf0, 0xde, 0x93, - 0x37, 0xe2, 0xc4, 0x89, 0x2a, 0x2e, 0x14, 0x17, 0x8a, 0x0b, 0x77, 0x4e, 0x9c, 0x39, 0x71, 0xe0, - 0x40, 0x55, 0x8e, 0x9c, 0xf7, 0x10, 0xa8, 0xfc, 0x00, 0x8a, 0x23, 0x47, 0xea, 0x7d, 0x8c, 0x34, - 0x92, 0x46, 0xb6, 0x9c, 0xe4, 0xa4, 0xd7, 0xef, 0x75, 0xf7, 0x74, 0xf7, 0xeb, 0xee, 0xd7, 0xdd, - 0x82, 0x4a, 0x3b, 0x0c, 0x38, 0x0d, 0x7d, 0x27, 0xa2, 0x21, 0x0f, 0x51, 0xb5, 0x17, 0x1e, 0x0e, - 0x9c, 0xc3, 0xbe, 0xe7, 0x77, 0x8e, 0x3d, 0xee, 0x9c, 0x7c, 0x6c, 0xd5, 0xbb, 0x1e, 0x7f, 0xd1, - 0x3f, 0x74, 0xda, 0x61, 0xaf, 0xd6, 0x0d, 0xbb, 0x61, 0xad, 0x1b, 0x86, 0x5d, 0x9f, 0xb8, 0x91, - 0xc7, 0xf4, 0xb2, 0x46, 0xa3, 0x76, 0x8d, 0x71, 0x97, 0xf7, 0x99, 0xe2, 0x62, 0xdd, 0x9e, 0xa4, - 0x91, 0xdb, 0x87, 0xfd, 0x23, 0x09, 0x49, 0x40, 0xae, 0x34, 0x7a, 0x2d, 0x81, 0x2e, 0xbe, 0x5f, - 0x8b, 0xbf, 0x5f, 0x73, 0x23, 0xaf, 0xc6, 0x07, 0x11, 0x61, 0xb5, 0xaf, 0x42, 0x7a, 0x4c, 0xa8, - 0x26, 0xb8, 0x35, 0x93, 0x80, 0x85, 0xfe, 0x09, 0xa1, 0xb5, 0xe8, 0xb0, 0x16, 0x46, 0xb1, 0x34, - 0x77, 0x4e, 0xc1, 0xee, 0xd3, 0x36, 0x89, 0x42, 0xdf, 0x6b, 0x0f, 0x04, 0x8d, 0x5a, 0x69, 0xb2, - 0x55, 0xad, 0xdd, 0x50, 0x76, 0xee, 0xf5, 0x08, 0xe3, 0x6e, 0x2f, 0x52, 0x08, 0xf6, 0x6f, 0x0c, - 0x28, 0xef, 0xd1, 0x7e, 0x40, 0x30, 0xf9, 0x65, 0x9f, 0x30, 0x8e, 0xae, 0x40, 0xfe, 0xc8, 0xf3, - 0x39, 0xa1, 0xa6, 0xb1, 0x96, 0x5d, 0x2f, 0x62, 0x0d, 0xa1, 0x2a, 0x64, 0x5d, 0xdf, 0x37, 0x33, - 0x6b, 0xc6, 0x7a, 0x01, 0x8b, 0x25, 0x5a, 0x87, 0xf2, 0x31, 0x21, 0x51, 0xb3, 0x4f, 0x5d, 0xee, - 0x85, 0x81, 0x99, 0x5d, 0x33, 0xd6, 0xb3, 0x8d, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e, 0x3b, 0x41, - 0x36, 0x14, 0x05, 0xdc, 0x18, 0x70, 0xc2, 0xcc, 0x5c, 0x02, 0x6d, 0xb4, 0x6d, 0xdf, 0x84, 0x6a, - 0xd3, 0x63, 0xc7, 0xcf, 0x98, 0xdb, 0x3d, 0x4b, 0x16, 0xfb, 0x31, 0x2c, 0x27, 0x70, 0x59, 0x14, - 0x06, 0x8c, 0xa0, 0x3b, 0x90, 0xa7, 0xa4, 0x1d, 0xd2, 0x8e, 0x44, 0x2e, 0xd5, 0x3f, 0x74, 0x26, - 0xdd, 0xc0, 0xd1, 0x04, 0x02, 0x09, 0x6b, 0x64, 0xfb, 0x8f, 0x59, 0x28, 0x25, 0xf6, 0xd1, 0x12, - 0x64, 0x76, 0x9a, 0xa6, 0xb1, 0x66, 0xac, 0x17, 0x71, 0x66, 0xa7, 0x89, 0x4c, 0x58, 0xdc, 0xed, - 0x73, 0xf7, 0xd0, 0x27, 0x5a, 0xf7, 0x18, 0x44, 0x97, 0x61, 0x61, 0x27, 0x78, 0xc6, 0x88, 0x54, - 0xbc, 0x80, 0x15, 0x80, 0x10, 0xe4, 0xf6, 0xbd, 0x5f, 0x11, 0xa5, 0x26, 0x96, 0x6b, 0x64, 0x41, - 0x7e, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0x82, 0xe0, 0xdb, 0xc8, 0x98, 0x06, 0xd6, 0x3b, 0xa8, 0x01, - 0xc5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0xcc, 0xaf, 0x19, 0xeb, 0xa5, 0xba, 0xe5, 0xa8, - 0x5b, 0x73, 0xe2, 0x5b, 0x73, 0x0e, 0xe2, 0x5b, 0x6b, 0x14, 0x5e, 0xbd, 0x5e, 0xbd, 0xf0, 0xfb, - 0x7f, 0x09, 0xdb, 0x0d, 0xc9, 0xd0, 0x43, 0x80, 0x27, 0x2e, 0xe3, 0xcf, 0x98, 0x64, 0xb2, 0x78, - 0x26, 0x93, 0x9c, 0x64, 0x90, 0xa0, 0x41, 0x2b, 0x00, 0xd2, 0x08, 0xdb, 0x61, 0x3f, 0xe0, 0x66, - 0x41, 0xca, 0x9e, 0xd8, 0x41, 0x6b, 0x50, 0x6a, 0x12, 0xd6, 0xa6, 0x5e, 0x24, 0xaf, 0xba, 0x28, - 0xcd, 0x93, 0xdc, 0x12, 0x1c, 0x94, 0x05, 0x0f, 0x06, 0x11, 0x31, 0x41, 0x22, 0x24, 0x76, 0xc4, - 0x5d, 0xee, 0xbf, 0x70, 0x29, 0xe9, 0x98, 0x25, 0x69, 0x2e, 0x0d, 0x09, 0xfb, 0x2a, 0x4b, 0x30, - 0xb3, 0x2c, 0x2f, 0x39, 0x06, 0xed, 0x5f, 0x17, 0xa0, 0xbc, 0x2f, 0x42, 0x21, 0x76, 0x87, 0x2a, - 0x64, 0x31, 0x39, 0xd2, 0x77, 0x23, 0x96, 0xc8, 0x01, 0x68, 0x92, 0x23, 0x2f, 0xf0, 0xa4, 0x54, - 0x19, 0xa9, 0xf8, 0x92, 0x13, 0x1d, 0x3a, 0xa3, 0x5d, 0x9c, 0xc0, 0x40, 0x0e, 0xa0, 0xd6, 0xcb, - 0x28, 0xa4, 
0x9c, 0xd0, 0x26, 0x89, 0x28, 0x69, 0x0b, 0x03, 0xca, 0xfb, 0x2b, 0xe2, 0x94, 0x13, - 0xd4, 0x87, 0x6f, 0xc4, 0xbb, 0x5b, 0x9c, 0x53, 0x96, 0x20, 0xca, 0x49, 0x27, 0xbb, 0x3f, 0xed, - 0x64, 0x49, 0x91, 0x9d, 0x19, 0xd4, 0xad, 0x80, 0xd3, 0x01, 0x9e, 0xc5, 0x5b, 0xd8, 0x64, 0x9f, - 0x30, 0x26, 0x74, 0x92, 0x0e, 0x83, 0x63, 0x10, 0x59, 0x50, 0xf8, 0x09, 0x0d, 0x03, 0x4e, 0x82, - 0x8e, 0x74, 0x96, 0x22, 0x1e, 0xc2, 0xe8, 0x39, 0x54, 0xe2, 0xb5, 0x64, 0x68, 0x2e, 0x4a, 0x11, - 0x3f, 0x3e, 0x43, 0xc4, 0x31, 0x1a, 0x25, 0xd8, 0x38, 0x1f, 0xb4, 0x09, 0x0b, 0xdb, 0x6e, 0xfb, - 0x05, 0x91, 0x7e, 0x51, 0xaa, 0xaf, 0x4c, 0x33, 0x94, 0xc7, 0x4f, 0xa5, 0x23, 0x30, 0x19, 0xda, - 0x17, 0xb0, 0x22, 0x41, 0x3f, 0x87, 0x72, 0x2b, 0xe0, 0x1e, 0xf7, 0x49, 0x4f, 0xde, 0x71, 0x51, - 0xdc, 0x71, 0x63, 0xf3, 0xeb, 0xd7, 0xab, 0x3f, 0x98, 0x99, 0xd1, 0xfa, 0xdc, 0xf3, 0x6b, 0x24, - 0x41, 0xe5, 0x24, 0x58, 0xe0, 0x31, 0x7e, 0xe8, 0x0b, 0x58, 0x8a, 0x85, 0xdd, 0x09, 0xa2, 0x3e, - 0x67, 0x26, 0x48, 0xad, 0xeb, 0x73, 0x6a, 0xad, 0x88, 0x94, 0xda, 0x13, 0x9c, 0x84, 0xb1, 0x77, - 0x02, 0x4e, 0x68, 0xe0, 0xfa, 0xda, 0x69, 0x87, 0x30, 0xda, 0x11, 0xbe, 0x29, 0x12, 0xef, 0x9e, - 0x4c, 0xb7, 0x66, 0x59, 0x9a, 0xe6, 0xc6, 0xf4, 0x57, 0x93, 0xe9, 0xd9, 0x51, 0xc8, 0x78, 0x8c, - 0x14, 0xdd, 0x83, 0x62, 0xec, 0x08, 0xcc, 0xac, 0x48, 0xe9, 0xad, 0x69, 0x3e, 0x31, 0x0a, 0x1e, - 0x21, 0x5b, 0x8f, 0xe1, 0xea, 0x69, 0x0e, 0x26, 0x02, 0xe6, 0x98, 0x0c, 0xe2, 0x80, 0x39, 0x26, - 0x03, 0x91, 0xb3, 0x4e, 0x5c, 0xbf, 0xaf, 0x72, 0x59, 0x11, 0x2b, 0x60, 0x33, 0x73, 0xcf, 0xb0, - 0x1e, 0x02, 0x9a, 0xf6, 0x84, 0x73, 0x71, 0xf8, 0x0c, 0x2e, 0xa5, 0x58, 0x35, 0x85, 0xc5, 0xf5, - 0x24, 0x8b, 0xe9, 0x80, 0x1d, 0xb1, 0xb4, 0xff, 0x92, 0x85, 0x72, 0xd2, 0xb7, 0xd0, 0x06, 0x5c, - 0x52, 0x1a, 0x63, 0x72, 0x94, 0x08, 0x46, 0xc5, 0x3c, 0xed, 0x08, 0xd5, 0xe1, 0xf2, 0x4e, 0x4f, - 0x6f, 0x27, 0xe3, 0x37, 0x23, 0x93, 0x4d, 0xea, 0x19, 0x0a, 0xe1, 0x03, 0xc5, 0x6a, 0x32, 0xe8, - 0xb3, 0xf2, 0x76, 0x3e, 0x39, 0x3d, 0x00, 0x9c, 0x54, 0x5a, 0xe5, 0x62, 0xe9, 0x7c, 0xd1, 0x8f, - 0x60, 0x51, 0x1d, 0x30, 0x9d, 0x57, 0x3e, 0x3a, 0xfd, 0x13, 0x8a, 0x59, 0x4c, 0x23, 0xc8, 0x95, - 0x1e, 0xcc, 0x5c, 0x38, 0x07, 0xb9, 0xa6, 0xb1, 0x1e, 0x81, 0x35, 0x5b, 0xe4, 0xf3, 0xb8, 0x80, - 0xfd, 0x67, 0x03, 0x96, 0xa7, 0x3e, 0x24, 0x9e, 0x44, 0xf9, 0x28, 0x28, 0x16, 0x72, 0x8d, 0x9a, - 0xb0, 0xa0, 0x92, 0x54, 0x46, 0x0a, 0xec, 0xcc, 0x21, 0xb0, 0x93, 0xc8, 0x50, 0x8a, 0xd8, 0xba, - 0x07, 0xf0, 0x76, 0xce, 0x6a, 0xff, 0xd5, 0x80, 0x8a, 0x4e, 0x08, 0xba, 0x7e, 0x70, 0xa1, 0x3a, - 0x8c, 0x31, 0xbd, 0xa7, 0x2b, 0x89, 0x3b, 0x33, 0x73, 0x89, 0x42, 0x73, 0x26, 0xe9, 0x94, 0x8c, - 0x53, 0xec, 0xac, 0xed, 0xd8, 0xaf, 0x26, 0x50, 0xcf, 0x25, 0xf9, 0x35, 0xa8, 0xec, 0xcb, 0x3a, - 0x75, 0xe6, 0xb3, 0x68, 0xff, 0xd7, 0x80, 0xa5, 0x18, 0x47, 0x6b, 0xf7, 0x7d, 0x28, 0x9c, 0x10, - 0xca, 0xc9, 0x4b, 0xc2, 0xb4, 0x56, 0xe6, 0xb4, 0x56, 0x9f, 0x4b, 0x0c, 0x3c, 0xc4, 0x44, 0x9b, - 0x50, 0x50, 0x35, 0x31, 0x89, 0x2f, 0x6a, 0x65, 0x16, 0x95, 0xfe, 0xde, 0x10, 0x1f, 0xd5, 0x20, - 0xe7, 0x87, 0x5d, 0xa6, 0x63, 0xe6, 0x5b, 0xb3, 0xe8, 0x9e, 0x84, 0x5d, 0x2c, 0x11, 0xd1, 0x7d, - 0x28, 0x7c, 0xe5, 0xd2, 0xc0, 0x0b, 0xba, 0x71, 0x14, 0xac, 0xce, 0x22, 0x7a, 0xae, 0xf0, 0xf0, - 0x90, 0x40, 0x94, 0x71, 0x79, 0x75, 0x86, 0x1e, 0x43, 0xbe, 0xe3, 0x75, 0x09, 0xe3, 0xca, 0x24, - 0x8d, 0xba, 0x78, 0x8f, 0xbe, 0x7e, 0xbd, 0x7a, 0x33, 0xf1, 0xe0, 0x84, 0x11, 0x09, 0x44, 0xd3, - 0xe0, 0x7a, 0x01, 0xa1, 0xa2, 0x07, 0xb8, 0xad, 0x48, 0x9c, 0xa6, 0xfc, 0xc1, 0x9a, 0x83, 0xe0, - 0xe5, 0xa9, 0x67, 0x45, 0xe6, 0x8b, 
0xb7, 0xe3, 0xa5, 0x38, 0x88, 0x30, 0x08, 0xdc, 0x1e, 0xd1, - 0xe5, 0x86, 0x5c, 0x8b, 0xaa, 0xa8, 0x2d, 0xfc, 0xbc, 0x23, 0xeb, 0xc5, 0x02, 0xd6, 0x10, 0xda, - 0x84, 0x45, 0xc6, 0x5d, 0x2a, 0x72, 0xce, 0xc2, 0x9c, 0xe5, 0x5c, 0x4c, 0x80, 0x1e, 0x40, 0xb1, - 0x1d, 0xf6, 0x22, 0x9f, 0x08, 0xea, 0xfc, 0x9c, 0xd4, 0x23, 0x12, 0xe1, 0x7a, 0x84, 0xd2, 0x90, - 0xca, 0x42, 0xb2, 0x88, 0x15, 0x80, 0xee, 0x42, 0x25, 0xa2, 0x61, 0x97, 0x12, 0xc6, 0x3e, 0xa5, - 0x61, 0x3f, 0xd2, 0xc5, 0xc0, 0xb2, 0x48, 0xde, 0x7b, 0xc9, 0x03, 0x3c, 0x8e, 0x67, 0xff, 0x27, - 0x03, 0xe5, 0xa4, 0x8b, 0x4c, 0x55, 0xd8, 0x8f, 0x21, 0xaf, 0x1c, 0x4e, 0xf9, 0xfa, 0xdb, 0xd9, - 0x58, 0x71, 0x48, 0xb5, 0xb1, 0x09, 0x8b, 0xed, 0x3e, 0x95, 0xe5, 0xb7, 0x2a, 0xca, 0x63, 0x50, - 0x68, 0xca, 0x43, 0xee, 0xfa, 0xd2, 0xc6, 0x59, 0xac, 0x00, 0x51, 0x91, 0x0f, 0xbb, 0xa4, 0xf3, - 0x55, 0xe4, 0x43, 0xb2, 0xe4, 0xfd, 0x2d, 0xbe, 0xd3, 0xfd, 0x15, 0xce, 0x7d, 0x7f, 0xf6, 0x3f, - 0x0c, 0x28, 0x0e, 0x63, 0x2b, 0x61, 0x5d, 0xe3, 0x9d, 0xad, 0x3b, 0x66, 0x99, 0xcc, 0xdb, 0x59, - 0xe6, 0x0a, 0xe4, 0x19, 0xa7, 0xc4, 0xed, 0xa9, 0x7e, 0x11, 0x6b, 0x48, 0x64, 0xb1, 0x1e, 0xeb, - 0xca, 0x1b, 0x2a, 0x63, 0xb1, 0xb4, 0xff, 0x67, 0x40, 0x65, 0x2c, 0xdc, 0xdf, 0xab, 0x2e, 0x97, - 0x61, 0xc1, 0x27, 0x27, 0x44, 0x75, 0xb4, 0x59, 0xac, 0x00, 0xb1, 0xcb, 0x5e, 0x84, 0x94, 0x4b, - 0xe1, 0xca, 0x58, 0x01, 0x42, 0xe6, 0x0e, 0xe1, 0xae, 0xe7, 0xcb, 0xbc, 0x54, 0xc6, 0x1a, 0x12, - 0x32, 0xf7, 0xa9, 0xaf, 0x6b, 0x74, 0xb1, 0x44, 0x36, 0xe4, 0xbc, 0xe0, 0x28, 0xd4, 0x6e, 0x23, - 0x2b, 0x1b, 0x55, 0xeb, 0xed, 0x04, 0x47, 0x21, 0x96, 0x67, 0xe8, 0x1a, 0xe4, 0xa9, 0x1b, 0x74, - 0x49, 0x5c, 0xa0, 0x17, 0x05, 0x16, 0x16, 0x3b, 0x58, 0x1f, 0xd8, 0x36, 0x94, 0x65, 0x57, 0xbc, - 0x4b, 0x98, 0xe8, 0xc1, 0x84, 0x5b, 0x77, 0x5c, 0xee, 0x4a, 0xb5, 0xcb, 0x58, 0xae, 0xed, 0x5b, - 0x80, 0x9e, 0x78, 0x8c, 0x3f, 0x97, 0x33, 0x05, 0x76, 0x56, 0xcb, 0xbc, 0x0f, 0x97, 0xc6, 0xb0, - 0xf5, 0xb3, 0xf0, 0xc3, 0x89, 0xa6, 0xf9, 0xfa, 0x74, 0xc6, 0x95, 0xa3, 0x0b, 0x47, 0x11, 0x4e, - 0xf4, 0xce, 0x15, 0x28, 0x49, 0xbd, 0xd4, 0xb7, 0x6d, 0x17, 0xca, 0x0a, 0xd4, 0xcc, 0x3f, 0x83, - 0x8b, 0x31, 0xa3, 0xcf, 0x09, 0x95, 0xed, 0x8c, 0x21, 0xed, 0xf2, 0x9d, 0x59, 0x5f, 0x69, 0x8c, - 0xa3, 0xe3, 0x49, 0x7a, 0x9b, 0xc0, 0x25, 0x89, 0xf3, 0xc8, 0x63, 0x3c, 0xa4, 0x83, 0x58, 0xeb, - 0x15, 0x80, 0xad, 0x36, 0xf7, 0x4e, 0xc8, 0xd3, 0xc0, 0x57, 0xcf, 0x68, 0x01, 0x27, 0x76, 0xe2, - 0x27, 0x32, 0x33, 0xea, 0x1c, 0xaf, 0x42, 0xb1, 0xe5, 0x52, 0x7f, 0xd0, 0x7a, 0xe9, 0x71, 0xdd, - 0xc0, 0x8f, 0x36, 0xec, 0xdf, 0x19, 0xb0, 0x9c, 0xfc, 0x4e, 0xeb, 0x44, 0xa4, 0x8b, 0xfb, 0x90, - 0xe3, 0x71, 0x1d, 0xb3, 0x94, 0xa6, 0xc4, 0x14, 0x89, 0x28, 0x75, 0xb0, 0x24, 0x4a, 0x58, 0x5a, - 0x05, 0xce, 0xf5, 0xd3, 0xc9, 0x27, 0x2c, 0xfd, 0xb7, 0x22, 0xa0, 0xe9, 0xe3, 0x94, 0x8e, 0x38, - 0xd9, 0x20, 0x66, 0x26, 0x1a, 0xc4, 0x2f, 0x27, 0x1b, 0x44, 0xf5, 0x34, 0xdf, 0x9d, 0x47, 0x92, - 0x39, 0xda, 0xc4, 0xb1, 0x3e, 0x26, 0x77, 0x8e, 0x3e, 0x06, 0xad, 0xc7, 0x2f, 0x8e, 0x7a, 0xeb, - 0x50, 0x9c, 0x53, 0x68, 0xd4, 0x76, 0x74, 0x5d, 0xa1, 0x5f, 0xa1, 0x07, 0xe7, 0x9b, 0x96, 0xe4, - 0x26, 0x27, 0x25, 0x0d, 0x28, 0x6d, 0xc7, 0x89, 0xf2, 0x1c, 0xa3, 0x92, 0x24, 0x11, 0xda, 0xd0, - 0x85, 0x8d, 0x4a, 0xcd, 0x57, 0xa7, 0x55, 0x8c, 0xc7, 0x22, 0x21, 0xd5, 0x95, 0xcd, 0x51, 0x4a, - 0x69, 0x59, 0x94, 0x06, 0xda, 0x9c, 0xcb, 0xf6, 0x73, 0xd6, 0x97, 0xe8, 0x13, 0xc8, 0x63, 0xc2, - 0xfa, 0x3e, 0x97, 0xf3, 0x97, 0x52, 0xfd, 0xda, 0x0c, 0xee, 0x0a, 0x49, 0xc6, 0xaa, 0x26, 0x40, - 0x3f, 0x85, 0x45, 0xb5, 0x62, 0x66, 0x69, 0xd6, 0xd8, 0x20, 
0x45, 0x32, 0x4d, 0xa3, 0x1b, 0x0a, - 0x0d, 0x89, 0x70, 0xfc, 0x94, 0x04, 0x44, 0xcf, 0x05, 0x45, 0x6b, 0xbc, 0x80, 0x13, 0x3b, 0xa8, - 0x0e, 0x0b, 0x9c, 0xba, 0x6d, 0x62, 0x56, 0xe6, 0x30, 0xa1, 0x42, 0x15, 0x89, 0x2d, 0xf2, 0x82, - 0x80, 0x74, 0xcc, 0x25, 0x55, 0x29, 0x29, 0x08, 0x7d, 0x1b, 0x96, 0x82, 0x7e, 0x4f, 0x36, 0x0b, - 0x9d, 0x7d, 0x4e, 0x22, 0x66, 0x5e, 0x94, 0xdf, 0x9b, 0xd8, 0x45, 0xd7, 0xa1, 0x12, 0xf4, 0x7b, - 0x07, 0xe2, 0x85, 0x57, 0x68, 0x55, 0x89, 0x36, 0xbe, 0x89, 0x6e, 0xc1, 0xb2, 0xa0, 0x8b, 0x6f, - 0x5b, 0x61, 0x2e, 0x4b, 0xcc, 0xe9, 0x03, 0xd4, 0x80, 0x0a, 0x79, 0xa9, 0x06, 0x02, 0x2d, 0xe9, - 0xbf, 0x68, 0x0e, 0x7d, 0xc6, 0x49, 0xde, 0x43, 0xdf, 0xfd, 0x3e, 0xba, 0x0a, 0xeb, 0x4b, 0x28, - 0x27, 0xef, 0x32, 0x85, 0xf6, 0xee, 0x78, 0xd7, 0x3e, 0x87, 0x6f, 0x25, 0x9a, 0x96, 0x01, 0x7c, - 0xf3, 0x59, 0xd4, 0x71, 0x39, 0x49, 0xcb, 0xde, 0xd3, 0x59, 0xec, 0x0a, 0xe4, 0xf7, 0xd4, 0x65, - 0xab, 0x99, 0xab, 0x86, 0xc4, 0x7e, 0x93, 0x88, 0x0b, 0xd0, 0x29, 0x5b, 0x43, 0x32, 0xeb, 0x79, - 0x81, 0xeb, 0xc7, 0x83, 0xd7, 0x02, 0x1e, 0xc2, 0xf6, 0x55, 0xb0, 0xd2, 0x3e, 0xad, 0x0c, 0x65, - 0xff, 0x29, 0x03, 0x30, 0xba, 0x1c, 0xf4, 0x21, 0x40, 0x8f, 0x74, 0x3c, 0xf7, 0x17, 0x7c, 0xd4, - 0xb0, 0x16, 0xe5, 0x8e, 0xec, 0x5a, 0x47, 0xad, 0x45, 0xe6, 0x9d, 0x5b, 0x0b, 0x04, 0x39, 0x26, - 0xe4, 0x55, 0x65, 0x90, 0x5c, 0xa3, 0xa7, 0x50, 0x72, 0x83, 0x20, 0xe4, 0x32, 0x4c, 0xe2, 0x66, - 0xfe, 0xf6, 0x69, 0xee, 0xe4, 0x6c, 0x8d, 0xf0, 0x55, 0x14, 0x26, 0x39, 0x58, 0x0f, 0xa0, 0x3a, - 0x89, 0x70, 0xae, 0x66, 0xf3, 0xef, 0x19, 0xb8, 0x38, 0x71, 0xad, 0xe8, 0x11, 0x54, 0x15, 0x34, - 0x31, 0x80, 0x39, 0xcb, 0xf1, 0xa7, 0xa8, 0xd0, 0x43, 0x28, 0x6f, 0x71, 0x2e, 0x32, 0xad, 0xd2, - 0x57, 0xb5, 0x98, 0xa7, 0x73, 0x19, 0xa3, 0x40, 0x8f, 0x46, 0x69, 0x2b, 0x3b, 0x6b, 0x90, 0x30, - 0x21, 0x7f, 0x7a, 0xce, 0xb2, 0x7e, 0x36, 0x3b, 0x00, 0xb2, 0xca, 0x4a, 0xf5, 0xf1, 0x00, 0x38, - 0x23, 0x6b, 0x8d, 0x6c, 0xf8, 0x07, 0x03, 0x0a, 0x71, 0x80, 0xa6, 0xce, 0x42, 0xee, 0x8f, 0xcf, - 0x42, 0x6e, 0xcc, 0x7e, 0x34, 0xdf, 0xe7, 0x08, 0xe4, 0xe6, 0x8f, 0xe1, 0x83, 0xd4, 0x82, 0x05, - 0x95, 0x60, 0x71, 0xff, 0x60, 0x0b, 0x1f, 0xb4, 0x9a, 0xd5, 0x0b, 0xa8, 0x0c, 0x85, 0xed, 0xa7, - 0xbb, 0x7b, 0x4f, 0x5a, 0x07, 0xad, 0xaa, 0x21, 0x8e, 0x9a, 0x2d, 0xb1, 0x6e, 0x56, 0x33, 0xf5, - 0xdf, 0xe6, 0x61, 0x71, 0x5b, 0xfd, 0xf3, 0x86, 0x0e, 0xa0, 0x38, 0xfc, 0x4b, 0x06, 0xd9, 0x29, - 0xa6, 0x99, 0xf8, 0x6f, 0xc7, 0xfa, 0xe8, 0x54, 0x1c, 0xfd, 0xa0, 0x3d, 0x82, 0x05, 0xf9, 0xe7, - 0x14, 0x4a, 0x19, 0x3b, 0x24, 0xff, 0xb5, 0xb2, 0x4e, 0xff, 0xb3, 0x67, 0xc3, 0x10, 0x9c, 0xe4, - 0xcc, 0x26, 0x8d, 0x53, 0x72, 0x30, 0x6c, 0xad, 0x9e, 0x31, 0xec, 0x41, 0xbb, 0x90, 0xd7, 0x8d, - 0x6c, 0x1a, 0x6a, 0x72, 0x32, 0x63, 0xad, 0xcd, 0x46, 0x50, 0xcc, 0x36, 0x0c, 0xb4, 0x3b, 0x9c, - 0xf5, 0xa7, 0x89, 0x96, 0xec, 0x02, 0xac, 0x33, 0xce, 0xd7, 0x8d, 0x0d, 0x03, 0x7d, 0x01, 0xa5, - 0x44, 0x9d, 0x8f, 0x52, 0xaa, 0xcc, 0xe9, 0xa6, 0xc1, 0xba, 0x71, 0x06, 0x96, 0xd6, 0xbc, 0x05, - 0x39, 0x99, 0x00, 0x52, 0x8c, 0x9d, 0x68, 0x03, 0xd2, 0xc4, 0x1c, 0x6b, 0x0b, 0x0e, 0x55, 0xe3, - 0x42, 0x82, 0xa4, 0xf7, 0xa1, 0x1b, 0x67, 0xd5, 0x1b, 0x33, 0xdd, 0x66, 0xca, 0x89, 0x37, 0x0c, - 0x14, 0x02, 0x9a, 0x4e, 0xfa, 0xe8, 0xbb, 0x29, 0x5e, 0x32, 0xeb, 0x55, 0xb2, 0x6e, 0xcd, 0x87, - 0xac, 0x94, 0x6a, 0x94, 0x5f, 0xbd, 0x59, 0x31, 0xfe, 0xf9, 0x66, 0xc5, 0xf8, 0xf7, 0x9b, 0x15, - 0xe3, 0x30, 0x2f, 0x2b, 0xc9, 0xef, 0xfd, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x12, 0xae, 0x41, 0x43, - 0x99, 0x1e, 0x00, 0x00, + 0x37, 0xe2, 0xc4, 0x89, 0x2a, 0x2e, 0x14, 0x17, 0x8a, 0x0b, 
0x77, 0x4e, 0x9c, 0x39, 0x73, 0xa0, + 0x2a, 0x47, 0xce, 0x7b, 0xc8, 0x52, 0xf9, 0x01, 0x14, 0x47, 0x8e, 0xd4, 0xfb, 0x18, 0x69, 0x24, + 0x8d, 0x6c, 0x39, 0xc9, 0x49, 0xaf, 0xdf, 0xeb, 0xee, 0xe9, 0xee, 0xd7, 0xdd, 0xaf, 0xbb, 0x05, + 0x95, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0x6a, 0x2f, 0x3c, 0x1c, 0x38, + 0x87, 0x7d, 0xcf, 0xef, 0x1c, 0x7b, 0xdc, 0x39, 0xf9, 0xd8, 0xaa, 0x77, 0x3d, 0xfe, 0xa2, 0x7f, + 0xe8, 0xb4, 0xc3, 0x5e, 0xad, 0x1b, 0x76, 0xc3, 0x5a, 0x37, 0x0c, 0xbb, 0x3e, 0x71, 0x23, 0x8f, + 0xe9, 0x65, 0x8d, 0x46, 0xed, 0x1a, 0xe3, 0x2e, 0xef, 0x33, 0xc5, 0xc5, 0xba, 0x3d, 0x49, 0x23, + 0xb7, 0x0f, 0xfb, 0x47, 0x12, 0x92, 0x80, 0x5c, 0x69, 0xf4, 0x5a, 0x02, 0x5d, 0x7c, 0xbf, 0x16, + 0x7f, 0xbf, 0xe6, 0x46, 0x5e, 0x8d, 0x0f, 0x22, 0xc2, 0x6a, 0x5f, 0x85, 0xf4, 0x98, 0x50, 0x4d, + 0x70, 0x6b, 0x26, 0x01, 0x0b, 0xfd, 0x13, 0x42, 0x6b, 0xd1, 0x61, 0x2d, 0x8c, 0x62, 0x69, 0xee, + 0x9c, 0x82, 0xdd, 0xa7, 0x6d, 0x12, 0x85, 0xbe, 0xd7, 0x1e, 0x08, 0x1a, 0xb5, 0xd2, 0x64, 0xab, + 0x5a, 0xbb, 0xa1, 0xec, 0xdc, 0xeb, 0x11, 0xc6, 0xdd, 0x5e, 0xa4, 0x10, 0xec, 0xdf, 0x19, 0x50, + 0xde, 0xa3, 0xfd, 0x80, 0x60, 0xf2, 0xeb, 0x3e, 0x61, 0x1c, 0x5d, 0x81, 0xfc, 0x91, 0xe7, 0x73, + 0x42, 0x4d, 0x63, 0x2d, 0xbb, 0x5e, 0xc4, 0x1a, 0x42, 0x55, 0xc8, 0xba, 0xbe, 0x6f, 0x66, 0xd6, + 0x8c, 0xf5, 0x02, 0x16, 0x4b, 0xb4, 0x0e, 0xe5, 0x63, 0x42, 0xa2, 0x66, 0x9f, 0xba, 0xdc, 0x0b, + 0x03, 0x33, 0xbb, 0x66, 0xac, 0x67, 0x1b, 0xb9, 0x57, 0xaf, 0x57, 0x0d, 0x3c, 0x76, 0x82, 0x6c, + 0x28, 0x0a, 0xb8, 0x31, 0xe0, 0x84, 0x99, 0xb9, 0x04, 0xda, 0x68, 0xdb, 0xbe, 0x09, 0xd5, 0xa6, + 0xc7, 0x8e, 0x9f, 0x31, 0xb7, 0x7b, 0x96, 0x2c, 0xf6, 0x63, 0x58, 0x4e, 0xe0, 0xb2, 0x28, 0x0c, + 0x18, 0x41, 0x77, 0x20, 0x4f, 0x49, 0x3b, 0xa4, 0x1d, 0x89, 0x5c, 0xaa, 0x7f, 0xe8, 0x4c, 0xba, + 0x81, 0xa3, 0x09, 0x04, 0x12, 0xd6, 0xc8, 0xf6, 0x9f, 0xb3, 0x50, 0x4a, 0xec, 0xa3, 0x25, 0xc8, + 0xec, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe2, 0xcc, 0x4e, 0x13, 0x99, 0xb0, 0xb8, 0xdb, 0xe7, + 0xee, 0xa1, 0x4f, 0xb4, 0xee, 0x31, 0x88, 0x2e, 0xc3, 0xc2, 0x4e, 0xf0, 0x8c, 0x11, 0xa9, 0x78, + 0x01, 0x2b, 0x00, 0x21, 0xc8, 0xed, 0x7b, 0xbf, 0x21, 0x4a, 0x4d, 0x2c, 0xd7, 0xc8, 0x82, 0xfc, + 0x9e, 0x4b, 0x49, 0xc0, 0xcd, 0x05, 0xc1, 0xb7, 0x91, 0x31, 0x0d, 0xac, 0x77, 0x50, 0x03, 0x8a, + 0xdb, 0x94, 0xb8, 0x9c, 0x74, 0xb6, 0xb8, 0x99, 0x5f, 0x33, 0xd6, 0x4b, 0x75, 0xcb, 0x51, 0xb7, + 0xe6, 0xc4, 0xb7, 0xe6, 0x1c, 0xc4, 0xb7, 0xd6, 0x28, 0xbc, 0x7a, 0xbd, 0x7a, 0xe1, 0x8f, 0xdf, + 0x08, 0xdb, 0x0d, 0xc9, 0xd0, 0x43, 0x80, 0x27, 0x2e, 0xe3, 0xcf, 0x98, 0x64, 0xb2, 0x78, 0x26, + 0x93, 0x9c, 0x64, 0x90, 0xa0, 0x41, 0x2b, 0x00, 0xd2, 0x08, 0xdb, 0x61, 0x3f, 0xe0, 0x66, 0x41, + 0xca, 0x9e, 0xd8, 0x41, 0x6b, 0x50, 0x6a, 0x12, 0xd6, 0xa6, 0x5e, 0x24, 0xaf, 0xba, 0x28, 0xcd, + 0x93, 0xdc, 0x12, 0x1c, 0x94, 0x05, 0x0f, 0x06, 0x11, 0x31, 0x41, 0x22, 0x24, 0x76, 0xc4, 0x5d, + 0xee, 0xbf, 0x70, 0x29, 0xe9, 0x98, 0x25, 0x69, 0x2e, 0x0d, 0x09, 0xfb, 0x2a, 0x4b, 0x30, 0xb3, + 0x2c, 0x2f, 0x39, 0x06, 0xed, 0xdf, 0x16, 0xa0, 0xbc, 0x2f, 0x42, 0x21, 0x76, 0x87, 0x2a, 0x64, + 0x31, 0x39, 0xd2, 0x77, 0x23, 0x96, 0xc8, 0x01, 0x68, 0x92, 0x23, 0x2f, 0xf0, 0xa4, 0x54, 0x19, + 0xa9, 0xf8, 0x92, 0x13, 0x1d, 0x3a, 0xa3, 0x5d, 0x9c, 0xc0, 0x40, 0x0e, 0xa0, 0xd6, 0xcb, 0x28, + 0xa4, 0x9c, 0xd0, 0x26, 0x89, 0x28, 0x69, 0x0b, 0x03, 0xca, 0xfb, 0x2b, 0xe2, 0x94, 0x13, 0xd4, + 0x87, 0x6f, 0xc5, 0xbb, 0x5b, 0x9c, 0x53, 0x96, 0x20, 0xca, 0x49, 0x27, 0xbb, 0x3f, 0xed, 0x64, + 0x49, 0x91, 0x9d, 0x19, 0xd4, 0xad, 0x80, 0xd3, 0x01, 0x9e, 0xc5, 0x5b, 0xd8, 0x64, 
0x9f, 0x30, + 0x26, 0x74, 0x92, 0x0e, 0x83, 0x63, 0x10, 0x59, 0x50, 0xf8, 0x19, 0x0d, 0x03, 0x4e, 0x82, 0x8e, + 0x74, 0x96, 0x22, 0x1e, 0xc2, 0xe8, 0x39, 0x54, 0xe2, 0xb5, 0x64, 0x68, 0x2e, 0x4a, 0x11, 0x3f, + 0x3e, 0x43, 0xc4, 0x31, 0x1a, 0x25, 0xd8, 0x38, 0x1f, 0xb4, 0x09, 0x0b, 0xdb, 0x6e, 0xfb, 0x05, + 0x91, 0x7e, 0x51, 0xaa, 0xaf, 0x4c, 0x33, 0x94, 0xc7, 0x4f, 0xa5, 0x23, 0x30, 0x19, 0xda, 0x17, + 0xb0, 0x22, 0x41, 0xbf, 0x84, 0x72, 0x2b, 0xe0, 0x1e, 0xf7, 0x49, 0x4f, 0xde, 0x71, 0x51, 0xdc, + 0x71, 0x63, 0xf3, 0xeb, 0xd7, 0xab, 0x3f, 0x9a, 0x99, 0xd1, 0xfa, 0xdc, 0xf3, 0x6b, 0x24, 0x41, + 0xe5, 0x24, 0x58, 0xe0, 0x31, 0x7e, 0xe8, 0x0b, 0x58, 0x8a, 0x85, 0xdd, 0x09, 0xa2, 0x3e, 0x67, + 0x26, 0x48, 0xad, 0xeb, 0x73, 0x6a, 0xad, 0x88, 0x94, 0xda, 0x13, 0x9c, 0x84, 0xb1, 0x77, 0x02, + 0x4e, 0x68, 0xe0, 0xfa, 0xda, 0x69, 0x87, 0x30, 0xda, 0x11, 0xbe, 0x29, 0x12, 0xef, 0x9e, 0x4c, + 0xb7, 0x66, 0x59, 0x9a, 0xe6, 0xc6, 0xf4, 0x57, 0x93, 0xe9, 0xd9, 0x51, 0xc8, 0x78, 0x8c, 0x14, + 0xdd, 0x83, 0x62, 0xec, 0x08, 0xcc, 0xac, 0x48, 0xe9, 0xad, 0x69, 0x3e, 0x31, 0x0a, 0x1e, 0x21, + 0x5b, 0x8f, 0xe1, 0xea, 0x69, 0x0e, 0x26, 0x02, 0xe6, 0x98, 0x0c, 0xe2, 0x80, 0x39, 0x26, 0x03, + 0x91, 0xb3, 0x4e, 0x5c, 0xbf, 0xaf, 0x72, 0x59, 0x11, 0x2b, 0x60, 0x33, 0x73, 0xcf, 0xb0, 0x1e, + 0x02, 0x9a, 0xf6, 0x84, 0x73, 0x71, 0xf8, 0x0c, 0x2e, 0xa5, 0x58, 0x35, 0x85, 0xc5, 0xf5, 0x24, + 0x8b, 0xe9, 0x80, 0x1d, 0xb1, 0xb4, 0xff, 0x96, 0x85, 0x72, 0xd2, 0xb7, 0xd0, 0x06, 0x5c, 0x52, + 0x1a, 0x63, 0x72, 0x94, 0x08, 0x46, 0xc5, 0x3c, 0xed, 0x08, 0xd5, 0xe1, 0xf2, 0x4e, 0x4f, 0x6f, + 0x27, 0xe3, 0x37, 0x23, 0x93, 0x4d, 0xea, 0x19, 0x0a, 0xe1, 0x03, 0xc5, 0x6a, 0x32, 0xe8, 0xb3, + 0xf2, 0x76, 0x3e, 0x39, 0x3d, 0x00, 0x9c, 0x54, 0x5a, 0xe5, 0x62, 0xe9, 0x7c, 0xd1, 0x4f, 0x60, + 0x51, 0x1d, 0x30, 0x9d, 0x57, 0x3e, 0x3a, 0xfd, 0x13, 0x8a, 0x59, 0x4c, 0x23, 0xc8, 0x95, 0x1e, + 0xcc, 0x5c, 0x38, 0x07, 0xb9, 0xa6, 0xb1, 0x1e, 0x81, 0x35, 0x5b, 0xe4, 0xf3, 0xb8, 0x80, 0xfd, + 0x57, 0x03, 0x96, 0xa7, 0x3e, 0x24, 0x9e, 0x44, 0xf9, 0x28, 0x28, 0x16, 0x72, 0x8d, 0x9a, 0xb0, + 0xa0, 0x92, 0x54, 0x46, 0x0a, 0xec, 0xcc, 0x21, 0xb0, 0x93, 0xc8, 0x50, 0x8a, 0xd8, 0xba, 0x07, + 0xf0, 0x76, 0xce, 0x6a, 0xff, 0xdd, 0x80, 0x8a, 0x4e, 0x08, 0xba, 0x7e, 0x70, 0xa1, 0x3a, 0x8c, + 0x31, 0xbd, 0xa7, 0x2b, 0x89, 0x3b, 0x33, 0x73, 0x89, 0x42, 0x73, 0x26, 0xe9, 0x94, 0x8c, 0x53, + 0xec, 0xac, 0xed, 0xd8, 0xaf, 0x26, 0x50, 0xcf, 0x25, 0xf9, 0x35, 0xa8, 0xec, 0xcb, 0x3a, 0x75, + 0xe6, 0xb3, 0x68, 0xff, 0xd7, 0x80, 0xa5, 0x18, 0x47, 0x6b, 0xf7, 0x43, 0x28, 0x9c, 0x10, 0xca, + 0xc9, 0x4b, 0xc2, 0xb4, 0x56, 0xe6, 0xb4, 0x56, 0x9f, 0x4b, 0x0c, 0x3c, 0xc4, 0x44, 0x9b, 0x50, + 0x50, 0x35, 0x31, 0x89, 0x2f, 0x6a, 0x65, 0x16, 0x95, 0xfe, 0xde, 0x10, 0x1f, 0xd5, 0x20, 0xe7, + 0x87, 0x5d, 0xa6, 0x63, 0xe6, 0x3b, 0xb3, 0xe8, 0x9e, 0x84, 0x5d, 0x2c, 0x11, 0xd1, 0x7d, 0x28, + 0x7c, 0xe5, 0xd2, 0xc0, 0x0b, 0xba, 0x71, 0x14, 0xac, 0xce, 0x22, 0x7a, 0xae, 0xf0, 0xf0, 0x90, + 0x40, 0x94, 0x71, 0x79, 0x75, 0x86, 0x1e, 0x43, 0xbe, 0xe3, 0x75, 0x09, 0xe3, 0xca, 0x24, 0x8d, + 0xba, 0x78, 0x8f, 0xbe, 0x7e, 0xbd, 0x7a, 0x33, 0xf1, 0xe0, 0x84, 0x11, 0x09, 0x44, 0xd3, 0xe0, + 0x7a, 0x01, 0xa1, 0xa2, 0x07, 0xb8, 0xad, 0x48, 0x9c, 0xa6, 0xfc, 0xc1, 0x9a, 0x83, 0xe0, 0xe5, + 0xa9, 0x67, 0x45, 0xe6, 0x8b, 0xb7, 0xe3, 0xa5, 0x38, 0x88, 0x30, 0x08, 0xdc, 0x1e, 0xd1, 0xe5, + 0x86, 0x5c, 0x8b, 0xaa, 0xa8, 0x2d, 0xfc, 0xbc, 0x23, 0xeb, 0xc5, 0x02, 0xd6, 0x10, 0xda, 0x84, + 0x45, 0xc6, 0x5d, 0x2a, 0x72, 0xce, 0xc2, 0x9c, 0xe5, 0x5c, 0x4c, 0x80, 0x1e, 0x40, 0xb1, 0x1d, + 0xf6, 0x22, 
0x9f, 0x08, 0xea, 0xfc, 0x9c, 0xd4, 0x23, 0x12, 0xe1, 0x7a, 0x84, 0xd2, 0x90, 0xca, + 0x42, 0xb2, 0x88, 0x15, 0x80, 0xee, 0x42, 0x25, 0xa2, 0x61, 0x97, 0x12, 0xc6, 0x3e, 0xa5, 0x61, + 0x3f, 0xd2, 0xc5, 0xc0, 0xb2, 0x48, 0xde, 0x7b, 0xc9, 0x03, 0x3c, 0x8e, 0x67, 0xff, 0x27, 0x03, + 0xe5, 0xa4, 0x8b, 0x4c, 0x55, 0xd8, 0x8f, 0x21, 0xaf, 0x1c, 0x4e, 0xf9, 0xfa, 0xdb, 0xd9, 0x58, + 0x71, 0x48, 0xb5, 0xb1, 0x09, 0x8b, 0xed, 0x3e, 0x95, 0xe5, 0xb7, 0x2a, 0xca, 0x63, 0x50, 0x68, + 0xca, 0x43, 0xee, 0xfa, 0xd2, 0xc6, 0x59, 0xac, 0x00, 0x51, 0x91, 0x0f, 0xbb, 0xa4, 0xf3, 0x55, + 0xe4, 0x43, 0xb2, 0xe4, 0xfd, 0x2d, 0xbe, 0xd3, 0xfd, 0x15, 0xce, 0x7d, 0x7f, 0xf6, 0x3f, 0x0d, + 0x28, 0x0e, 0x63, 0x2b, 0x61, 0x5d, 0xe3, 0x9d, 0xad, 0x3b, 0x66, 0x99, 0xcc, 0xdb, 0x59, 0xe6, + 0x0a, 0xe4, 0x19, 0xa7, 0xc4, 0xed, 0xa9, 0x7e, 0x11, 0x6b, 0x48, 0x64, 0xb1, 0x1e, 0xeb, 0xca, + 0x1b, 0x2a, 0x63, 0xb1, 0xb4, 0xff, 0x67, 0x40, 0x65, 0x2c, 0xdc, 0xdf, 0xab, 0x2e, 0x97, 0x61, + 0xc1, 0x27, 0x27, 0x44, 0x75, 0xb4, 0x59, 0xac, 0x00, 0xb1, 0xcb, 0x5e, 0x84, 0x94, 0x4b, 0xe1, + 0xca, 0x58, 0x01, 0x42, 0xe6, 0x0e, 0xe1, 0xae, 0xe7, 0xcb, 0xbc, 0x54, 0xc6, 0x1a, 0x12, 0x32, + 0xf7, 0xa9, 0xaf, 0x6b, 0x74, 0xb1, 0x44, 0x36, 0xe4, 0xbc, 0xe0, 0x28, 0xd4, 0x6e, 0x23, 0x2b, + 0x1b, 0x55, 0xeb, 0xed, 0x04, 0x47, 0x21, 0x96, 0x67, 0xe8, 0x1a, 0xe4, 0xa9, 0x1b, 0x74, 0x49, + 0x5c, 0xa0, 0x17, 0x05, 0x16, 0x16, 0x3b, 0x58, 0x1f, 0xd8, 0x36, 0x94, 0x65, 0x57, 0xbc, 0x4b, + 0x98, 0xe8, 0xc1, 0x84, 0x5b, 0x77, 0x5c, 0xee, 0x4a, 0xb5, 0xcb, 0x58, 0xae, 0xed, 0x5b, 0x80, + 0x9e, 0x78, 0x8c, 0x3f, 0x97, 0x33, 0x05, 0x76, 0x56, 0xcb, 0xbc, 0x0f, 0x97, 0xc6, 0xb0, 0xf5, + 0xb3, 0xf0, 0xe3, 0x89, 0xa6, 0xf9, 0xfa, 0x74, 0xc6, 0x95, 0xa3, 0x0b, 0x47, 0x11, 0x4e, 0xf4, + 0xce, 0x15, 0x28, 0x49, 0xbd, 0xd4, 0xb7, 0x6d, 0x17, 0xca, 0x0a, 0xd4, 0xcc, 0x3f, 0x83, 0x8b, + 0x31, 0xa3, 0xcf, 0x09, 0x95, 0xed, 0x8c, 0x21, 0xed, 0xf2, 0xbd, 0x59, 0x5f, 0x69, 0x8c, 0xa3, + 0xe3, 0x49, 0x7a, 0x9b, 0xc0, 0x25, 0x89, 0xf3, 0xc8, 0x63, 0x3c, 0xa4, 0x83, 0x58, 0xeb, 0x15, + 0x80, 0xad, 0x36, 0xf7, 0x4e, 0xc8, 0xd3, 0xc0, 0x57, 0xcf, 0x68, 0x01, 0x27, 0x76, 0xe2, 0x27, + 0x32, 0x33, 0xea, 0x1c, 0xaf, 0x42, 0xb1, 0xe5, 0x52, 0x7f, 0xd0, 0x7a, 0xe9, 0x71, 0xdd, 0xc0, + 0x8f, 0x36, 0xec, 0x3f, 0x18, 0xb0, 0x9c, 0xfc, 0x4e, 0xeb, 0x44, 0xa4, 0x8b, 0xfb, 0x90, 0xe3, + 0x71, 0x1d, 0xb3, 0x94, 0xa6, 0xc4, 0x14, 0x89, 0x28, 0x75, 0xb0, 0x24, 0x4a, 0x58, 0x5a, 0x05, + 0xce, 0xf5, 0xd3, 0xc9, 0x27, 0x2c, 0xfd, 0x4d, 0x11, 0xd0, 0xf4, 0x71, 0x4a, 0x47, 0x9c, 0x6c, + 0x10, 0x33, 0x13, 0x0d, 0xe2, 0x97, 0x93, 0x0d, 0xa2, 0x7a, 0x9a, 0xef, 0xce, 0x23, 0xc9, 0x1c, + 0x6d, 0xe2, 0x58, 0x1f, 0x93, 0x3b, 0x47, 0x1f, 0x83, 0xd6, 0xe3, 0x17, 0x47, 0xbd, 0x75, 0x28, + 0xce, 0x29, 0x34, 0x6a, 0x3b, 0xba, 0xae, 0xd0, 0xaf, 0xd0, 0x83, 0xf3, 0x4d, 0x4b, 0x72, 0x93, + 0x93, 0x92, 0x06, 0x94, 0xb6, 0xe3, 0x44, 0x79, 0x8e, 0x51, 0x49, 0x92, 0x08, 0x6d, 0xe8, 0xc2, + 0x46, 0xa5, 0xe6, 0xab, 0xd3, 0x2a, 0xc6, 0x63, 0x91, 0x90, 0xea, 0xca, 0xe6, 0x28, 0xa5, 0xb4, + 0x2c, 0x4a, 0x03, 0x6d, 0xce, 0x65, 0xfb, 0x39, 0xeb, 0x4b, 0xf4, 0x09, 0xe4, 0x31, 0x61, 0x7d, + 0x9f, 0xcb, 0xf9, 0x4b, 0xa9, 0x7e, 0x6d, 0x06, 0x77, 0x85, 0x24, 0x63, 0x55, 0x13, 0xa0, 0x9f, + 0xc3, 0xa2, 0x5a, 0x31, 0xb3, 0x34, 0x6b, 0x6c, 0x90, 0x22, 0x99, 0xa6, 0xd1, 0x0d, 0x85, 0x86, + 0x44, 0x38, 0x7e, 0x4a, 0x02, 0xa2, 0xe7, 0x82, 0xa2, 0x35, 0x5e, 0xc0, 0x89, 0x1d, 0x54, 0x87, + 0x05, 0x4e, 0xdd, 0x36, 0x31, 0x2b, 0x73, 0x98, 0x50, 0xa1, 0x8a, 0xc4, 0x16, 0x79, 0x41, 0x40, + 0x3a, 0xe6, 0x92, 0xaa, 0x94, 0x14, 
0x84, 0xbe, 0x0b, 0x4b, 0x41, 0xbf, 0x27, 0x9b, 0x85, 0xce, + 0x3e, 0x27, 0x11, 0x33, 0x2f, 0xca, 0xef, 0x4d, 0xec, 0xa2, 0xeb, 0x50, 0x09, 0xfa, 0xbd, 0x03, + 0xf1, 0xc2, 0x2b, 0xb4, 0xaa, 0x44, 0x1b, 0xdf, 0x44, 0xb7, 0x60, 0x59, 0xd0, 0xc5, 0xb7, 0xad, + 0x30, 0x97, 0x25, 0xe6, 0xf4, 0x01, 0x6a, 0x40, 0x85, 0xbc, 0x54, 0x03, 0x81, 0x96, 0xf4, 0x5f, + 0x34, 0x87, 0x3e, 0xe3, 0x24, 0x68, 0x0d, 0x4a, 0x41, 0xbf, 0xf7, 0x3c, 0x2e, 0x7c, 0x2f, 0xc9, + 0x6f, 0x25, 0xb7, 0xde, 0x43, 0x67, 0xfe, 0x3e, 0xfa, 0x0e, 0xeb, 0x4b, 0x28, 0x27, 0x6f, 0x3b, + 0x85, 0xf6, 0xee, 0x78, 0x5f, 0x3f, 0x87, 0xf7, 0x25, 0xda, 0x9a, 0x01, 0x7c, 0xfb, 0x59, 0xd4, + 0x71, 0x39, 0x49, 0xcb, 0xef, 0xd3, 0x79, 0xee, 0x0a, 0xe4, 0xf7, 0x94, 0x3b, 0xa8, 0xa9, 0xac, + 0x86, 0xc4, 0x7e, 0x93, 0x88, 0x2b, 0xd2, 0x49, 0x5d, 0x43, 0x32, 0x2f, 0x7a, 0x81, 0xeb, 0xc7, + 0xa3, 0xd9, 0x02, 0x1e, 0xc2, 0xf6, 0x55, 0xb0, 0xd2, 0x3e, 0xad, 0x0c, 0x65, 0xff, 0x25, 0x03, + 0x30, 0xba, 0x3e, 0xf4, 0x21, 0x40, 0x8f, 0x74, 0x3c, 0xf7, 0x57, 0x7c, 0xd4, 0xd2, 0x16, 0xe5, + 0x8e, 0xec, 0x6b, 0x47, 0xcd, 0x47, 0xe6, 0x9d, 0x9b, 0x0f, 0x04, 0x39, 0x26, 0xe4, 0x55, 0x85, + 0x92, 0x5c, 0xa3, 0xa7, 0x50, 0x72, 0x83, 0x20, 0xe4, 0x32, 0x90, 0xe2, 0x76, 0xff, 0xf6, 0x69, + 0x0e, 0xe7, 0x6c, 0x8d, 0xf0, 0x55, 0x9c, 0x26, 0x39, 0x58, 0x0f, 0xa0, 0x3a, 0x89, 0x70, 0xae, + 0x76, 0xf4, 0x1f, 0x19, 0xb8, 0x38, 0x71, 0xad, 0xe8, 0x11, 0x54, 0x15, 0x34, 0x31, 0xa2, 0x39, + 0x2b, 0x34, 0xa6, 0xa8, 0xd0, 0x43, 0x28, 0x6f, 0x71, 0x2e, 0x72, 0xb1, 0xd2, 0x57, 0x35, 0xa1, + 0xa7, 0x73, 0x19, 0xa3, 0x40, 0x8f, 0x46, 0x89, 0x2d, 0x3b, 0x6b, 0xd4, 0x30, 0x21, 0x7f, 0x7a, + 0x56, 0xb3, 0x7e, 0x31, 0x3b, 0x00, 0xb2, 0xca, 0x4a, 0xf5, 0xf1, 0x00, 0x38, 0x23, 0xaf, 0x8d, + 0x6c, 0xf8, 0x27, 0x03, 0x0a, 0x71, 0x80, 0xa6, 0x4e, 0x4b, 0xee, 0x8f, 0x4f, 0x4b, 0x6e, 0xcc, + 0x7e, 0x56, 0xdf, 0xe7, 0x90, 0xe4, 0xe6, 0x4f, 0xe1, 0x83, 0xd4, 0x92, 0x06, 0x95, 0x60, 0x71, + 0xff, 0x60, 0x0b, 0x1f, 0xb4, 0x9a, 0xd5, 0x0b, 0xa8, 0x0c, 0x85, 0xed, 0xa7, 0xbb, 0x7b, 0x4f, + 0x5a, 0x07, 0xad, 0xaa, 0x21, 0x8e, 0x9a, 0x2d, 0xb1, 0x6e, 0x56, 0x33, 0xf5, 0xdf, 0xe7, 0x61, + 0x71, 0x5b, 0xfd, 0x37, 0x87, 0x0e, 0xa0, 0x38, 0xfc, 0xd3, 0x06, 0xd9, 0x29, 0xa6, 0x99, 0xf8, + 0xf7, 0xc7, 0xfa, 0xe8, 0x54, 0x1c, 0xfd, 0xe4, 0x3d, 0x82, 0x05, 0xf9, 0xf7, 0x15, 0x4a, 0x19, + 0x4c, 0x24, 0xff, 0xd7, 0xb2, 0x4e, 0xff, 0x3b, 0x68, 0xc3, 0x10, 0x9c, 0xe4, 0x54, 0x27, 0x8d, + 0x53, 0x72, 0x74, 0x6c, 0xad, 0x9e, 0x31, 0x0e, 0x42, 0xbb, 0x90, 0xd7, 0xad, 0x6e, 0x1a, 0x6a, + 0x72, 0x76, 0x63, 0xad, 0xcd, 0x46, 0x50, 0xcc, 0x36, 0x0c, 0xb4, 0x3b, 0xfc, 0x37, 0x20, 0x4d, + 0xb4, 0x64, 0x9f, 0x60, 0x9d, 0x71, 0xbe, 0x6e, 0x6c, 0x18, 0xe8, 0x0b, 0x28, 0x25, 0x3a, 0x01, + 0x94, 0x52, 0x87, 0x4e, 0xb7, 0x15, 0xd6, 0x8d, 0x33, 0xb0, 0xb4, 0xe6, 0x2d, 0xc8, 0xc9, 0x04, + 0x90, 0x62, 0xec, 0x44, 0xa3, 0x90, 0x26, 0xe6, 0x58, 0xe3, 0x70, 0xa8, 0x5a, 0x1b, 0x12, 0x24, + 0xbd, 0x0f, 0xdd, 0x38, 0xab, 0x22, 0x99, 0xe9, 0x36, 0x53, 0x4e, 0xbc, 0x61, 0xa0, 0x10, 0xd0, + 0x74, 0xd2, 0x47, 0xdf, 0x4f, 0xf1, 0x92, 0x59, 0xaf, 0x92, 0x75, 0x6b, 0x3e, 0x64, 0xa5, 0x54, + 0xa3, 0xfc, 0xea, 0xcd, 0x8a, 0xf1, 0xaf, 0x37, 0x2b, 0xc6, 0xbf, 0xdf, 0xac, 0x18, 0x87, 0x79, + 0x59, 0x6b, 0xfe, 0xe0, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x06, 0xac, 0x18, 0xbb, 0x1e, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
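For context on the numWarnings addition in the control.pb.go hunks that follow: field 19 with wire type 0 (varint) has the key (19<<3)|0 = 152 = 0x98, which a varint encoder emits as the two bytes 0x98 0x01; MarshalToSizedBuffer fills its buffer back to front, which is why the generated code writes 0x1 and then 0x98. A small standalone check (illustrative only, using a hand-rolled varint encoder rather than the generated encodeVarintControl):

```go
// Standalone illustration of the protobuf key bytes for the new NumWarnings
// field (field number 19, wire type 0). Not part of the vendored code.
package main

import "fmt"

func main() {
	const fieldNumber, wireType = 19, 0
	key := uint64(fieldNumber<<3 | wireType) // 152

	// Base-128 varint encoding: low 7 bits first, continuation bit on all
	// bytes except the last.
	var buf []byte
	for key >= 0x80 {
		buf = append(buf, byte(key)|0x80)
		key >>= 7
	}
	buf = append(buf, byte(key))

	fmt.Printf("% x\n", buf) // prints: 98 01
}
```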
@@ -4030,6 +4039,13 @@ func (m *BuildHistoryRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.NumWarnings != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumWarnings)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } if m.ExternalError != nil { { size, err := m.ExternalError.MarshalToSizedBuffer(dAtA[:i]) @@ -5242,6 +5258,9 @@ func (m *BuildHistoryRecord) Size() (n int) { l = m.ExternalError.Size() n += 2 + l + sovControl(uint64(l)) } + if m.NumWarnings != 0 { + n += 2 + sovControl(uint64(m.NumWarnings)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -10303,6 +10322,25 @@ func (m *BuildHistoryRecord) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumWarnings", wireType) + } + m.NumWarnings = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumWarnings |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto index e57b118cd33a..60a4aabc3ea3 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -212,6 +212,7 @@ message BuildHistoryRecord { int32 numTotalSteps = 16; int32 numCompletedSteps = 17; Descriptor externalError = 18; + int32 numWarnings = 19; // TODO: tags // TODO: unclipped logs } diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 457a9b1f2f8e..6850f21a7459 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -6,6 +6,7 @@ import ( "fmt" "net" "sort" + "strings" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/system" @@ -290,7 +291,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] if len(e.secrets) > 0 { addCap(&e.constraints, pb.CapExecMountSecret) for _, s := range e.secrets { - if s.IsEnv { + if s.Env != nil { addCap(&e.constraints, pb.CapExecSecretEnv) break } @@ -388,15 +389,17 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } for _, s := range e.secrets { - if s.IsEnv { + if s.Env != nil { peo.Secretenv = append(peo.Secretenv, &pb.SecretEnv{ ID: s.ID, - Name: s.Target, + Name: *s.Env, Optional: s.Optional, }) - } else { + } + if s.Target != nil { pm := &pb.Mount{ - Dest: s.Target, + Input: pb.Empty, + Dest: *s.Target, MountType: pb.MountType_SECRET, SecretOpt: &pb.SecretOpt{ ID: s.ID, @@ -412,6 +415,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] for _, s := range e.ssh { pm := &pb.Mount{ + Input: pb.Empty, Dest: s.Target, MountType: pb.MountType_SSH, SSHOpt: &pb.SSHOpt{ @@ -678,7 +682,19 @@ type SSHInfo struct { // AddSecret is a RunOption that adds a secret to the exec. 
func AddSecret(dest string, opts ...SecretOption) RunOption { return runOptionFunc(func(ei *ExecInfo) { - s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} + s := &SecretInfo{ID: dest, Target: &dest, Mode: 0400} + for _, opt := range opts { + opt.SetSecretOption(s) + } + ei.Secrets = append(ei.Secrets, *s) + }) +} + +// AddSecretWithDest is a RunOption that adds a secret to the exec +// with an optional destination. +func AddSecretWithDest(src string, dest *string, opts ...SecretOption) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + s := &SecretInfo{ID: src, Target: dest, Mode: 0400} for _, opt := range opts { opt.SetSecretOption(s) } @@ -697,13 +713,15 @@ func (fn secretOptionFunc) SetSecretOption(si *SecretInfo) { } type SecretInfo struct { - ID string - Target string + ID string + // Target optionally specifies the target for the secret mount + Target *string + // Env optionally names the environment variable for the secret + Env *string Mode int UID int GID int Optional bool - IsEnv bool } var SecretOptional = secretOptionFunc(func(si *SecretInfo) { @@ -719,7 +737,24 @@ func SecretID(id string) SecretOption { // SecretAsEnv defines if the secret should be added as an environment variable func SecretAsEnv(v bool) SecretOption { return secretOptionFunc(func(si *SecretInfo) { - si.IsEnv = v + if !v { + si.Env = nil + return + } + if si.Target == nil { + return + } + target := strings.Clone(*si.Target) + si.Env = &target + si.Target = nil + }) +} + +// SecretAsEnvName defines if the secret should be added as an environment variable +// with the specified name +func SecretAsEnvName(v string) SecretOption { + return secretOptionFunc(func(si *SecretInfo) { + si.Env = &v }) } diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go index 44e66f23a56d..32f070d810ba 100644 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -7,7 +7,6 @@ import ( "io" "maps" "os" - "path/filepath" "strings" "time" @@ -120,7 +119,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if opt.SessionPreInitialized { return nil, errors.Errorf("no session provided for preinitialized option") } - s, err = session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) + s, err = session.NewSession(statusContext, opt.SharedKey) if err != nil { return nil, errors.Wrap(err, "failed to create session") } @@ -419,14 +418,6 @@ func prepareSyncedFiles(def *llb.Definition, localMounts map[string]fsutil.FS) ( return result, nil } -func defaultSessionName() string { - wd, err := os.Getwd() - if err != nil { - return "unknown" - } - return filepath.Base(wd) -} - type cacheOptions struct { options controlapi.CacheOptions contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) diff --git a/vendor/github.com/moby/buildkit/errdefs/internal.go b/vendor/github.com/moby/buildkit/errdefs/internal.go new file mode 100644 index 000000000000..24cea7e333bd --- /dev/null +++ b/vendor/github.com/moby/buildkit/errdefs/internal.go @@ -0,0 +1,59 @@ +package errdefs + +import ( + "errors" + "syscall" +) + +type internalErr struct { + error +} + +func (internalErr) System() {} + +func (err internalErr) Unwrap() error { + return err.error +} + +type system interface { + System() +} + +var _ system = internalErr{} + +func Internal(err error) error { + if err == nil { + return nil + } + return internalErr{err} +} + +func IsInternal(err error) bool { + var s 
system + if errors.As(err, &s) { + return true + } + + var errno syscall.Errno + if errors.As(err, &errno) { + if _, ok := isInternalSyscall(errno); ok { + return true + } + } + return false +} + +func IsResourceExhausted(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + if v, ok := isInternalSyscall(errno); ok && v { + return v + } + } + return false +} + +func isInternalSyscall(err syscall.Errno) (bool, bool) { + v, ok := syscallErrors()[err] + return v, ok +} diff --git a/vendor/github.com/moby/buildkit/errdefs/internal_linux.go b/vendor/github.com/moby/buildkit/errdefs/internal_linux.go new file mode 100644 index 000000000000..ba246c597a73 --- /dev/null +++ b/vendor/github.com/moby/buildkit/errdefs/internal_linux.go @@ -0,0 +1,23 @@ +//go:build linux +// +build linux + +package errdefs + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// syscallErrors returns a map of syscall errors that are considered internal. +// value is true if the error is of type resource exhaustion, false otherwise. +func syscallErrors() map[syscall.Errno]bool { + return map[syscall.Errno]bool{ + unix.EIO: false, // I/O error + unix.ENOMEM: true, // Out of memory + unix.EFAULT: false, // Bad address + unix.ENOSPC: true, // No space left on device + unix.ENOTRECOVERABLE: false, // State not recoverable + unix.EHWPOISON: false, // Memory page has hardware error + } +} diff --git a/vendor/github.com/moby/buildkit/errdefs/internal_nolinux.go b/vendor/github.com/moby/buildkit/errdefs/internal_nolinux.go new file mode 100644 index 000000000000..5348b32ee7cb --- /dev/null +++ b/vendor/github.com/moby/buildkit/errdefs/internal_nolinux.go @@ -0,0 +1,10 @@ +//go:build !linux +// +build !linux + +package errdefs + +import "syscall" + +func syscallErrors() map[syscall.Errno]bool { + return nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go index da94cbc9e1a8..890acc53bdf2 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go @@ -10,42 +10,61 @@ import ( ) type Config struct { - Warn LintWarnFunc - SkipRules []string - SkipAll bool - ReturnAsError bool + ExperimentalAll bool + ExperimentalRules []string + ReturnAsError bool + SkipAll bool + SkipRules []string + Warn LintWarnFunc } type Linter struct { - SkippedRules map[string]struct{} - CalledRules []string - SkipAll bool - ReturnAsError bool - Warn LintWarnFunc + CalledRules []string + ExperimentalAll bool + ExperimentalRules map[string]struct{} + ReturnAsError bool + SkipAll bool + SkippedRules map[string]struct{} + Warn LintWarnFunc } func New(config *Config) *Linter { toret := &Linter{ - SkippedRules: map[string]struct{}{}, - CalledRules: []string{}, - Warn: config.Warn, + SkippedRules: map[string]struct{}{}, + ExperimentalRules: map[string]struct{}{}, + CalledRules: []string{}, + Warn: config.Warn, } toret.SkipAll = config.SkipAll + toret.ExperimentalAll = config.ExperimentalAll toret.ReturnAsError = config.ReturnAsError for _, rule := range config.SkipRules { toret.SkippedRules[rule] = struct{}{} } + for _, rule := range config.ExperimentalRules { + toret.ExperimentalRules[rule] = struct{}{} + } return toret } func (lc *Linter) Run(rule LinterRuleI, location []parser.Range, txt ...string) { - if lc == nil || lc.Warn == nil || lc.SkipAll || rule.IsDeprecated() { + if lc == nil || lc.Warn == nil || rule.IsDeprecated() { return 
} + rulename := rule.RuleName() - if _, ok := lc.SkippedRules[rulename]; ok { - return + if rule.IsExperimental() { + _, experimentalOk := lc.ExperimentalRules[rulename] + if !(lc.ExperimentalAll || experimentalOk) { + return + } + } else { + _, skipOk := lc.SkippedRules[rulename] + if lc.SkipAll || skipOk { + return + } } + lc.CalledRules = append(lc.CalledRules, rulename) rule.Run(lc.Warn, location, txt...) } @@ -72,14 +91,16 @@ type LinterRuleI interface { RuleName() string Run(warn LintWarnFunc, location []parser.Range, txt ...string) IsDeprecated() bool + IsExperimental() bool } type LinterRule[F any] struct { - Name string - Description string - Deprecated bool - URL string - Format F + Name string + Description string + Deprecated bool + Experimental bool + URL string + Format F } func (rule *LinterRule[F]) RuleName() string { @@ -98,6 +119,10 @@ func (rule *LinterRule[F]) IsDeprecated() bool { return rule.Deprecated } +func (rule *LinterRule[F]) IsExperimental() bool { + return rule.Experimental +} + func LintFormatShort(rulename, msg string, line int) string { msg = fmt.Sprintf("%s: %s", rulename, msg) if line > 0 { @@ -114,9 +139,9 @@ func ParseLintOptions(checkStr string) (*Config, error) { return &Config{}, nil } - parts := strings.SplitN(checkStr, ";", 2) - var skipSet []string - var errorOnWarn, skipAll bool + parts := strings.SplitN(checkStr, ";", 3) + var skipSet, experimentalSet []string + var errorOnWarn, skipAll, experimentalAll bool for _, p := range parts { k, v, ok := strings.Cut(p, "=") if !ok { @@ -134,6 +159,16 @@ func ParseLintOptions(checkStr string) (*Config, error) { skipSet[i] = strings.TrimSpace(rule) } } + case "experimental": + v = strings.TrimSpace(v) + if v == "all" { + experimentalAll = true + } else { + experimentalSet = strings.Split(v, ",") + for i, rule := range experimentalSet { + experimentalSet[i] = strings.TrimSpace(rule) + } + } case "error": v, err := strconv.ParseBool(strings.TrimSpace(v)) if err != nil { @@ -145,8 +180,10 @@ func ParseLintOptions(checkStr string) (*Config, error) { } } return &Config{ - SkipRules: skipSet, - SkipAll: skipAll, - ReturnAsError: errorOnWarn, + ExperimentalAll: experimentalAll, + ExperimentalRules: experimentalSet, + SkipRules: skipSet, + SkipAll: skipAll, + ReturnAsError: errorOnWarn, }, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/ruleset.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/ruleset.go index d94c32c53ce7..083b07d66c6f 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/ruleset.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/ruleset.go @@ -163,5 +163,6 @@ var ( Format: func(cmd, file string) string { return fmt.Sprintf("Attempting to %s file %q that is excluded by .dockerignore", cmd, file) }, + Experimental: true, } ) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index 0d74545d1ac6..18da29334950 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -378,6 +378,9 @@ func (sw *shellWord) processDollar() (string, error) { fallthrough case '+', '-', '?', '#', '%': rawEscapes := ch == '#' || ch == '%' + if nullIsUnset && rawEscapes { + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) + } word, _, err := sw.processStopOn('}', rawEscapes) if err != nil { if sw.scanner.Peek() == scanner.EOF { diff 
--git a/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go index b6dce1673624..eeda2f7da352 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go @@ -125,6 +125,16 @@ func parseSourceDateEpoch(v string) (*time.Time, error) { return &tm, nil } +func parseLocalSessionIDs(opt map[string]string) map[string]string { + m := map[string]string{} + for k, v := range opt { + if strings.HasPrefix(k, localSessionIDPrefix) { + m[strings.TrimPrefix(k, localSessionIDPrefix)] = v + } + } + return m +} + func filter(opt map[string]string, key string) map[string]string { m := map[string]string{} for k, v := range opt { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go index e0f9658aaa28..662861020903 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go @@ -26,8 +26,9 @@ import ( ) const ( - buildArgPrefix = "build-arg:" - labelPrefix = "label:" + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + localSessionIDPrefix = "local-sessionid:" keyTarget = "target" keyCgroupParent = "cgroup-parent" @@ -45,30 +46,28 @@ const ( // Don't forget to update frontend documentation if you add // a new build-arg: frontend/dockerfile/docs/reference.md - keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" - keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" - keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" - keyDockerfileLintArg = "build-arg:BUILDKIT_DOCKERFILE_CHECK" - keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" - keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" - keyCopyIgnoredCheckEnabled = "build-arg:BUILDKIT_DOCKERFILE_CHECK_COPYIGNORED_EXPERIMENT" + keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" + keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" + keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" + keyDockerfileLintArg = "build-arg:BUILDKIT_DOCKERFILE_CHECK" + keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" + keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" ) type Config struct { - BuildArgs map[string]string - CacheIDNamespace string - CgroupParent string - Epoch *time.Time - ExtraHosts []llb.HostIP - Hostname string - ImageResolveMode llb.ResolveMode - Labels map[string]string - NetworkMode pb.NetMode - ShmSize int64 - Target string - Ulimits []pb.Ulimit - LinterConfig *linter.Config - CopyIgnoredCheckEnabled bool + BuildArgs map[string]string + CacheIDNamespace string + CgroupParent string + Epoch *time.Time + ExtraHosts []llb.HostIP + Hostname string + ImageResolveMode llb.ResolveMode + Labels map[string]string + NetworkMode pb.NetMode + ShmSize int64 + Target string + Ulimits []pb.Ulimit + LinterConfig *linter.Config CacheImports []client.CacheOptionsEntry TargetPlatforms []ocispecs.Platform // nil means default @@ -79,10 +78,11 @@ type Config struct { type Client struct { Config - client client.Client - ignoreCache []string - g flightcontrol.CachedGroup[*buildContext] - bopts client.BuildOpts + client client.Client + ignoreCache []string + g flightcontrol.CachedGroup[*buildContext] + bopts client.BuildOpts + localsSessionIDs map[string]string dockerignore []byte dockerignoreName string @@ -289,15 +289,8 @@ func (bc *Client) init() error { } } - // CopyIgnoredCheckEnabled is an experimental feature to check if COPY 
is ignored by .dockerignore, - // and it is disabled by default. It is expected that this feature will be enabled by default in a future - // release, and this build-arg will be removed. - if v, ok := opts[keyCopyIgnoredCheckEnabled]; ok { - bc.CopyIgnoredCheckEnabled, err = strconv.ParseBool(v) - if err != nil { - return errors.Wrapf(err, "failed to parse %s", keyCopyIgnoredCheckEnabled) - } - } + bc.localsSessionIDs = parseLocalSessionIDs(opts) + return nil } @@ -331,9 +324,14 @@ func (bc *Client) ReadEntrypoint(ctx context.Context, lang string, opts ...llb.L filenames = append(filenames, path.Join(path.Dir(bctx.filename), strings.ToLower(DefaultDockerfileName))) } + sessionID := bc.bopts.SessionID + if v, ok := bc.localsSessionIDs[bctx.dockerfileLocalName]; ok { + sessionID = v + } + opts = append([]llb.LocalOption{ llb.FollowPaths(filenames), - llb.SessionID(bc.bopts.SessionID), + llb.SessionID(sessionID), llb.SharedKeyHint(bctx.dockerfileLocalName), WithInternalName(name), llb.Differ(llb.DiffNone, false), @@ -427,8 +425,13 @@ func (bc *Client) MainContext(ctx context.Context, opts ...llb.LocalOption) (*ll return nil, errors.Wrapf(err, "failed to read dockerignore patterns") } + sessionID := bc.bopts.SessionID + if v, ok := bc.localsSessionIDs[bctx.contextLocalName]; ok { + sessionID = v + } + opts = append([]llb.LocalOption{ - llb.SessionID(bc.bopts.SessionID), + llb.SessionID(sessionID), llb.ExcludePatterns(excludes), llb.SharedKeyHint(bctx.contextLocalName), WithInternalName("load build context"), @@ -500,8 +503,12 @@ func WithInternalName(name string) llb.ConstraintsOpt { func (bc *Client) dockerIgnorePatterns(ctx context.Context, bctx *buildContext) ([]string, error) { if bc.dockerignore == nil { + sessionID := bc.bopts.SessionID + if v, ok := bc.localsSessionIDs[bctx.contextLocalName]; ok { + sessionID = v + } st := llb.Local(bctx.contextLocalName, - llb.SessionID(bc.bopts.SessionID), + llb.SessionID(sessionID), llb.FollowPaths([]string{DefaultDockerignoreName}), llb.SharedKeyHint(bctx.contextLocalName+"-"+DefaultDockerignoreName), WithInternalName("load "+DefaultDockerignoreName), diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go index 2a7a55878058..3bf7343e4e13 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go @@ -187,8 +187,12 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi } return &st, &img, nil case "local": + sessionID := bc.bopts.SessionID + if v, ok := bc.localsSessionIDs[vv[1]]; ok { + sessionID = v + } st := llb.Local(vv[1], - llb.SessionID(bc.bopts.SessionID), + llb.SessionID(sessionID), llb.FollowPaths([]string{DefaultDockerignoreName}), llb.SharedKeyHint("context:"+nameWithPlatform+"-"+DefaultDockerignoreName), llb.WithCustomName("[context "+nameWithPlatform+"] load "+DefaultDockerignoreName), @@ -226,7 +230,7 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi localOutput := &asyncLocalOutput{ name: vv[1], nameWithPlatform: nameWithPlatform, - sessionID: bc.bopts.SessionID, + sessionID: sessionID, excludes: excludes, extraOpts: opt.AsyncLocalOpts, } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go index 0cf069aa6483..9b84c52c07b3 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go +++ 
b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go @@ -66,7 +66,7 @@ func (bc *Client) HandleSubrequest(ctx context.Context, h RequestHandler) (*clie if warnings == nil { return nil, true, nil } - res, err := warnings.ToResult() + res, err := warnings.ToResult(nil) return res, true, err } } diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go b/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go index d207fba3c082..3b6b444fd397 100644 --- a/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go @@ -16,6 +16,8 @@ import ( "github.com/pkg/errors" ) +type SourceInfoMap func(*pb.SourceInfo) *pb.SourceInfo + const RequestLint = "frontend.lint" var SubrequestLintDefinition = subrequests.Request{ @@ -39,6 +41,27 @@ type Warning struct { Location pb.Location `json:"location,omitempty"` } +func (w *Warning) PrintTo(wr io.Writer, sources []*pb.SourceInfo, scb SourceInfoMap) error { + fmt.Fprintf(wr, "\nWARNING: %s", w.RuleName) + if w.URL != "" { + fmt.Fprintf(wr, " - %s", w.URL) + } + fmt.Fprintf(wr, "\n%s\n", w.Detail) + + if w.Location.SourceIndex < 0 { + return nil + } + sourceInfo := sources[w.Location.SourceIndex] + if scb != nil { + sourceInfo = scb(sourceInfo) + } + source := errdefs.Source{ + Info: sourceInfo, + Ranges: w.Location.Ranges, + } + return source.Print(wr) +} + type BuildError struct { Message string `json:"message"` Location pb.Location `json:"location"` @@ -93,7 +116,7 @@ func (results *LintResults) AddWarning(rulename, description, url, fmtmsg string }) } -func (results *LintResults) ToResult() (*client.Result, error) { +func (results *LintResults) ToResult(scb SourceInfoMap) (*client.Result, error) { res := client.NewResult() dt, err := json.MarshalIndent(results, "", " ") if err != nil { @@ -102,7 +125,7 @@ func (results *LintResults) ToResult() (*client.Result, error) { res.AddMeta("result.json", dt) b := bytes.NewBuffer(nil) - if err := PrintLintViolations(dt, b); err != nil { + if err := PrintLintViolations(dt, b, scb); err != nil { return nil, err } res.AddMeta("result.txt", b.Bytes()) @@ -117,28 +140,7 @@ func (results *LintResults) ToResult() (*client.Result, error) { return res, nil } -func (results *LintResults) validateWarnings() error { - for _, warning := range results.Warnings { - if int(warning.Location.SourceIndex) >= len(results.Sources) { - return errors.Errorf("sourceIndex is out of range") - } - if warning.Location.SourceIndex > 0 { - warningSource := results.Sources[warning.Location.SourceIndex] - if warningSource == nil { - return errors.Errorf("sourceIndex points to nil source") - } - } - } - return nil -} - -func PrintLintViolations(dt []byte, w io.Writer) error { - var results LintResults - - if err := json.Unmarshal(dt, &results); err != nil { - return err - } - +func (results *LintResults) PrintTo(w io.Writer, scb SourceInfoMap) error { if err := results.validateWarnings(); err != nil { return err } @@ -169,21 +171,7 @@ func PrintLintViolations(dt []byte, w io.Writer) error { }) for _, warning := range results.Warnings { - fmt.Fprintf(w, "\nWARNING: %s", warning.RuleName) - if warning.URL != "" { - fmt.Fprintf(w, " - %s", warning.URL) - } - fmt.Fprintf(w, "\n%s\n", warning.Detail) - - if warning.Location.SourceIndex < 0 { - continue - } - sourceInfo := results.Sources[warning.Location.SourceIndex] - source := errdefs.Source{ - Info: sourceInfo, - Ranges: warning.Location.Ranges, - } - err := source.Print(w) + err := 
warning.PrintTo(w, results.Sources, scb) if err != nil { return err } @@ -192,6 +180,47 @@ func PrintLintViolations(dt []byte, w io.Writer) error { return nil } +func (results *LintResults) PrintErrorTo(w io.Writer) { + // This prints out the error in LintResults to the writer in a format that + // is consistent with the warnings for easier downstream consumption. + if results.Error == nil { + return + } + + fmt.Fprintln(w, results.Error.Message) + sourceInfo := results.Sources[results.Error.Location.SourceIndex] + source := errdefs.Source{ + Info: sourceInfo, + Ranges: results.Error.Location.Ranges, + } + source.Print(w) +} + +func (results *LintResults) validateWarnings() error { + for _, warning := range results.Warnings { + if int(warning.Location.SourceIndex) >= len(results.Sources) { + return errors.Errorf("sourceIndex is out of range") + } + if warning.Location.SourceIndex > 0 { + warningSource := results.Sources[warning.Location.SourceIndex] + if warningSource == nil { + return errors.Errorf("sourceIndex points to nil source") + } + } + } + return nil +} + +func PrintLintViolations(dt []byte, w io.Writer, scb SourceInfoMap) error { + var results LintResults + + if err := json.Unmarshal(dt, &results); err != nil { + return err + } + + return results.PrintTo(w, scb) +} + func sourceInfoEqual(a, b *pb.SourceInfo) bool { if a.Filename != b.Filename || a.Language != b.Language { return false diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go index 2eda89d2be07..02383dc090eb 100644 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -16,7 +16,6 @@ type Caller interface { Context() context.Context Supports(method string) bool Conn() *grpc.ClientConn - Name() string SharedKey() string } @@ -106,7 +105,6 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin h := http.Header(opts) id := h.Get(headerSessionID) - name := h.Get(headerSessionName) sharedKey := h.Get(headerSessionSharedKey) ctx, cc, err := grpcClientConn(ctx, conn) @@ -118,7 +116,6 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin c := &client{ Session: Session{ id: id, - name: name, sharedKey: sharedKey, ctx: ctx, cancelCtx: cancel, @@ -197,10 +194,6 @@ func (c *client) Context() context.Context { return c.context() } -func (c *client) Name() string { - return c.name -} - func (c *client) SharedKey() string { return c.sharedKey } diff --git a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go index ea203bf02efd..71b00f893b5e 100644 --- a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go +++ b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go @@ -32,7 +32,7 @@ func (sp *secretProvider) GetSecret(ctx context.Context, req *secrets.GetSecretR dt, err := sp.store.GetSecret(ctx, req.ID) if err != nil { if errors.Is(err, secrets.ErrNotFound) { - return nil, status.Errorf(codes.NotFound, err.Error()) + return nil, status.Error(codes.NotFound, err.Error()) } return nil, err } diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go index c9c8158f493c..bb2bccccb03c 100644 --- a/vendor/github.com/moby/buildkit/session/session.go +++ b/vendor/github.com/moby/buildkit/session/session.go @@ -38,7 +38,6 @@ type 
Attachable interface { type Session struct { mu sync.Mutex // synchronizes conn run and close id string - name string sharedKey string ctx context.Context cancelCtx func(error) @@ -49,7 +48,7 @@ type Session struct { } // NewSession returns a new long running session -func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { +func NewSession(ctx context.Context, sharedKey string) (*Session, error) { id := identity.NewID() serverOpts := []grpc.ServerOption{ @@ -67,7 +66,6 @@ func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { s := &Session{ id: id, - name: name, sharedKey: sharedKey, grpcServer: grpc.NewServer(serverOpts...), } @@ -103,7 +101,6 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error { meta := make(map[string][]string) meta[headerSessionID] = []string{s.id} - meta[headerSessionName] = []string{s.name} meta[headerSessionSharedKey] = []string{s.sharedKey} for name, svc := range s.grpcServer.GetServiceInfo() { diff --git a/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go b/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go index 010959f43dcc..2af434d8063c 100644 --- a/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go +++ b/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go @@ -13,15 +13,15 @@ import ( ) func New() *Uploader { - return &Uploader{m: map[string]io.Reader{}} + return &Uploader{m: map[string]io.ReadCloser{}} } type Uploader struct { mu sync.Mutex - m map[string]io.Reader + m map[string]io.ReadCloser } -func (hp *Uploader) Add(r io.Reader) string { +func (hp *Uploader) Add(r io.ReadCloser) string { id := identity.NewID() hp.m[id] = r return "http://buildkit-session/" + id @@ -51,6 +51,11 @@ func (hp *Uploader) Pull(stream upload.Upload_PullServer) error { hp.mu.Unlock() _, err := io.Copy(&writer{stream}, r) + + if err1 := r.Close(); err == nil { + err = err1 + } + return err } diff --git a/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go b/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go index 3d6626535b9a..d5a785ef62ea 100644 --- a/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go +++ b/vendor/github.com/moby/buildkit/util/appcontext/appcontext.go @@ -38,7 +38,7 @@ func Context() context.Context { err := errors.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", retries) cancel(err) if retries >= exitLimit { - bklog.G(ctx).Errorf(err.Error()) + bklog.G(ctx).Error(err.Error()) os.Exit(1) } } diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_linux.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_linux.go new file mode 100644 index 000000000000..64c3894e58a7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_linux.go @@ -0,0 +1,6 @@ +package appdefaults + +const ( + Address = "unix:///run/buildkit/buildkitd.sock" + traceSocketPath = "/run/buildkit/otel-grpc.sock" +) diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go index 618288dce07c..447a3cfbcf49 100644 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -10,7 +10,6 @@ import ( ) const ( - Address = "unix:///run/buildkit/buildkitd.sock" Root = "/var/lib/buildkit" ConfigDir = "/etc/buildkit" DefaultCNIBinDir = "/opt/cni/bin" @@ -82,5 +81,5 @@ func 
TraceSocketPath(inUserNS bool) string { return filepath.Join(dirs[0], "buildkit", "otel-grpc.sock") } } - return "/run/buildkit/otel-grpc.sock" + return traceSocketPath } diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix_nolinux.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix_nolinux.go new file mode 100644 index 000000000000..943c2471e6a1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix_nolinux.go @@ -0,0 +1,8 @@ +//go:build unix && !linux + +package appdefaults + +const ( + Address = "unix:///var/run/buildkit/buildkitd.sock" + traceSocketPath = "/var/run/buildkit/otel-grpc.sock" +) diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index 710bc1ec8b5a..408a3a8b8882 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -10,6 +10,7 @@ import ( gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/golang/protobuf/ptypes/any" + "github.com/moby/buildkit/errdefs" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/stack" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -94,6 +95,13 @@ func withDetails(ctx context.Context, s *status.Status, details ...proto.Message } func Code(err error) codes.Code { + if errdefs.IsInternal(err) { + if errdefs.IsResourceExhausted(err) { + return codes.ResourceExhausted + } + return codes.Internal + } + if se, ok := err.(interface { Code() codes.Code }); ok { diff --git a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go index 1a2f54ed7651..6d5c472f1d61 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go +++ b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go @@ -64,11 +64,7 @@ func retryError(err error) bool { return true } // catches TLS timeout or other network-related temporary errors - if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { //nolint:staticcheck // ignoring "SA1019: Temporary is deprecated", continue to propagate net.Error through the "temporary" status - return true - } - // https://github.com/containerd/containerd/pull/4724 - if errors.Cause(err).Error() == "no response" { + if ne := net.Error(nil); errors.As(err, &ne) && ne.Temporary() { // ignoring "SA1019: Temporary is deprecated", continue to propagate net.Error through the "temporary" status return true } diff --git a/vendor/github.com/moby/buildkit/util/stack/compress.go b/vendor/github.com/moby/buildkit/util/stack/compress.go new file mode 100644 index 000000000000..6d9a78f34702 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/stack/compress.go @@ -0,0 +1,67 @@ +package stack + +import ( + "slices" +) + +func compressStacks(st []*Stack) []*Stack { + if len(st) == 0 { + return nil + } + + slices.SortFunc(st, func(a, b *Stack) int { + return len(b.Frames) - len(a.Frames) + }) + + out := []*Stack{st[0]} + +loop0: + for _, st := range st[1:] { + maxIdx := -1 + for _, prev := range out { + idx := subFrames(st.Frames, prev.Frames) + if idx == -1 { + continue + } + // full match, potentially skip all + if idx == len(st.Frames)-1 { + if st.Pid == prev.Pid && st.Version == prev.Version && slices.Compare(st.Cmdline, st.Cmdline) == 0 { + continue loop0 + } + } + if idx > maxIdx { + maxIdx = idx + 
} + } + + if maxIdx > 0 { + st.Frames = st.Frames[:len(st.Frames)-maxIdx] + } + out = append(out, st) + } + + return out +} + +func subFrames(a, b []*Frame) int { + idx := -1 + i := len(a) - 1 + j := len(b) - 1 + for i >= 0 { + if j < 0 { + break + } + if a[i].Equal(b[j]) { + idx++ + i-- + j-- + } else { + break + } + } + return idx +} + +func (a *Frame) Equal(b *Frame) bool { + return a.File == b.File && a.Line == b.Line && a.Name == b.Name +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index 2fec18cb7a6d..458a1a7817e1 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -44,6 +44,10 @@ func Helper() { } func Traces(err error) []*Stack { + return compressStacks(traces(err)) +} + +func traces(err error) []*Stack { var st []*Stack switch e := err.(type) { diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/pins.go b/vendor/github.com/moby/buildkit/util/testutil/integration/pins.go index 75236f0236c0..494d4fa4bb6b 100644 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/pins.go +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/pins.go @@ -1,3 +1,6 @@ +//go:build !windows +// +build !windows + package integration var pins = map[string]map[string]string{ diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go b/vendor/github.com/moby/buildkit/util/testutil/integration/run.go index b6b634e3be60..4bacda5e435c 100644 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/run.go @@ -189,7 +189,9 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { t.Skip("rootless") } ctx := appcontext.Context() - if !strings.HasSuffix(fn, "NoParallel") { + // TODO(profnandaa): to revisit this to allow tests run + // in parallel on Windows in a stable way. Is flaky currently. + if !strings.HasSuffix(fn, "NoParallel") && runtime.GOOS != "windows" { t.Parallel() } require.NoError(t, sandboxLimiter.Acquire(context.TODO(), 1)) @@ -218,7 +220,7 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) { func getFunctionName(i interface{}) string { fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() dot := strings.LastIndex(fullname, ".") + 1 - return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support + return strings.Title(fullname[dot:]) // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support } var localImageCache map[string]map[string]struct{} @@ -273,23 +275,7 @@ func copyImagesLocal(t *testing.T, host string, images map[string]string) error } func OfficialImages(names ...string) map[string]string { - ns := runtime.GOARCH - if ns == "arm64" { - ns = "arm64v8" - } else if ns != "amd64" { - ns = "library" - } - m := map[string]string{} - for _, name := range names { - ref := "docker.io/" + ns + "/" + name - if pns, ok := pins[name]; ok { - if dgst, ok := pns[ns]; ok { - ref += "@" + dgst - } - } - m["library/"+name] = ref - } - return m + return officialImages(names...) 
} func withMirrorConfig(mirror string) ConfigUpdater { @@ -439,9 +425,19 @@ func prepareValueMatrix(tc testConf) []matrixValue { return m } -// Skips tests on Windows +// Skips tests on platform func SkipOnPlatform(t *testing.T, goos string) { if runtime.GOOS == goos { t.Skipf("Skipped on %s", goos) } } + +// Selects between two types, returns second +// argument if on Windows or else first argument. +// Typically used for selecting test cases. +func UnixOrWindows[T any](unix, windows T) T { + if runtime.GOOS == "windows" { + return windows + } + return unix +} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/run_unix.go b/vendor/github.com/moby/buildkit/util/testutil/integration/run_unix.go new file mode 100644 index 000000000000..138a6a1cdbcd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/run_unix.go @@ -0,0 +1,26 @@ +//go:build !windows +// +build !windows + +package integration + +import "runtime" + +func officialImages(names ...string) map[string]string { + ns := runtime.GOARCH + if ns == "arm64" { + ns = "arm64v8" + } else if ns != "amd64" { + ns = "library" + } + m := map[string]string{} + for _, name := range names { + ref := "docker.io/" + ns + "/" + name + if pns, ok := pins[name]; ok { + if dgst, ok := pns[ns]; ok { + ref += "@" + dgst + } + } + m["library/"+name] = ref + } + return m +} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/run_windows.go b/vendor/github.com/moby/buildkit/util/testutil/integration/run_windows.go new file mode 100644 index 000000000000..69d9bd54060c --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/run_windows.go @@ -0,0 +1,13 @@ +package integration + +func officialImages(names ...string) map[string]string { + m := map[string]string{} + for _, name := range names { + // select available refs from the mirror map + // so that we mirror only those needed for the tests + if ref, ok := windowsImagesMirrorMap[name]; ok { + m["library/"+name] = ref + } + } + return m +} diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go b/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go index 685458b1c339..6ebd3241a195 100644 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go @@ -59,6 +59,8 @@ func (sb *sandbox) NewRegistry() (string, error) { func (sb *sandbox) Cmd(args ...string) *exec.Cmd { if len(args) == 1 { + // \\ being stripped off for Windows paths, convert to unix style + args[0] = strings.ReplaceAll(args[0], "\\", "/") if split, err := shlex.Split(args[0]); err == nil { args = split } @@ -151,7 +153,7 @@ func FormatLogs(m map[string]*bytes.Buffer) string { func CheckFeatureCompat(t *testing.T, sb Sandbox, features map[string]struct{}, reason ...string) { t.Helper() if err := HasFeatureCompat(t, sb, features, reason...); err != nil { - t.Skipf(err.Error()) + t.Skip(err.Error()) } } diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/util.go b/vendor/github.com/moby/buildkit/util/testutil/integration/util.go index 8ee29a962b11..1ebfed93aa2f 100644 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/util.go +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/util.go @@ -94,8 +94,12 @@ func StartCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) (func() error, error case <-ctx.Done(): case <-stopped: case <-stop: + // windows processes not responding to SIGTERM + signal := 
UnixOrWindows(syscall.SIGTERM, syscall.SIGKILL) + signalStr := UnixOrWindows("SIGTERM", "SIGKILL") fmt.Fprintf(cmd.Stderr, "> sending sigterm %v\n", time.Now()) - cmd.Process.Signal(syscall.SIGTERM) + fmt.Fprintf(cmd.Stderr, "> sending %s %v\n", signalStr, time.Now()) + cmd.Process.Signal(signal) go func() { select { case <-stopped: diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/util_windows.go b/vendor/github.com/moby/buildkit/util/testutil/integration/util_windows.go index 42d6f3fa4110..d0b70dc2227e 100644 --- a/vendor/github.com/moby/buildkit/util/testutil/integration/util_windows.go +++ b/vendor/github.com/moby/buildkit/util/testutil/integration/util_windows.go @@ -11,6 +11,17 @@ import ( var socketScheme = "npipe://" +var windowsImagesMirrorMap = map[string]string{ + // TODO(profnandaa): currently, amd64 only, to revisit for other archs. + "nanoserver:latest": "mcr.microsoft.com/windows/nanoserver:ltsc2022", + "servercore:latest": "mcr.microsoft.com/windows/servercore:ltsc2022", + "busybox:latest": "registry.k8s.io/e2e-test-images/busybox@sha256:6d854ffad9666d2041b879a1c128c9922d77faced7745ad676639b07111ab650", + // nanoserver with extra binaries, like fc.exe + // TODO(profnandaa): get an approved/compliant repo, placeholder for now + // see dockerfile here - https://github.com/microsoft/windows-container-tools/pull/178 + "nanoserver:plus": "docker.io/wintools/nanoserver:ltsc2022", +} + // abstracted function to handle pipe dialing on windows. // some simplification has been made to discard timeout param. func dialPipe(address string) (net.Conn, error) { diff --git a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go index e78e726196e1..58f13c269d36 100644 --- a/vendor/github.com/moby/sys/mountinfo/mounted_linux.go +++ b/vendor/github.com/moby/sys/mountinfo/mounted_linux.go @@ -51,7 +51,7 @@ func mountedByOpenat2(path string) (bool, error) { Resolve: unix.RESOLVE_NO_XDEV, }) _ = unix.Close(dirfd) - switch err { //nolint:errorlint // unix errors are bare + switch err { case nil: // definitely not a mount _ = unix.Close(fd) return false, nil diff --git a/vendor/github.com/moby/sys/user/user.go b/vendor/github.com/moby/sys/user/user.go index 984466d1ab59..198c49367953 100644 --- a/vendor/github.com/moby/sys/user/user.go +++ b/vendor/github.com/moby/sys/user/user.go @@ -197,7 +197,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { for { var line []byte line, isPrefix, err = rd.ReadLine() - if err != nil { // We should return no error if EOF is reached // without a match. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/LICENSE b/vendor/github.com/moby/sys/userns/LICENSE similarity index 99% rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/LICENSE rename to vendor/github.com/moby/sys/userns/LICENSE index 261eeb9e9f8b..d64569567334 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/LICENSE +++ b/vendor/github.com/moby/sys/userns/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/moby/sys/userns/userns.go b/vendor/github.com/moby/sys/userns/userns.go new file mode 100644 index 000000000000..56b24c44ad0d --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns.go @@ -0,0 +1,16 @@ +// Package userns provides utilities to detect whether we are currently running +// in a Linux user namespace. 
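
As a minimal usage sketch for the newly vendored github.com/moby/sys/userns package introduced above (the calling program here is hypothetical, not part of this diff), callers that previously depended on runc's libcontainer/userns helper can switch to:

package main

import (
	"fmt"

	"github.com/moby/sys/userns"
)

func main() {
	// RunningInUserNS memoizes its result, so repeated calls are cheap.
	// On non-Linux platforms the stub implementation always reports false.
	if userns.RunningInUserNS() {
		fmt.Println("running inside a user namespace (e.g. rootless mode)")
	}
}
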
+// +// This code was migrated from [libcontainer/runc], which based its implementation +// on code from [lcx/incus]. +// +// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49 +// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700 +package userns + +// RunningInUserNS detects whether we are currently running in a Linux +// user namespace and memoizes the result. It returns false on non-Linux +// platforms. +func RunningInUserNS() bool { + return inUserNS() +} diff --git a/vendor/github.com/moby/sys/userns/userns_linux.go b/vendor/github.com/moby/sys/userns/userns_linux.go new file mode 100644 index 000000000000..87c1c38eec27 --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_linux.go @@ -0,0 +1,53 @@ +package userns + +import ( + "bufio" + "fmt" + "os" + "sync" +) + +var inUserNS = sync.OnceValue(runningInUserNS) + +// runningInUserNS detects whether we are currently running in a user namespace. +// +// This code was migrated from [libcontainer/runc] and based on an implementation +// from [lcx/incus]. +// +// [libcontainer/runc]: https://github.com/opencontainers/runc/blob/3778ae603c706494fd1e2c2faf83b406e38d687d/libcontainer/userns/userns_linux.go#L12-L49 +// [lcx/incus]: https://github.com/lxc/incus/blob/e45085dd42f826b3c8c3228e9733c0b6f998eafe/shared/util.go#L678-L700 +func runningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + // This kernel-provided file only exists if user namespaces are supported. + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + return uidMapInUserNS(string(l)) +} + +func uidMapInUserNS(uidMap string) bool { + if uidMap == "" { + // File exist but empty (the initial state when userns is created, + // see user_namespaces(7)). + return true + } + + var a, b, c int64 + if _, err := fmt.Sscanf(uidMap, "%d %d %d", &a, &b, &c); err != nil { + // Assume we are in a regular, non user namespace. + return false + } + + // As per user_namespaces(7), /proc/self/uid_map of + // the initial user namespace shows 0 0 4294967295. + initNS := a == 0 && b == 0 && c == 4294967295 + return !initNS +} diff --git a/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go new file mode 100644 index 000000000000..26ba2e16ec4e --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_linux_fuzzer.go @@ -0,0 +1,8 @@ +//go:build linux && gofuzz + +package userns + +func FuzzUIDMap(uidmap []byte) int { + _ = uidMapInUserNS(string(uidmap)) + return 1 +} diff --git a/vendor/github.com/moby/sys/userns/userns_unsupported.go b/vendor/github.com/moby/sys/userns/userns_unsupported.go new file mode 100644 index 000000000000..8ed83072c233 --- /dev/null +++ b/vendor/github.com/moby/sys/userns/userns_unsupported.go @@ -0,0 +1,6 @@ +//go:build !linux + +package userns + +// inUserNS is a stub for non-Linux systems. Always returns false. 
+func inUserNS() bool { return false } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go index ff0647deec37..16f9af12b66e 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go @@ -61,7 +61,11 @@ func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { if c.conn == nil { // If the caller did not provide a ClientConn when the client was // created, create one using the configuration they did provide. - conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...) + userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version() + dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} + dialOpts = append(dialOpts, cfg.DialOptions...) + + conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...) if err != nil { return nil, err } @@ -172,28 +176,36 @@ func (c *client) exportContext(parent context.Context) (context.Context, context // duration to wait for if an explicit throttle time is included in err. func retryable(err error) (bool, time.Duration) { s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { case codes.Canceled, codes.DeadlineExceeded, - codes.ResourceExhausted, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss: - return true, throttleDelay(s) + // Additionally, handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that the recovery from resource exhaustion is possible. + return throttleDelay(s) } // Not a retry-able error. return false, 0 } -// throttleDelay returns a duration to wait for if an explicit throttle time -// is included in the response status. -func throttleDelay(s *status.Status) time.Duration { +// throttleDelay returns if the status is RetryInfo +// and the duration to wait for if an explicit throttle time is included. +func throttleDelay(s *status.Status) (bool, time.Duration) { for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { - return t.RetryDelay.AsDuration() + return true, t.RetryDelay.AsDuration() } } - return 0 + return false, 0 } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go index 6ba3600b1c1e..fbd495e7d7d6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -109,13 +109,7 @@ func compressorToCompression(compressor string) oconf.Compression { } // WithCompressor sets the compressor the gRPC client uses. -// -// It is the responsibility of the caller to ensure that the compressor set -// has been registered with google.golang.org/grpc/encoding (see -// encoding.RegisterCompressor for more information). For example, to register -// the gzip compressor import the package: -// -// import _ "google.golang.org/grpc/encoding/gzip" +// Supported compressor values: "gzip". 
// // If the OTEL_EXPORTER_OTLP_COMPRESSION or // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go index 7820619bf608..7be572a79d02 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -12,6 +12,85 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates -// with an OTLP receiving endpoint using gRPC. +/* +Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC. +By default the telemetry is sent to https://localhost:4317. + +Exporter should be created using [New] and used with a [metric.PeriodicReader]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - +target to which the exporter sends telemetry. +The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. +The value must contain a host. +The value may additionally a port, a scheme, and a path. +The value accepts "http" and "https" scheme. +The value should not contain a query string or fragment. +OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. +The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") - +setting "true" disables client transport security for the exporter's gRPC connection. +You can use this only when an endpoint is provided without the http or https scheme. +OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides +the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT. +OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. +The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - +key-value pairs used as gRPC metadata associated with gRPC requests. +The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export. +OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) - +the gRPC compressor the exporter uses. +Supported value: "gzip". +OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. 
+ +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) - +the filepath to the clients private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. + +OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") - +aggregation temporality to use on the basis of instrument kind. Supported values: + - "cumulative" - Cumulative aggregation temporality for all instrument kinds, + - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds; + Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds, + - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds; + Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds. + +The configuration can be overridden by [WithTemporalitySelector] option. + +OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") - +default aggregation to use for histogram instruments. Supported values: + - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation], + - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation]. + +The configuration can be overridden by [WithAggregationSelector] option. 
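
The environment variables documented above all have programmatic counterparts; as a rough sketch (the collector address is a placeholder and the wiring shown is not part of this diff), the gRPC exporter is typically created and attached to a PeriodicReader like this:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()

	// Roughly what OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=http://localhost:4317
	// and OTEL_EXPORTER_OTLP_METRICS_COMPRESSION=gzip would configure;
	// options take precedence over the environment variables.
	exp, err := otlpmetricgrpc.New(ctx,
		otlpmetricgrpc.WithEndpoint("localhost:4317"),
		otlpmetricgrpc.WithInsecure(),
		otlpmetricgrpc.WithCompressor("gzip"),
	)
	if err != nil {
		log.Fatal(err)
	}

	mp := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() { _ = mp.Shutdown(ctx) }()
}
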
+ +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation +[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation +*/ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go index 1d571294695c..17951ceb451a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go @@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.QueryUnescape(n) + name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(v) + value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go index 40a4469f77ae..a85f27122243 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) @@ -122,7 +121,6 @@ func cleanPath(urlPath string, defaultPath string) string { // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { - userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version() cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), @@ -134,7 +132,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config { AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, - DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { @@ -158,9 +155,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.Metrics.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } - if len(cfg.DialOptions) != 0 { - cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...) 
- } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go similarity index 81% rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/version.go rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go index b4ecccdfb401..983968c6a3bd 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" -// Version is the current release version of the OpenTelemetry OTLP metrics exporter in use. +// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { - return "0.42.0" + return "0.44.0" } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 33f5474c1aa2..73463c91d5f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -18,6 +18,7 @@ import ( "bytes" "compress/gzip" "context" + "errors" "fmt" "io" "net" @@ -30,7 +31,6 @@ import ( "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" @@ -89,7 +89,7 @@ func newClient(cfg oconf.Config) (*client, error) { return nil, err } - userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version() + userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version() req.Header.Set("User-Agent", userAgent) if n := len(cfg.Metrics.Headers); n > 0 { @@ -148,6 +148,10 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou request.reset(iCtx) resp, err := c.httpClient.Do(request.Request) + var urlErr *url.Error + if errors.As(err, &urlErr) && urlErr.Temporary() { + return newResponseError(http.Header{}) + } if err != nil { return err } @@ -162,8 +166,11 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou if _, err := io.Copy(&respData, resp.Body); err != nil { return err } + if respData.Len() == 0 { + return nil + } - if respData.Len() != 0 { + if resp.Header.Get("Content-Type") == "application/x-protobuf" { var respProto colmetricpb.ExportMetricsServiceResponse if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { return err @@ -179,7 +186,10 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou } } return nil - case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable: + case sc == http.StatusTooManyRequests, + sc == http.StatusBadGateway, + sc == http.StatusServiceUnavailable, + sc == http.StatusGatewayTimeout: // Retry-able failure. 
rErr = newResponseError(resp.Header) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go index a49e24651711..94f8b250d3fe 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go @@ -12,7 +12,82 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package otlpmetrichttp provides an otlpmetric.Exporter that communicates -// with an OTLP receiving endpoint using protobuf encoded metric data over -// HTTP. +/* +Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads. +By default the telemetry is sent to https://localhost:4318/v1/metrics. + +Exporter should be created using [New] and used with a [metric.PeriodicReader]. + +The environment variables described below can be used for configuration. + +OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") - +target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port and a path. +The value should not contain a query string or fragment. +The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +environment variable and by [WithEndpoint], [WithInsecure] options. + +OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") - +target URL to which the exporter sends telemetry. +The value must contain a scheme ("http" or "https") and host. +The value may additionally contain a port and a path. +The value should not contain a query string or fragment. +The configuration can be overridden by [WithEndpoint], [WitnInsecure], [WithURLPath] options. + +OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - +key-value pairs used as headers associated with HTTP requests. +The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], +except that additional semi-colon delimited metadata is not supported. +Example value: "key1=value1,key2=value2". +OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. +The configuration can be overridden by [WithHeaders] option. + +OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") - +maximum time in milliseconds the OTLP exporter waits for each batch export. +OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. +The configuration can be overridden by [WithTimeout] option. + +OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) - +compression strategy the exporter uses to compress the HTTP body. +Supported values: "gzip". +OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompression] option. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) - +filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSClientConfig] option. 
+ +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) - +filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSClientConfig] option. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) - +filepath to the clients private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSClientConfig] option. + +OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") - +aggregation temporality to use on the basis of instrument kind. Supported values: + - "cumulative" - Cumulative aggregation temporality for all instrument kinds, + - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds; + Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds, + - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds; + Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds. + +The configuration can be overridden by [WithTemporalitySelector] option. + +OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") - +default aggregation to use for histogram instruments. Supported values: + - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation], + - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation]. + +The configuration can be overridden by [WithAggregationSelector] option. 
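
The temporality preference described above can also be set in code rather than via OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE; a sketch using the HTTP exporter, where the deltaForCounters selector is illustrative (it mirrors the "delta" preference: delta for counters and histograms, cumulative otherwise) and not part of this change:

package example

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// deltaForCounters selects delta temporality for counter and histogram
// instrument kinds and cumulative temporality for everything else.
func deltaForCounters(k sdkmetric.InstrumentKind) metricdata.Temporality {
	switch k {
	case sdkmetric.InstrumentKindCounter,
		sdkmetric.InstrumentKindObservableCounter,
		sdkmetric.InstrumentKindHistogram:
		return metricdata.DeltaTemporality
	default:
		return metricdata.CumulativeTemporality
	}
}

func newReader(ctx context.Context) *sdkmetric.PeriodicReader {
	exp, err := otlpmetrichttp.New(ctx,
		otlpmetrichttp.WithEndpoint("localhost:4318"),
		otlpmetrichttp.WithURLPath("/v1/metrics"),
		otlpmetrichttp.WithInsecure(),
		otlpmetrichttp.WithTemporalitySelector(deltaForCounters),
	)
	if err != nil {
		log.Fatal(err)
	}
	return sdkmetric.NewPeriodicReader(exp)
}
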
+ +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation +[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation +*/ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go index 38859fb9342b..9dfb55c41bb6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go @@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } - name, err := url.QueryUnescape(n) + name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(v) + value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go index c1ec5ed210a2..59468b9a5ed6 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go @@ -30,7 +30,6 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) @@ -122,7 +121,6 @@ func cleanPath(urlPath string, defaultPath string) string { // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { - userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version() cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), @@ -134,7 +132,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config { AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, - DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { @@ -158,9 +155,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.Metrics.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } - if len(cfg.DialOptions) != 0 { - cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...) 
- } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go similarity index 58% rename from vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/doc.go rename to vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go index 31831c415fec..59d7c1c2cb9b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/doc.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/version.go @@ -12,9 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package otlpmetric provides an OpenTelemetry metric Exporter that can be -// used with PeriodicReader. It transforms metricdata into OTLP and transmits -// the transformed data to OTLP receivers. The Exporter is configurable to use -// different Clients, each using a distinct transport protocol to communicate -// to an OTLP receiving endpoint. -package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" +package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. +func Version() string { + return "0.44.0" +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a8b..000000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. 
-package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b8679376a..000000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a9045b..000000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa525..000000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. 
The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. 
If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
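
Not part of the patch: as the removed package's own documentation notes, it has been superseded by the standard library since Go 1.7 (and is a pure alias since Go 1.9), so code that still imports golang.org/x/net/context can simply switch to "context". A minimal stdlib sketch of the equivalent usage:

package main

import (
	"context"
	"fmt"
	"time"
)

// ctxKey is an unexported key type, as the removed documentation recommends.
type ctxKey struct{}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel() // release resources as soon as the work is done

	ctx = context.WithValue(ctx, ctxKey{}, "request-scoped value")
	<-ctx.Done()
	fmt.Println(ctx.Err(), ctx.Value(ctxKey{}))
}
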
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803358..000000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000000..e99c92f39c78 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
+ VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. +func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. 
+func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda53e5..e83ddeef0fc2 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. 
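
Not part of the patch: a sketch of the end-to-end flow the new deviceauth.go enables, using only the identifiers it adds (DeviceAuth, DeviceAccessToken, Endpoint.DeviceAuthURL); the client ID and endpoint URLs are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "example-client-id", // placeholder
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://auth.example.com/device/code", // placeholder
			TokenURL:      "https://auth.example.com/token",       // placeholder
		},
		Scopes: []string{"profile"},
	}

	ctx := context.Background()
	// Step 1: obtain a device code and a user code.
	da, err := conf.DeviceAuth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Step 2: poll the token endpoint until the user approves (or the code expires).
	tok, err := conf.DeviceAccessToken(ctx, da)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token acquired:", tok.AccessToken != "")
}
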
-func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe34ea..90a2c3d6dcb1 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. 
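
Not part of the patch: the LazyAuthStyleCache introduced above boils down to lazily sharing one cache through an atomic.Value, so the enclosing Config can stay a plain, copyable struct. A reduced stdlib-only sketch of the same idiom:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type cache struct {
	mu sync.Mutex
	m  map[string]string
}

type lazyCache struct {
	v atomic.Value // holds a *cache once initialized
}

func (lc *lazyCache) get() *cache {
	if c, ok := lc.v.Load().(*cache); ok {
		return c
	}
	c := &cache{m: make(map[string]string)}
	if !lc.v.CompareAndSwap(nil, c) {
		// Another goroutine initialized it first; use theirs.
		c = lc.v.Load().(*cache)
	}
	return c
}

func main() {
	var lc lazyCache
	fmt.Println(lc.get() == lc.get()) // true: every caller sees the same cache
}
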
-// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000000..50593b6dfec6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." 
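
Not part of the patch: a sketch of the PKCE round trip the revised AuthCodeURL/Exchange docs recommend, built only from the helpers added in pkce.go; the client ID, URLs, and the authorization code are placeholders.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "example-client-id",                   // placeholder
		RedirectURL: "https://client.example.com/callback", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // placeholder
			TokenURL: "https://auth.example.com/token",     // placeholder
		},
	}

	// One fresh verifier per authorization, as GenerateVerifier's docs require.
	verifier := oauth2.GenerateVerifier()

	// Send the user to the consent page with the S256 challenge attached.
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// After the redirect, exchange the code together with the original verifier.
	code := "authorization-code-from-callback" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	if err != nil {
		log.Fatal(err)
	}
	_ = tok
}
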
+ // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5ffce9764be7..5bbb33217488 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529da5..3a5e776f895a 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897d23..4cc7b005967e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + 
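
Not part of the patch: the new PthreadChdir/PthreadFchdir wrappers affect only the calling OS thread, so they are normally paired with runtime.LockOSThread. A Darwin-only sketch; the directory and file name are placeholders, and the per-thread working directory semantics are those of pthread_chdir_np.

//go:build darwin

package main

import (
	"log"
	"os"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// Pin this goroutine to one OS thread; the per-thread cwd only applies there.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := unix.PthreadChdir("/tmp"); err != nil { // placeholder directory
		log.Fatal(err)
	}
	// Relative paths opened from this locked thread should now resolve under /tmp.
	if f, err := os.Open("example.txt"); err == nil { // placeholder file name
		f.Close()
	}
}
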
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c7de..4e92e5aa4062 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240a4f..07642c308d3a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb2840285..923e08cb7924 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT 
libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b526..7d73dda64733 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab747..057700111e74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 6f7d2ac70a93..97651b5bd04b 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -894,7 +894,7 @@ type ACL struct { aclRevision byte sbz1 byte aclSize uint16 - aceCount uint16 + AceCount uint16 sbz2 uint16 } @@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct { Trustee TRUSTEE } +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header +type ACE_HEADER struct { + AceType uint8 + AceFlags uint8 + AceSize uint16 +} + +// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace 
+type ACCESS_ALLOWED_ACE struct { + Header ACE_HEADER + Mask ACCESS_MASK + SidStart uint32 +} + +const ( + // Constants for AceType + // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header + ACCESS_ALLOWED_ACE_TYPE = 0 + ACCESS_DENIED_ACE_TYPE = 1 +) + // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. type TrusteeValue uintptr @@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct { //sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD //sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW +//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce // Control returns the security descriptor control bits. func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9f73df75b5fe..eba761018aaf 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -1224,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20a1b..0569f5dd43e4 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
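
Not part of the patch: a Windows-only sketch that uses the newly exported ACL.AceCount together with the new GetAce wrapper and ACCESS_ALLOWED_ACE type to walk a file's DACL; the path is a placeholder.

//go:build windows

package main

import (
	"fmt"
	"log"
	"unsafe"

	"golang.org/x/sys/windows"
)

func main() {
	sd, err := windows.GetNamedSecurityInfo(`C:\Windows`, // placeholder path
		windows.SE_FILE_OBJECT, windows.DACL_SECURITY_INFORMATION)
	if err != nil {
		log.Fatal(err)
	}
	dacl, _, err := sd.DACL()
	if err != nil || dacl == nil {
		log.Fatal("no DACL: ", err)
	}
	for i := uint32(0); i < uint32(dacl.AceCount); i++ {
		var ace *windows.ACCESS_ALLOWED_ACE
		if err := windows.GetAce(dacl, i, &ace); err != nil {
			log.Println(err)
			continue
		}
		if ace.Header.AceType != windows.ACCESS_ALLOWED_ACE_TYPE {
			continue
		}
		// SidStart is the first uint32 of the trustee SID that follows the header.
		sid := (*windows.SID)(unsafe.Pointer(&ace.SidStart))
		fmt.Printf("ACE %d: mask %#x, sid %s\n", i, ace.Mask, sid)
	}
}
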
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
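
Not part of the patch: the jointContext above simply takes its deadline and cancellation from one parent while letting value lookups fall through to a second one. A stdlib-only sketch of that shape, illustrative rather than the package's actual code:

package main

import (
	"context"
	"fmt"
	"time"
)

// joint mirrors the shape of jointContext: deadline/cancellation from base,
// values from base first and then from valuesOnly.
type joint struct {
	base, valuesOnly context.Context
}

func (c joint) Deadline() (time.Time, bool) { return c.base.Deadline() }
func (c joint) Done() <-chan struct{}       { return c.base.Done() }
func (c joint) Err() error                  { return c.base.Err() }
func (c joint) Value(key interface{}) interface{} {
	if v := c.base.Value(key); v != nil {
		return v
	}
	return c.valuesOnly.Value(key)
}

type reqKey struct{}

func main() {
	base, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	req := context.WithValue(context.Background(), reqKey{}, "from the request")

	var ctx context.Context = joint{base: base, valuesOnly: req}
	fmt.Println(ctx.Value(reqKey{})) // "from the request"; deadline and cancellation follow base
}
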
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. 
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35c2..87c33c798e87 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b724..5b95c13d9266 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e42573..0f95aa91d5bc 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e34d..5ad3548bf741 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3f8..4201b6b585a6 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d8067263556..18ddda3a4235 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd18..afd0ae84fdfb 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333879..86a8caf06f31 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae65380a..2ae8ab9fa421 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d901a..6c0d72418d89 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22bfc8..b5e30cff0215 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. 
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. 
func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. 
-func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 429c389e4730..e6f2625b6844 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. 
) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. 
Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. 
- if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. 
+ if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() - defer cc.mu.Unlock() if cc.conns == nil { - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - return nil + cc.mu.Unlock() + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle cc.addTraceEvent("entering idle mode") - go func() { - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() + cc.initIdleStateLocked() - return nil + cc.mu.Unlock() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. 
csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,11 +699,11 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } - internal.EnterIdleModeForTesting = func(cc *ClientConn) error { - return cc.enterIdleMode() + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() } internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.exitIdleMode() + return cc.idlenessMgr.ExitIdleMode() } } @@ -824,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -872,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -894,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -910,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. 
In both cases, we will clear the balancer attributes by @@ -932,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -947,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -968,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1174,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1216,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1253,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
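// A standalone sketch of the shutdown ordering used in Close above: stop the
// idleness machinery first, mark the connection closed under the lock, close
// the wrappers in dependency order, wait for their goroutines to drain, and
// only then tear down the remaining subchannels. The worker type below is
// illustrative, not the gRPC one.
package main

import "fmt"

type worker struct {
    name string
    done chan struct{}
}

func newWorker(name string) *worker {
    return &worker{name: name, done: make(chan struct{})}
}

// close asks the worker to stop; done is closed once it has fully finished.
// In real code this would signal a goroutine; here it completes immediately.
func (w *worker) close() {
    close(w.done)
}

func main() {
    resolver := newWorker("resolver")
    picker := newWorker("picker")
    balancer := newWorker("balancer")
    subchannels := []string{"subchannel-1", "subchannel-2"}

    resolver.close()
    picker.close() // the balancer assumes the picker is closed before it is
    balancer.close()

    // Block until the asynchronous parts have drained before touching conns.
    <-resolver.done
    <-balancer.done

    for _, sc := range subchannels {
        fmt.Println("tearing down", sc)
    }
}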
- if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1307,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1345,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1849,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -2007,32 +1886,3 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182db2..08476ad1fe17 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. 
+// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21af7..5dafd34edf93 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
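// A standalone sketch of the defaulting NewTLS applies in the hunk that
// follows: when the caller sets no MinVersion, require TLS 1.2, and when no
// CipherSuites are set, keep only the secure suites that RFC 7540 Appendix A
// does not forbid for HTTP/2. The forbidden set here is abbreviated for
// illustration; the vendored code carries the full table.
package main

import (
    "crypto/tls"
    "fmt"
)

var forbiddenForHTTP2 = map[uint16]struct{}{
    tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      {},
    tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      {},
    tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   {},
    tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
}

func applyHTTP2Defaults(cfg *tls.Config) {
    if cfg.MinVersion == 0 && (cfg.MaxVersion == 0 || cfg.MaxVersion >= tls.VersionTLS12) {
        cfg.MinVersion = tls.VersionTLS12
    }
    if cfg.CipherSuites == nil {
        for _, cs := range tls.CipherSuites() { // secure suites only
            if _, bad := forbiddenForHTTP2[cs.ID]; !bad {
                cfg.CipherSuites = append(cfg.CipherSuites, cs.ID)
            }
        }
    }
}

func main() {
    cfg := &tls.Config{}
    applyHTTP2Defaults(cfg)
    fmt.Printf("min version: %#x, %d cipher suites allowed\n", cfg.MinVersion, len(cfg.CipherSuites))
}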
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cfc9fd85e8dd..ba2426180402 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,14 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, - idleTimeout: 30 * time.Minute, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -705,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. 
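// A standalone sketch of a shared receive-buffer pool of the kind the
// WithRecvBufferPool option below configures. The interface shape assumed
// here (Get(length int) []byte and Put(*[]byte)) mirrors grpc.SharedBufferPool
// as vendored; treat it as an assumption and prefer the pool constructors the
// grpc package itself provides in real code.
package main

import (
    "fmt"
    "sync"
)

type simplePool struct {
    pool sync.Pool
}

func newSimplePool() *simplePool {
    return &simplePool{pool: sync.Pool{New: func() any {
        b := make([]byte, 0, 4096)
        return &b
    }}}
}

func (p *simplePool) Get(length int) []byte {
    buf := p.pool.Get().(*[]byte)
    if cap(*buf) < length {
        *buf = make([]byte, length)
    }
    return (*buf)[:length]
}

func (p *simplePool) Put(buf *[]byte) {
    p.pool.Put(buf)
}

func main() {
    p := newSimplePool()
    b := p.Get(1024)
    fmt.Println("buffer length:", len(b))
    p.Put(&b)
}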
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df4959..11f91668ac9b 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e77529cd..fc094f3441b8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. 
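// A standalone sketch of the producer/consumer contract for the reworked
// buffer.Unbounded above: Put can now fail once Close has been called, the
// reader calls Load after every successful receive, and the channel from Get
// is only closed after the backlog has drained. Note the import below is a
// gRPC-internal package, so this sketch only builds inside the grpc module.
package main

import (
    "fmt"

    "google.golang.org/grpc/internal/buffer"
)

func main() {
    b := buffer.NewUnbounded()
    for i := 0; i < 3; i++ {
        if err := b.Put(i); err != nil {
            fmt.Println("put rejected:", err) // only possible after Close
        }
    }
    b.Close() // no further Puts; Get's channel closes once the backlog drains

    for v := range b.Get() {
        b.Load() // request the next buffered value, if any
        fmt.Println("got", v)
    }
}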
func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfbd4c..685a3cb41b13 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c109..29f234acb1b9 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". 
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000000..7f7044e1731c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe6c1..f7f40a16acee 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476e5ef..fe49cb74c55a 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error -} - -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() + EnterIdleMode() } -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. 
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. 
No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. -func (m *manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() error { if m.isClosed() { return nil } @@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error { // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.exitIdleMode(); err != nil { + if err := m.ExitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. atomic.AddInt32(&m.activeCallsCount, -1) @@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error { return nil } -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. 
-func (m *manager) exitIdleMode() error { +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() - if !m.actuallyIdle { - // This can happen in two scenarios: + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. // - // Either way, nothing to do here. + // In any case, there is nothing to do here. return nil } if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) + return fmt.Errorf("failed to exit idle mode: %w", err) } // Undo the idle entry process. This also respects any new RPC attempts. @@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error { m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + m.resetIdleTimerLocked(m.timeout) return nil } // OnCallEnd is invoked at the end of every RPC. -func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0d94c63e06e2..2549fe8e3b88 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -73,6 +73,11 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -177,10 +182,12 @@ var ( GRPCResolverSchemeExtraMetadata string = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) error + EnterIdleModeForTesting any // func(*grpc.ClientConn) // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. 
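// A standalone sketch of the call-count bookkeeping the reworked idle.Manager
// above relies on: a single atomic counter doubles as an "idle or entering
// idle" flag by being parked at -math.MaxInt32, so OnCallBegin can detect
// idleness with one atomic add. Names here are illustrative, not the real
// fields.
package main

import (
    "fmt"
    "math"
    "sync/atomic"
)

type callTracker struct {
    activeCalls int32
}

// onCallBegin returns true if the channel was already active, false if it was
// idle (or entering idle) and the caller must exit idle mode first.
func (t *callTracker) onCallBegin() bool {
    return atomic.AddInt32(&t.activeCalls, 1) > 0
}

func (t *callTracker) onCallEnd() {
    atomic.AddInt32(&t.activeCalls, -1)
}

// tryEnterIdle parks the counter at -math.MaxInt32 only if no call is active.
func (t *callTracker) tryEnterIdle() bool {
    return atomic.CompareAndSwapInt32(&t.activeCalls, 0, -math.MaxInt32)
}

// exitIdle undoes the parking; increments from racing onCallBegin calls are
// preserved because we add rather than store.
func (t *callTracker) exitIdle() {
    atomic.AddInt32(&t.activeCalls, math.MaxInt32)
}

func main() {
    t := &callTracker{}
    fmt.Println("active before idle:", t.onCallBegin()) // true
    t.onCallEnd()
    fmt.Println("entered idle:", t.tryEnterIdle()) // true
    fmt.Println("active while idle:", t.onCallBegin()) // false
    t.exitIdle()
    t.onCallEnd()
}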
ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36c89..b66dcb213276 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. 
- minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. 
- return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000000..c7fc557d00c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. 
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go new file mode 100644 index 000000000000..aeffd3e1c7b1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go @@ -0,0 +1,29 @@ +//go:build !unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000000..078137b7fd70 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. 
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 17f7a21b5a9f..a9d70e2a16c3 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -347,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
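The keepalive dialer above combines two things: a negative `KeepAlive` value so the Go standard library neither enables keepalives nor overrides the OS timing, and a `Control` callback that turns `SO_KEEPALIVE` back on at the socket level so the OS defaults apply. A standalone sketch of the same pattern outside gRPC's internal package (unix-only, since it uses `golang.org/x/sys/unix`) might look like this:

```go
package main

import (
	"context"
	"log"
	"net"
	"syscall"
	"time"

	"golang.org/x/sys/unix"
)

// keepaliveDialer mirrors the pattern from the vendored code: disable Go's
// keepalive handling (negative KeepAlive) and enable SO_KEEPALIVE directly so
// the OS defaults for time/interval are used.
func keepaliveDialer() *net.Dialer {
	return &net.Dialer{
		KeepAlive: time.Duration(-1),
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
		},
	}
}

func main() {
	conn, err := keepaliveDialer().DialContext(context.Background(), "tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected:", conn.RemoteAddr())
}
```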
- - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -370,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d6f5c49358b5..59f67655a854 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -493,8 +494,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() 
*peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 6fa1eb41992a..680c9eba0b17 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. @@ -592,18 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -629,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(handle func(*Stream)) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -664,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -1242,10 +1223,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1311,10 +1288,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1397,11 +1370,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1433,10 +1406,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1461,6 +1436,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987870..24fa1032574c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. 
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aac056e723bb..b7b8fec18046 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. 
Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12a87..49446825763b 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -153,14 +153,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +205,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,17 +222,16 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ffbc5..a821ff9b2b75 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f4157c..bf56faa76d3d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -37,7 +37,6 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool - idle bool blockingCh chan struct{} picker balancer.Picker statsHandlers []stats.Handler // to record blocking picker calls @@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. 
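Two of the user-visible changes in this hunk are the new `LocalAddr` field on `peer.Peer` and the case-insensitive key matching in `metadata.ValueFromIncomingContext`. A small, hypothetical helper that a handler or interceptor could call shows how both surface on the server side:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

// logCallInfo is an illustrative helper (not part of this patch) that reads
// the peer information and incoming metadata for the current RPC.
func logCallInfo(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		// LocalAddr is newly populated by the transports in this release.
		log.Printf("peer=%v local=%v", p.Addr, p.LocalAddr)
	}
	// Keys are now matched case-insensitively, so "User-Agent" and
	// "user-agent" return the same values.
	if vals := metadata.ValueFromIncomingContext(ctx, "user-agent"); len(vals) > 0 {
		log.Printf("user-agent=%s", vals[0])
	}
}
```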
func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 2e9cf66b4afc..5128f9364dd1 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" @@ -65,19 +64,6 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. - if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000000..14aa6f20ae01 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
+// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. +package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887de0a..ada5b9bb79ba 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. 
The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228e54..bd1c7d01b7ec 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d68330560848..000000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. 
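The `EndpointMap` added to `resolver/map.go` above keys entries on the *unordered set* of address strings in an endpoint, so the same addresses in a different order resolve to the same entry. Assuming the exported names land as shown in the diff, usage looks roughly like this:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	a := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"},
	}}
	em.Set(a, "primary")

	// Lookup is keyed on the unordered address set, so a reordered endpoint
	// hits the same entry.
	b := resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"},
	}}
	if v, ok := em.Get(b); ok {
		fmt.Println("found:", v) // found: primary
	}
	fmt.Println("len:", em.Len()) // len: 1
}
```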
- cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. 
-func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000000..c79bab12149f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
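`UpdateState` above synthesizes single-address `Endpoints` when a resolver reports only `Addresses`; resolvers can also report endpoints directly. As a rough, hypothetical sketch (the scheme and addresses here are made up), a static resolver that does so looks like this:

```go
package staticres

import "google.golang.org/grpc/resolver"

// builder is an illustrative resolver.Builder that reports a fixed endpoint.
type builder struct{}

func (builder) Scheme() string { return "static-example" }

func (builder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	err := cc.UpdateState(resolver.State{
		Endpoints: []resolver.Endpoint{
			{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
		},
	})
	return nopResolver{}, err
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func init() { resolver.Register(builder{}) }
```

A client would then dial a target such as `static-example:///ignored` to pick up this builder.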
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8f60d421437d..682fa1831ec8 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,6 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } @@ -81,6 +85,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -139,7 +144,8 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan func() + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -578,11 +584,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -616,15 +624,14 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -806,6 +813,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. 
+// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -917,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } @@ -971,18 +990,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() s.handleStream(st, stream) } @@ -996,7 +1026,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1040,7 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1689,6 +1718,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ctx := stream.Context() + ctx = contextWithServer(ctx, s) var ti *traceInfo if EnableTracing { tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) @@ -1697,7 +1727,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str tr: tr, firstLine: firstLine{ client: false, - remoteAddr: t.RemoteAddr(), + remoteAddr: t.Peer().Addr, }, } if dl, ok := ctx.Deadline(); ok { @@ -1731,6 +1761,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx 
= sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { @@ -1820,62 +1866,68 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1884,22 +1936,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. 
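The new doc comment on `Serve` (above) explains that, because Go overrides OS keepalive defaults, callers who want OS-default TCP keepalives must both create the listener with a negative `KeepAlive` and enable `SO_KEEPALIVE` themselves on accepted connections. One way to satisfy that note — an illustrative sketch, not part of the patch — is a thin listener wrapper:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

// keepaliveListener enables TCP keepalives (with OS defaults) on every
// accepted connection.
type keepaliveListener struct {
	net.Listener
}

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		// The negative KeepAlive on the ListenConfig below keeps Go from
		// overriding the OS timing; this just turns SO_KEEPALIVE on.
		if err := tc.SetKeepAlive(true); err != nil {
			log.Printf("SetKeepAlive: %v", err)
		}
	}
	return c, nil
}

func main() {
	lc := net.ListenConfig{KeepAlive: -1} // do not override OS keepalive defaults
	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Register services here before serving.
	if err := s.Serve(keepaliveListener{lis}); err != nil {
		log.Fatal(err)
	}
}
```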
+func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1913,11 +1957,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 6d2cadd79a9b..dc2cea59c939 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.59.0" +const Version = "1.60.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bb480f1f9cca..896dc38f5063 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,16 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. 
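With the `handleStream` changes above, the server now emits `stats.InHeader` (including `WireLength` from `HeaderWireLength()`, plus the peer's local and remote addresses) from the gRPC layer rather than the transport. A minimal, hypothetical `stats.Handler` that consumes it could look like:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// headerLogger is an illustrative stats.Handler that logs incoming headers.
type headerLogger struct{}

func (headerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (headerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (headerLogger) HandleConn(context.Context, stats.ConnStats)                        {}

func (headerLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if h, ok := s.(*stats.InHeader); ok {
		log.Printf("%s from %v (local %v), header bytes=%d",
			h.FullMethod, h.RemoteAddr, h.LocalAddr, h.WireLength)
	}
}

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(headerLogger{}))
}
```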
 git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc'
@@ -94,15 +97,14 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc
 not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go"

 # - Ensure all usages of grpc_testing package are renamed when importing.
-not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"
+not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go"

 # - Ensure all xds proto imports are renamed to *pb or *grpc.
 git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "'

 misspell -error .

-# - gofmt, goimports, golint (with exceptions for generated code), go vet,
-# go mod tidy.
+# - gofmt, goimports, go vet, go mod tidy.
 # Perform these checks on each module inside gRPC.
 for MOD_FILE in $(find . -name 'go.mod'); do
   MOD_DIR=$(dirname ${MOD_FILE})
@@ -110,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do
   go vet -all ./... | fail_on_output
   gofmt -s -d -l . 2>&1 | fail_on_output
   goimports -l . 2>&1 | not grep -vE "\.pb\.go"
-  golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"

   go mod tidy -compat=1.19
   git status --porcelain 2>&1 | fail_on_output || \
@@ -119,94 +120,73 @@ for MOD_FILE in $(find . -name 'go.mod'); do
 done

 # - Collection of static analysis checks
-#
-# TODO(dfawley): don't use deprecated functions in examples or first-party
-# plugins.
-# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs.
 SC_OUT="$(mktemp)"
-staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true
-# Error if anything other than deprecation warnings are printed.
-not grep -v "is deprecated:.*SA1019" "${SC_OUT}"
-# Only ignore the following deprecated types/fields/functions.
-not grep -Fv '.CredsBundle
-.HeaderMap
-.Metadata is deprecated: use Attributes
-.NewAddress
-.NewServiceConfig
-.Type is deprecated: use Attributes
-BuildVersion is deprecated
-balancer.ErrTransientFailure
-balancer.Picker
-extDesc.Filename is deprecated
-github.com/golang/protobuf/jsonpb is deprecated
-grpc.CallCustomCodec
-grpc.Code
-grpc.Compressor
-grpc.CustomCodec
-grpc.Decompressor
-grpc.MaxMsgSize
-grpc.MethodConfig
-grpc.NewGZIPCompressor
-grpc.NewGZIPDecompressor
-grpc.RPCCompressor
-grpc.RPCDecompressor
-grpc.ServiceConfig
-grpc.WithCompressor
-grpc.WithDecompressor
-grpc.WithDialer
-grpc.WithMaxMsgSize
-grpc.WithServiceConfig
-grpc.WithTimeout
-http.CloseNotifier
-info.SecurityVersion
-proto is deprecated
-proto.InternalMessageInfo is deprecated
-proto.EnumName is deprecated
-proto.ErrInternalBadWireType is deprecated
-proto.FileDescriptor is deprecated
-proto.Marshaler is deprecated
-proto.MessageType is deprecated
-proto.RegisterEnum is deprecated
-proto.RegisterFile is deprecated
-proto.RegisterType is deprecated
-proto.RegisterExtension is deprecated
-proto.RegisteredExtension is deprecated
-proto.RegisteredExtensions is deprecated
-proto.RegisterMapType is deprecated
-proto.Unmarshaler is deprecated
+staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true
+
+# Error for anything other than checks that need exclusions.
+grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)"
+
+# Exclude underscore checks for generated code.
+grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
-  if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then
-    echo "Package $1 (with ${fileCount} files) is missing package comment"
-    return 1
-  fi
-}
-lint_package_comment() {
-  set +ex
-
-  count=0
-  for i in $(go list ./...); do
-    lint_package_comment_per_package "$i"
-    ((count += $?))
-  done
-
-  set -ex
-  return $count
-}
-lint_package_comment
+UpdateAddresses is deprecated:
+UpdateSubConnState is deprecated:
+balancer.ErrTransientFailure is deprecated:
+grpc/reflection/v1alpha/reflection.proto
+XXXXX xDS deprecated fields we support
+.ExactMatch
+.PrefixMatch
+.SafeRegexMatch
+.SuffixMatch
+GetContainsMatch
+GetExactMatch
+GetMatchSubjectAltNames
+GetPrefixMatch
+GetSafeRegexMatch
+GetSuffixMatch
+GetTlsCertificateCertificateProviderInstance
+GetValidationContextCertificateProviderInstance
+XXXXX TODO: Remove the below deprecation usages:
+CloseNotifier
+Roots.Subjects
+XXXXX PleaseIgnoreUnused'

 echo SUCCESS
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c8136043a115..cc0361d1bff5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -128,7 +128,7 @@ github.com/cenkalti/backoff/v4
 # github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/compose-spec/compose-go/v2 v2.1.4
+# github.com/compose-spec/compose-go/v2 v2.1.6
 ## explicit; go 1.21
 github.com/compose-spec/compose-go/v2/cli
 github.com/compose-spec/compose-go/v2/consts
@@ -150,7 +150,7 @@ github.com/compose-spec/compose-go/v2/validation
 # github.com/containerd/console v1.0.4
 ## explicit; go 1.13
 github.com/containerd/console
-# github.com/containerd/containerd v1.7.19
+# github.com/containerd/containerd v1.7.20
 ## explicit; go 1.21
 github.com/containerd/containerd/archive/compression
 github.com/containerd/containerd/content
@@ -167,7 +167,6 @@ github.com/containerd/containerd/namespaces
 github.com/containerd/containerd/pkg/dialer
 github.com/containerd/containerd/pkg/randutil
 github.com/containerd/containerd/pkg/seed
-github.com/containerd/containerd/pkg/userns
 github.com/containerd/containerd/protobuf
 github.com/containerd/containerd/protobuf/types
 github.com/containerd/containerd/reference
@@ -204,8 +203,8 @@ github.com/containerd/platforms
 # github.com/containerd/ttrpc v1.2.5
 ## explicit; go 1.19
 github.com/containerd/ttrpc
-# github.com/containerd/typeurl/v2 v2.1.1
-## explicit; go 1.13
+# github.com/containerd/typeurl/v2 v2.2.0
+## explicit; go 1.21
 github.com/containerd/typeurl/v2
 # github.com/cpuguy83/go-md2man/v2 v2.0.4
 ## explicit; go 1.11
@@ -219,7 +218,7 @@ github.com/davecgh/go-spew/spew
 # github.com/distribution/reference v0.6.0
 ## explicit; go 1.20
 github.com/distribution/reference
-# github.com/docker/cli v27.0.3+incompatible
+# github.com/docker/cli v27.2.0+incompatible
 ## explicit
 github.com/docker/cli/cli
 github.com/docker/cli/cli-plugins/hooks
@@ -271,7 +270,7 @@ github.com/docker/distribution/registry/client/transport
 github.com/docker/distribution/registry/storage/cache
 github.com/docker/distribution/registry/storage/cache/memory
 github.com/docker/distribution/uuid
-# github.com/docker/docker v27.0.3+incompatible
+# github.com/docker/docker v27.2.0+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
@@ -355,7 +354,7 @@ github.com/go-openapi/swag
 ## explicit; go 1.18
 github.com/go-viper/mapstructure/v2
 github.com/go-viper/mapstructure/v2/internal/errors
-# github.com/gofrs/flock v0.12.0
+# github.com/gofrs/flock v0.12.1
 ## explicit; go 1.21.0
 github.com/gofrs/flock
 # github.com/gogo/googleapis v1.4.1
@@ -427,8 +426,8 @@ github.com/google/shlex
 # github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/gorilla/mux v1.8.0
-## explicit; go 1.12
+# github.com/gorilla/mux v1.8.1
+## explicit; go 1.20
 github.com/gorilla/mux
 # github.com/gorilla/websocket v1.5.0
 ## explicit; go 1.12
@@ -516,7 +515,7 @@ github.com/mitchellh/go-wordwrap
 github.com/mitchellh/hashstructure/v2
 # github.com/mitchellh/mapstructure v1.5.0
 ## explicit; go 1.14
-# github.com/moby/buildkit v0.15.0
+# github.com/moby/buildkit v0.16.0-rc1
 ## explicit; go 1.21.0
 github.com/moby/buildkit/api/services/control
 github.com/moby/buildkit/api/types
@@ -531,6 +530,7 @@ github.com/moby/buildkit/client/llb
 github.com/moby/buildkit/client/llb/sourceresolver
 github.com/moby/buildkit/client/ociindex
 github.com/moby/buildkit/cmd/buildkitd/config
+github.com/moby/buildkit/errdefs
 github.com/moby/buildkit/executor/resources/types
 github.com/moby/buildkit/exporter/containerimage/exptypes
 github.com/moby/buildkit/exporter/exptypes
@@ -611,18 +611,21 @@ github.com/moby/patternmatcher/ignorefile
 ## explicit; go 1.13
 github.com/moby/spdystream
 github.com/moby/spdystream/spdy
-# github.com/moby/sys/mountinfo v0.7.1
-## explicit; go 1.16
+# github.com/moby/sys/mountinfo v0.7.2
+## explicit; go 1.17
 github.com/moby/sys/mountinfo
 # github.com/moby/sys/sequential v0.5.0
 ## explicit; go 1.17
 github.com/moby/sys/sequential
-# github.com/moby/sys/signal v0.7.0
-## explicit; go 1.16
+# github.com/moby/sys/signal v0.7.1
+## explicit; go 1.17
 github.com/moby/sys/signal
-# github.com/moby/sys/user v0.1.0
+# github.com/moby/sys/user v0.3.0
 ## explicit; go 1.17
 github.com/moby/sys/user
+# github.com/moby/sys/userns v0.1.0
+## explicit; go 1.21
+github.com/moby/sys/userns
 # github.com/moby/term v0.5.0
 ## explicit; go 1.18
 github.com/moby/term
@@ -779,10 +782,7 @@ go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
 go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.21.0
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0
-## explicit; go 1.20
-go.opentelemetry.io/otel/exporters/otlp/otlpmetric
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal
@@ -790,7 +790,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envco
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform
-# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0
+# go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0
 ## explicit; go 1.20
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal
@@ -876,7 +876,6 @@ golang.org/x/mod/module
 golang.org/x/mod/semver
 # golang.org/x/net v0.25.0
 ## explicit; go 1.18
-golang.org/x/net/context
 golang.org/x/net/html
 golang.org/x/net/html/atom
 golang.org/x/net/http/httpguts
@@ -887,7 +886,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.11.0
+# golang.org/x/oauth2 v0.13.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
@@ -895,7 +894,7 @@ golang.org/x/oauth2/internal
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.21.0
+# golang.org/x/sys v0.22.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
@@ -941,7 +940,7 @@ golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
-# google.golang.org/appengine v1.6.7
+# google.golang.org/appengine v1.6.8
 ## explicit; go 1.11
 google.golang.org/appengine/internal
 google.golang.org/appengine/internal/base
@@ -950,17 +949,17 @@ google.golang.org/appengine/internal/log
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b
+# google.golang.org/genproto v0.0.0-20231211222908-989df2bf70f3
 ## explicit; go 1.19
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b
+# google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.59.0
+# google.golang.org/grpc v1.60.1
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -999,6 +998,7 @@ google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
 google.golang.org/grpc/internal/resolver
 google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
 google.golang.org/grpc/internal/resolver/unix
 google.golang.org/grpc/internal/serviceconfig
@@ -1010,6 +1010,7 @@ google.golang.org/grpc/keepalive
 google.golang.org/grpc/metadata
 google.golang.org/grpc/peer
 google.golang.org/grpc/resolver
+google.golang.org/grpc/resolver/dns
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
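
Note on the grpc-go 1.60.x change vendored above: Stop and GracefulStop now share a single stop(graceful bool) path, but their public semantics are unchanged (Stop closes transports immediately, GracefulStop drains in-flight RPCs). A minimal sketch of how a caller typically drives the two entry points; the listener address and signal wiring below are illustrative assumptions, not part of this diff or of buildx itself:

package main

import (
    "log"
    "net"
    "os"
    "os/signal"
    "syscall"

    "google.golang.org/grpc"
)

func main() {
    // Illustrative address; any net.Listener works.
    lis, err := net.Listen("tcp", "127.0.0.1:50051")
    if err != nil {
        log.Fatalf("listen: %v", err)
    }

    srv := grpc.NewServer()
    // Register services on srv here, before calling Serve.

    go func() {
        sigc := make(chan os.Signal, 1)
        signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
        <-sigc
        // GracefulStop stops accepting new connections and RPCs and waits for
        // in-flight RPCs to finish; srv.Stop() would close transports at once.
        srv.GracefulStop()
    }()

    // Serve returns after Stop or GracefulStop is called.
    if err := srv.Serve(lis); err != nil {
        log.Fatalf("serve: %v", err)
    }
}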