diff --git a/client/allocrunner/taskrunner/getter/sandbox_test.go b/client/allocrunner/taskrunner/getter/sandbox_test.go
index 63c7ee77838..bed1f7a3cbe 100644
--- a/client/allocrunner/taskrunner/getter/sandbox_test.go
+++ b/client/allocrunner/taskrunner/getter/sandbox_test.go
@@ -4,6 +4,7 @@
package getter
import (
+ "archive/tar"
"fmt"
"net/http"
"net/http/cgi"
@@ -16,12 +17,15 @@ import (
"time"
"github.com/hashicorp/nomad/client/config"
+ "github.com/hashicorp/nomad/client/interfaces"
"github.com/hashicorp/nomad/client/testutil"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/shoenig/test/must"
)
+const testFileContent = "test-file-content"
+
func artifactConfig(timeout time.Duration) *config.ArtifactConfig {
return &config.ArtifactConfig{
HTTPReadTimeout: timeout,
@@ -119,13 +123,20 @@ func TestSandbox_Get_inspection(t *testing.T) {
testutil.RequireRoot(t)
logger := testlog.HCLogger(t)
- // Create a temporary directory directly so the repos
- // don't end up being found improperly
- tdir, err := os.MkdirTemp("", "nomad-test")
- must.NoError(t, err, must.Sprint("failed to create top level local repo directory"))
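+ // sandboxSetup builds a sandbox with filesystem isolation disabled so
+ // the artifact inspection path is exercised, returning the task
+ // directory, the sandbox, and a task environment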
+ sandboxSetup := func() (string, *Sandbox, interfaces.EnvReplacer) {
+ testutil.RequireRoot(t)
+ logger := testlog.HCLogger(t)
+ ac := artifactConfig(10 * time.Second)
+ sbox := New(ac, logger)
+ _, taskDir := SetupDir(t)
+ env := noopTaskEnv(taskDir)
+ sbox.ac.DisableFilesystemIsolation = true
+
+ return taskDir, sbox, env
+ }
t.Run("symlink escaped sandbox", func(t *testing.T) {
- dir, err := os.MkdirTemp(tdir, "fake-repo")
+ dir, err := os.MkdirTemp(t.TempDir(), "fake-repo")
must.NoError(t, err, must.Sprint("failed to create local repo directory"))
must.NoError(t, os.Symlink("/", filepath.Join(dir, "bad-file")), must.Sprint("could not create symlink in local repo"))
srv := makeAndServeGitRepo(t, dir)
@@ -162,7 +173,7 @@ func TestSandbox_Get_inspection(t *testing.T) {
})
t.Run("symlink within sandbox", func(t *testing.T) {
- dir, err := os.MkdirTemp(tdir, "fake-repo")
+ dir, err := os.MkdirTemp(t.TempDir(), "fake-repo")
must.NoError(t, err, must.Sprint("failed to create local repo"))
// create a file to link to
f, err := os.Create(filepath.Join(dir, "test-file"))
@@ -193,6 +204,195 @@ func TestSandbox_Get_inspection(t *testing.T) {
err = sbox.Get(env, artifact, "nobody")
must.NoError(t, err)
})
+
+ t.Run("ignores existing symlinks", func(t *testing.T) {
+ taskDir, sbox, env := sandboxSetup()
+ src, _ := servTestFile(t, "test-file")
+ must.NoError(t, os.Symlink("/", filepath.Join(taskDir, "bad-file")))
+
+ artifact := &structs.TaskArtifact{
+ GetterSource: src,
+ RelativeDest: "local/downloads",
+ }
+
+ err := sbox.Get(env, artifact, "nobody")
+ must.NoError(t, err)
+
+ _, err = os.Stat(filepath.Join(taskDir, "local", "downloads", "test-file"))
+ must.NoError(t, err)
+ })
+
+ t.Run("properly chowns destination", func(t *testing.T) {
+ taskDir, sbox, env := sandboxSetup()
+ src, _ := servTestFile(t, "test-file")
+
+ artifact := &structs.TaskArtifact{
+ GetterSource: src,
+ RelativeDest: "local/downloads",
+ Chown: true,
+ }
+
+ err := sbox.Get(env, artifact, "nobody")
+ must.NoError(t, err)
+
+ info, err := os.Stat(filepath.Join(taskDir, "local", "downloads"))
+ must.NoError(t, err)
+
+ uid := info.Sys().(*syscall.Stat_t).Uid
+ must.Eq(t, 65534, uid) // nobody's conventional uid
+
+ info, err = os.Stat(filepath.Join(taskDir, "local", "downloads", "test-file"))
+ must.NoError(t, err)
+
+ uid = info.Sys().(*syscall.Stat_t).Uid
+ must.Eq(t, 65534, uid) // nobody's conventional uid
+ })
+
+ t.Run("when destination file exists", func(t *testing.T) {
+ taskDir, sbox, env := sandboxSetup()
+ src, _ := servTestFile(t, "test-file")
+
+ testFile := filepath.Join(taskDir, "local", "downloads", "test-file")
+ must.NoError(t, os.MkdirAll(filepath.Dir(testFile), 0755))
+ f, err := os.OpenFile(testFile, os.O_CREATE|os.O_WRONLY, 0644)
+ must.NoError(t, err)
+ _, err = f.Write([]byte("testing"))
+ must.NoError(t, err)
+ f.Close()
+ originalInfo, err := os.Stat(testFile)
+ must.NoError(t, err)
+
+ artifact := &structs.TaskArtifact{
+ GetterSource: src,
+ RelativeDest: "local/downloads",
+ Chown: true,
+ }
+
+ err = sbox.Get(env, artifact, "nobody")
+ must.NoError(t, err)
+
+ newInfo, err := os.Stat(testFile)
+ must.NoError(t, err)
+
+ must.False(t, os.SameFile(originalInfo, newInfo))
+ })
+
+ t.Run("when destination directory exists", func(t *testing.T) {
+ taskDir, sbox, env := sandboxSetup()
+ src, _ := servTestFile(t, "test-file")
+
+ testFile := filepath.Join(taskDir, "local", "downloads", "testfile.txt")
+ must.NoError(t, os.MkdirAll(filepath.Dir(testFile), 0755))
+ f, err := os.OpenFile(testFile, os.O_CREATE|os.O_WRONLY, 0644)
+ must.NoError(t, err)
+ _, err = f.Write([]byte("testing"))
+ must.NoError(t, err)
+ f.Close()
+
+ artifact := &structs.TaskArtifact{
+ GetterSource: src,
+ RelativeDest: "local/downloads",
+ Chown: true,
+ }
+
+ err = sbox.Get(env, artifact, "nobody")
+ must.NoError(t, err)
+
+ // check that new file exists
+ _, err = os.Stat(filepath.Join(taskDir, "local", "downloads", "test-file"))
+ must.NoError(t, err)
+
+ // check that existing file still exists
+ _, err = os.Stat(testFile)
+ must.NoError(t, err)
+ })
+
+ t.Run("when unpacking file to an existing directory", func(t *testing.T) {
+ taskDir, sbox, env := sandboxSetup()
+
+ tarFiles := []string{
+ "test.file",
+ "nested/test.file",
+ "other/test.file",
+ }
+ src, _ := servTarFile(t, tarFiles...)
+
+ testFile := filepath.Join(taskDir, "local", "downloads", "other", "testfile.txt")
+ must.NoError(t, os.MkdirAll(filepath.Dir(testFile), 0755))
+ f, err := os.Create(testFile)
+ must.NoError(t, err)
+ _, err = f.Write([]byte("testing"))
+ must.NoError(t, err)
+ f.Close()
+
+ artifact := &structs.TaskArtifact{
+ GetterSource: src,
+ RelativeDest: "local/downloads",
+ Chown: true,
+ }
+
+ err = sbox.Get(env, artifact, "nobody")
+ must.NoError(t, err)
+
+ // check that all unpacked files exist
+ for _, tarFile := range tarFiles {
+ _, err := os.Stat(filepath.Join(taskDir, "local", "downloads", tarFile))
+ must.NoError(t, err)
+ }
+
+ // check existing file remains
+ _, err = os.Stat(testFile)
+ must.NoError(t, err)
+ })
+}
+
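+// servTestFile writes filename containing testFileContent into a temporary
+// directory and serves it over HTTP, returning the file URL and the server.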
+func servTestFile(t *testing.T, filename string) (string, *httptest.Server) {
+ t.Helper()
+
+ dir, err := os.MkdirTemp(t.TempDir(), "file")
+ must.NoError(t, err)
+ f, err := os.Create(filepath.Join(dir, filename))
+ must.NoError(t, err)
+ defer f.Close()
+ _, err = f.Write([]byte(testFileContent))
+ must.NoError(t, err)
+
+ s := servDir(t, dir)
+ return fmt.Sprintf("%s/%s", s.URL, filename), s
+}
+
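+// servTarFile builds a tar archive containing the given paths, each filled
+// with testFileContent, and serves it over HTTP, returning the archive URL
+// and the server.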
+func servTarFile(t *testing.T, paths ...string) (string, *httptest.Server) {
+ t.Helper()
+
+ dir, err := os.MkdirTemp(t.TempDir(), "tar")
+ must.NoError(t, err)
+ f, err := os.Create(filepath.Join(dir, "test-compressed.tar"))
+ must.NoError(t, err)
+ defer f.Close()
+
+ w := tar.NewWriter(f)
+ defer w.Close()
+ for _, path := range paths {
+ err := w.WriteHeader(&tar.Header{
+ Name: path,
+ Mode: 0644,
+ Size: int64(len(testFileContent)),
+ })
+ must.NoError(t, err)
+ n, err := w.Write([]byte(testFileContent))
+ must.NoError(t, err)
+ must.Eq(t, len(testFileContent), n)
+ }
+
+ s := servDir(t, dir)
+ return fmt.Sprintf("%s/test-compressed.tar", s.URL), s
+}
+
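+// servDir serves the contents of dir over a test HTTP server that is
+// closed when the test completes.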
+func servDir(t *testing.T, dir string) *httptest.Server {
+ t.Helper()
+
+ fs := os.DirFS(dir)
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.ServeFileFS(w, r, fs, r.URL.Path)
+ }))
+ t.Cleanup(s.Close)
+
+ return s
}
func makeAndServeGitRepo(t *testing.T, repoPath string) *httptest.Server {
diff --git a/client/allocrunner/taskrunner/getter/util.go b/client/allocrunner/taskrunner/getter/util.go
index 919008f31e8..a769c660c98 100644
--- a/client/allocrunner/taskrunner/getter/util.go
+++ b/client/allocrunner/taskrunner/getter/util.go
@@ -165,6 +165,31 @@ func (s *Sandbox) runCmd(env *parameters) error {
// find the nomad process
bin := subproc.Self()
+ // if the artifact is to be inspected, fetch it to a temporary
+ // location so inspection can be performed before moving it
+ // to its final destination
+ var finalDest string
+ if !env.DisableArtifactInspection && (env.DisableFilesystemIsolation || !lockdownAvailable()) {
+ finalDest = env.Destination
+ tmpDir, err := os.MkdirTemp(env.AllocDir, "artifact-")
+ if err != nil {
+ return err
+ }
+
+ // NOTE: use a destination path that does not actually
+ // exist to prevent unexpected errors with go-getter
+ env.Destination = filepath.Join(tmpDir, "artifact")
+
+ s.logger.Debug("artifact download destination modified for inspection",
+ "temporary", env.Destination, "final", finalDest)
+ // before leaving, set the destination back to the
+ // original value and cleanup
+ defer func() {
+ env.Destination = finalDest
+ os.RemoveAll(tmpDir)
+ }()
+ }
+
// final method of ensuring subprocess termination
ctx, cancel := subproc.Context(env.deadline())
defer cancel()
@@ -189,44 +214,96 @@ func (s *Sandbox) runCmd(env *parameters) error {
}
subproc.Log(output, s.logger.Debug)
- // if filesystem isolation was not disabled and lockdown
- // is available on this platform, do not continue to inspection
- if !env.DisableFilesystemIsolation && lockdownAvailable() {
+ // if the artifact was not downloaded to a temporary
+ // location, no inspection is needed
+ if finalDest == "" {
return nil
}
- // if artifact inspection is disabled, do not continue to inspection
- if env.DisableArtifactInspection {
- return nil
+ // inspect the downloaded artifact
+ artifactInspector, err := genWalkInspector(env.Destination)
+ if err != nil {
+ return err
}
- // inspect the writable directories. start with inspecting the
- // alloc directory
- allocInspector, err := genWalkInspector(env.AllocDir)
- if err != nil {
+ if err := filepath.WalkDir(env.Destination, artifactInspector); err != nil {
return err
}
- if err := filepath.WalkDir(env.AllocDir, allocInspector); err != nil {
+ // ensure the final destination path exists
+ if err := os.MkdirAll(finalDest, 0755); err != nil {
return err
}
- // the task directory is within the alloc directory. however, if
- // that ever changes for some reason, make sure it is checked as well
- isWithin, err := isPathWithin(env.AllocDir, env.TaskDir)
+ // the artifact contents will have the owner set correctly
+ // but the destination directory will not, so set that now
+ // if it was configured
+ if env.Chown {
+ if err := chownDestination(finalDest, env.User); err != nil {
+ return err
+ }
+ }
+
+ if err := mergeDirectories(env.Destination, finalDest); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// mergeDirectories will merge the contents of the srcDir into
+// the dstDir. This is a destructive action; the contents of
+// srcDir are moved into dstDir.
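+// Entries in dstDir that do not exist in srcDir are left in place,
+// same-named entries are replaced by the srcDir contents, and
+// directories that exist in both are merged recursively.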
+func mergeDirectories(srcDir, dstDir string) error {
+ entries, err := os.ReadDir(srcDir)
if err != nil {
return err
}
- if !isWithin {
- taskInspector, err := genWalkInspector(env.TaskDir)
+ for _, entry := range entries {
+ src := filepath.Join(srcDir, entry.Name())
+ dst := filepath.Join(dstDir, entry.Name())
+
+ srcInfo, err := os.Stat(src)
if err != nil {
return err
}
- if err := filepath.WalkDir(env.TaskDir, taskInspector); err != nil {
+ dstInfo, err := os.Stat(dst)
+ if err != nil {
+ // if the destination does not exist, the source
+ // can be moved directly
+ if errors.Is(err, os.ErrNotExist) {
+ if err := os.Rename(src, dst); err != nil {
+ return err
+ }
+
+ continue
+ }
+
return err
}
+
+ // if both the source and destination are directories
+ // merge the source into the destination
+ if srcInfo.IsDir() && dstInfo.IsDir() {
+ if err := mergeDirectories(src, dst); err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ // remove the destination and move the source
+ if err := os.RemoveAll(dst); err != nil {
+ return err
+ }
+
+ if err := os.Rename(src, dst); err != nil {
+ return err
+ }
}
return nil
diff --git a/website/content/docs/configuration/client.mdx b/website/content/docs/configuration/client.mdx
index 74274bc9bbe..37bd84fb7a0 100644
--- a/website/content/docs/configuration/client.mdx
+++ b/website/content/docs/configuration/client.mdx
@@ -502,11 +502,16 @@ refer to the [drivers documentation](/nomad/docs/job-declare/task-driver).
- `disable_artifact_inspection` `(bool: false)` - Specifies whether to disable
artifact inspection for sandbox escapes. If the platform supports filesystem
isolation, and it is not disabled, artifact inspection will not be performed
- regardless of this value.
+ regardless of this value. When artifact inspection is performed, Nomad
+ merges the unpacked contents of the artifact into the destination path if it
+ already exists. Depending on the remote source, this behavior may differ
+ from unpacking the artifact without inspection.
- `disable_filesystem_isolation` `(bool: false)` - Specifies whether filesystem
isolation should be disabled for artifact downloads. Applies only to systems
where filesystem isolation via [landlock] is possible (Linux kernel 5.13+).
+ When filesystem isolation is disabled, Nomad inspects the unpacked artifact
+ contents unless inspection is disabled with `disable_artifact_inspection`.
- `filesystem_isolation_extra_paths` `([]string: nil)` - Allow extra paths
in the filesystem isolation. Paths are specified in the form `[kind]:[mode]:[path]`
diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx
index 81a90bda07b..9a67d36e8c9 100644
--- a/website/content/docs/upgrade/upgrade-specific.mdx
+++ b/website/content/docs/upgrade/upgrade-specific.mdx
@@ -38,6 +38,15 @@ being silently ignored. Any existing policies with duplicate or invalid keys
will continue to work, but the source policy document will need to be updated
to be valid before it can be written to Nomad.
+#### Artifact unpacking behavior changes when inspection enabled
+
+Nomad 1.11.0 updates how artifact contents are unpacked and inspected. Artifact
+inspection is applied when `client.artifact.disable_filesystem_isolation` is set
+to `true` or the host platform does not support filesystem isolation. When
+inspection is applied, the unpacked contents of the artifact are merged into the
+destination path if it already exists. Depending on the remote source, this
+behavior may differ from unpacking the artifact without inspection.
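+
+For example, with a task artifact block such as the following (the source URL
+is for illustration only):
+
+```hcl
+artifact {
+  source      = "https://example.com/files.tar.gz"
+  destination = "local/downloads"
+}
+```
+
+If `local/downloads` already exists, files in it that are not part of the
+archive are left in place, and entries with the same name are replaced by the
+artifact contents.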
+
#### Maximum number of allocations per job is limited by default
Nomad 1.11.0 limits the maximum number of allocations for a job to the value of
@@ -69,6 +78,15 @@ being silently ignored. Any existing policies with duplicate or invalid keys
will continue to work, but the source policy document will need to be updated
to be valid before it can be written to Nomad.
+#### Artifact unpacking behavior changes when inspection enabled
+
+Nomad 1.10.6 updates how artifact contents are unpacked and inspected. Artifact
+inspection is applied when `client.artifact.disable_filesystem_isolation` is set
+to `true` or the host platform does not support filesystem isolation. When
+inspection is applied, the unpacked contents of the artifact are merged into the
+destination path if it already exists. Depending on the remote source, this
+behavior may differ from unpacking the artifact without inspection.
+
#### Enterprise product usage reporting
Nomad Enterprise 1.10.6 adds detailed product usage information to
@@ -242,6 +260,15 @@ being silently ignored. Any existing policies with duplicate or invalid keys
will continue to work, but the source policy document will need to be updated
to be valid before it can be written to Nomad.
+#### Artifact unpacking behavior changes when inspection enabled
+
+Nomad 1.8.18 updates how artifact contents are unpacked and inspected. Artifact
+inspection is applied when `client.artifact.disable_filesystem_isolation` is set
+to `true` or the host platform does not support filesystem isolation. When
+inspection is applied, the unpacked contents of the artifact are merged into the
+destination path if it already exists. Depending on the remote source, this
+behavior may differ from unpacking the artifact without inspection.
+
#### Enterprise product usage reporting
Nomad Enterprise 1.8.18 adds detailed product usage information to