diff --git a/api/server/server.go b/api/server/server.go
index 8d2dafa74004b..c1eb40252e27d 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -36,6 +36,7 @@ import (
 	"github.com/docker/docker/pkg/version"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/docker/utils"
+	"github.com/docker/libcontainer"
 	"github.com/docker/libnetwork/portallocator"
 )
 
@@ -1286,6 +1287,48 @@ func (s *Server) postContainersCopy(version version.Version, w http.ResponseWrit
 	return nil
 }
 
+func (s *Server) postContainersCheckpoint(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	criuOpts := &libcontainer.CriuOpts{}
+	if err := json.NewDecoder(r.Body).Decode(criuOpts); err != nil {
+		return err
+	}
+
+	if err := s.daemon.ContainerCheckpoint(vars["name"], criuOpts); err != nil {
+		return err
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
+
+func (s *Server) postContainersRestore(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	if vars == nil {
+		return fmt.Errorf("Missing parameter")
+	}
+	if err := parseForm(r); err != nil {
+		return err
+	}
+
+	restoreOpts := runconfig.RestoreConfig{}
+	if err := json.NewDecoder(r.Body).Decode(&restoreOpts); err != nil {
+		return err
+	}
+
+	if err := s.daemon.ContainerRestore(vars["name"], &restoreOpts.CriuOpts, restoreOpts.ForceRestore); err != nil {
+		return err
+	}
+
+	w.WriteHeader(http.StatusNoContent)
+	return nil
+}
+
 func (s *Server) postContainerExecCreate(version version.Version, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := parseForm(r); err != nil {
 		return nil
@@ -1488,28 +1531,30 @@ func createRouter(s *Server) *mux.Router {
 			"/exec/{id:.*}/json": s.getExecByID,
 		},
 		"POST": {
-			"/auth":                         s.postAuth,
-			"/commit":                       s.postCommit,
-			"/build":                        s.postBuild,
-			"/images/create":                s.postImagesCreate,
-			"/images/load":                  s.postImagesLoad,
-			"/images/{name:.*}/push":        s.postImagesPush,
-			"/images/{name:.*}/tag":         s.postImagesTag,
-			"/containers/create":            s.postContainersCreate,
-			"/containers/{name:.*}/kill":    s.postContainersKill,
-			"/containers/{name:.*}/pause":   s.postContainersPause,
-			"/containers/{name:.*}/unpause": s.postContainersUnpause,
-			"/containers/{name:.*}/restart": s.postContainersRestart,
-			"/containers/{name:.*}/start":   s.postContainersStart,
-			"/containers/{name:.*}/stop":    s.postContainersStop,
-			"/containers/{name:.*}/wait":    s.postContainersWait,
-			"/containers/{name:.*}/resize":  s.postContainersResize,
-			"/containers/{name:.*}/attach":  s.postContainersAttach,
-			"/containers/{name:.*}/copy":    s.postContainersCopy,
-			"/containers/{name:.*}/exec":    s.postContainerExecCreate,
-			"/exec/{name:.*}/start":         s.postContainerExecStart,
-			"/exec/{name:.*}/resize":        s.postContainerExecResize,
-			"/containers/{name:.*}/rename":  s.postContainerRename,
+			"/auth":                            s.postAuth,
+			"/commit":                          s.postCommit,
+			"/build":                           s.postBuild,
+			"/images/create":                   s.postImagesCreate,
+			"/images/load":                     s.postImagesLoad,
+			"/images/{name:.*}/push":           s.postImagesPush,
+			"/images/{name:.*}/tag":            s.postImagesTag,
+			"/containers/create":               s.postContainersCreate,
+			"/containers/{name:.*}/kill":       s.postContainersKill,
+			"/containers/{name:.*}/pause":      s.postContainersPause,
+			"/containers/{name:.*}/unpause":    s.postContainersUnpause,
+			"/containers/{name:.*}/restart":    s.postContainersRestart,
+			"/containers/{name:.*}/start":      s.postContainersStart,
+			"/containers/{name:.*}/stop":       s.postContainersStop,
+			"/containers/{name:.*}/wait":       s.postContainersWait,
+			"/containers/{name:.*}/resize":     s.postContainersResize,
+			"/containers/{name:.*}/attach":     s.postContainersAttach,
+			"/containers/{name:.*}/copy":       s.postContainersCopy,
+			"/containers/{name:.*}/exec":       s.postContainerExecCreate,
+			"/exec/{name:.*}/start":            s.postContainerExecStart,
+			"/exec/{name:.*}/resize":           s.postContainerExecResize,
+			"/containers/{name:.*}/rename":     s.postContainerRename,
+			"/containers/{name:.*}/checkpoint": s.postContainersCheckpoint,
+			"/containers/{name:.*}/restore":    s.postContainersRestore,
 		},
 		"DELETE": {
 			"/containers/{name:.*}": s.deleteContainers,
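
The two handlers above are plain JSON-over-HTTP and reply 204 No Content on success. A minimal client sketch, with loudly labeled assumptions: the daemon listens on tcp://127.0.0.1:2375 rather than the default unix socket, a container named "mycontainer" exists, and libcontainer.CriuOpts marshals by its Go field names (it declares no json tags in this patch):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// LeaveRunning asks CRIU to keep the container alive after the dump;
	// every other CriuOpts field is left at its zero value.
	body := bytes.NewBufferString(`{"LeaveRunning": false}`)
	resp, err := http.Post("http://127.0.0.1:2375/containers/mycontainer/checkpoint",
		"application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // "204 No Content" if the checkpoint succeeded
}

Restore is symmetrical: POST a runconfig.RestoreConfig body (shown at the end of this diff) to /containers/mycontainer/restore.
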
diff --git a/daemon/checkpoint.go b/daemon/checkpoint.go
new file mode 100644
index 0000000000000..1d98d3222c787
--- /dev/null
+++ b/daemon/checkpoint.go
@@ -0,0 +1,56 @@
+package daemon
+
+import (
+	"fmt"
+
+	"github.com/docker/libcontainer"
+)
+
+// ContainerCheckpoint checkpoints a running container.
+func (daemon *Daemon) ContainerCheckpoint(name string, opts *libcontainer.CriuOpts) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+	if !container.IsRunning() {
+		return fmt.Errorf("Container %s not running", name)
+	}
+	if err := container.Checkpoint(opts); err != nil {
+		return fmt.Errorf("Cannot checkpoint container %s: %s", name, err)
+	}
+
+	container.LogEvent("checkpoint")
+	return nil
+}
+
+// ContainerRestore restores a checkpointed container.
+func (daemon *Daemon) ContainerRestore(name string, opts *libcontainer.CriuOpts, forceRestore bool) error {
+	container, err := daemon.Get(name)
+	if err != nil {
+		return err
+	}
+
+	if !forceRestore {
+		// TODO: It may be enough to bypass only the checkpointed check;
+		// it is not yet clear how restore behaves if the container is
+		// already running.
+		if container.IsRunning() {
+			return fmt.Errorf("Container %s already running", name)
+		}
+
+		if !container.IsCheckpointed() {
+			return fmt.Errorf("Container %s is not checkpointed", name)
+		}
+	} else {
+		if !container.HasBeenCheckpointed() && opts.ImagesDirectory == "" {
+			return fmt.Errorf("You must specify an image directory to restore container %s from", name)
+		}
+	}
+
+	if err = container.Restore(opts, forceRestore); err != nil {
+		container.LogEvent("die")
+		return fmt.Errorf("Cannot restore container %s: %s", name, err)
+	}
+
+	container.LogEvent("restore")
+	return nil
+}
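
ContainerRestore distinguishes two modes, which the sketch below makes concrete. Assumptions: d is an initialized *Daemon, the container name and image path are hypothetical, and only the CriuOpts fields the code above actually reads are set:

// Assumes: import "github.com/docker/libcontainer"
func restoreExamples(d *Daemon) error {
	// Normal restore: the container must not be running and must be in
	// the checkpointed state (IsCheckpointed() == true).
	if err := d.ContainerRestore("mycontainer", &libcontainer.CriuOpts{}, false); err != nil {
		return err
	}

	// Forced restore: skips both checks, but the container must either
	// have been checkpointed at some point (HasBeenCheckpointed()) or an
	// explicit ImagesDirectory must point at an existing CRIU dump.
	opts := &libcontainer.CriuOpts{
		ImagesDirectory: "/tmp/criu-images/mycontainer", // hypothetical path
	}
	return d.ContainerRestore("mycontainer", opts, true)
}
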
diff --git a/daemon/container.go b/daemon/container.go
index 7a59bec5d78db..53a2886f2547e 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -13,6 +13,7 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/label"
 
 	"github.com/Sirupsen/logrus"
@@ -255,7 +256,7 @@ func (container *Container) Start() (err error) {
 	if err := container.Mount(); err != nil {
 		return err
 	}
-	if err := container.initializeNetworking(); err != nil {
+	if err := container.initializeNetworking(false); err != nil {
 		return err
 	}
 	container.verifyDaemonSettings()
@@ -342,7 +343,11 @@ func (container *Container) isNetworkAllocated() bool {
 // cleanup releases any network resources allocated to the container along with any rules
 // around how containers are linked together. It also unmounts the container's root filesystem.
 func (container *Container) cleanup() {
-	container.ReleaseNetwork()
+	if container.IsCheckpointed() {
+		logrus.Debugf("not calling ReleaseNetwork() for checkpointed container %s", container.ID)
+	} else {
+		container.ReleaseNetwork()
+	}
 
 	disableAllActiveLinks(container)
 
@@ -564,6 +569,55 @@ func validateID(id string) error {
 	return nil
 }
 
+func (container *Container) Checkpoint(opts *libcontainer.CriuOpts) error {
+	if err := container.daemon.Checkpoint(container, opts); err != nil {
+		return err
+	}
+
+	if !opts.LeaveRunning {
+		container.ReleaseNetwork()
+	}
+	return nil
+}
+
+func (container *Container) Restore(opts *libcontainer.CriuOpts, forceRestore bool) error {
+	var err error
+	container.Lock()
+	defer container.Unlock()
+
+	defer func() {
+		if err != nil {
+			container.cleanup()
+		}
+	}()
+	if err = container.Mount(); err != nil {
+		return err
+	}
+	if err = container.initializeNetworking(true); err != nil {
+		return err
+	}
+	container.verifyDaemonSettings()
+
+	linkedEnv, err := container.setupLinkedContainers()
+	if err != nil {
+		return err
+	}
+	if err = container.setupWorkingDirectory(); err != nil {
+		return err
+	}
+
+	env := container.createDaemonEnvironment(linkedEnv)
+	if err = populateCommand(container, env); err != nil {
+		return err
+	}
+
+	if err = container.setupMounts(); err != nil {
+		return err
+	}
+
+	return container.waitForRestore(opts, forceRestore)
+}
+
 func (container *Container) Copy(resource string) (io.ReadCloser, error) {
 	container.Lock()
 	defer container.Unlock()
@@ -709,6 +763,26 @@ func (container *Container) waitForStart() error {
 	return nil
 }
 
+func (container *Container) waitForRestore(opts *libcontainer.CriuOpts, forceRestore bool) error {
+	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)
+
+	// After calling promise.Go() we'll have two goroutines:
+	// - The current goroutine that will block in the select
+	//   below until restore is done.
+	// - A new goroutine that will restore the container and
+	//   wait for it to exit.
+	select {
+	case <-container.monitor.restoreSignal:
+		if container.ExitCode != 0 {
+			return fmt.Errorf("restore process failed")
+		}
+	case err := <-promise.Go(func() error { return container.monitor.Restore(opts, forceRestore) }):
+		return err
+	}
+
+	return nil
+}
+
 func (container *Container) GetProcessLabel() string {
 	// even if we have a process label return "" if we are running
 	// in privileged mode
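
waitForRestore relies on a small synchronization pattern: the caller blocks until either the monitor closes restoreSignal (restore reached the running state) or the monitor goroutine fails first and delivers an error. Below is a self-contained model of that pattern; promiseGo is a stand-in for docker's pkg/promise.Go, and the "monitor" closure fakes the real restore:

package main

import "fmt"

// promiseGo mirrors the behaviour of pkg/promise.Go: run f in a
// goroutine and deliver its error on a buffered channel.
func promiseGo(f func() error) chan error {
	errCh := make(chan error, 1)
	go func() { errCh <- f() }()
	return errCh
}

func main() {
	restoreSignal := make(chan struct{})

	monitor := func() error {
		// The real monitor restores the container, then closes
		// restoreSignal from restoreCallback() before blocking until
		// the container exits.
		close(restoreSignal)
		select {} // stand-in for waiting on container exit
	}

	select {
	case <-restoreSignal:
		fmt.Println("restore reached the running state")
	case err := <-promiseGo(monitor):
		fmt.Println("restore failed early:", err)
	}
}
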
diff --git a/daemon/container_linux.go b/daemon/container_linux.go
index 72e840927525a..52b44b879ebe6 100644
--- a/daemon/container_linux.go
+++ b/daemon/container_linux.go
@@ -637,7 +637,7 @@ func (container *Container) UpdateNetwork() error {
 	return nil
 }
 
-func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointOption, error) {
+func (container *Container) buildCreateEndpointOptions(restoring bool) ([]libnetwork.EndpointOption, error) {
 	var (
 		portSpecs = make(nat.PortSet)
 		bindings  = make(nat.PortMap)
@@ -718,10 +718,18 @@ func (container *Container) buildCreateEndpointOptions() ([]libnetwork.EndpointO
 		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
 	}
 
+	/*if restoring && container.NetworkSettings.IPAddress != "" {
+		genericOption := options.Generic{
+			netlabel.IPAddress: net.ParseIP(container.NetworkSettings.IPAddress),
+		}
+
+		createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
+	}*/
+
 	return createOptions, nil
 }
 
-func (container *Container) AllocateNetwork() error {
+func (container *Container) AllocateNetwork(restoring bool) error {
 	mode := container.hostConfig.NetworkMode
 	if container.Config.NetworkDisabled || mode.IsContainer() {
 		return nil
@@ -734,7 +742,7 @@ func (container *Container) AllocateNetwork() error {
 		return fmt.Errorf("error locating network with name %s: %v", string(mode), err)
 	}
 
-	createOptions, err := container.buildCreateEndpointOptions()
+	createOptions, err := container.buildCreateEndpointOptions(restoring)
 	if err != nil {
 		return err
 	}
@@ -768,7 +776,7 @@ func (container *Container) AllocateNetwork() error {
 	return nil
 }
 
-func (container *Container) initializeNetworking() error {
+func (container *Container) initializeNetworking(restoring bool) error {
 	var err error
 
 	// Make sure NetworkMode has an acceptable value before
@@ -809,7 +817,7 @@ func (container *Container) initializeNetworking() error {
 
 	}
 
-	if err := container.AllocateNetwork(); err != nil {
+	if err := container.AllocateNetwork(restoring); err != nil {
 		return err
 	}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 96c4e9c7365b9..43b47585e7bcd 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -14,6 +14,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/label"
 	"github.com/docker/libnetwork"
 	"github.com/docker/libnetwork/netlabel"
@@ -279,6 +280,18 @@ func (daemon *Daemon) restore() error {
 			logrus.Debugf("Loaded container %v", container.ID)
 
 			containers[container.ID] = container
+
+			// If the container was checkpointed, we need to reserve
+			// the IP address that it was using.
+			//
+			// XXX We should also reserve host ports (if any).
+			if container.IsCheckpointed() {
+				/*err = bridge.ReserveIP(container.ID, container.NetworkSettings.IPAddress)
+				if err != nil {
+					log.Errorf("Failed to reserve IP %s for container %s",
+						container.ID, container.NetworkSettings.IPAddress)
+				}*/
+			}
 		} else {
 			logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
 		}
@@ -1048,6 +1061,25 @@ func (daemon *Daemon) Run(c *Container, pipes *execdriver.Pipes, startCallback e
 	return daemon.execDriver.Run(c.command, pipes, startCallback)
 }
 
+func (daemon *Daemon) Checkpoint(c *Container, opts *libcontainer.CriuOpts) error {
+	if err := daemon.execDriver.Checkpoint(c.command, opts); err != nil {
+		return err
+	}
+	c.SetCheckpointed(opts.LeaveRunning)
+	return nil
+}
+
+func (daemon *Daemon) Restore(c *Container, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts, forceRestore bool) (execdriver.ExitStatus, error) {
+	// Mount the container's filesystem (daemon/graphdriver/aufs/aufs.go).
+	_, err := daemon.driver.Get(c.ID, c.GetMountLabel())
+	if err != nil {
+		return execdriver.ExitStatus{ExitCode: 0}, err
+	}
+
+	exitCode, err := daemon.execDriver.Restore(c.command, pipes, restoreCallback, opts, forceRestore)
+	return exitCode, err
+}
+
 func (daemon *Daemon) Kill(c *Container, sig int) error {
 	return daemon.execDriver.Kill(c.command, sig)
 }
diff --git a/daemon/execdriver/driver.go b/daemon/execdriver/driver.go
index eca77e921eaac..54d3955bb040c 100644
--- a/daemon/execdriver/driver.go
+++ b/daemon/execdriver/driver.go
@@ -24,6 +24,7 @@ var (
 )
 
 type StartCallback func(*ProcessConfig, int)
+type RestoreCallback func(*ProcessConfig, int)
 
 // Driver specific information based on
 // processes registered with the driver
@@ -59,6 +60,8 @@ type Driver interface {
 	Kill(c *Command, sig int) error
 	Pause(c *Command) error
 	Unpause(c *Command) error
+	Checkpoint(c *Command, opts *libcontainer.CriuOpts) error
+	Restore(c *Command, pipes *Pipes, restoreCallback RestoreCallback, opts *libcontainer.CriuOpts, forceRestore bool) (ExitStatus, error)
 	Name() string                                 // Driver name
 	Info(id string) Info                          // "temporary" hack (until we move state from core to plugins)
 	GetPidsForContainer(id string) ([]int, error) // Returns a list of pids for the given container.
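
These two interface additions break every execdriver implementation, so each driver must supply the methods even where CRIU is unavailable. A minimal conforming stub for such a driver might look like the sketch below (the lxc driver in the next hunk does exactly this; "windows" as the driver name is illustrative only):

func (d *driver) Checkpoint(c *execdriver.Command, opts *libcontainer.CriuOpts) error {
	// No CRIU on this platform: fail loudly instead of pretending.
	return fmt.Errorf("Checkpointing windows containers not supported")
}

func (d *driver) Restore(c *execdriver.Command, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts, forceRestore bool) (execdriver.ExitStatus, error) {
	return execdriver.ExitStatus{ExitCode: 0}, fmt.Errorf("Restoring windows containers not supported")
}
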
diff --git a/daemon/execdriver/lxc/driver.go b/daemon/execdriver/lxc/driver.go
index 4b5730a3f4f42..fe5bdc06b45c4 100644
--- a/daemon/execdriver/lxc/driver.go
+++ b/daemon/execdriver/lxc/driver.go
@@ -547,6 +547,14 @@ func (d *driver) Unpause(c *execdriver.Command) error {
 	return err
 }
 
+func (d *driver) Checkpoint(c *execdriver.Command, opts *libcontainer.CriuOpts) error {
+	return fmt.Errorf("Checkpointing lxc containers not supported yet")
+}
+
+func (d *driver) Restore(c *execdriver.Command, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts, forceRestore bool) (execdriver.ExitStatus, error) {
+	return execdriver.ExitStatus{ExitCode: 0}, fmt.Errorf("Restoring lxc containers not supported yet")
+}
+
 func (d *driver) Terminate(c *execdriver.Command) error {
 	return KillLxc(c.ID, 9)
 }
diff --git a/daemon/execdriver/native/driver.go b/daemon/execdriver/native/driver.go
index 1e1df1f21bfbf..8e1bb6c53e1fa 100644
--- a/daemon/execdriver/native/driver.go
+++ b/daemon/execdriver/native/driver.go
@@ -274,6 +274,101 @@ func (d *driver) Unpause(c *execdriver.Command) error {
 	return active.Resume()
}
 
+func (d *driver) Checkpoint(c *execdriver.Command, opts *libcontainer.CriuOpts) error {
+	active := d.activeContainers[c.ID]
+	if active == nil {
+		return fmt.Errorf("active container for %s does not exist", c.ID)
+	}
+
+	d.Lock()
+	defer d.Unlock()
+	err := active.Checkpoint(opts)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (d *driver) Restore(c *execdriver.Command, pipes *execdriver.Pipes, restoreCallback execdriver.RestoreCallback, opts *libcontainer.CriuOpts, forceRestore bool) (execdriver.ExitStatus, error) {
+	var (
+		cont libcontainer.Container
+		err  error
+	)
+
+	cont, err = d.factory.Load(c.ID)
+	if err != nil {
+		if forceRestore {
+			var config *configs.Config
+			config, err = d.createContainer(c)
+			if err != nil {
+				return execdriver.ExitStatus{ExitCode: -1}, err
+			}
+			cont, err = d.factory.Create(c.ID, config)
+			if err != nil {
+				return execdriver.ExitStatus{ExitCode: -1}, err
+			}
+		} else {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+	}
+
+	p := &libcontainer.Process{
+		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
+		Env:  c.ProcessConfig.Env,
+		Cwd:  c.WorkingDir,
+		User: c.ProcessConfig.User,
+	}
+
+	config := cont.Config()
+	if err := setupPipes(&config, &c.ProcessConfig, p, pipes); err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	d.Lock()
+	d.activeContainers[c.ID] = cont
+	d.Unlock()
+	defer func() {
+		cont.Destroy()
+		d.cleanContainer(c.ID)
+	}()
+
+	if err := cont.Restore(p, opts); err != nil {
+		return execdriver.ExitStatus{ExitCode: -1}, err
+	}
+
+	// FIXME: no idea if any of this is needed...
+	if restoreCallback != nil {
+		pid, err := p.Pid()
+		if err != nil {
+			p.Signal(os.Kill)
+			p.Wait()
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		restoreCallback(&c.ProcessConfig, pid)
+	}
+
+	oom := notifyOnOOM(cont)
+	waitF := p.Wait
+	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
+		// we need such hack for tracking processes with inherited fds,
+		// because cmd.Wait() waiting for all streams to be copied
+		waitF = waitInPIDHost(p, cont)
+	}
+	ps, err := waitF()
+	if err != nil {
+		execErr, ok := err.(*exec.ExitError)
+		if !ok {
+			return execdriver.ExitStatus{ExitCode: -1}, err
+		}
+		ps = execErr.ProcessState
+	}
+
+	cont.Destroy()
+	_, oomKill := <-oom
+	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
+}
+
 func (d *driver) Terminate(c *execdriver.Command) error {
 	defer d.cleanContainer(c.ID)
 	container, err := d.factory.Load(c.ID)
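
The tail of Restore() reuses Run()'s exit-status plumbing: a non-zero exit comes back from Wait() as an *exec.ExitError whose ProcessState wraps a syscall.WaitStatus, and utils.ExitStatus simply calls ExitStatus() on it. A self-contained illustration of that unwrapping, runnable on any Unix host:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	cmd := exec.Command("false") // exits with status 1
	err := cmd.Run()

	ps := cmd.ProcessState
	if err != nil {
		// A non-zero exit surfaces as *exec.ExitError; anything else
		// (e.g. the binary could not be started) is a real error.
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			panic(err)
		}
		ps = execErr.ProcessState
	}

	ws := ps.Sys().(syscall.WaitStatus)
	fmt.Println("exit code:", ws.ExitStatus()) // prints 1
}
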
diff --git a/daemon/monitor.go b/daemon/monitor.go
index dfade8e21847b..42682ee6ec04d 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -10,6 +10,7 @@ import (
 	"github.com/docker/docker/daemon/execdriver"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/runconfig"
+	"github.com/docker/libcontainer"
 )
 
 const defaultTimeIncrement = 100
@@ -44,6 +45,9 @@ type containerMonitor struct {
 	// left waiting for nothing to happen during this time
 	stopChan chan struct{}
 
+	// like startSignal but for restoring a container
+	restoreSignal chan struct{}
+
 	// timeIncrement is the amount of time to wait between restarts
 	// this is in milliseconds
 	timeIncrement int
@@ -61,6 +65,7 @@ func newContainerMonitor(container *Container, policy runconfig.RestartPolicy) *
 		timeIncrement: defaultTimeIncrement,
 		stopChan:      make(chan struct{}),
 		startSignal:   make(chan struct{}),
+		restoreSignal: make(chan struct{}),
 	}
 }
 
@@ -181,6 +186,51 @@ func (m *containerMonitor) Start() error {
 	}
 }
 
+// Restore is like Start() but restores a checkpointed container.
+func (m *containerMonitor) Restore(opts *libcontainer.CriuOpts, forceRestore bool) error {
+	var (
+		err error
+		// XXX This variable should be renamed to exitStatus to
+		// match Start().
+		exitCode     execdriver.ExitStatus
+		afterRestore bool
+	)
+	defer func() {
+		if afterRestore {
+			m.container.Lock()
+			m.container.setStopped(&execdriver.ExitStatus{ExitCode: exitCode.ExitCode, OOMKilled: false})
+			defer m.container.Unlock()
+		}
+		m.Close()
+	}()
+
+	// FIXME: right now if we startLogging again we get double logs after a restore
+	if m.container.logCopier == nil {
+		if err := m.container.startLogging(); err != nil {
+			m.resetContainer(false)
+			return err
+		}
+	}
+
+	pipes := execdriver.NewPipes(m.container.stdin, m.container.stdout, m.container.stderr, m.container.Config.OpenStdin)
+
+	m.container.LogEvent("restore")
+	m.lastStartTime = time.Now()
+	if exitCode, err = m.container.daemon.Restore(m.container, pipes, m.restoreCallback, opts, forceRestore); err != nil {
+		logrus.Errorf("Error restoring container: %s, exitCode=%d", err, exitCode.ExitCode)
+		m.container.ExitCode = -1
+		m.resetContainer(false)
+		return err
+	}
+	afterRestore = true
+
+	m.container.ExitCode = exitCode.ExitCode
+	m.resetMonitor(err == nil && exitCode.ExitCode == 0)
+	m.container.LogEvent("die")
+	m.resetContainer(true)
+	return err
+}
+
 // resetMonitor resets the stateful fields on the containerMonitor based on the
 // previous run's success or failure. Regardless of success, if the container had
 // an execution time of more than 10s then reset the timer back to the default
@@ -267,6 +317,29 @@ func (m *containerMonitor) callback(processConfig *execdriver.ProcessConfig, pid
 	}
 }
 
+// restoreCallback is like callback() but for restoring a container.
+func (m *containerMonitor) restoreCallback(processConfig *execdriver.ProcessConfig, restorePid int) {
+	// If restorePid is 0, the restore failed.
+	if restorePid != 0 {
+		m.container.setRunning(restorePid)
+	}
+
+	// Unblock the goroutine waiting in waitForRestore().
+	select {
+	case <-m.restoreSignal:
+	default:
+		close(m.restoreSignal)
+	}
+
+	if restorePid != 0 {
+		// Write config.json and hostconfig.json files
+		// to /var/lib/docker/containers/.
+		if err := m.container.ToDisk(); err != nil {
+			logrus.Debugf("%s", err)
+		}
+	}
+}
+
 // resetContainer resets the container's IO and ensures that the command is able to be executed again
 // by copying the data into a new struct
 // if lock is true, then container locked during reset
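
restoreCallback() closes restoreSignal through a select/default guard because closing an already-closed channel panics, and the callback can in principle fire more than once (callback() protects startSignal the same way). The guard in isolation — note it assumes calls are serialized, since two truly concurrent callers could both reach the default branch:

package main

import "fmt"

// signalOnce closes ch at most once. Receiving from a closed channel
// succeeds immediately, so a second call falls into the first case and
// does nothing.
func signalOnce(ch chan struct{}) {
	select {
	case <-ch:
	default:
		close(ch)
	}
}

func main() {
	restoreSignal := make(chan struct{})
	signalOnce(restoreSignal)
	signalOnce(restoreSignal) // no panic: the close is skipped
	fmt.Println("signalled exactly once")
}
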
diff --git a/daemon/state.go b/daemon/state.go
index 4119d0e6cd924..aca36a5815b85 100644
--- a/daemon/state.go
+++ b/daemon/state.go
@@ -14,6 +14,7 @@ type State struct {
 	Running bool
 	Paused bool
 	Restarting bool
+	Checkpointed bool
 	OOMKilled bool
 	removalInProgress bool // No need for this to be persistent on disk.
 	Dead bool
@@ -22,6 +23,7 @@ type State struct {
 	Error string // contains last known error when starting the container
 	StartedAt time.Time
 	FinishedAt time.Time
+	CheckpointedAt time.Time
 	waitChan chan struct{}
 }
 
@@ -48,6 +50,10 @@ func (s *State) String() string {
 		return "Removal In Progress"
 	}
 
+	if s.Checkpointed {
+		return fmt.Sprintf("Checkpointed %s ago", units.HumanDuration(time.Now().UTC().Sub(s.CheckpointedAt)))
+	}
+
 	if s.Dead {
 		return "Dead"
 	}
@@ -71,6 +77,10 @@ func (s *State) StateString() string {
 		return "running"
 	}
 
+	if s.Checkpointed {
+		return "checkpointed"
+	}
+
 	if s.Dead {
 		return "dead"
 	}
@@ -158,6 +168,7 @@ func (s *State) setRunning(pid int) {
 	s.Error = ""
 	s.Running = true
 	s.Paused = false
+	s.Checkpointed = false
 	s.Restarting = false
 	s.ExitCode = 0
 	s.Pid = pid
@@ -254,3 +265,24 @@ func (s *State) SetDead() {
 	s.Dead = true
 	s.Unlock()
 }
+
+func (s *State) SetCheckpointed(leaveRunning bool) {
+	s.Lock()
+	s.CheckpointedAt = time.Now().UTC()
+	s.Checkpointed = !leaveRunning
+	s.Running = leaveRunning
+	s.Paused = false
+	s.Restarting = false
+	// XXX Not sure if we need to close and recreate waitChan.
+	// close(s.waitChan)
+	// s.waitChan = make(chan struct{})
+	s.Unlock()
+}
+
+// HasBeenCheckpointed reports whether the container was checkpointed
+// at least once, even if it has been started again since.
+func (s *State) HasBeenCheckpointed() bool {
+	return !s.CheckpointedAt.IsZero()
+}
+
+// IsCheckpointed reports whether the container is currently in the
+// checkpointed state, i.e. it was checkpointed and left stopped.
+func (s *State) IsCheckpointed() bool {
+	return s.Checkpointed
+}
diff --git a/docker/flags.go b/docker/flags.go
index cbdb6a859deb5..fcf3f77db41b9 100644
--- a/docker/flags.go
+++ b/docker/flags.go
@@ -30,6 +30,7 @@ var (
 	dockerCommands = []command{
 		{"attach", "Attach to a running container"},
 		{"build", "Build an image from a Dockerfile"},
+		{"checkpoint", "Checkpoint one or more running containers"},
 		{"commit", "Create a new image from a container's changes"},
 		{"cp", "Copy files/folders from a container's filesystem to the host path"},
 		{"create", "Create a new container"},
@@ -54,6 +55,7 @@ var (
 		{"push", "Push an image or a repository to a Docker registry server"},
 		{"rename", "Rename an existing container"},
 		{"restart", "Restart a running container"},
+		{"restore", "Restore one or more checkpointed containers"},
 		{"rm", "Remove one or more containers"},
 		{"rmi", "Remove one or more images"},
 		{"run", "Run a command in a new container"},
diff --git a/runconfig/restore.go b/runconfig/restore.go
new file mode 100644
index 0000000000000..18749b67e766f
--- /dev/null
+++ b/runconfig/restore.go
@@ -0,0 +1,10 @@
+package runconfig
+
+import (
+	"github.com/docker/libcontainer"
+)
+
+type RestoreConfig struct {
+	CriuOpts     libcontainer.CriuOpts
+	ForceRestore bool
+}
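
For reference, this is the request-body shape postContainersRestore decodes; the sketch below prints it. Assumptions: field names are the Go ones since neither struct declares json tags, the two CriuOpts fields shown are the only ones this patch reads, and the image path is hypothetical:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/runconfig"
	"github.com/docker/libcontainer"
)

func main() {
	cfg := runconfig.RestoreConfig{
		CriuOpts: libcontainer.CriuOpts{
			ImagesDirectory: "/tmp/criu-images/mycontainer", // hypothetical
			LeaveRunning:    false,
		},
		ForceRestore: true,
	}
	// Other CriuOpts fields appear with their zero values as well.
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}
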