diff --git a/lxd/instance/drivers/driver_lxc.go b/lxd/instance/drivers/driver_lxc.go
index 5f134659b9bd..2cbc17edf1be 100644
--- a/lxd/instance/drivers/driver_lxc.go
+++ b/lxd/instance/drivers/driver_lxc.go
@@ -8044,7 +8044,9 @@ func (d *lxc) IsPrivileged() bool {
 
 // IsRunning returns if instance is running.
 func (d *lxc) IsRunning() bool {
-	return d.isRunningStatusCode(d.statusCode())
+	s := d.statusCode()
+	fmt.Println(s.String())
+	return d.isRunningStatusCode(s)
 }
 
 // CanMigrate returns whether the instance can be migrated.
@@ -8142,6 +8144,8 @@ func (d *lxc) NextIdmap() (*idmap.IdmapSet, error) {
 // statusCode returns instance status code.
 func (d *lxc) statusCode() api.StatusCode {
 	// If instance is running on a remote cluster member, we cannot determine instance state.
+	fmt.Println(d.state.ServerName)
+	fmt.Println(d.Location())
 	if d.state.ServerName != d.Location() {
 		return api.Error
 	}
diff --git a/lxd/instances.go b/lxd/instances.go
index 7efe90710d49..7197d97151b0 100644
--- a/lxd/instances.go
+++ b/lxd/instances.go
@@ -455,6 +455,8 @@ func instancesOnDisk(s *state.State) ([]instance.Instance, error) {
 }
 
 func instancesShutdown(instances []instance.Instance) {
+	fmt.Println("instance shutdown")
+
 	sort.Sort(instanceStopList(instances))
 
 	// Limit shutdown concurrency to number of instances or number of CPU cores (which ever is less).
@@ -462,6 +464,7 @@ func instancesShutdown(instances []instance.Instance) {
 	instShutdownCh := make(chan instance.Instance)
 	maxConcurrent := runtime.NumCPU()
 	instCount := len(instances)
+	fmt.Printf("%d instances\n", len(instances))
 	if instCount < maxConcurrent {
 		maxConcurrent = instCount
 	}
@@ -469,6 +472,8 @@ func instancesShutdown(instances []instance.Instance) {
 	for i := 0; i < maxConcurrent; i++ {
 		go func(instShutdownCh <-chan instance.Instance) {
 			for inst := range instShutdownCh {
+				fmt.Printf("shutting down %s\n", inst.Name())
+
 				// Determine how long to wait for the instance to shutdown cleanly.
 				timeoutSeconds := 30
 				value, ok := inst.ExpandedConfig()["boot.host_shutdown_timeout"]
@@ -478,11 +483,15 @@ func instancesShutdown(instances []instance.Instance) {
 
 				err := inst.Shutdown(time.Second * time.Duration(timeoutSeconds))
 				if err != nil {
+					fmt.Printf("failed shutting down %s\n", inst.Name())
 					logger.Warn("Failed shutting down instance, forcefully stopping", logger.Ctx{"project": inst.Project().Name, "instance": inst.Name(), "err": err})
 					err = inst.Stop(false)
 					if err != nil {
+						fmt.Printf("failed shutting down %s forcefully\n", inst.Name())
 						logger.Warn("Failed forcefully stopping instance", logger.Ctx{"project": inst.Project().Name, "instance": inst.Name(), "err": err})
 					}
+				} else {
+					fmt.Printf("succeeded shutting down %s\n", inst.Name())
 				}
 
 				if inst.ID() > 0 {
@@ -499,8 +508,11 @@ func instancesShutdown(instances []instance.Instance) {
 	var currentBatchPriority int
 
 	for i, inst := range instances {
+		fmt.Printf("looping through %s\n", inst.Name())
+
 		// Skip stopped instances.
 		if !inst.IsRunning() {
+			fmt.Printf("skipping %s\n", inst.Name())
 			continue
 		}
@@ -516,6 +528,8 @@ func instancesShutdown(instances []instance.Instance) {
 		}
 
 		wg.Add(1)
+
+		fmt.Printf("adding %s to channel\n", inst.Name())
 		instShutdownCh <- inst
 	}
diff --git a/test/suites/clustering.sh b/test/suites/clustering.sh
index 92c1ecb9136f..aff08ac3cc78 100644
--- a/test/suites/clustering.sh
+++ b/test/suites/clustering.sh
@@ -1894,6 +1894,9 @@ test_clustering_shutdown_nodes() {
   wait "${daemon_pid1}"
 
   # Container foo shouldn't be running anymore
+  stat "/proc/${instance_pid}"
+  sleep 20
+  stat "/proc/${instance_pid}"
   [ ! -e "/proc/${instance_pid}" ]
 
   teardown_clustering_netns
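
For anyone reading the resulting output: the suspected interaction is that statusCode() reports api.Error for any instance whose recorded location differs from the local server name, and IsRunning() then treats that as not running, so the shutdown loop's "Skip stopped instances" branch skips the instance entirely. Below is a minimal standalone sketch of that logic. The numeric status codes and the body of isRunningStatusCode() are assumptions here (only statusCode() appears in this diff); the exact values don't matter for the behavior being shown.

package main

import "fmt"

// StatusCode stands in for api.StatusCode; values mirror the LXD API codes.
type StatusCode int

const (
	Stopped StatusCode = 102
	Running StatusCode = 103
	Error   StatusCode = 400
)

func (s StatusCode) String() string {
	return map[StatusCode]string{Stopped: "Stopped", Running: "Running", Error: "Error"}[s]
}

// statusCode models the check instrumented above: state of an instance on a
// remote cluster member cannot be determined, so Error is returned.
func statusCode(serverName, location string) StatusCode {
	if serverName != location {
		return Error
	}
	return Running
}

// isRunningStatusCode is assumed (not shown in this diff) to treat Error the
// same as Stopped, i.e. as "not running".
func isRunningStatusCode(s StatusCode) bool {
	return s != Error && s != Stopped
}

func main() {
	// ServerName and Location disagree, as they would for an instance
	// reported on another member during a cluster-wide shutdown.
	s := statusCode("node1", "node2")
	fmt.Println(s.String(), isRunningStatusCode(s)) // prints "Error false"
}

If that holds, the prints above should show mismatched ServerName/Location pairs followed by "skipping foo" while the node shuts down, and the container's PID would survive in /proc, matching the failing assertion in the test.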
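
The prints in instancesShutdown() interleave across goroutines, so for reference here is a stripped-down sketch of its dispatch pattern: a worker pool bounded by min(NumCPU, instance count) draining a channel, with a WaitGroup tracking completion. The batch-priority waits, shutdown timeouts, and error handling are omitted, and the names are illustrative rather than the LXD ones.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	instances := []string{"foo", "bar", "baz"}

	// Bound concurrency by CPU count or instance count, whichever is less.
	maxConcurrent := runtime.NumCPU()
	if len(instances) < maxConcurrent {
		maxConcurrent = len(instances)
	}

	var wg sync.WaitGroup
	instShutdownCh := make(chan string)

	// Workers drain the channel; "shutting down" lines come from here.
	for i := 0; i < maxConcurrent; i++ {
		go func() {
			for name := range instShutdownCh {
				fmt.Printf("shutting down %s\n", name)
				wg.Done()
			}
		}()
	}

	// Dispatch loop; "looping through"/"adding to channel" lines come from
	// here, and a skipped instance never reaches the channel at all.
	for _, name := range instances {
		fmt.Printf("adding %s to channel\n", name)
		wg.Add(1)
		instShutdownCh <- name
	}

	wg.Wait()
	close(instShutdownCh)
}

So an "adding X to channel" line without a matching "shutting down X" would point at the workers, while a "skipping X" line means the instance never entered the pool, which is the path the IsRunning() sketch above is about.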