diff --git a/pkg/sentry/kernel/kernel.go b/pkg/sentry/kernel/kernel.go index 513aec2d38..7ff159f90f 100644 --- a/pkg/sentry/kernel/kernel.go +++ b/pkg/sentry/kernel/kernel.go @@ -490,6 +490,11 @@ func (k *Kernel) Init(args InitKernelArgs) error { k.cpuClockTickerWakeCh = make(chan struct{}, 1) k.cpuClockTickerStopCond.L = &k.runningTasksMu k.applicationCores = args.ApplicationCores + if args.UseHostCores && k.HasCPUNumbers() { + args.UseHostCores = false + log.Infof("UseHostCores enabled but the platform implements HasCPUNumbers(): setting UseHostCores to false") + } + if args.UseHostCores { k.useHostCores = true maxCPU, err := hostcpu.MaxPossibleCPU() @@ -502,6 +507,15 @@ func (k *Kernel) Init(args InitKernelArgs) error { k.applicationCores = minAppCores } } + + if k.HasCPUNumbers() { + if k.applicationCores < uint(k.NumCPUs()) { + log.Infof("ApplicationCores is less than NumCPUs: %d < %d", k.applicationCores, k.NumCPUs()) + log.Infof("Setting applicationCores to NumCPUs: %d", k.NumCPUs()) + k.applicationCores = uint(k.NumCPUs()) + } + } + k.extraAuxv = args.ExtraAuxv k.vdso = args.Vdso k.vdsoParams = args.VdsoParams diff --git a/pkg/sentry/kernel/rseq.go b/pkg/sentry/kernel/rseq.go index 033cdb0607..61cd11f58a 100644 --- a/pkg/sentry/kernel/rseq.go +++ b/pkg/sentry/kernel/rseq.go @@ -20,7 +20,6 @@ import ( "gvisor.dev/gvisor/pkg/abi/linux" "gvisor.dev/gvisor/pkg/errors/linuxerr" "gvisor.dev/gvisor/pkg/hostarch" - "gvisor.dev/gvisor/pkg/sentry/hostcpu" "gvisor.dev/gvisor/pkg/usermem" ) @@ -50,7 +49,7 @@ type OldRSeqCriticalRegion struct { // RSeqAvailable returns true if t supports (old and new) restartable sequences. func (t *Task) RSeqAvailable() bool { - return t.k.useHostCores && t.k.Platform.DetectsCPUPreemption() + return (t.k.useHostCores || t.k.Platform.HasCPUNumbers()) && t.k.Platform.DetectsCPUPreemption() } // SetRSeq registers addr as this thread's rseq structure. @@ -201,7 +200,7 @@ func (t *Task) rseqUpdateCPU() error { return nil } - t.rseqCPU = int32(hostcpu.GetCPU()) + t.rseqCPU = t.CPU() // Update both CPUs, even if one fails. rerr := t.rseqCopyOutCPU() diff --git a/pkg/sentry/kernel/task_run.go b/pkg/sentry/kernel/task_run.go index 79e374b3f0..6f7e44e55d 100644 --- a/pkg/sentry/kernel/task_run.go +++ b/pkg/sentry/kernel/task_run.go @@ -24,7 +24,6 @@ import ( "gvisor.dev/gvisor/pkg/goid" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/refs" - "gvisor.dev/gvisor/pkg/sentry/hostcpu" "gvisor.dev/gvisor/pkg/sentry/ktime" "gvisor.dev/gvisor/pkg/sentry/memmap" "gvisor.dev/gvisor/pkg/sentry/platform" @@ -207,7 +206,7 @@ func (app *runApp) execute(t *Task) taskRunState { if t.rseqPreempted { t.rseqPreempted = false if t.rseqAddr != 0 || t.oldRSeqCPUAddr != 0 { - t.rseqCPU = int32(hostcpu.GetCPU()) + t.rseqCPU = t.CPU() if err := t.rseqCopyOutCPU(); err != nil { t.Debugf("Failed to copy CPU to %#x for rseq: %v", t.rseqAddr, err) t.forceSignal(linux.SIGSEGV, false) diff --git a/pkg/sentry/kernel/task_sched.go b/pkg/sentry/kernel/task_sched.go index ff34f0c107..39fe9f0144 100644 --- a/pkg/sentry/kernel/task_sched.go +++ b/pkg/sentry/kernel/task_sched.go @@ -365,7 +365,7 @@ func (t *Task) SetCPUMask(mask sched.CPUSet) error { return linuxerr.EINVAL } - if t.k.useHostCores { + if t.k.useHostCores || t.k.Platform.HasCPUNumbers() { // No-op; pretend the mask was immediately changed back. return nil } @@ -383,6 +383,10 @@ func (t *Task) SetCPUMask(mask sched.CPUSet) error { // CPU returns the cpu id for a given task. func (t *Task) CPU() int32 { + if t.k.Platform.HasCPUNumbers() { + return t.p.LastCPUNumber() + } + if t.k.useHostCores { return int32(hostcpu.GetCPU()) } diff --git a/pkg/sentry/platform/kvm/config.go b/pkg/sentry/platform/kvm/config.go index 91d5f4e2d9..0a0aaed7d0 100644 --- a/pkg/sentry/platform/kvm/config.go +++ b/pkg/sentry/platform/kvm/config.go @@ -23,6 +23,11 @@ type Config struct { // kernel.InitKernelArgs. It is necessary to forward it to KVM in order // to initialize the correct amount of vCPUs. ApplicationCores int + + // UseCPUNums use KVM CPU numbers as CPU numbers in the sentry. + // This is necessary to support features like rseq and KVM based + // preemption. + UseCPUNums bool } func (*machine) applyConfig(config *Config) error { return nil } diff --git a/pkg/sentry/platform/kvm/context.go b/pkg/sentry/platform/kvm/context.go index 8725c87934..6be7837e85 100644 --- a/pkg/sentry/platform/kvm/context.go +++ b/pkg/sentry/platform/kvm/context.go @@ -16,6 +16,7 @@ package kvm import ( "gvisor.dev/gvisor/pkg/abi/linux" + "gvisor.dev/gvisor/pkg/atomicbitops" pkgcontext "gvisor.dev/gvisor/pkg/context" "gvisor.dev/gvisor/pkg/hostarch" "gvisor.dev/gvisor/pkg/ring0" @@ -36,6 +37,9 @@ type platformContext struct { // interrupt is the interrupt platformContext. interrupt interrupt.Forwarder + + // lastUsedCPU is the last CPU ID used by this platformContext. + lastUsedCPU atomicbitops.Int32 } // tryCPUIDError indicates that CPUID emulation should occur. @@ -45,7 +49,7 @@ type tryCPUIDError struct{} func (tryCPUIDError) Error() string { return "cpuid emulation failed" } // Switch runs the provided platformContext in the given address space. -func (c *platformContext) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac *arch.Context64, _ int32) (*linux.SignalInfo, hostarch.AccessType, error) { +func (c *platformContext) Switch(ctx pkgcontext.Context, mm platform.MemoryManager, ac *arch.Context64, rseqCPU int32) (*linux.SignalInfo, hostarch.AccessType, error) { as := mm.AddressSpace() localAS := as.(*addressSpace) @@ -58,6 +62,20 @@ restart: c.machine.Put(cpu) // Already preempted. return nil, hostarch.NoAccess, platform.ErrContextInterrupt } + // If this CPU was last used to run a different context + // or if this context last ran on a different CPU, then we've + // been preempted. + last := cpu.lastCtx.Swap(c) + c.lastUsedCPU.Store(int32(cpu.id)) + preempted := rseqCPU >= 0 && (last != c || rseqCPU != int32(cpu.id)) + if preempted { + // Release resources. + c.machine.Put(cpu) + + // All done. + c.interrupt.Disable() + return nil, hostarch.NoAccess, platform.ErrContextCPUPreempted + } // Set the active address space. // @@ -136,3 +154,8 @@ func (c *platformContext) PullFullState(as platform.AddressSpace, ac *arch.Conte // PrepareSleep implements platform.Context.platform.Context. func (*platformContext) PrepareSleep() {} + +// LastCPUNumber implements platform.Context.LastCPUNumber. +func (c *platformContext) LastCPUNumber() int32 { + return c.lastUsedCPU.Load() +} diff --git a/pkg/sentry/platform/kvm/kvm.go b/pkg/sentry/platform/kvm/kvm.go index fb3fbd7480..ed019364cf 100644 --- a/pkg/sentry/platform/kvm/kvm.go +++ b/pkg/sentry/platform/kvm/kvm.go @@ -62,8 +62,6 @@ type runData struct { // KVM represents a lightweight VM context. type KVM struct { - platform.NoCPUPreemptionDetection - // KVM never changes mm_structs. platform.UseHostProcessMemoryBarrier @@ -180,6 +178,41 @@ func (k *KVM) ConcurrencyCount() int { return k.machine.maxVCPUs } +// HasCPUNumbers implements platform.Platform.HasCPUNumbers. +func (k *KVM) HasCPUNumbers() bool { + return k.machine.useCPUNums +} + +// NumCPUs implements platform.Platform.NumCPUs. +func (k *KVM) NumCPUs() int32 { + if !k.HasCPUNumbers() { + panic("platform is not configured to use CPU numbers") + } + return int32(k.machine.maxVCPUs) +} + +// DetectsCPUPreemption implements platform.Platform.DetectsCPUPreemption. +func (k *KVM) DetectsCPUPreemption() bool { + return true +} + +// PreemptAllCPUs implements platform.Platform.PreemptAllCPUs. +func (k *KVM) PreemptAllCPUs() error { + for _, c := range k.machine.vCPUsByID { + c.lastCtx.Store(nil) + c.BounceToHost() + } + return nil +} + +// PreemptCPU implements platform.Platform.PreemptCPU. +func (k *KVM) PreemptCPU(cpu int32) error { + c := k.machine.vCPUsByID[cpu] + c.lastCtx.Store(nil) + c.BounceToHost() + return nil +} + // NewContext returns an interruptible context. func (k *KVM) NewContext(pkgcontext.Context) platform.Context { return &platformContext{ @@ -192,6 +225,7 @@ type constructor struct{} func (*constructor) New(opts platform.Options) (platform.Platform, error) { return New(opts.DeviceFile, Config{ ApplicationCores: opts.ApplicationCores, + UseCPUNums: opts.UseCPUNums, }) } diff --git a/pkg/sentry/platform/kvm/machine.go b/pkg/sentry/platform/kvm/machine.go index 78b7de31a8..42e29672eb 100644 --- a/pkg/sentry/platform/kvm/machine.go +++ b/pkg/sentry/platform/kvm/machine.go @@ -89,6 +89,9 @@ type machine struct { // usedSlots is the set of used physical addresses (not sorted). usedSlots []uintptr + + // useCPUNums indicates whether to enable the use vCPU numbers as CPU numbers. + useCPUNums bool } const ( @@ -216,6 +219,9 @@ type vCPU struct { // dieState holds state related to vCPU death. dieState dieState + + // lastCtx is the last context that was scheduled on this vCPU + lastCtx atomic.Pointer[platformContext] } type dieState struct { @@ -275,6 +281,7 @@ func newMachine(vm int, config *Config) (*machine, error) { m := &machine{ fd: vm, applicationCores: config.ApplicationCores, + useCPUNums: config.UseCPUNums, } m.available.L = &m.mu diff --git a/pkg/sentry/platform/platform.go b/pkg/sentry/platform/platform.go index 272b4df7a4..dddc30219c 100644 --- a/pkg/sentry/platform/platform.go +++ b/pkg/sentry/platform/platform.go @@ -96,6 +96,17 @@ type Platform interface { // NewContext returns a new execution context. NewContext(context.Context) Context + // PreemptAllCPUs causes all concurrent calls to Context.Switch() on the given CPU, as well + // as the first following call to Context.Switch() for each Context, to + // return ErrContextCPUPreempted. + // + // Precondition(s): cpu must be in the range [0, NumCPUs()). + // + // PreemptCPU is only supported if DetectsCPUPremption() && HasCPUNumbers() == true. + // Platforms for which this does not hold may panic if PreemptCPU is + // called. + PreemptCPU(cpu int32) error + // PreemptAllCPUs causes all concurrent calls to Context.Switch(), as well // as the first following call to Context.Switch() for each Context, to // return ErrContextCPUPreempted. @@ -121,6 +132,12 @@ type Platform interface { // in parallel. Concurrent calls to Context.Switch() beyond // ConcurrencyCount() may block until previous calls have returned. ConcurrencyCount() int + + // HasCPUNumbers returns true if the platform assigns CPU numbers to contexts. + HasCPUNumbers() bool + + // NumCPUs returns the number of CPUs on the platform. + NumCPUs() int32 } // NoCPUPreemptionDetection implements Platform.DetectsCPUPreemption and @@ -137,6 +154,25 @@ func (NoCPUPreemptionDetection) PreemptAllCPUs() error { panic("This platform does not support CPU preemption detection") } +// NoCPUNumbers implements Platform.HasCPUNumbers for platforms that do +// not support it. +type NoCPUNumbers struct{} + +// HasCPUNumbers implements Platform.HasCPUNumbers. +func (NoCPUNumbers) HasCPUNumbers() bool { + return false +} + +// NumCPUs implements Platform.NumCPUs. +func (NoCPUNumbers) NumCPUs() int32 { + panic("platform does not support CPU numbers") +} + +// PreemptCPU implements Platform.PreemptCPU. +func (NoCPUNumbers) PreemptCPU(cpu int32) error { + panic("platform does not support preempting a specific CPU") +} + // UseHostGlobalMemoryBarrier implements Platform.HaveGlobalMemoryBarrier and // Platform.GlobalMemoryBarrier by invoking equivalent functionality on the // host. @@ -264,6 +300,16 @@ type Context interface { // PrepareSleep() is called when the thread switches to the // interruptible sleep state. PrepareSleep() + + // LastCPUNumber returns the last CPU number that this context was running on. + // If the context never ran on a CPU, it may return any valid CPU number, as long as the first + // call to Switch will detect that the CPU number is incorrect and return ErrContextCPUPreempted. + LastCPUNumber() int32 +} + +// LastCPUNumber implements Context.LastCPUNumber. +func (NoCPUNumbers) LastCPUNumber() int32 { + panic("context does not support last CPU number") } // ContextError is one of the possible errors returned by Context.Switch(). @@ -538,6 +584,11 @@ type Options struct { // ApplicationCores is used by KVM to determine the correct amount of // vCPUs to create. ApplicationCores int + + // UseCPUNums is used by KVM to determine whether to use KVM CPU numbers + // as CPU numbers in the sentry. This is necessary to support features like + // rseq + UseCPUNums bool } // Constructor represents a platform type. diff --git a/pkg/sentry/platform/ptrace/ptrace.go b/pkg/sentry/platform/ptrace/ptrace.go index 619f4c5960..c7dda0b7cd 100644 --- a/pkg/sentry/platform/ptrace/ptrace.go +++ b/pkg/sentry/platform/ptrace/ptrace.go @@ -74,6 +74,7 @@ var ( type context struct { archContext + platform.NoCPUNumbers // signalInfo is the signal info, if and when a signal is received. signalInfo linux.SignalInfo @@ -214,6 +215,7 @@ type PTrace struct { platform.MMapMinAddr platform.NoCPUPreemptionDetection platform.UseHostGlobalMemoryBarrier + platform.NoCPUNumbers } // New returns a new ptrace-based implementation of the platform interface. diff --git a/pkg/sentry/platform/systrap/systrap.go b/pkg/sentry/platform/systrap/systrap.go index 3758f46444..0b5512668e 100644 --- a/pkg/sentry/platform/systrap/systrap.go +++ b/pkg/sentry/platform/systrap/systrap.go @@ -125,6 +125,8 @@ var ( // platformContext is an implementation of the platform context. type platformContext struct { + platform.NoCPUNumbers + // signalInfo is the signal info, if and when a signal is received. signalInfo linux.SignalInfo @@ -239,6 +241,7 @@ func (c *platformContext) PrepareSleep() { type Systrap struct { platform.NoCPUPreemptionDetection platform.UseHostGlobalMemoryBarrier + platform.NoCPUNumbers // memoryFile is used to create a stub sysmsg stack which is shared with // the Sentry. Since memoryFile is platform-private, it is never restored, diff --git a/pkg/sentry/syscalls/linux/sys_membarrier.go b/pkg/sentry/syscalls/linux/sys_membarrier.go index 681a5ced2f..7cbcf48518 100644 --- a/pkg/sentry/syscalls/linux/sys_membarrier.go +++ b/pkg/sentry/syscalls/linux/sys_membarrier.go @@ -25,6 +25,7 @@ import ( func Membarrier(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) { cmd := args[0].Int() flags := args[1].Uint() + cpu := args[2].Int() switch cmd { case linux.MEMBARRIER_CMD_QUERY: @@ -83,8 +84,17 @@ func Membarrier(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uint if !t.MemoryManager().IsMembarrierRSeqEnabled() { return 0, nil, linuxerr.EPERM } - // MEMBARRIER_CMD_FLAG_CPU and cpu_id are ignored since we don't have - // the ability to preempt specific CPUs. + + if flags&linux.MEMBARRIER_CMD_FLAG_CPU != 0 && cpu >= 0 && t.Kernel().Platform.HasCPUNumbers() { + // Per membarrier(2), an out of range cpu# that is >= 0 is a no-op. + if cpu >= t.Kernel().Platform.NumCPUs() { + return 0, nil, nil + } + return 0, nil, t.Kernel().Platform.PreemptCPU(cpu) + } + + // Preempt all CPUs if the platform does not support CPU numbers or cpu # is less than 0 - + // this is the same behavior as Linux. return 0, nil, t.Kernel().Platform.PreemptAllCPUs() case linux.MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ: if flags != 0 { diff --git a/runsc/boot/loader.go b/runsc/boot/loader.go index 08ea84ceb0..cc46b9fe82 100644 --- a/runsc/boot/loader.go +++ b/runsc/boot/loader.go @@ -160,6 +160,9 @@ type containerInfo struct { // rootfsUpperTarFD is the file descriptor to the tar file containing the rootfs // upper layer changes. rootfsUpperTarFD *fd.FD + + // useCPUNums indicates whether to use platform assigned CPU numbers as CPU numbers in the sentry. + useCPUNums bool } type loaderState int @@ -882,6 +885,7 @@ func createPlatform(conf *config.Config, numCPU int, deviceFile *fd.FD) (platfor DeviceFile: deviceFile, DisableSyscallPatching: conf.Platform == "systrap" && conf.SystrapDisableSyscallPatching, ApplicationCores: numCPU, + UseCPUNums: conf.Platform == "kvm" && conf.UseCPUNums, }) } diff --git a/runsc/config/config.go b/runsc/config/config.go index a4e28e1588..559d963824 100644 --- a/runsc/config/config.go +++ b/runsc/config/config.go @@ -410,6 +410,10 @@ type Config struct { // AllowSUID causes ID elevation to be allowed when execving into executables // with the SUID/SGID bits set. AllowSUID bool `flag:"allow-suid"` + + // UseCPUNums causes the sentry to use KVM CPU numbers as CPU numbers in the + // sentry. This is necessary to support features like rseq. + UseCPUNums bool `flag:"use-cpu-nums"` } func (c *Config) validate() error { @@ -461,6 +465,7 @@ func (c *Config) Log() { log.Infof("RootDir: %s", c.RootDir) log.Infof("FileAccess: %v / Directfs: %t / Overlay: %v", c.FileAccess, c.DirectFS, c.GetOverlay2()) log.Infof("Network: %v", c.Network) + log.Infof("UseCPUNums: %t", c.UseCPUNums) if c.Debug || c.Strace { log.Infof("Debug: %t. Strace: %t, max size: %d, syscalls: %s", c.Debug, c.Strace, c.StraceLogSize, c.StraceSyscalls) } diff --git a/runsc/config/flags.go b/runsc/config/flags.go index c80eec4cde..e014c9dcb0 100644 --- a/runsc/config/flags.go +++ b/runsc/config/flags.go @@ -112,6 +112,7 @@ func RegisterFlags(flagSet *flag.FlagSet) { flagSet.Var(RestoreSpecValidationEnforce.Ptr(), "restore-spec-validation", "how to handle spec validation during restore.") flagSet.Bool("systrap-disable-syscall-patching", false, "disables syscall patching when using the Systrap platform. May be necessary to use in case the workload uses the GS register, or uses ptrace within gVisor. Has significant performance implications and is only recommended when the sandbox is known to run otherwise-incompatible workloads. Only relevant for x86.") flagSet.Bool("allow-suid", false, "allows ID elevation when executing binaries with the SUID/SGID bits set. The OCI --no-new-privileges flag continues to prevent ID elevation even when this flag is true.") + flagSet.Bool("use-cpu-nums", true, "on KVM use vCPU numbers as CPU numbers in the sentry. This is necessary to support features like rseq.") // Flags that control sandbox runtime behavior: MM related. flagSet.Bool("app-huge-pages", true, "enable use of huge pages for application memory; requires /sys/kernel/mm/transparent_hugepage/shmem_enabled = advise") diff --git a/test/e2e/integration_test.go b/test/e2e/integration_test.go index 627d611da9..59a2b52a89 100644 --- a/test/e2e/integration_test.go +++ b/test/e2e/integration_test.go @@ -32,6 +32,7 @@ import ( "os/exec" "path/filepath" "regexp" + "slices" "strconv" "strings" "testing" @@ -327,6 +328,14 @@ func TestMemLimit(t *testing.T) { func TestNumCPU(t *testing.T) { ctx := context.Background() d := dockerutil.MakeContainer(ctx, t) + runArgs, err := dockerutil.RuntimeArgs() + if err != nil { + t.Fatalf("dockerutil.RuntimeArgs() failed: %v", err) + } + want := 1 + if slices.Contains(runArgs, "--platform=kvm") && !slices.Contains(runArgs, "--use-cpu-nums=false") { + want = 3 + } defer d.CleanUp(ctx) // Read how many cores are in the container. @@ -343,8 +352,8 @@ func TestNumCPU(t *testing.T) { if err != nil { t.Fatalf("failed to parse %q: %v", out, err) } - if want := 1; got != want { - t.Errorf("MemTotal got: %d, want: %d", got, want) + if got != want { + t.Errorf("NumCPU got: %d, want: %d", got, want) } } diff --git a/test/syscalls/linux/affinity.cc b/test/syscalls/linux/affinity.cc index 67a3320f6b..dc5fbfa51b 100644 --- a/test/syscalls/linux/affinity.cc +++ b/test/syscalls/linux/affinity.cc @@ -92,6 +92,8 @@ TEST_F(AffinityTest, SchedSetAffinityZeroMask) { // N.B. This test case relies on cpuset_size_ larger than the actual number of // of all existing CPUs. Check your machine if the test fails. TEST_F(AffinityTest, SchedSetAffinityNonexistentCPUDropped) { + // sched_setaffinity() is a no-op on platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); cpu_set_t mask = mask_; // Add a nonexistent CPU. // @@ -115,6 +117,8 @@ TEST_F(AffinityTest, SchedSetAffinityNonexistentCPUDropped) { } TEST_F(AffinityTest, SchedSetAffinityOnlyNonexistentCPUFails) { + // sched_setaffinity() is a no-op on platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); // Make an empty cpu set. CPU_ZERO(&mask_); // Add a nonexistent CPU. @@ -145,6 +149,8 @@ TEST_F(AffinityTest, SchedSetAffinityInvalidSize) { } TEST_F(AffinityTest, Sanity) { + // sched_setaffinity() is a no-op on platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); ASSERT_NO_ERRNO(ClearLowestBit()); EXPECT_THAT(sched_setaffinity(/*pid=*/0, sizeof(cpu_set_t), &mask_), SyscallSucceeds()); @@ -157,6 +163,8 @@ TEST_F(AffinityTest, Sanity) { } TEST_F(AffinityTest, NewThread) { + // sched_setaffinity() is a no-op on platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); SKIP_IF(CPU_COUNT(&mask_) < 3); ASSERT_NO_ERRNO(ClearLowestBit()); ASSERT_NO_ERRNO(ClearLowestBit()); @@ -208,6 +216,8 @@ TEST_F(AffinityTest, SmallCpuMask) { } TEST_F(AffinityTest, LargeCpuMask) { + // sched_setaffinity() is a no-op on platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); // Allocate mask bigger than cpu_set_t normally allocates. const size_t cpus = CPU_SETSIZE * 8; const size_t mask_size = CPU_ALLOC_SIZE(cpus); diff --git a/test/syscalls/linux/fork.cc b/test/syscalls/linux/fork.cc index 77baa8d0cc..4eb664ee29 100644 --- a/test/syscalls/linux/fork.cc +++ b/test/syscalls/linux/fork.cc @@ -377,6 +377,8 @@ TEST_F(ForkTest, SigAltStack) { } TEST_F(ForkTest, Affinity) { + // sched_setaffinity is a no-op on platform/KVM. + SKIP_IF(GvisorPlatform() == Platform::kKVM); // Make a non-default cpumask. cpu_set_t parent_mask; EXPECT_THAT(sched_getaffinity(/*pid=*/0, sizeof(cpu_set_t), &parent_mask), diff --git a/test/syscalls/linux/rseq.cc b/test/syscalls/linux/rseq.cc index 9284c8d6f0..6d9f1238f3 100644 --- a/test/syscalls/linux/rseq.cc +++ b/test/syscalls/linux/rseq.cc @@ -158,6 +158,9 @@ TEST(RseqTest, CPU) { // Critical section is eventually aborted. TEST(RseqTest, Abort) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + // TODO(b/456832928): Re-enable after making this test compatible with + // platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestAbort, 0); } @@ -165,6 +168,9 @@ TEST(RseqTest, Abort) { // Abort may be before the critical section. TEST(RseqTest, AbortBefore) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + // TODO(b/456832928): Re-enable after making this test compatible with + // platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestAbortBefore, 0); } @@ -172,6 +178,9 @@ TEST(RseqTest, AbortBefore) { // Signature must match. TEST(RseqTest, AbortSignature) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + // TODO(b/456832928): Re-enable after making this test compatible with + // platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestAbortSignature, SIGSEGV); } @@ -179,6 +188,9 @@ TEST(RseqTest, AbortSignature) { // Abort must not be in the critical section. TEST(RseqTest, AbortPreCommit) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + // TODO(b/456832928): Re-enable after making this test compatible with + // platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestAbortPreCommit, SIGSEGV); } @@ -186,6 +198,9 @@ TEST(RseqTest, AbortPreCommit) { // rseq.rseq_cs is cleared on abort. TEST(RseqTest, AbortClearsCS) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + // TODO(b/456832928): Re-enable after making this test compatible with + // platform/KVM + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestAbortClearsCS, 0); } @@ -193,6 +208,7 @@ TEST(RseqTest, AbortClearsCS) { // rseq.rseq_cs is cleared on abort outside of critical section. TEST(RseqTest, InvalidAbortClearsCS) { SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(RSeqSupported())); + SKIP_IF(GvisorPlatform() == Platform::kKVM); RunChildTest(kRseqTestInvalidAbortClearsCS, 0); }