|
210 | 210 |
|
211 | 211 | # endif /* LINUX_VERSION_CODE */ |
212 | 212 |
|
213 | | -#endif /* CONFIG_X86_64 */ |
| 213 | +#elif defined(CONFIG_ARM64) |
| 214 | + |
| 215 | +/* arm64/include/asm/syscall_wrapper.h versions */ |
| 216 | + |
/*
 * SC_ARM64_REGS_TO_ARGS - pull syscall arguments out of pt_regs on arm64.
 *
 * On arm64 the first six syscall arguments arrive in x0-x5, i.e.
 * regs->regs[0] through regs->regs[5].  __MAP pairs each slot with the
 * declared parameter type via __SC_ARGS, producing a typed argument list
 * for up to @x (<= 6) parameters.
 *
 * Mirrors the kernel's arch/arm64/include/asm/syscall_wrapper.h.
 */
#define SC_ARM64_REGS_TO_ARGS(x, ...)					\
	__MAP(x,__SC_ARGS						\
	      ,,regs->regs[0],,regs->regs[1],,regs->regs[2]		\
	      ,,regs->regs[3],,regs->regs[4],,regs->regs[5])
| 221 | + |
/*
 * __KPATCH_SYSCALL_DEFINEx - kpatch's stand-in for the arm64
 * __SYSCALL_DEFINEx() expansion (see
 * arch/arm64/include/asm/syscall_wrapper.h).
 *
 * Generates the same trio of functions the kernel would emit for a
 * syscall with @x arguments:
 *
 *   __arm64_sys##name     - the pt_regs-based entry point; unpacks the
 *                           registers and forwards to the wrapper below
 *   __se_sys##name        - sign-extension wrapper: casts each long back
 *                           to its declared type, runs the __SC_TEST
 *                           type checks, and applies __PROTECT
 *   __kpatch_do_sys##name - the actual patched syscall body; its
 *                           definition follows the macro invocation
 *
 * ALLOW_ERROR_INJECTION keeps parity with the kernel's own wrapper so
 * error injection still targets the entry point.
 */
#define __KPATCH_SYSCALL_DEFINEx(x, name, ...)				\
	asmlinkage long __arm64_sys##name(const struct pt_regs *regs);	\
	ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO);		\
	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));	\
	static inline long						\
	__kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));		\
	asmlinkage long __arm64_sys##name(const struct pt_regs *regs)	\
	{								\
		return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
	}								\
	static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
	{								\
		long ret;						\
		ret = __kpatch_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
		__MAP(x,__SC_TEST,__VA_ARGS__);				\
		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));	\
		return ret;						\
	}								\
	static inline long						\
	__kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
| 239 | + |
| 240 | +#endif /* which arch */ |
214 | 241 |
|
215 | 242 |
|
216 | 243 | #ifndef __KPATCH_SYSCALL_DEFINEx |
|
0 commit comments