|
209 | 209 |
|
210 | 210 | # endif /* LINUX_VERSION_CODE */ |
211 | 211 |
|
212 | | -#endif /* CONFIG_X86_64 */ |
| 212 | +#elif defined(CONFIG_ARM64) |
| 213 | + |
| 214 | +/* arm64/include/asm/syscall_wrapper.h versions */ |
| 215 | + |
/*
 * SC_ARM64_REGS_TO_ARGS(x, ...) — expand to the first x syscall arguments
 * pulled out of struct pt_regs (regs->regs[0] .. regs->regs[5]), interleaved
 * with their declared types by __MAP/__SC_ARGS.  Per the section comment
 * above, this is a copy of the arm64 <asm/syscall_wrapper.h> version so the
 * kpatch wrapper below can unpack registers the same way the kernel does.
 */
| 216 | +#define SC_ARM64_REGS_TO_ARGS(x, ...) \
| 217 | + __MAP(x,__SC_ARGS \
| 218 | + ,,regs->regs[0],,regs->regs[1],,regs->regs[2] \
| 219 | + ,,regs->regs[3],,regs->regs[4],,regs->regs[5])
| 220 | + |
/*
 * __KPATCH_SYSCALL_DEFINEx(x, name, ...) — kpatch's arm64 counterpart of the
 * kernel's __SYSCALL_DEFINEx (see arm64 <asm/syscall_wrapper.h>).  Emits:
 *
 *   __arm64_sys##name()    — arch entry point taking struct pt_regs; unpacks
 *                            the x register arguments via SC_ARM64_REGS_TO_ARGS
 *                            and forwards to __se_sys##name().
 *   __se_sys##name()       — takes args widened by __SC_LONG, narrows them back
 *                            with __SC_CAST, calls __kpatch_do_sys##name(),
 *                            then runs the __SC_TEST / __PROTECT checks before
 *                            returning the result.
 *   __kpatch_do_sys##name  — inner function; note the final line is a headless
 *                            definition, so the { ... } body written at the
 *                            macro invocation site attaches to it.
 *
 * NOTE(review): the inner function is named __kpatch_do_sys##name rather than
 * the kernel's __do_sys##name — presumably to keep patched symbols distinct
 * from the originals; confirm against kpatch-build's symbol handling.
 */
| 221 | +#define __KPATCH_SYSCALL_DEFINEx(x, name, ...) \
| 222 | + asmlinkage long __arm64_sys##name(const struct pt_regs *regs); \
| 223 | + ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO); \
| 224 | + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
| 225 | + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
| 226 | + asmlinkage long __arm64_sys##name(const struct pt_regs *regs) \
| 227 | + { \
| 228 | + return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__)); \
| 229 | + } \
| 230 | + static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
| 231 | + { \
| 232 | + long ret = __kpatch_do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
| 233 | + __MAP(x,__SC_TEST,__VA_ARGS__); \
| 234 | + __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
| 235 | + return ret; \
| 236 | + } \
| 237 | + static inline long __kpatch_do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
| 238 | + |
| 239 | +#endif /* which arch */ |
213 | 240 |
|
214 | 241 |
|
215 | 242 | #ifndef __KPATCH_SYSCALL_DEFINEx |
|
0 commit comments