@@ -173,6 +173,8 @@ static bool is_gcc6_localentry_bundled_sym(struct kpatch_elf *kelf,
173173 struct symbol * sym )
174174{
175175 switch (kelf -> arch ) {
176+ case AARCH64 :
177+ return false;
176178 case PPC64 :
177179 return ((PPC64_LOCAL_ENTRY_OFFSET (sym -> sym .st_other ) != 0 ) &&
178180 sym -> sym .st_value == 8 );
@@ -228,6 +230,67 @@ static struct rela *toc_rela(const struct rela *rela)
228230 (unsigned int )rela -> addend );
229231}
230232
233+ /*
234+ * Mapping symbols are used to mark and label the transitions between code and
235+ * data in elf files. They begin with a "$" dollar symbol. Don't correlate them
236+ * as they often all have the same name either "$x" to mark the start of code
237+ * or "$d" to mark the start of data.
238+ */
239+ static bool kpatch_is_mapping_symbol (struct kpatch_elf * kelf , struct symbol * sym )
240+ {
241+ switch (kelf -> arch ) {
242+ case AARCH64 :
243+ if (sym -> name && sym -> name [0 ] == '$'
244+ && sym -> type == STT_NOTYPE \
245+ && sym -> bind == STB_LOCAL )
246+ return true;
247+ case X86_64 :
248+ case PPC64 :
249+ case S390 :
250+ return false;
251+ default :
252+ ERROR ("unsupported arch" );
253+ }
254+
255+ return false;
256+ }
257+
258+ static unsigned int function_padding_size (struct kpatch_elf * kelf , struct symbol * sym )
259+ {
260+ unsigned int size = 0 ;
261+
262+ switch (kelf -> arch ) {
263+ case AARCH64 :
264+ {
265+ uint8_t * insn = sym -> sec -> data -> d_buf ;
266+ unsigned int i ;
267+ void * insn_end = sym -> sec -> data -> d_buf + sym -> sym .st_value ;
268+
269+ /*
270+ * If the arm64 kernel is compiled with CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
271+ * then there are two NOPs before the function and a `BTI C` + 2 NOPs at the
272+ * start of the function. Verify the presence of the two NOPs before the
273+ * function entry.
274+ */
275+ for (i = 0 ; (void * )insn < insn_end ; i ++ , insn += 4 )
276+ if (insn [0 ] != 0x1f || insn [1 ] != 0x20 ||
277+ insn [2 ] != 0x03 || insn [3 ] != 0xd5 )
278+ break ;
279+
280+ if (i == 2 )
281+ size = 8 ;
282+ else if (i != 0 )
283+ log_error ("function %s within section %s has invalid padding\n" , sym -> name , sym -> sec -> name );
284+
285+ break ;
286+ }
287+ default :
288+ break ;
289+ }
290+
291+ return size ;
292+ }
293+
231294/*
232295 * When compiling with -ffunction-sections and -fdata-sections, almost every
233296 * symbol gets its own dedicated section. We call such symbols "bundled"
@@ -244,6 +307,8 @@ static void kpatch_bundle_symbols(struct kpatch_elf *kelf)
244307 expected_offset = sym -> pfx -> sym .st_size ;
245308 else if (is_gcc6_localentry_bundled_sym (kelf , sym ))
246309 expected_offset = 8 ;
310+ else if (sym -> type == STT_FUNC )
311+ expected_offset = function_padding_size (kelf , sym );
247312 else
248313 expected_offset = 0 ;
249314
@@ -622,6 +687,8 @@ static void kpatch_compare_correlated_section(struct section *sec)
622687 */
623688 if (!strcmp (sec -> name , ".rela__mcount_loc" ) ||
624689 !strcmp (sec -> name , "__mcount_loc" ) ||
690+ !strcmp (sec -> name , ".sframe" ) ||
691+ !strcmp (sec -> name , ".rela.sframe" ) ||
625692 !strcmp (sec -> name , ".rela__patchable_function_entries" ) ||
626693 !strcmp (sec -> name , "__patchable_function_entries" )) {
627694 sec -> status = SAME ;
@@ -706,6 +773,12 @@ static bool insn_is_load_immediate(struct kpatch_elf *kelf, void *addr)
706773
707774 break ;
708775
776+ case AARCH64 :
777+ /* Verify mov w2 <line number> */
778+ if ((insn [0 ] & 0b11111 ) == 0x2 && insn [3 ] == 0x52 )
779+ return true;
780+ break ;
781+
709782 default :
710783 ERROR ("unsupported arch" );
711784 }
@@ -746,6 +819,7 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf,
746819 void * data1 , * data2 , * insn1 , * insn2 ;
747820 struct rela * r , * rela ;
748821 bool found , found_any = false;
822+ bool warn_printk_only = (kelf -> arch == AARCH64 );
749823
750824 if (sec -> status != CHANGED ||
751825 is_rela_section (sec ) ||
@@ -809,8 +883,15 @@ static bool kpatch_line_macro_change_only(struct kpatch_elf *kelf,
809883 !strncmp (rela -> sym -> name , "__func__." , 9 ))
810884 continue ;
811885
886+ if (!strcmp (rela -> sym -> name , "__warn_printk" )) {
887+ found = true;
888+ break ;
889+ }
890+
891+ if (warn_printk_only )
892+ return false;
893+
812894 if (!strncmp (rela -> sym -> name , "warn_slowpath_" , 14 ) ||
813- !strcmp (rela -> sym -> name , "__warn_printk" ) ||
814895 !strcmp (rela -> sym -> name , "__might_sleep" ) ||
815896 !strcmp (rela -> sym -> name , "___might_sleep" ) ||
816897 !strcmp (rela -> sym -> name , "__might_fault" ) ||
@@ -1075,15 +1156,15 @@ static void kpatch_correlate_sections(struct list_head *seclist_orig,
10751156 }
10761157}
10771158
1078- static void kpatch_correlate_symbols (struct list_head * symlist_orig ,
1079- struct list_head * symlist_patched )
1159+ static void kpatch_correlate_symbols (struct kpatch_elf * kelf_orig ,
1160+ struct kpatch_elf * kelf_patched )
10801161{
10811162 struct symbol * sym_orig , * sym_patched ;
10821163
1083- list_for_each_entry (sym_orig , symlist_orig , list ) {
1164+ list_for_each_entry (sym_orig , & kelf_orig -> symbols , list ) {
10841165 if (sym_orig -> twin )
10851166 continue ;
1086- list_for_each_entry (sym_patched , symlist_patched , list ) {
1167+ list_for_each_entry (sym_patched , & kelf_patched -> symbols , list ) {
10871168 if (kpatch_mangled_strcmp (sym_orig -> name , sym_patched -> name ) ||
10881169 sym_orig -> type != sym_patched -> type || sym_patched -> twin )
10891170 continue ;
@@ -1103,6 +1184,9 @@ static void kpatch_correlate_symbols(struct list_head *symlist_orig,
11031184 !strncmp (sym_orig -> name , ".LC" , 3 ))
11041185 continue ;
11051186
1187+ if (kpatch_is_mapping_symbol (kelf_orig , sym_orig ))
1188+ continue ;
1189+
11061190 /* group section symbols must have correlated sections */
11071191 if (sym_orig -> sec &&
11081192 sym_orig -> sec -> sh .sh_type == SHT_GROUP &&
@@ -1508,7 +1592,7 @@ static void kpatch_correlate_elfs(struct kpatch_elf *kelf_orig,
15081592 struct kpatch_elf * kelf_patched )
15091593{
15101594 kpatch_correlate_sections (& kelf_orig -> sections , & kelf_patched -> sections );
1511- kpatch_correlate_symbols (& kelf_orig -> symbols , & kelf_patched -> symbols );
1595+ kpatch_correlate_symbols (kelf_orig , kelf_patched );
15121596}
15131597
15141598static void kpatch_compare_correlated_elements (struct kpatch_elf * kelf )
@@ -1561,6 +1645,13 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
15611645 if (!is_rela_section (relasec ) || is_debug_section (relasec ))
15621646 continue ;
15631647
1648+ /*
1649+ * We regenerate __patchable_function_entries from scratch so
1650+ * don't bother replacing section symbols in its relasec.
1651+ */
1652+ if (is_patchable_function_entries_section (relasec ))
1653+ continue ;
1654+
15641655 list_for_each_entry (rela , & relasec -> relas , list ) {
15651656
15661657 if (rela -> sym -> type != STT_SECTION || !rela -> sym -> sec )
@@ -1624,7 +1715,8 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
16241715
16251716 if (is_text_section (relasec -> base ) &&
16261717 !is_text_section (sym -> sec ) &&
1627- rela -> type == R_X86_64_32S &&
1718+ (rela -> type == R_X86_64_32S ||
1719+ rela -> type == R_AARCH64_ABS64 ) &&
16281720 rela -> addend == (long )sym -> sec -> sh .sh_size &&
16291721 end == (long )sym -> sec -> sh .sh_size ) {
16301722
@@ -1661,6 +1753,9 @@ static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
16611753 */
16621754 } else if (target_off == start && target_off == end ) {
16631755
1756+ if (kpatch_is_mapping_symbol (kelf , sym ))
1757+ continue ;
1758+
16641759 /*
16651760 * Allow replacement for references to
16661761 * empty symbols.
@@ -1700,8 +1795,8 @@ static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf)
17001795 (sym -> parent && sym -> parent -> status == CHANGED ))
17011796 continue ;
17021797 if (!sym -> twin -> has_func_profiling ) {
1703- log_error ("function %s has no fentry/mcount call , unable to patch\n" ,
1704- sym -> name );
1798+ log_error ("function %s doesn't have patchable function entry , unable to patch\n" ,
1799+ sym -> name );
17051800 errs ++ ;
17061801 }
17071802 }
@@ -1905,6 +2000,7 @@ static void kpatch_include_standard_elements(struct kpatch_elf *kelf)
19052000 !strcmp (sec -> name , ".symtab" ) ||
19062001 !strcmp (sec -> name , ".toc" ) ||
19072002 !strcmp (sec -> name , ".rodata" ) ||
2003+ !strcmp (sec -> name , ".rodata.str" ) ||
19082004 is_string_literal_section (sec )) {
19092005 kpatch_include_section (sec );
19102006 }
@@ -2493,28 +2589,28 @@ static bool static_call_sites_group_filter(struct lookup_table *lookup,
24932589static struct special_section special_sections [] = {
24942590 {
24952591 .name = "__bug_table" ,
2496- .arch = X86_64 | PPC64 | S390 ,
2592+ .arch = AARCH64 | X86_64 | PPC64 | S390 ,
24972593 .group_size = bug_table_group_size ,
24982594 },
24992595 {
25002596 .name = ".fixup" ,
2501- .arch = X86_64 | PPC64 | S390 ,
2597+ .arch = AARCH64 | X86_64 | PPC64 | S390 ,
25022598 .group_size = fixup_group_size ,
25032599 },
25042600 {
25052601 .name = "__ex_table" , /* must come after .fixup */
2506- .arch = X86_64 | PPC64 | S390 ,
2602+ .arch = AARCH64 | X86_64 | PPC64 | S390 ,
25072603 .group_size = ex_table_group_size ,
25082604 },
25092605 {
25102606 .name = "__jump_table" ,
2511- .arch = X86_64 | PPC64 | S390 ,
2607+ .arch = AARCH64 | X86_64 | PPC64 | S390 ,
25122608 .group_size = jump_table_group_size ,
25132609 .group_filter = jump_table_group_filter ,
25142610 },
25152611 {
25162612 .name = ".printk_index" ,
2517- .arch = X86_64 | PPC64 | S390 ,
2613+ .arch = AARCH64 | X86_64 | PPC64 | S390 ,
25182614 .group_size = printk_index_group_size ,
25192615 },
25202616 {
@@ -2529,7 +2625,7 @@ static struct special_section special_sections[] = {
25292625 },
25302626 {
25312627 .name = ".altinstructions" ,
2532- .arch = X86_64 | S390 ,
2628+ .arch = AARCH64 | X86_64 | S390 ,
25332629 .group_size = altinstructions_group_size ,
25342630 },
25352631 {
@@ -3847,6 +3943,38 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf)
38473943 }
38483944
38493945 switch (kelf -> arch ) {
3946+ case AARCH64 : {
3947+ unsigned char * insn = sym -> sec -> data -> d_buf ;
3948+ int padding ;
3949+ int i ;
3950+
3951+ /*
3952+ * Skip the padding NOPs added by CALL_OPS.
3953+ */
3954+ padding = function_padding_size (kelf , sym );
3955+ insn += padding ;
3956+
3957+ /*
3958+ * If BTI (Branch Target Identification) is enabled then there
3959+ * might be an additional 'BTI C' instruction before the two
3960+ * patchable function entry 'NOP's.
3961+ * i.e. 0xd503245f (little endian)
3962+ */
3963+ if (insn [0 ] == 0x5f ) {
3964+ if (insn [1 ] != 0x24 || insn [2 ] != 0x03 || insn [3 ] != 0xd5 )
3965+ ERROR ("%s: unexpected instruction in patch section of function\n" , sym -> name );
3966+ if (!padding )
3967+ insn_offset += 4 ;
3968+ insn += 4 ;
3969+ }
3970+ for (i = 0 ; i < 8 ; i += 4 ) {
3971+ /* We expect a NOP i.e. 0xd503201f (little endian) */
3972+ if (insn [i ] != 0x1f || insn [i + 1 ] != 0x20 ||
3973+ insn [i + 2 ] != 0x03 || insn [i + 3 ] != 0xd5 )
3974+ ERROR ("%s: unexpected instruction in patch section of function\n" , sym -> name );
3975+ }
3976+ break ;
3977+ }
38503978 case PPC64 : {
38513979 unsigned char * insn ;
38523980
@@ -4179,6 +4307,10 @@ static void kpatch_find_func_profiling_calls(struct kpatch_elf *kelf)
41794307 insn [4 ] == 0x00 && insn [5 ] == 0x00 )
41804308 sym -> has_func_profiling = 1 ;
41814309 break ;
4310+ case AARCH64 :
4311+ if (kpatch_symbol_has_pfe_entry (kelf , sym ))
4312+ sym -> has_func_profiling = 1 ;
4313+ break ;
41824314 default :
41834315 ERROR ("unsupported arch" );
41844316 }