/* Machine-dependent ELF dynamic relocation inline functions.
   PowerPC64 version.
   Copyright 1995-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If
   not, see <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "powerpc64"

#include <assert.h>
#include <sys/param.h>
#include <dl-tls.h>
#include <sysdep.h>
#include <hwcapinfo.h>
#include <cpu-features.c>
#include <dl-static-tls.h>
#include <dl-funcdesc.h>
#include <dl-machine-rel.h>

/* Translate a processor specific dynamic tag to the index
   in l_info array.  */
#define DT_PPC64(x) (DT_PPC64_##x - DT_LOPROC + DT_NUM)
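/* For example, map->l_info[DT_PPC64 (GLINK)] is the slot where
   elf_get_dynamic_info stores the DT_PPC64_GLINK entry; see its use in
   elf_machine_runtime_setup below.  */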

#define ELF_MULT_MACHINES_SUPPORTED

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  /* Verify that the binary matches our ABI version.  */
  if ((ehdr->e_flags & EF_PPC64_ABI) != 0)
    {
#if _CALL_ELF != 2
      if ((ehdr->e_flags & EF_PPC64_ABI) != 1)
        return 0;
#else
      if ((ehdr->e_flags & EF_PPC64_ABI) != 2)
        return 0;
#endif
    }

  return ehdr->e_machine == EM_PPC64;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_machine (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_PPC;
}

/* Return nonzero iff ELF header is compatible with the running host,
   but not this loader.  */
static inline int
elf_host_tolerates_class (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_ident[EI_CLASS] == ELFCLASS32;
}


/* Return the run-time load address of the shared object, assuming it
   was originally linked at zero.  */
static inline Elf64_Addr
elf_machine_load_address (void) __attribute__ ((const));

static inline Elf64_Addr
elf_machine_load_address (void)
{
  Elf64_Addr ret;

  /* The first entry in .got (and thus the first entry in .toc) is the
     link-time TOC_base, ie. r2.  So the difference between that and
     the current r2 set by the kernel is how far the shared lib has
     moved.  */
  asm (	"	ld	%0,-32768(2)\n"
	"	subf	%0,%0,2\n"
	: "=r"	(ret));
  return ret;
}

/* Return the link-time address of _DYNAMIC.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  Elf64_Addr runtime_dynamic;
  /* It's easier to get the run-time address.  */
  asm (	"	addis	%0,2,_DYNAMIC@toc@ha\n"
	"	addi	%0,%0,_DYNAMIC@toc@l\n"
	: "=b"	(runtime_dynamic));
  /* Then subtract off the load address offset.  */
  return runtime_dynamic - elf_machine_load_address ();
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela


#ifdef HAVE_INLINED_SYSCALLS
/* We do not need _dl_starting_up.  */
# define DL_STARTING_UP_DEF
#else
# define DL_STARTING_UP_DEF \
".LC__dl_starting_up:\n"  \
"	.tc __GI__dl_starting_up[TC],__GI__dl_starting_up\n"
#endif


/* Initial entry point code for the dynamic linker.  The C function
   `_dl_start' is the real entry point; its return value is the user
   program's entry point.  */
#define RTLD_START \
  asm (".pushsection \".text\"\n"					\
"	.align	2\n"							\
"	" ENTRY_2(_start) "\n"						\
BODY_PREFIX "_start:\n"							\
"	" LOCALENTRY(_start) "\n"					\
/* We start with the following on the stack, from top:			\
   argc (4 bytes);							\
   arguments for program (terminated by NULL);				\
   environment variables (terminated by NULL);				\
   arguments for the program loader.  */				\
"	mr	3,1\n"							\
"	li	4,0\n"							\
"	stdu	4,-128(1)\n"						\
/* Call _dl_start with one parameter pointing at argc.  */		\
"	bl	" DOT_PREFIX "_dl_start\n"				\
"	nop\n"								\
/* Transfer control to _dl_start_user!  */				\
"	b	" DOT_PREFIX "_dl_start_user\n"				\
".LT__start:\n"								\
"	.long 0\n"							\
"	.byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n"		\
"	.long .LT__start-" BODY_PREFIX "_start\n"			\
"	.short .LT__start_name_end-.LT__start_name_start\n"		\
".LT__start_name_start:\n"						\
"	.ascii \"_start\"\n"						\
".LT__start_name_end:\n"						\
"	.align 2\n"							\
"	" END_2(_start) "\n"						\
"	.pushsection	\".toc\",\"aw\"\n"				\
DL_STARTING_UP_DEF							\
".LC__rtld_local:\n"							\
"	.tc _rtld_local[TC],_rtld_local\n"				\
".LC__dl_argc:\n"							\
"	.tc _dl_argc[TC],_dl_argc\n"					\
".LC__dl_argv:\n"							\
"	.tc __GI__dl_argv[TC],__GI__dl_argv\n"				\
".LC__dl_fini:\n"							\
"	.tc _dl_fini[TC],_dl_fini\n"					\
"	.popsection\n"							\
"	" ENTRY_2(_dl_start_user) "\n"					\
/* Now, we do our main work of calling initialisation procedures.	\
   The ELF ABI doesn't say anything about parameters for these,		\
   so we just pass argc, argv, and the environment.			\
   Changing these is strongly discouraged (not least because argc is	\
   passed by value!).  */						\
BODY_PREFIX "_dl_start_user:\n"						\
"	" LOCALENTRY(_dl_start_user) "\n"				\
/* the address of _start in r30.  */					\
"	mr	30,3\n"							\
/* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */		\
"	ld	28,.LC__rtld_local@toc(2)\n"				\
"	ld	29,.LC__dl_argc@toc(2)\n"				\
"	ld	27,.LC__dl_argv@toc(2)\n"				\
/* _dl_init (_dl_loaded, _dl_argc, _dl_argv, _dl_argv+_dl_argc+1).  */	\
"	ld	3,0(28)\n"						\
"	lwa	4,0(29)\n"						\
"	ld	5,0(27)\n"						\
"	sldi	6,4,3\n"						\
"	add	6,5,6\n"						\
"	addi	6,6,8\n"						\
"	bl	" DOT_PREFIX "_dl_init\n"				\
"	nop\n"								\
/* Now, to conform to the ELF ABI, we have to:				\
   Pass argc (actually _dl_argc) in r3;  */				\
"	lwa	3,0(29)\n"						\
/* Pass argv (actually _dl_argv) in r4;  */				\
"	ld	4,0(27)\n"						\
/* Pass argv+argc+1 in r5;  */						\
"	sldi	5,3,3\n"						\
"	add	6,4,5\n"						\
"	addi	5,6,8\n"						\
/* Pass the auxiliary vector in r6.  This is passed to us just after	\
   _envp.  */								\
"2:	ldu	0,8(6)\n"						\
"	cmpdi	0,0\n"							\
"	bne	2b\n"							\
"	addi	6,6,8\n"						\
/* Pass a termination function pointer (in this case _dl_fini) in	\
   r7.  */								\
"	ld	7,.LC__dl_fini@toc(2)\n"				\
/* Pass the stack pointer in r1 (so far so good), pointing to a NULL	\
   value.  This lets our startup code distinguish between a program	\
   linked statically, which linux will call with argc on top of the	\
   stack which will hopefully never be zero, and a dynamically linked	\
   program which will always have a NULL on the top of the stack.	\
   Take the opportunity to clear LR, so anyone who accidentally		\
   returns from _start gets SEGV.  Also clear the next few words of	\
   the stack.  */							\
"	li	31,0\n"							\
"	std	31,0(1)\n"						\
"	mtlr	31\n"							\
"	std	31,8(1)\n"						\
"	std	31,16(1)\n"						\
"	std	31,24(1)\n"						\
/* Now, call the start function descriptor at r30...  */		\
"	.globl	._dl_main_dispatch\n"					\
"._dl_main_dispatch:\n"							\
"	" PPC64_LOAD_FUNCPTR(30) "\n"					\
"	bctr\n"								\
".LT__dl_start_user:\n"							\
"	.long 0\n"							\
"	.byte 0x00,0x0c,0x24,0x40,0x00,0x00,0x00,0x00\n"		\
"	.long .LT__dl_start_user-" BODY_PREFIX "_dl_start_user\n"	\
"	.short .LT__dl_start_user_name_end-.LT__dl_start_user_name_start\n" \
".LT__dl_start_user_name_start:\n"					\
"	.ascii \"_dl_start_user\"\n"					\
".LT__dl_start_user_name_end:\n"					\
"	.align 2\n"							\
"	" END_2(_dl_start_user) "\n"					\
"	.popsection");

/* ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to
   one of the main executable's symbols, as for a COPY reloc.

   To make function pointer comparisons work on most targets, the
   relevant ABI states that the address of a non-local function in a
   dynamically linked executable is the address of the PLT entry for
   that function.  This is quite reasonable since using the real
   function address in a non-PIC executable would typically require
   dynamic relocations in .text, something to be avoided.  For such
   functions, the linker emits a SHN_UNDEF symbol in the executable
   with value equal to the PLT entry address.  Normally, SHN_UNDEF
   symbols have a value of zero, so this is a clue to ld.so that it
   should treat these symbols specially.  For relocations not in
   ELF_RTYPE_CLASS_PLT (eg. those on function pointers), ld.so should
   use the value of the executable SHN_UNDEF symbol, ie. the PLT entry
   address.  For relocations in ELF_RTYPE_CLASS_PLT (eg. the relocs in
   the PLT itself), ld.so should use the value of the corresponding
   defined symbol in the object that defines the function, ie. the
   real function address.  This complicates ld.so in that there are
   now two possible values for a given symbol, and it gets even worse
   because protected symbols need yet another set of rules.

   On PowerPC64 we don't need any of this.  The linker won't emit
   SHN_UNDEF symbols with non-zero values.  ld.so can make all
   relocations behave "normally", ie. always use the real address
   like PLT relocations.  So always set ELF_RTYPE_CLASS_PLT.  */
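
/* Illustration: under the SHN_UNDEF scheme described above, "&foo"
   evaluated in the executable and in a shared library both resolve to
   the executable's PLT entry for foo, which keeps pointer equality
   intact; on PowerPC64 both simply resolve to the real address of foo.  */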

#if _CALL_ELF != 2
#define elf_machine_type_class(type) \
  (ELF_RTYPE_CLASS_PLT | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#else
/* And now that you have read that large comment, you can disregard it
   all for ELFv2.  ELFv2 does need the special SHN_UNDEF treatment.  */
#define IS_PPC64_TLS_RELOC(R)						\
  (((R) >= R_PPC64_TLS && (R) <= R_PPC64_DTPREL16_HIGHESTA)		\
   || ((R) >= R_PPC64_TPREL16_HIGH && (R) <= R_PPC64_DTPREL16_HIGHA))

#define elf_machine_type_class(type) \
  ((((type) == R_PPC64_JMP_SLOT					\
     || (type) == R_PPC64_ADDR24				\
     || IS_PPC64_TLS_RELOC (type)) * ELF_RTYPE_CLASS_PLT)	\
   | (((type) == R_PPC64_COPY) * ELF_RTYPE_CLASS_COPY))
#endif

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT	R_PPC64_JMP_SLOT

/* We define an initialization function to initialize HWCAP/HWCAP2 and
   platform data so it can be copied into the TCB later.  This is called
   very early in _dl_sysdep_start for dynamically linked binaries.  */
#if defined(SHARED) && IS_IN (rtld)
# define DL_PLATFORM_INIT dl_platform_init ()

static inline void __attribute__ ((unused))
dl_platform_init (void)
{
  __tcb_parse_hwcap_and_convert_at_platform ();
  init_cpu_features (&GLRO(dl_powerpc_cpu_features));
}
#endif

/* Stuff for the PLT.  */
#if _CALL_ELF != 2
#define PLT_INITIAL_ENTRY_WORDS 3
#define PLT_ENTRY_WORDS 3
#define GLINK_INITIAL_ENTRY_WORDS 8
/* The first 32k entries of glink can set an index and branch using two
   instructions; past that point, glink uses three instructions.  */
#define GLINK_ENTRY_WORDS(I) (((I) < 0x8000)? 2 : 3)
#else
#define PLT_INITIAL_ENTRY_WORDS 2
#define PLT_ENTRY_WORDS 1
#define GLINK_INITIAL_ENTRY_WORDS 8
#define GLINK_ENTRY_WORDS(I) 1
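/* With ELFv2 there are no function descriptors: the reserved PLT area is
   two doublewords (resolver address and link map, filled in by
   elf_machine_runtime_setup below), each PLT entry is one doubleword,
   and each lazy-resolution stub in glink is one word.  */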
#endif

#define PPC_DCBST(where) asm volatile ("dcbst 0,%0" : : "r"(where) : "memory")
#define PPC_DCBT(where) asm volatile ("dcbt 0,%0" : : "r"(where) : "memory")
#define PPC_DCBF(where) asm volatile ("dcbf 0,%0" : : "r"(where) : "memory")
#define PPC_SYNC asm volatile ("sync" : : : "memory")
#define PPC_ISYNC asm volatile ("sync; isync" : : : "memory")
#define PPC_ICBI(where) asm volatile ("icbi 0,%0" : : "r"(where) : "memory")
#define PPC_DIE asm volatile ("tweq 0,0")
/* Use this when you've modified some code, but it won't be in the
   instruction fetch queue (or when it doesn't matter if it is).  */
#define MODIFIED_CODE_NOQUEUE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); } while (0)
/* Use this when it might be in the instruction queue.  */
#define MODIFIED_CODE(where) \
     do { PPC_DCBST(where); PPC_SYNC; PPC_ICBI(where); PPC_ISYNC; } while (0)

/* Set up the loaded object described by MAP so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */
static inline int __attribute__ ((always_inline))
elf_machine_runtime_setup (struct link_map *map, struct r_scope_elem *scope[],
			   int lazy, int profile)
{
  if (map->l_info[DT_JMPREL])
    {
      Elf64_Word i;
      Elf64_Word *glink = NULL;
      Elf64_Xword *plt = (Elf64_Xword *) D_PTR (map, l_info[DT_PLTGOT]);
      Elf64_Word num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
				    / sizeof (Elf64_Rela));
      Elf64_Addr l_addr = map->l_addr;
      Elf64_Dyn **info = map->l_info;
      char *p;

      extern void _dl_runtime_resolve (void);
      extern void _dl_profile_resolve (void);

      /* Relocate the DT_PPC64_GLINK entry in the _DYNAMIC section.
	 elf_get_dynamic_info takes care of the standard entries but
	 doesn't know exactly what to do with processor specific
	 entries.  */
      if (info[DT_PPC64(GLINK)] != NULL)
	info[DT_PPC64(GLINK)]->d_un.d_ptr += l_addr;

      if (lazy)
	{
	  Elf64_Word glink_offset;
	  Elf64_Word offset;
	  Elf64_Addr dlrr;

	  dlrr = (Elf64_Addr) (profile ? _dl_profile_resolve
				       : _dl_runtime_resolve);
	  if (profile && GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), map))
	    /* This is the object we are looking for.  Say that we really
	       want profiling and the timers are started.  */
	    GL(dl_profile_map) = map;

#if _CALL_ELF != 2
	  /* We need to stuff the address/TOC of _dl_runtime_resolve
	     into doublewords 0 and 1 of plt_reserve.  Then we need to
	     stuff the map address into doubleword 2 of plt_reserve.
	     This allows the GLINK0 code to transfer control to the
	     correct trampoline which will transfer control to fixup
	     in dl-machine.c.  */
	  {
	    /* The plt_reserve area is the 1st 3 doublewords of the PLT.  */
	    Elf64_FuncDesc *plt_reserve = (Elf64_FuncDesc *) plt;
	    Elf64_FuncDesc *resolve_fd = (Elf64_FuncDesc *) dlrr;
	    plt_reserve->fd_func = resolve_fd->fd_func;
	    plt_reserve->fd_toc  = resolve_fd->fd_toc;
	    plt_reserve->fd_aux  = (Elf64_Addr) map;
#ifdef RTLD_BOOTSTRAP
	    /* When we're bootstrapping, the opd entry will not have
	       been relocated yet.  */
	    plt_reserve->fd_func += l_addr;
	    plt_reserve->fd_toc  += l_addr;
#endif
	  }
#else
	  /* When we don't have function descriptors, the first doubleword
	     of the PLT holds the address of _dl_runtime_resolve, and the
	     second doubleword holds the map address.  */
	  plt[0] = dlrr;
	  plt[1] = (Elf64_Addr) map;
#endif

	  /* Set up the lazy PLT entries.  */
	  glink = (Elf64_Word *) D_PTR (map, l_info[DT_PPC64(GLINK)]);
	  offset = PLT_INITIAL_ENTRY_WORDS;
	  glink_offset = GLINK_INITIAL_ENTRY_WORDS;
	  for (i = 0; i < num_plt_entries; i++)
	    {
	      plt[offset] = (Elf64_Xword) &glink[glink_offset];
	      offset += PLT_ENTRY_WORDS;
	      glink_offset += GLINK_ENTRY_WORDS (i);
	    }

	  /* Now, we've modified data.  We need to write the changes from
	     the data cache to a second-level unified cache, then make
	     sure that stale data in the instruction cache is removed.
	     (In a multiprocessor system, the effect is more complex.)
	     Most of the PLT shouldn't be in the instruction cache, but
	     there may be a little overlap at the start and the end.

	     Assumes that dcbst and icbi apply to lines of 16 bytes or
	     more.  Current known line sizes are 16, 32, and 128 bytes.  */

	  for (p = (char *) plt; p < (char *) &plt[offset]; p += 16)
	    PPC_DCBST (p);
	  PPC_SYNC;
	}
    }
  return lazy;
}

#if _CALL_ELF == 2
extern void attribute_hidden _dl_error_localentry (struct link_map *map,
						   const Elf64_Sym *refsym);

/* If the PLT entry resolves to a function in the same object, return
   the target function's local entry point offset if usable.  */
static inline Elf64_Addr __attribute__ ((always_inline))
ppc64_local_entry_offset (struct link_map *map, lookup_t sym_map,
			  const ElfW(Sym) *refsym, const ElfW(Sym) *sym)
{
  /* If the target function is in a different object, we cannot
     use the local entry point.  */
  if (sym_map != map)
    {
      /* Check that optimized plt call stubs for localentry:0 functions
	 are not being satisfied by a non-zero localentry symbol.  */
      if (map->l_info[DT_PPC64(OPT)]
	  && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_LOCALENTRY) != 0
	  && refsym->st_info == ELFW(ST_INFO) (STB_GLOBAL, STT_FUNC)
	  && (STO_PPC64_LOCAL_MASK & refsym->st_other) == 0
	  && (STO_PPC64_LOCAL_MASK & sym->st_other) != 0)
	_dl_error_localentry (map, refsym);

      return 0;
    }

  /* If the linker inserted multiple TOCs, we cannot use the
     local entry point.  */
  if (map->l_info[DT_PPC64(OPT)]
      && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_MULTI_TOC))
    return 0;

  /* If the target function is an ifunc then the local entry offset is
     for the resolver, not the final destination.  */
  if (__builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0))
    return 0;

  /* Otherwise, we can use the local entry point.  Retrieve its offset
     from the symbol's ELF st_other field.  */
  return PPC64_LOCAL_ENTRY_OFFSET (sym->st_other);
}
#endif

/* Change the PLT entry whose reloc is 'reloc' to call the actual
   routine.  */
static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t sym_map,
		       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
		       const Elf64_Rela *reloc,
		       Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_Addr offset = 0;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  PPC_DCBT (&plt->fd_aux);
  PPC_DCBT (&plt->fd_func);

  /* If sym_map is NULL, it's a weak undefined sym; set the PLT entry to
     zero.  finaladdr should be zero already in this case, but guard
     against invalid plt relocations with non-zero addends.  */
  if (sym_map == NULL)
    finaladdr = 0;

  /* Don't die here if finaladdr is zero, die if this plt entry is
     actually called.  Makes a difference when LD_BIND_NOW=1.
     finaladdr may be zero for a weak undefined symbol, or when an
     ifunc resolver returns zero.  */
  if (finaladdr == 0)
    rel = &zero_fd;
  else
    {
      PPC_DCBT (&rel->fd_aux);
      PPC_DCBT (&rel->fd_func);
    }

  /* If the opd entry is not yet relocated (because it's from a shared
     object that hasn't been processed yet), then manually reloc it.  */
  if (finaladdr != 0 && map != sym_map && !sym_map->l_relocated
#if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
#endif
      )
    offset = sym_map->l_addr;

  /* For PPC64, fixup_plt copies the function descriptor from opd
     over the corresponding PLT entry.
     Initially, PLT Entry[i] is set up for lazy linking, or is zero.
     For lazy linking, the fd_toc and fd_aux entries are irrelevant,
     so for thread safety we write them before changing fd_func.  */

  plt->fd_aux = rel->fd_aux + offset;
  plt->fd_toc = rel->fd_toc + offset;
  PPC_DCBF (&plt->fd_toc);
  PPC_ISYNC;

  plt->fd_func = rel->fd_func + offset;
  PPC_DCBST (&plt->fd_func);
  PPC_ISYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif

  return finaladdr;
}

static inline void __attribute__ ((always_inline))
elf_machine_plt_conflict (struct link_map *map, lookup_t sym_map,
			  const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
			  const Elf64_Rela *reloc,
			  Elf64_Addr *reloc_addr, Elf64_Addr finaladdr)
{
#if _CALL_ELF != 2
  Elf64_FuncDesc *plt = (Elf64_FuncDesc *) reloc_addr;
  Elf64_FuncDesc *rel = (Elf64_FuncDesc *) finaladdr;
  Elf64_FuncDesc zero_fd = {0, 0, 0};

  if (sym_map == NULL)
    finaladdr = 0;

  if (finaladdr == 0)
    rel = &zero_fd;

  plt->fd_func = rel->fd_func;
  plt->fd_aux = rel->fd_aux;
  plt->fd_toc = rel->fd_toc;
  PPC_DCBST (&plt->fd_func);
  PPC_DCBST (&plt->fd_aux);
  PPC_DCBST (&plt->fd_toc);
  PPC_SYNC;
#else
  finaladdr += ppc64_local_entry_offset (map, sym_map, refsym, sym);
  *reloc_addr = finaladdr;
#endif
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  return value + reloc->r_addend;
}


/* Names of the architecture-specific auditing callback functions.  */
#if _CALL_ELF != 2
#define ARCH_LA_PLTENTER ppc64_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64_gnu_pltexit
#else
#define ARCH_LA_PLTENTER ppc64v2_gnu_pltenter
#define ARCH_LA_PLTEXIT ppc64v2_gnu_pltexit
#endif

#endif /* dl_machine_h */

#ifdef RESOLVE_MAP

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
#define PPC_HIGHERA(v) PPC_HIGHER ((v) + 0x8000)
#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
#define PPC_HIGHESTA(v) PPC_HIGHEST ((v) + 0x8000)
#define BIT_INSERT(var, val, mask) \
  ((var) = ((var) & ~(Elf64_Addr) (mask)) | ((val) & (mask)))
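
/* Worked example: for a value of 0x12349000, PPC_LO is 0x9000, which
   sign-extends to -0x7000 in a D-form instruction, so PPC_HA returns
   0x1235 (PPC_HI plus 1) to compensate; addis/addi with PPC_HA/PPC_LO
   then reconstructs 0x12349000.  BIT_INSERT rewrites only the bits of
   VAR selected by MASK (e.g. an instruction's displacement field) and
   leaves the remaining bits untouched.  */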

#define dont_expect(X) __builtin_expect ((X), 0)

extern void attribute_hidden _dl_reloc_overflow (struct link_map *map,
						 const char *name,
						 Elf64_Addr *const reloc_addr,
						 const Elf64_Sym *refsym);

static inline void __attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

/* This computes the value used by TPREL* relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline, const))
elf_machine_tprel (struct link_map *map,
		   struct link_map *sym_map,
		   const Elf64_Sym *sym,
		   const Elf64_Rela *reloc)
{
#ifndef RTLD_BOOTSTRAP
  if (sym_map)
    {
      CHECK_STATIC_TLS (map, sym_map);
#endif
      return TLS_TPREL_VALUE (sym_map, sym, reloc);
#ifndef RTLD_BOOTSTRAP
    }
#endif
  return 0;
}

/* Call function at address VALUE (an OPD entry) to resolve ifunc relocs.  */
static inline Elf64_Addr __attribute__ ((always_inline))
resolve_ifunc (Elf64_Addr value,
	       const struct link_map *map, const struct link_map *sym_map)
{
#if _CALL_ELF != 2
#ifndef RESOLVE_CONFLICT_FIND_MAP
  /* The function we are calling may not yet have its opd entry relocated.  */
  Elf64_FuncDesc opd;
  if (map != sym_map
# if !defined RTLD_BOOTSTRAP && defined SHARED
      /* Bootstrap map doesn't have l_relocated set for it.  */
      && sym_map != &GL(dl_rtld_map)
# endif
      && !sym_map->l_relocated)
    {
      Elf64_FuncDesc *func = (Elf64_FuncDesc *) value;
      opd.fd_func = func->fd_func + sym_map->l_addr;
      opd.fd_toc = func->fd_toc + sym_map->l_addr;
      opd.fd_aux = func->fd_aux;
      /* GCC 4.9+ eliminates the branch as dead code; force the opd set
	 dependency.  */
      asm ("" : "=r" (value) : "0" (&opd), "X" (opd));
    }
#endif
#endif
  return ((Elf64_Addr (*) (unsigned long int)) value) (GLRO(dl_hwcap));
}

/* Perform the relocation specified by RELOC and SYM (which is fully
   resolved).  MAP is the object containing the reloc.  */
static inline void __attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
		  const Elf64_Rela *reloc,
		  const Elf64_Sym *sym,
		  const struct r_found_version *version,
		  void *const reloc_addr_arg,
		  int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  const int r_type = ELF64_R_TYPE (reloc->r_info);
  const Elf64_Sym *const refsym = sym;
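  /* Scratch type used by the R_PPC64_UADDR64/32/16 cases below to store
     to possibly unaligned relocation targets.  */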
  union unaligned
    {
      uint16_t u2;
      uint32_t u4;
      uint64_t u8;
    } __attribute__ ((__packed__));

  if (r_type == R_PPC64_RELATIVE)
    {
      *reloc_addr = map->l_addr + reloc->r_addend;
      return;
    }

  if (__glibc_unlikely (r_type == R_PPC64_NONE))
    return;

  /* We need SYM_MAP even in the absence of TLS, for elf_machine_fixup_plt
     and STT_GNU_IFUNC.  */
  struct link_map *sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
  Elf64_Addr value = SYMBOL_ADDRESS (sym_map, sym, true) + reloc->r_addend;

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = resolve_ifunc (value, map, sym_map);

  /* For relocs that don't edit code, return.
     For relocs that might edit instructions, break from the switch.  */
  switch (r_type)
    {
    case R_PPC64_ADDR64:
    case R_PPC64_GLOB_DAT:
      *reloc_addr = value;
      return;

    case R_PPC64_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
	value = resolve_ifunc (value, map, sym_map);
      *reloc_addr = value;
      return;

    case R_PPC64_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
	value = resolve_ifunc (value, map, sym_map);
      /* Fall through.  */
    case R_PPC64_JMP_SLOT:
#ifdef RESOLVE_CONFLICT_FIND_MAP
      elf_machine_plt_conflict (map, sym_map, refsym, sym,
				reloc, reloc_addr, value);
#else
      elf_machine_fixup_plt (map, sym_map, refsym, sym,
			     reloc, reloc_addr, value);
#endif
      return;

    case R_PPC64_DTPMOD64:
      if (map->l_info[DT_PPC64(OPT)]
	  && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
	{
#ifdef RTLD_BOOTSTRAP
	  reloc_addr[0] = 0;
	  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
			   + TLS_DTV_OFFSET);
	  return;
#else
	  if (sym_map != NULL)
	    {
# ifndef SHARED
	      CHECK_STATIC_TLS (map, sym_map);
# else
	      if (TRY_STATIC_TLS (map, sym_map))
# endif
		{
		  reloc_addr[0] = 0;
		  /* Set up for local dynamic.  */
		  reloc_addr[1] = (sym_map->l_tls_offset - TLS_TP_OFFSET
				   + TLS_DTV_OFFSET);
		  return;
		}
	    }
#endif
	}
#ifdef RTLD_BOOTSTRAP
      /* During startup the dynamic linker is always index 1.  */
      *reloc_addr = 1;
#else
      /* Get the information from the link map returned by the
	 resolve function.  */
      if (sym_map != NULL)
	*reloc_addr = sym_map->l_tls_modid;
#endif
      return;

    case R_PPC64_DTPREL64:
      if (map->l_info[DT_PPC64(OPT)]
	  && (map->l_info[DT_PPC64(OPT)]->d_un.d_val & PPC64_OPT_TLS))
	{
#ifdef RTLD_BOOTSTRAP
	  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
	  return;
#else
	  if (sym_map != NULL)
	    {
	      /* This reloc is always preceded by R_PPC64_DTPMOD64.  */
# ifndef SHARED
	      assert (HAVE_STATIC_TLS (map, sym_map));
# else
	      if (HAVE_STATIC_TLS (map, sym_map))
# endif
		{
		  *reloc_addr = TLS_TPREL_VALUE (sym_map, sym, reloc);
		  return;
		}
	    }
#endif
	}
      /* During relocation all TLS symbols are defined and used.
	 Therefore the offset is already correct.  */
#ifndef RTLD_BOOTSTRAP
      if (sym_map != NULL)
	*reloc_addr = TLS_DTPREL_VALUE (sym, reloc);
#endif
      return;

    case R_PPC64_TPREL64:
      *reloc_addr = elf_machine_tprel (map, sym_map, sym, reloc);
      return;

    case R_PPC64_TPREL16_LO_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value & 3) != 0))
	_dl_reloc_overflow (map, "R_PPC64_TPREL16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16_DS:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
	_dl_reloc_overflow (map, "R_PPC64_TPREL16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_TPREL16:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect ((value + 0x8000) >= 0x10000))
	_dl_reloc_overflow (map, "R_PPC64_TPREL16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_LO:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_TPREL16_HI:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
	_dl_reloc_overflow (map, "R_PPC64_TPREL16_HI", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HIGH:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_TPREL16_HA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
	_dl_reloc_overflow (map, "R_PPC64_TPREL16_HA", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_TPREL16_HIGHER:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_TPREL16_HIGHEST:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_TPREL16_HIGHERA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_TPREL16_HIGHESTA:
      value = elf_machine_tprel (map, sym_map, sym, reloc);
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

#ifndef RTLD_BOOTSTRAP /* None of the following appear in ld.so.  */
    case R_PPC64_ADDR16_LO_DS:
      if (dont_expect ((value & 3) != 0))
	_dl_reloc_overflow (map, "R_PPC64_ADDR16_LO_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_LO:
      *(Elf64_Half *) reloc_addr = PPC_LO (value);
      break;

    case R_PPC64_ADDR16_HI:
      if (dont_expect (value + 0x80000000 >= 0x100000000LL))
	_dl_reloc_overflow (map, "R_PPC64_ADDR16_HI", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGH:
      *(Elf64_Half *) reloc_addr = PPC_HI (value);
      break;

    case R_PPC64_ADDR16_HA:
      if (dont_expect (value + 0x80008000 >= 0x100000000LL))
	_dl_reloc_overflow (map, "R_PPC64_ADDR16_HA", reloc_addr, refsym);
      /* Fall through.  */
    case R_PPC64_ADDR16_HIGHA:
      *(Elf64_Half *) reloc_addr = PPC_HA (value);
      break;

    case R_PPC64_ADDR30:
      {
	Elf64_Addr delta = value - (Elf64_Xword) reloc_addr;
	if (dont_expect ((delta + 0x80000000) >= 0x100000000LL
			 || (delta & 3) != 0))
	  _dl_reloc_overflow (map, "R_PPC64_ADDR30", reloc_addr, refsym);
	BIT_INSERT (*(Elf64_Word *) reloc_addr, delta, 0xfffffffc);
      }
      break;

    case R_PPC64_COPY:
      if (dont_expect (sym == NULL))
	/* This can happen in trace mode when an object could not be found. */
	return;
      if (dont_expect (sym->st_size > refsym->st_size
		       || (GLRO(dl_verbose)
			   && sym->st_size < refsym->st_size)))
	{
	  const char *strtab;

	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	  _dl_error_printf ("%s: Symbol `%s' has different size" \
			    " in shared object," \
			    " consider re-linking\n",
			    RTLD_PROGNAME, strtab + refsym->st_name);
	}
      memcpy (reloc_addr_arg, (char *) value,
	      MIN (sym->st_size, refsym->st_size));
      return;

    case R_PPC64_UADDR64:
      ((union unaligned *) reloc_addr)->u8 = value;
      return;

    case R_PPC64_UADDR32:
      ((union unaligned *) reloc_addr)->u4 = value;
      return;

    case R_PPC64_ADDR32:
      if (dont_expect ((value + 0x80000000) >= 0x100000000LL))
	_dl_reloc_overflow (map, "R_PPC64_ADDR32", reloc_addr, refsym);
      *(Elf64_Word *) reloc_addr = value;
      return;

    case R_PPC64_ADDR24:
      if (dont_expect ((value + 0x2000000) >= 0x4000000 || (value & 3) != 0))
	_dl_reloc_overflow (map, "R_PPC64_ADDR24", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Word *) reloc_addr, value, 0x3fffffc);
      break;

    case R_PPC64_ADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
	_dl_reloc_overflow (map, "R_PPC64_ADDR16", reloc_addr, refsym);
      *(Elf64_Half *) reloc_addr = value;
      break;

    case R_PPC64_UADDR16:
      if (dont_expect ((value + 0x8000) >= 0x10000))
	_dl_reloc_overflow (map, "R_PPC64_UADDR16", reloc_addr, refsym);
      ((union unaligned *) reloc_addr)->u2 = value;
      return;

    case R_PPC64_ADDR16_DS:
      if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
	_dl_reloc_overflow (map, "R_PPC64_ADDR16_DS", reloc_addr, refsym);
      BIT_INSERT (*(Elf64_Half *) reloc_addr, value, 0xfffc);
      break;

    case R_PPC64_ADDR16_HIGHER:
      *(Elf64_Half *) reloc_addr = PPC_HIGHER (value);
      break;

    case R_PPC64_ADDR16_HIGHEST:
      *(Elf64_Half *) reloc_addr = PPC_HIGHEST (value);
      break;

    case R_PPC64_ADDR16_HIGHERA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHERA (value);
      break;

    case R_PPC64_ADDR16_HIGHESTA:
      *(Elf64_Half *) reloc_addr = PPC_HIGHESTA (value);
      break;

    case R_PPC64_ADDR14:
    case R_PPC64_ADDR14_BRTAKEN:
    case R_PPC64_ADDR14_BRNTAKEN:
      {
	if (dont_expect ((value + 0x8000) >= 0x10000 || (value & 3) != 0))
	  _dl_reloc_overflow (map, "R_PPC64_ADDR14", reloc_addr, refsym);
	Elf64_Word insn = *(Elf64_Word *) reloc_addr;
	BIT_INSERT (insn, value, 0xfffc);
	if (r_type != R_PPC64_ADDR14)
	  {
	    insn &= ~(1 << 21);
	    if (r_type == R_PPC64_ADDR14_BRTAKEN)
	      insn |= 1 << 21;
	    if ((insn & (0x14 << 21)) == (0x04 << 21))
	      insn |= 0x02 << 21;
	    else if ((insn & (0x14 << 21)) == (0x10 << 21))
	      insn |= 0x08 << 21;
	  }
	*(Elf64_Word *) reloc_addr = insn;
      }
      break;

    case R_PPC64_REL32:
      *(Elf64_Word *) reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;

    case R_PPC64_REL64:
      *reloc_addr = value - (Elf64_Addr) reloc_addr;
      return;
#endif /* !RTLD_BOOTSTRAP */

    default:
      _dl_reloc_bad_type (map, r_type, 0);
      return;
    }
  MODIFIED_CODE_NOQUEUE (reloc_addr);
}

static inline void __attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
		      Elf64_Addr l_addr, const Elf64_Rela *reloc,
		      int skip_ifunc)
{
  /* elf_machine_runtime_setup handles this.  */
}


#endif /* RESOLVE_MAP */