/* Machine-dependent ELF dynamic relocation inline functions.  SPARC version.
   Copyright (C) 1996-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <tls.h>
#include <dl-plt.h>
#include <elf/dl-hwcaps.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf32_Ehdr *ehdr)
{
  if (ehdr->e_machine == EM_SPARC)
    return 1;
  else if (ehdr->e_machine == EM_SPARC32PLUS)
    {
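      /* A v8plus (EM_SPARC32PLUS) object is only usable on a V9-capable
         CPU; in the shared case the glibc.cpu.hwcap_mask tunable can veto
         that as well.  */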
#if defined SHARED
      uint64_t hwcap_mask = TUNABLE_GET (glibc, cpu, hwcap_mask, uint64_t,
                                         NULL);
      return GLRO(dl_hwcap) & hwcap_mask & HWCAP_SPARC_V9;
#else
      return GLRO(dl_hwcap) & HWCAP_SPARC_V9;
#endif
    }
  else
    return 0;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
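/* Below, the "call" stores its own address in %o7 while the sethi/add pair
   materializes the PC-relative offset of the GOT (the -4/+4 addends account
   for the sethi and the delay-slot add sitting one instruction before and
   after the call), so the final add of %o7 yields the run-time GOT
   address.  */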
#define LOAD_PIC_REG(PIC_REG) \
do {  register Elf32_Addr pc __asm("o7"); \
      __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
            "call 1f\n\t" \
            "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n" \
            "1:\tadd %1, %0, %1" \
            : "=r" (pc), "=r" (PIC_REG)); \
} while (0)

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf32_Addr
elf_machine_dynamic (void)
{
  register Elf32_Addr *got asm ("%l7");

  LOAD_PIC_REG (got);

  return *got;
}

/* Return the run-time load address of the shared object.  */
static inline Elf32_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7"), *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
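  /* Subtracting the two call displacements cancels the pc terms and leaves
     _DYNAMIC - _GLOBAL_OFFSET_TABLE_ + 4, so the expression below reduces
     to the load bias l_addr.  */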
  return (Elf32_Addr) got - *got + (pc[2] - pc[3]) * 4 - 4;
}

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
                           int lazy, int profile)
{
  Elf32_Addr *plt;
  extern void _dl_runtime_resolve (Elf32_Word);
  extern void _dl_runtime_profile (Elf32_Word);

  if (l->l_info[DT_JMPREL] && lazy)
    {
      Elf32_Addr rfunc;

      /* The entries for functions in the PLT have not yet been filled in.
         When called, their initial contents arrange to set the high 22
         bits of %g1 to an offset into the .rela.plt section and to jump
         to the beginning of the PLT.  */
      plt = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
      if (__builtin_expect (profile, 0))
        {
          rfunc = (Elf32_Addr) &_dl_runtime_profile;

          if (GLRO(dl_profile) != NULL
              && _dl_name_match_p (GLRO(dl_profile), l))
            GL(dl_profile_map) = l;
        }
      else
        {
          rfunc = (Elf32_Addr) &_dl_runtime_resolve;
        }

      /* The beginning of the PLT does:

                sethi %hi(_dl_runtime_{resolve,profile}), %g2
         pltpc: jmpl %g2 + %lo(_dl_runtime_{resolve,profile}), %g2
                nop
                .word MAP

         The PC value (pltpc) saved in %g2 by the jmpl points near the
         location where we store the link_map pointer for this object.  */

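      /* 0x05000000 is "sethi %hi(0), %g2" and 0x85c0a000 is
         "jmpl %g2 + %lo(0), %g2"; their immediate fields are filled in
         from RFUNC below.  */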
      plt[0] = 0x05000000 | ((rfunc >> 10) & 0x003fffff);
      plt[1] = 0x85c0a000 | (rfunc & 0x3ff);
      plt[2] = OPCODE_NOP;      /* Fill call delay slot.  */
      plt[3] = (Elf32_Addr) l;
    }

  return lazy;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64)) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT

/* Undo the sub %sp, 6*4, %sp; add %sp, 22*4, %o0 below to get at the
   value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 4))

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

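/* Load the GOT entry of SYMBOL into REG via PIC_REG, using the GOT-data
   ("gdop") relocation operators so the link editor may relax the load when
   the symbol turns out to be local to the module.  */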
#define RTLD_GOT_ADDRESS(pic_reg, reg, symbol) \
  "sethi %gdop_hix22(" #symbol "), " #reg "\n\t" \
  "xor " #reg ", %gdop_lox10(" #symbol "), " #reg "\n\t" \
  "ld [" #pic_reg " + " #reg "], " #reg ", %gdop(" #symbol ")"

#define RTLD_START __asm__ ("\
        .text\n\
        .globl _start\n\
        .type _start, @function\n\
        .align 32\n\
_start:\n\
        /* Allocate space for functions to drop their arguments.  */\n\
        sub %sp, 6*4, %sp\n\
        /* Pass pointer to argument block to _dl_start.  */\n\
        call _dl_start\n\
        add %sp, 22*4, %o0\n\
        /* FALTHRU */\n\
        .globl _dl_start_user\n\
        .type _dl_start_user, @function\n\
_dl_start_user:\n\
        /* Load the PIC register.  */\n\
1:      call 2f\n\
        sethi %hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
2:      or %l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n\
        add %l7, %o7, %l7\n\
        /* Save the user entry point address in %l0.  */\n\
        mov %o0, %l0\n\
        ld [%sp+22*4], %i5      /* load argc */\n\
        /* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n\
        " RTLD_GOT_ADDRESS(%l7, %o0, _rtld_local) "\n\
        add %sp, 23*4, %o2\n\
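        /* envp = argv + (argc + 1) * 4, i.e. just past argv's terminating NULL.  */\n\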
        sll %i5, 2, %o3\n\
        add %o3, 4, %o3\n\
        mov %i5, %o1\n\
        add %o2, %o3, %o3\n\
        call _dl_init\n\
        ld [%o0], %o0\n\
        /* Pass our finalizer function to the user in %g1.  */\n\
        " RTLD_GOT_ADDRESS(%l7, %g1, _dl_fini) "\n\
        /* Jump to the user's entry point and deallocate the extra stack we got.  */\n\
        jmp %l0\n\
        add %sp, 6*4, %sp\n\
        .size _dl_start_user, . - _dl_start_user\n\
        .previous");

static inline Elf32_Addr
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
                       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
                       const Elf32_Rela *reloc,
                       Elf32_Addr *reloc_addr, Elf32_Addr value)
{
#ifdef __sparc_v9__
  /* Sparc v9 can assume flush is always present.  */
  const int do_flush = 1;
#else
  /* Note that we don't mask the hwcap here, as the flush is essential to
     functionality on those CPUs that implement it.  */
  const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#endif
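  /* Patch the PLT entry in place.  The fourth argument selects the variant
     of the update used for lazy, possibly concurrent resolution (see
     sparc_fixup_plt in dl-plt.h); DO_FLUSH says whether "flush" instructions
     are needed to keep the instruction cache coherent.  */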
  return sparc_fixup_plt (reloc, reloc_addr, value, 1, do_flush);
}

/* Return the final value of a plt relocation.  */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
                       Elf32_Addr value)
{
  return value + reloc->r_addend;
}

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER sparc32_gnu_pltenter
#define ARCH_LA_PLTEXIT sparc32_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
                  const Elf32_Rela *reloc, const Elf32_Sym *sym,
                  const struct r_found_version *version,
                  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  const Elf32_Sym *const refsym = sym;
#endif
  Elf32_Addr value;
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
  struct link_map *sym_map = NULL;

  if (__glibc_unlikely (r_type == R_SPARC_NONE))
    return;

  if (__glibc_unlikely (r_type == R_SPARC_SIZE32))
    {
      *reloc_addr = sym->st_size + reloc->r_addend;
      return;
    }

#if !defined RTLD_BOOTSTRAP
  if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
    {
      *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

  if (__builtin_expect (ELF32_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      sym_map = map;
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
      value = SYMBOL_ADDRESS (sym_map, sym, true);
    }

  value += reloc->r_addend;     /* Assume copy relocs have zero addend.  */

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    {
      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
    }

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP
    case R_SPARC_COPY:
      if (sym == NULL)
        /* This can happen in trace mode if an object could not be
           found.  */
        break;
      if (sym->st_size > refsym->st_size
          || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
        {
          const char *strtab;

          strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
          _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
                            RTLD_PROGNAME, strtab + refsym->st_name);
        }
      memcpy (reloc_addr_arg, (void *) value,
              MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_GLOB_DAT:
    case R_SPARC_32:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* Fall thru */
    case R_SPARC_JMP_SLOT:
      {
#if !defined RTLD_BOOTSTRAP && !defined __sparc_v9__
        /* Note that we don't mask the hwcap here, as the flush is
           essential to functionality on those CPUs that implement
           it.  For sparcv9 we can assume flush is present.  */
        const int do_flush = GLRO(dl_hwcap) & HWCAP_SPARC_FLUSH;
#else
        /* Unfortunately, this is necessary, so that we can ensure
           ld.so will not execute corrupt PLT entry instructions.  */
        const int do_flush = 1;
#endif
        /* At this point we don't need to bother with thread safety,
           so we can optimize the first instruction of .plt out.  */
        sparc_fixup_plt (reloc, reloc_addr, value, 0, do_flush);
      }
      break;
    case R_SPARC_TLS_DTPMOD32:
      /* Get the information from the link map returned by the
         resolve function.  */
      if (sym_map != NULL)
        *reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF32:
      /* During relocation all TLS symbols are defined and used.
         Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF32:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of the object the symbol is contained in.
         It is a negative value which will be added to the
         thread pointer.  */
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          *reloc_addr = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
        }
      break;
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
      if (sym != NULL)
        {
          CHECK_STATIC_TLS (map, sym_map);
          value = sym->st_value - sym_map->l_tls_offset
            + reloc->r_addend;
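          /* HIX22 holds the complemented high bits; LOX10 holds the low
             10 bits or'ed with 0x1c00 so the 13-bit immediate sign-extends,
             letting the "sethi; xor" pair rebuild the negative TP offset.  */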
          if (r_type == R_SPARC_TLS_LE_HIX22)
            *reloc_addr = (*reloc_addr & 0xffc00000) | ((~value) >> 10);
          else
            *reloc_addr = (*reloc_addr & 0xffffe000) | (value & 0x3ff)
              | 0x1c00;
        }
      break;
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *reloc_addr = (value - (Elf32_Addr) reloc_addr);
      break;
    case R_SPARC_LO10:
      *reloc_addr = (*reloc_addr & ~0x3ff) | (value & 0x3ff);
      break;
    case R_SPARC_WDISP30:
      *reloc_addr = ((*reloc_addr & 0xc0000000)
                     | ((value - (unsigned int) reloc_addr) >> 2));
      break;
    case R_SPARC_HI22:
      *reloc_addr = (*reloc_addr & 0xffc00000) | (value >> 10);
      break;
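    /* Unaligned targets are written one byte at a time, most significant
       byte first (SPARC is big-endian).  */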
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
                           void *const reloc_addr_arg)
{
  Elf32_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr += l_addr + reloc->r_addend;
}

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
                      Elf32_Addr l_addr, const Elf32_Rela *reloc,
                      int skip_ifunc)
{
  Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);

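  /* JMP_SLOT relocs are left untouched here; the PLT header installed by
     elf_machine_runtime_setup sends the first call through the on-demand
     fixup code, which patches the entry.  */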
  if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
    ;
  else if (r_type == R_SPARC_JMP_IREL)
    {
      Elf32_Addr value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
        value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
      sparc_fixup_plt (reloc, reloc_addr, value, 1, 1);
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */