10471 ld(1) amd64 LD->LE TLS transition causes memory corruption
--- old/usr/src/cmd/sgs/libld/common/machrel.amd.c
+++ new/usr/src/cmd/sgs/libld/common/machrel.amd.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 /* Get the x86 version of the relocation engine */
27 27 #define DO_RELOC_LIBLD_X86
28 28
29 29 #include <string.h>
30 30 #include <stdio.h>
31 31 #include <strings.h>
32 32 #include <sys/elf_amd64.h>
33 33 #include <debug.h>
34 34 #include <reloc.h>
35 35 #include <i386/machdep_x86.h>
36 36 #include "msg.h"
37 37 #include "_libld.h"
38 38
39 39 /*
40 40 * This module uses do_reloc_ld() to execute several synthesized relocations.
41 41 * That function expects to be passed two things that we need to construct
42 42 * here:
43 43 *
44 44 * 1) A Rel_desc descriptor for each relocation type, from which the
45 45 * rel_rtype field, and nothing else, is obtained. This is easily
46 46 * handled by constructing the necessary descriptors.
47 47 *
48 48 * 2) A function, which called with the Rel_desc descriptor, returns
49 49 * a string representing the name of the symbol associated with
50 50 * the descriptor. The usual function for this is ld_reloc_sym_name().
51 51 * However, that function will not work in this case, as these synthetic
52 52 * relocations do not have an associated symbol. We supply the
53 53 * syn_rdesc_sym_name() function to simply return the fixed name.
54 54 */
55 55 static Rel_desc rdesc_r_amd64_gotpcrel = {
56 56 NULL, NULL, NULL, 0, 0, 0, R_AMD64_GOTPCREL };
57 57 static Rel_desc rdesc_r_amd64_32 = {
58 58 NULL, NULL, NULL, 0, 0, 0, R_AMD64_32 };
59 59 static Rel_desc rdesc_r_amd64_pc32 = {
60 60 NULL, NULL, NULL, 0, 0, 0, R_AMD64_PC32 };
61 61
62 62 /*ARGSUSED*/
63 63 static const char *
64 64 syn_rdesc_sym_name(Rel_desc *rdesc)
65 65 {
66 66 return (MSG_ORIG(MSG_SYM_PLTENT));
67 67 }
68 68
69 69 /*
70 70 * Search the GOT index list for a GOT entry with a matching reference and the
71 71 * proper addend.
72 72 */
73 73 static Gotndx *
74 74 ld_find_got_ndx(Alist *alp, Gotref gref, Ofl_desc *ofl, Rel_desc *rdesc)
75 75 {
76 76 Aliste idx;
77 77 Gotndx *gnp;
78 78
79 79 assert(rdesc != 0);
80 80
81 81 if ((gref == GOT_REF_TLSLD) && ofl->ofl_tlsldgotndx)
82 82 return (ofl->ofl_tlsldgotndx);
83 83
84 84 for (ALIST_TRAVERSE(alp, idx, gnp)) {
85 85 if ((rdesc->rel_raddend == gnp->gn_addend) &&
86 86 (gnp->gn_gotref == gref)) {
87 87 return (gnp);
88 88 }
89 89 }
90 90 return (NULL);
91 91 }
92 92
93 93 static Xword
94 94 ld_calc_got_offset(Rel_desc *rdesc, Ofl_desc *ofl)
95 95 {
96 96 Os_desc *osp = ofl->ofl_osgot;
97 97 Sym_desc *sdp = rdesc->rel_sym;
98 98 Xword gotndx;
99 99 Gotref gref;
100 100 Gotndx *gnp;
101 101
102 102 if (rdesc->rel_flags & FLG_REL_DTLS)
103 103 gref = GOT_REF_TLSGD;
104 104 else if (rdesc->rel_flags & FLG_REL_MTLS)
105 105 gref = GOT_REF_TLSLD;
106 106 else if (rdesc->rel_flags & FLG_REL_STLS)
107 107 gref = GOT_REF_TLSIE;
108 108 else
109 109 gref = GOT_REF_GENERIC;
110 110
111 111 gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, rdesc);
112 112 assert(gnp);
113 113
114 114 gotndx = (Xword)gnp->gn_gotndx;
115 115
116 116 if ((rdesc->rel_flags & FLG_REL_DTLS) &&
117 117 (rdesc->rel_rtype == R_AMD64_DTPOFF64))
118 118 gotndx++;
119 119
120 120 return ((Xword)(osp->os_shdr->sh_addr + (gotndx * M_GOT_ENTSIZE)));
121 121 }
122 122
123 123 static Word
124 124 ld_init_rel(Rel_desc *reld, Word *typedata, void *reloc)
125 125 {
126 126 Rela *rel = (Rela *)reloc;
127 127
128 128 /* LINTED */
129 129 reld->rel_rtype = (Word)ELF_R_TYPE(rel->r_info, M_MACH);
130 130 reld->rel_roffset = rel->r_offset;
131 131 reld->rel_raddend = rel->r_addend;
132 132 *typedata = 0;
133 133
134 134 reld->rel_flags |= FLG_REL_RELA;
135 135
136 136 return ((Word)ELF_R_SYM(rel->r_info));
137 137 }
138 138
139 139 static void
140 140 ld_mach_eflags(Ehdr *ehdr, Ofl_desc *ofl)
141 141 {
142 142 ofl->ofl_dehdr->e_flags |= ehdr->e_flags;
143 143 }
144 144
145 145 static void
146 146 ld_mach_make_dynamic(Ofl_desc *ofl, size_t *cnt)
147 147 {
148 148 if (!(ofl->ofl_flags & FLG_OF_RELOBJ)) {
149 149 /*
150 150 * Create this entry if we are going to create a PLT table.
151 151 */
152 152 if (ofl->ofl_pltcnt)
153 153 (*cnt)++; /* DT_PLTGOT */
154 154 }
155 155 }
156 156
157 157 static void
158 158 ld_mach_update_odynamic(Ofl_desc *ofl, Dyn **dyn)
159 159 {
160 160 if (((ofl->ofl_flags & FLG_OF_RELOBJ) == 0) && ofl->ofl_pltcnt) {
161 161 (*dyn)->d_tag = DT_PLTGOT;
162 162 if (ofl->ofl_osgot)
163 163 (*dyn)->d_un.d_ptr = ofl->ofl_osgot->os_shdr->sh_addr;
164 164 else
165 165 (*dyn)->d_un.d_ptr = 0;
166 166 (*dyn)++;
167 167 }
168 168 }
169 169
170 170 static Xword
171 171 ld_calc_plt_addr(Sym_desc *sdp, Ofl_desc *ofl)
172 172 {
173 173 Xword value;
174 174
175 175 value = (Xword)(ofl->ofl_osplt->os_shdr->sh_addr) +
176 176 M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) * M_PLT_ENTSIZE);
177 177 return (value);
178 178 }
179 179
180 180 /*
181 181 * Build a single plt entry - code is:
182 182 * JMP *name1@GOTPCREL(%rip)
183 183 * PUSHL $index
184 184 * JMP .PLT0
185 185 */
186 186 static uchar_t pltn_entry[M_PLT_ENTSIZE] = {
187 187 /* 0x00 jmpq *name1@GOTPCREL(%rip) */ 0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
188 188 /* 0x06 pushq $index */ 0x68, 0x00, 0x00, 0x00, 0x00,
189 189 /* 0x0b jmpq .plt0(%rip) */ 0xe9, 0x00, 0x00, 0x00, 0x00
190 190 /* 0x10 */
191 191 };
192 192
193 193 static uintptr_t
194 194 plt_entry(Ofl_desc * ofl, Sym_desc * sdp)
195 195 {
196 196 uchar_t *plt0, *pltent, *gotent;
197 197 Sword plt_off;
198 198 Word got_off;
199 199 Xword val1;
200 200 int bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
201 201
202 202 got_off = sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
203 203 plt_off = M_PLT_RESERVSZ + ((sdp->sd_aux->sa_PLTndx - 1) *
204 204 M_PLT_ENTSIZE);
205 205 plt0 = (uchar_t *)(ofl->ofl_osplt->os_outdata->d_buf);
206 206 pltent = plt0 + plt_off;
207 207 gotent = (uchar_t *)(ofl->ofl_osgot->os_outdata->d_buf) + got_off;
208 208
209 209 bcopy(pltn_entry, pltent, sizeof (pltn_entry));
210 210 /*
211 211 * Fill in the got entry with the address of the next instruction.
212 212 */
213 213 /* LINTED */
214 214 *(Word *)gotent = ofl->ofl_osplt->os_shdr->sh_addr + plt_off +
215 215 M_PLT_INSSIZE;
216 216 if (bswap)
217 217 /* LINTED */
218 218 *(Word *)gotent = ld_bswap_Word(*(Word *)gotent);
219 219
220 220 /*
221 221 * If '-z noreloc' is specified - skip the do_reloc_ld
222 222 * stage.
223 223 */
224 224 if (!OFL_DO_RELOC(ofl))
225 225 return (1);
226 226
227 227 /*
228 228 * patchup:
229 229 * jmpq *name1@gotpcrel(%rip)
230 230 *
231 231 * NOTE: 0x06 represents next instruction.
232 232 */
233 233 val1 = (ofl->ofl_osgot->os_shdr->sh_addr + got_off) -
234 234 (ofl->ofl_osplt->os_shdr->sh_addr + plt_off) - 0x06;
235 235
236 236 if (do_reloc_ld(&rdesc_r_amd64_gotpcrel, &pltent[0x02], &val1,
237 237 syn_rdesc_sym_name, MSG_ORIG(MSG_SPECFIL_PLTENT), bswap,
238 238 ofl->ofl_lml) == 0) {
239 239 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
240 240 sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
241 241 return (S_ERROR);
242 242 }
243 243
244 244 /*
245 245 * patchup:
246 246 * pushq $pltndx
247 247 */
248 248 val1 = (Xword)(sdp->sd_aux->sa_PLTndx - 1);
249 249
250 250 if (do_reloc_ld(&rdesc_r_amd64_32, &pltent[0x07], &val1,
251 251 syn_rdesc_sym_name, MSG_ORIG(MSG_SPECFIL_PLTENT), bswap,
252 252 ofl->ofl_lml) == 0) {
253 253 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
254 254 sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
255 255 return (S_ERROR);
256 256 }
257 257
258 258 /*
259 259 * patchup:
260 260 * jmpq .plt0(%rip)
261 261 * NOTE: 0x10 represents next instruction. The rather complex
262 262 * series of casts is necessary to sign extend an offset into
263 263 * a 64-bit value while satisfying various compiler error
264 264 * checks. Handle with care.
265 265 */
266 266 val1 = (Xword)((intptr_t)((uintptr_t)plt0 -
267 267 (uintptr_t)(&pltent[0x10])));
268 268
269 269 if (do_reloc_ld(&rdesc_r_amd64_pc32, &pltent[0x0c], &val1,
270 270 syn_rdesc_sym_name, MSG_ORIG(MSG_SPECFIL_PLTENT), bswap,
271 271 ofl->ofl_lml) == 0) {
272 272 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_PLT_PLTNFAIL),
273 273 sdp->sd_aux->sa_PLTndx, demangle(sdp->sd_name));
274 274 return (S_ERROR);
275 275 }
276 276
277 277 return (1);
278 278 }
279 279
280 280 static uintptr_t
281 281 ld_perform_outreloc(Rel_desc * orsp, Ofl_desc * ofl, Boolean *remain_seen)
282 282 {
283 283 Os_desc * relosp, * osp = 0;
284 284 Word ndx;
285 285 Xword roffset, value;
286 286 Sxword raddend;
287 287 Rela rea;
288 288 char *relbits;
289 289 Sym_desc * sdp, * psym = (Sym_desc *)0;
290 290 int sectmoved = 0;
291 291
292 292 raddend = orsp->rel_raddend;
293 293 sdp = orsp->rel_sym;
294 294
295 295 /*
296 296 * If the section this relocation is against has been discarded
297 297 * (-zignore), then also discard (skip) the relocation itself.
298 298 */
299 299 if (orsp->rel_isdesc && ((orsp->rel_flags &
300 300 (FLG_REL_GOT | FLG_REL_BSS | FLG_REL_PLT | FLG_REL_NOINFO)) == 0) &&
301 301 (orsp->rel_isdesc->is_flags & FLG_IS_DISCARD)) {
302 302 DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, orsp));
303 303 return (1);
304 304 }
305 305
306 306 /*
307 307 * If this is a relocation against a move table, or expanded move
308 308 * table, adjust the relocation entries.
309 309 */
310 310 if (RELAUX_GET_MOVE(orsp))
311 311 ld_adj_movereloc(ofl, orsp);
312 312
313 313 /*
314 314 * If this is a relocation against a section then we need to adjust the
315 315 * raddend field to compensate for the new position of the input section
316 316 * within the new output section.
317 317 */
318 318 if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
319 319 if (ofl->ofl_parsyms &&
320 320 (sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
321 321 /* LINTED */
322 322 (psym = ld_am_I_partial(orsp, orsp->rel_raddend))) {
323 323 DBG_CALL(Dbg_move_outsctadj(ofl->ofl_lml, psym));
324 324 sectmoved = 1;
325 325 if (ofl->ofl_flags & FLG_OF_RELOBJ)
326 326 raddend = psym->sd_sym->st_value;
327 327 else
328 328 raddend = psym->sd_sym->st_value -
329 329 psym->sd_isc->is_osdesc->os_shdr->sh_addr;
330 330 /* LINTED */
331 331 raddend += (Off)_elf_getxoff(psym->sd_isc->is_indata);
332 332 if (psym->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
333 333 raddend +=
334 334 psym->sd_isc->is_osdesc->os_shdr->sh_addr;
335 335 } else {
336 336 /* LINTED */
337 337 raddend += (Off)_elf_getxoff(sdp->sd_isc->is_indata);
338 338 if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
339 339 raddend +=
340 340 sdp->sd_isc->is_osdesc->os_shdr->sh_addr;
341 341 }
342 342 }
343 343
344 344 value = sdp->sd_sym->st_value;
345 345
346 346 if (orsp->rel_flags & FLG_REL_GOT) {
347 347 /*
348 348 * Note: for GOT relative relocations on amd64
349 349 * we discard the addend. It was relevant
350 350 * to the reference - not to the data item
351 351 * being referenced (ie: that -4 thing).
352 352 */
353 353 raddend = 0;
354 354 osp = ofl->ofl_osgot;
355 355 roffset = ld_calc_got_offset(orsp, ofl);
356 356
357 357 } else if (orsp->rel_flags & FLG_REL_PLT) {
358 358 /*
359 359 * Note that relocations for PLT's actually
360 360 * cause a relocation againt the GOT.
361 361 */
362 362 osp = ofl->ofl_osplt;
363 363 roffset = (ofl->ofl_osgot->os_shdr->sh_addr) +
364 364 sdp->sd_aux->sa_PLTGOTndx * M_GOT_ENTSIZE;
365 365 raddend = 0;
366 366 if (plt_entry(ofl, sdp) == S_ERROR)
367 367 return (S_ERROR);
368 368
369 369 } else if (orsp->rel_flags & FLG_REL_BSS) {
370 370 /*
371 371 * This must be a R_AMD64_COPY. For these set the roffset to
372 372 * point to the new symbols location.
373 373 */
374 374 osp = ofl->ofl_isbss->is_osdesc;
375 375 roffset = value;
376 376
377 377 /*
378 378 * The raddend doesn't mean anything in a R_SPARC_COPY
379 379 * relocation. Null it out because it can confuse people.
380 380 */
381 381 raddend = 0;
382 382 } else {
383 383 osp = RELAUX_GET_OSDESC(orsp);
384 384
385 385 /*
386 386 * Calculate virtual offset of reference point; equals offset
387 387 * into section + vaddr of section for loadable sections, or
388 388 * offset plus section displacement for nonloadable sections.
389 389 */
390 390 roffset = orsp->rel_roffset +
391 391 (Off)_elf_getxoff(orsp->rel_isdesc->is_indata);
392 392 if (!(ofl->ofl_flags & FLG_OF_RELOBJ))
393 393 roffset += orsp->rel_isdesc->is_osdesc->
394 394 os_shdr->sh_addr;
395 395 }
396 396
397 397 if ((osp == 0) || ((relosp = osp->os_relosdesc) == 0))
398 398 relosp = ofl->ofl_osrel;
399 399
400 400 /*
401 401 * Assign the symbols index for the output relocation. If the
402 402 * relocation refers to a SECTION symbol then it's index is based upon
403 403 * the output sections symbols index. Otherwise the index can be
404 404 * derived from the symbols index itself.
405 405 */
406 406 if (orsp->rel_rtype == R_AMD64_RELATIVE)
407 407 ndx = STN_UNDEF;
408 408 else if ((orsp->rel_flags & FLG_REL_SCNNDX) ||
409 409 (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION)) {
410 410 if (sectmoved == 0) {
411 411 /*
412 412 * Check for a null input section. This can
413 413 * occur if this relocation references a symbol
414 414 * generated by sym_add_sym().
415 415 */
416 416 if (sdp->sd_isc && sdp->sd_isc->is_osdesc)
417 417 ndx = sdp->sd_isc->is_osdesc->os_identndx;
418 418 else
419 419 ndx = sdp->sd_shndx;
420 420 } else
421 421 ndx = ofl->ofl_parexpnndx;
422 422 } else
423 423 ndx = sdp->sd_symndx;
424 424
425 425 /*
426 426 * Add the symbols 'value' to the addend field.
427 427 */
428 428 if (orsp->rel_flags & FLG_REL_ADVAL)
429 429 raddend += value;
430 430
431 431 /*
432 432 * The addend field for R_AMD64_DTPMOD64 means nothing. The addend
433 433 * is propagated in the corresponding R_AMD64_DTPOFF64 relocation.
434 434 */
435 435 if (orsp->rel_rtype == R_AMD64_DTPMOD64)
436 436 raddend = 0;
437 437
438 438 relbits = (char *)relosp->os_outdata->d_buf;
439 439
440 440 rea.r_info = ELF_R_INFO(ndx, orsp->rel_rtype);
441 441 rea.r_offset = roffset;
442 442 rea.r_addend = raddend;
443 443 DBG_CALL(Dbg_reloc_out(ofl, ELF_DBG_LD, SHT_RELA, &rea, relosp->os_name,
444 444 ld_reloc_sym_name(orsp)));
445 445
446 446 /*
447 447 * Assert we haven't walked off the end of our relocation table.
448 448 */
449 449 assert(relosp->os_szoutrels <= relosp->os_shdr->sh_size);
450 450
451 451 (void) memcpy((relbits + relosp->os_szoutrels),
452 452 (char *)&rea, sizeof (Rela));
453 453 relosp->os_szoutrels += (Xword)sizeof (Rela);
454 454
455 455 /*
456 456 * Determine if this relocation is against a non-writable, allocatable
457 457 * section. If so we may need to provide a text relocation diagnostic.
458 458 * Note that relocations against the .plt (R_AMD64_JUMP_SLOT) actually
459 459 * result in modifications to the .got.
460 460 */
461 461 if (orsp->rel_rtype == R_AMD64_JUMP_SLOT)
462 462 osp = ofl->ofl_osgot;
463 463
464 464 ld_reloc_remain_entry(orsp, osp, ofl, remain_seen);
465 465 return (1);
466 466 }
467 467
468 468 /*
469 469 * amd64 Instructions for TLS processing
470 470 */
471 471 static uchar_t tlsinstr_gd_ie[] = {
472 472 /*
473 473 * 0x00 movq %fs:0, %rax
474 474 */
475 475 0x64, 0x48, 0x8b, 0x04, 0x25,
476 476 0x00, 0x00, 0x00, 0x00,
477 477 /*
478 478 * 0x09 addq x@gottpoff(%rip), %rax
479 479 */
480 480 0x48, 0x03, 0x05, 0x00, 0x00,
481 481 0x00, 0x00
482 482 };
483 483
484 484 static uchar_t tlsinstr_gd_le[] = {
485 485 /*
486 486 * 0x00 movq %fs:0, %rax
487 487 */
488 488 0x64, 0x48, 0x8b, 0x04, 0x25,
489 489 0x00, 0x00, 0x00, 0x00,
490 490 /*
491 491 * 0x09 leaq x@gottpoff(%rip), %rax
492 492 */
493 493 0x48, 0x8d, 0x80, 0x00, 0x00,
494 494 0x00, 0x00
495 495 };
496 496
497 497 static uchar_t tlsinstr_ld_le[] = {
498 498 /*
499 499 * .byte 0x66
500 500 */
501 501 0x66,
502 502 /*
503 503 * .byte 0x66
504 504 */
505 505 0x66,
506 506 /*
507 507 * .byte 0x66
508 508 */
509 509 0x66,
510 510 /*
511 511 * movq %fs:0, %rax
512 512 */
513 513 0x64, 0x48, 0x8b, 0x04, 0x25,
514 514 0x00, 0x00, 0x00, 0x00
515 515 };
516 516
517 517 #define REX_B 0x1
518 518 #define REX_X 0x2
519 519 #define REX_R 0x4
520 520 #define REX_W 0x8
521 521 #define REX_PREFIX 0x40
522 522
523 523 #define REX_RW (REX_PREFIX | REX_R | REX_W)
524 524 #define REX_BW (REX_PREFIX | REX_B | REX_W)
525 525 #define REX_BRW (REX_PREFIX | REX_B | REX_R | REX_W)
526 526
527 527 #define REG_ESP 0x4
528 528
529 529 #define INSN_ADDMR 0x03 /* addq mem,reg */
530 530 #define INSN_ADDIR 0x81 /* addq imm,reg */
531 531 #define INSN_MOVMR 0x8b /* movq mem,reg */
532 532 #define INSN_MOVIR 0xc7 /* movq imm,reg */
533 533 #define INSN_LEA 0x8d /* leaq mem,reg */
534 534
535 535 static Fixupret
536 536 tls_fixups(Ofl_desc *ofl, Rel_desc *arsp)
537 537 {
538 538 Sym_desc *sdp = arsp->rel_sym;
539 539 Word rtype = arsp->rel_rtype;
540 540 uchar_t *offset;
541 541
542 542 offset = (uchar_t *)((uintptr_t)arsp->rel_roffset +
543 543 (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata) +
544 544 (uintptr_t)RELAUX_GET_OSDESC(arsp)->os_outdata->d_buf);
545 545
546 546 /*
547 547 * Note that in certain of the original insn sequences below, the
548 548 * instructions are not necessarily adjacent
549 549 */
550 550 if (sdp->sd_ref == REF_DYN_NEED) {
551 551 /*
552 552 * IE reference model
553 553 */
554 554 switch (rtype) {
555 555 case R_AMD64_TLSGD:
556 556 /*
557 557 * GD -> IE
558 558 *
559 559 * Transition:
560 560 * 0x00 .byte 0x66
561 561 * 0x01 leaq x@tlsgd(%rip), %rdi
562 562 * 0x08 .word 0x6666
563 563 * 0x0a rex64
564 564 * 0x0b call __tls_get_addr@plt
565 565 * 0x10
566 566 * To:
567 567 * 0x00 movq %fs:0, %rax
568 568 * 0x09 addq x@gottpoff(%rip), %rax
569 569 * 0x10
570 570 */
571 571 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
572 572 R_AMD64_GOTTPOFF, arsp, ld_reloc_sym_name));
573 573 arsp->rel_rtype = R_AMD64_GOTTPOFF;
574 574 arsp->rel_roffset += 8;
575 575 arsp->rel_raddend = (Sxword)-4;
576 576
577 577 /*
578 578 * Adjust 'offset' to beginning of instruction
579 579 * sequence.
580 580 */
581 581 offset -= 4;
582 582 (void) memcpy(offset, tlsinstr_gd_ie,
583 583 sizeof (tlsinstr_gd_ie));
584 584 return (FIX_RELOC);
585 585
586 586 case R_AMD64_PLT32:
587 587 /*
588 588 * Fixup done via the TLS_GD relocation.
589 589 */
590 590 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
591 591 R_AMD64_NONE, arsp, ld_reloc_sym_name));
592 592 return (FIX_DONE);
593 593 }
594 594 }
595 595
596 596 /*
597 597 * LE reference model
598 598 */
599 599 switch (rtype) {
600 600 case R_AMD64_TLSGD:
601 601 /*
602 602 * GD -> LE
603 603 *
604 604 * Transition:
605 605 * 0x00 .byte 0x66
606 606 * 0x01 leaq x@tlsgd(%rip), %rdi
607 607 * 0x08 .word 0x6666
608 608 * 0x0a rex64
609 609 * 0x0b call __tls_get_addr@plt
610 610 * 0x10
611 611 * To:
612 612 * 0x00 movq %fs:0, %rax
613 613 * 0x09 leaq x@tpoff(%rax), %rax
614 614 * 0x10
615 615 */
616 616 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
617 617 R_AMD64_TPOFF32, arsp, ld_reloc_sym_name));
618 618 arsp->rel_rtype = R_AMD64_TPOFF32;
619 619 arsp->rel_roffset += 8;
620 620 arsp->rel_raddend = 0;
621 621
622 622 /*
623 623 * Adjust 'offset' to beginning of instruction sequence.
624 624 */
625 625 offset -= 4;
626 626 (void) memcpy(offset, tlsinstr_gd_le, sizeof (tlsinstr_gd_le));
627 627 return (FIX_RELOC);
628 628
629 629 case R_AMD64_GOTTPOFF: {
630 630 /*
631 631 * IE -> LE
632 632 *
633 633 * Transition 1:
634 634 * movq %fs:0, %reg
635 635 * addq x@gottpoff(%rip), %reg
636 636 * To:
637 637 * movq %fs:0, %reg
638 638 * leaq x@tpoff(%reg), %reg
639 639 *
640 640 * Transition (as a special case):
641 641 * movq %fs:0, %r12/%rsp
642 642 * addq x@gottpoff(%rip), %r12/%rsp
643 643 * To:
644 644 * movq %fs:0, %r12/%rsp
645 645 * addq x@tpoff(%rax), %r12/%rsp
646 646 *
647 647 * Transition 2:
648 648 * movq x@gottpoff(%rip), %reg
649 649 * movq %fs:(%reg), %reg
650 650 * To:
651 651 * movq x@tpoff(%reg), %reg
652 652 * movq %fs:(%reg), %reg
653 653 */
654 654 Conv_inv_buf_t inv_buf;
655 655 uint8_t reg; /* Register */
656 656
657 657 offset -= 3;
658 658
659 659 reg = offset[2] >> 3; /* Encoded dest. reg. operand */
660 660
661 661 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
662 662 R_AMD64_TPOFF32, arsp, ld_reloc_sym_name));
663 663 arsp->rel_rtype = R_AMD64_TPOFF32;
664 664 arsp->rel_raddend = 0;
665 665
666 666 /*
667 667 * This is transition 2, and the special case of form 1 where
668 668 * a normal transition would index %rsp or %r12 and need a SIB
669 669 * byte in the leaq for which we lack space
670 670 */
671 671 if ((offset[1] == INSN_MOVMR) ||
672 672 ((offset[1] == INSN_ADDMR) && (reg == REG_ESP))) {
673 673 /*
674 674 * If we needed an extra bit of MOD.reg to refer to
675 675 * this register as the dest of the original movq we
676 676 * need an extra bit of MOD.rm to refer to it in the
677 677 * dest of the replacement movq or addq.
678 678 */
679 679 if (offset[0] == REX_RW)
680 680 offset[0] = REX_BW;
681 681
682 682 offset[1] = (offset[1] == INSN_MOVMR) ?
683 683 INSN_MOVIR : INSN_ADDIR;
684 684 offset[2] = 0xc0 | reg;
685 685
686 686 return (FIX_RELOC);
687 687 } else if (offset[1] == INSN_ADDMR) {
688 688 /*
689 689 * If we needed an extra bit of MOD.reg to refer to
690 690 * this register in the dest of the addq we need an
691 691 * extra bit of both MOD.reg and MOD.rm to refer to it
692 692 * in the source and dest of the leaq
693 693 */
694 694 if (offset[0] == REX_RW)
695 695 offset[0] = REX_BRW;
696 696
697 697 offset[1] = INSN_LEA;
698 698 offset[2] = 0x80 | (reg << 3) | reg;
699 699
700 700 return (FIX_RELOC);
701 701 }
702 702
703 703 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_REL_BADTLSINS),
704 704 conv_reloc_amd64_type(arsp->rel_rtype, 0, &inv_buf),
705 705 arsp->rel_isdesc->is_file->ifl_name,
706 706 ld_reloc_sym_name(arsp),
707 707 arsp->rel_isdesc->is_name,
708 708 EC_OFF(arsp->rel_roffset));
709 709 return (FIX_ERROR);
710 710 }
711 711 case R_AMD64_TLSLD:
712 712 /*
713 713 * LD -> LE
714 714 *
715 715 * Transition
716 716 * 0x00 leaq x1@tlsgd(%rip), %rdi
717 717 * 0x07 call __tls_get_addr@plt
718 718 * 0x0c
719 719 * To:
720 720 * 0x00 .byte 0x66
721 721 * 0x01 .byte 0x66
722 722 * 0x02 .byte 0x66
723 723 * 0x03 movq %fs:0, %rax
724 724 */
725 725 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
726 726 R_AMD64_NONE, arsp, ld_reloc_sym_name));
727 727 offset -= 3;
728 728 (void) memcpy(offset, tlsinstr_ld_le, sizeof (tlsinstr_ld_le));
729 729 return (FIX_DONE);
730 730
731 731 case R_AMD64_DTPOFF32:
732 732 /*
733 733 * LD->LE
734 734 *
735 735 * Transition:
736 736 * 0x00 leaq x1@dtpoff(%rax), %rcx
737 737 * To:
738 738 * 0x00 leaq x1@tpoff(%rax), %rcx
739 739 */
740 740 DBG_CALL(Dbg_reloc_transition(ofl->ofl_lml, M_MACH,
741 741 R_AMD64_TPOFF32, arsp, ld_reloc_sym_name));
742 742 arsp->rel_rtype = R_AMD64_TPOFF32;
743 - arsp->rel_raddend = 0;
744 743 return (FIX_RELOC);
745 744 }
746 745
747 746 return (FIX_RELOC);
748 747 }
749 748
750 749 static uintptr_t
751 750 ld_do_activerelocs(Ofl_desc *ofl)
752 751 {
753 752 Rel_desc *arsp;
754 753 Rel_cachebuf *rcbp;
755 754 Aliste idx;
756 755 uintptr_t return_code = 1;
757 756 ofl_flag_t flags = ofl->ofl_flags;
758 757
759 758 if (aplist_nitems(ofl->ofl_actrels.rc_list) != 0)
760 759 DBG_CALL(Dbg_reloc_doact_title(ofl->ofl_lml));
761 760
762 761 /*
763 762 * Process active relocations.
764 763 */
765 764 REL_CACHE_TRAVERSE(&ofl->ofl_actrels, idx, rcbp, arsp) {
766 765 uchar_t *addr;
767 - Xword value;
766 + Xword value;
768 767 Sym_desc *sdp;
769 768 const char *ifl_name;
770 769 Xword refaddr;
771 770 int moved = 0;
772 771 Gotref gref;
773 772 Os_desc *osp;
774 773
775 774 /*
776 775 * If the section this relocation is against has been discarded
777 776 * (-zignore), then discard (skip) the relocation itself.
778 777 */
779 778 if ((arsp->rel_isdesc->is_flags & FLG_IS_DISCARD) &&
780 779 ((arsp->rel_flags & (FLG_REL_GOT | FLG_REL_BSS |
781 780 FLG_REL_PLT | FLG_REL_NOINFO)) == 0)) {
782 781 DBG_CALL(Dbg_reloc_discard(ofl->ofl_lml, M_MACH, arsp));
783 782 continue;
784 783 }
785 784
786 785 /*
787 786 * We determine what the 'got reference' model (if required)
788 787 * is at this point. This needs to be done before tls_fixup()
789 788 * since it may 'transition' our instructions.
790 789 *
791 790 * The got table entries have already been assigned,
792 791 * and we bind to those initial entries.
793 792 */
794 793 if (arsp->rel_flags & FLG_REL_DTLS)
795 794 gref = GOT_REF_TLSGD;
796 795 else if (arsp->rel_flags & FLG_REL_MTLS)
797 796 gref = GOT_REF_TLSLD;
798 797 else if (arsp->rel_flags & FLG_REL_STLS)
799 798 gref = GOT_REF_TLSIE;
800 799 else
801 800 gref = GOT_REF_GENERIC;
802 801
803 802 /*
804 803 * Perform any required TLS fixups.
805 804 */
806 805 if (arsp->rel_flags & FLG_REL_TLSFIX) {
807 806 Fixupret ret;
808 807
809 808 if ((ret = tls_fixups(ofl, arsp)) == FIX_ERROR)
810 809 return (S_ERROR);
811 810 if (ret == FIX_DONE)
812 811 continue;
813 812 }
814 813
815 814 /*
816 815 * If this is a relocation against a move table, or
817 816 * expanded move table, adjust the relocation entries.
818 817 */
819 818 if (RELAUX_GET_MOVE(arsp))
820 819 ld_adj_movereloc(ofl, arsp);
821 820
822 821 sdp = arsp->rel_sym;
823 822 refaddr = arsp->rel_roffset +
824 823 (Off)_elf_getxoff(arsp->rel_isdesc->is_indata);
825 824
826 825 if ((arsp->rel_flags & FLG_REL_CLVAL) ||
827 826 (arsp->rel_flags & FLG_REL_GOTCL))
828 827 value = 0;
829 828 else if (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION) {
830 829 Sym_desc *sym;
831 830
832 831 /*
833 832 * The value for a symbol pointing to a SECTION
834 833 * is based off of that sections position.
835 834 */
836 835 if ((sdp->sd_isc->is_flags & FLG_IS_RELUPD) &&
837 836 /* LINTED */
838 837 (sym = ld_am_I_partial(arsp, arsp->rel_raddend))) {
839 838 /*
840 839 * The symbol was moved, so adjust the value
841 840 * relative to the new section.
842 841 */
843 842 value = sym->sd_sym->st_value;
844 843 moved = 1;
845 844
846 845 /*
847 846 * The original raddend covers the displacement
848 847 * from the section start to the desired
849 848 * address. The value computed above gets us
850 849 * from the section start to the start of the
851 850 * symbol range. Adjust the old raddend to
852 851 * remove the offset from section start to
853 852 * symbol start, leaving the displacement
854 853 * within the range of the symbol.
855 854 */
856 855 arsp->rel_raddend -= sym->sd_osym->st_value;
857 856 } else {
858 857 value = _elf_getxoff(sdp->sd_isc->is_indata);
859 858 if (sdp->sd_isc->is_shdr->sh_flags & SHF_ALLOC)
860 859 value += sdp->sd_isc->is_osdesc->
861 860 os_shdr->sh_addr;
862 861 }
863 862 if (sdp->sd_isc->is_shdr->sh_flags & SHF_TLS)
864 863 value -= ofl->ofl_tlsphdr->p_vaddr;
865 864
866 865 } else if (IS_SIZE(arsp->rel_rtype)) {
867 866 /*
868 867 * Size relocations require the symbols size.
869 868 */
870 869 value = sdp->sd_sym->st_size;
871 870
872 871 } else if ((sdp->sd_flags & FLG_SY_CAP) &&
873 872 sdp->sd_aux && sdp->sd_aux->sa_PLTndx) {
874 873 /*
875 874 * If relocation is against a capabilities symbol, we
876 875 * need to jump to an associated PLT, so that at runtime
877 876 * ld.so.1 is involved to determine the best binding
878 877 * choice. Otherwise, the value is the symbols value.
879 878 */
880 879 value = ld_calc_plt_addr(sdp, ofl);
881 880 } else
882 881 value = sdp->sd_sym->st_value;
883 882
884 883 /*
885 884 * Relocation against the GLOBAL_OFFSET_TABLE.
886 885 */
887 886 if ((arsp->rel_flags & FLG_REL_GOT) &&
888 887 !ld_reloc_set_aux_osdesc(ofl, arsp, ofl->ofl_osgot))
889 888 return (S_ERROR);
890 889 osp = RELAUX_GET_OSDESC(arsp);
891 890
892 891 /*
893 892 * If loadable and not producing a relocatable object add the
894 893 * sections virtual address to the reference address.
895 894 */
896 895 if ((arsp->rel_flags & FLG_REL_LOAD) &&
897 896 ((flags & FLG_OF_RELOBJ) == 0))
898 897 refaddr += arsp->rel_isdesc->is_osdesc->
899 898 os_shdr->sh_addr;
900 899
901 900 /*
902 901 * If this entry has a PLT assigned to it, its value is actually
903 902 * the address of the PLT (and not the address of the function).
904 903 */
905 904 if (IS_PLT(arsp->rel_rtype)) {
906 905 if (sdp->sd_aux && sdp->sd_aux->sa_PLTndx)
907 906 value = ld_calc_plt_addr(sdp, ofl);
908 907 }
909 908
910 909 /*
911 910 * Add relocations addend to value. Add extra
912 911 * relocation addend if needed.
913 912 *
914 913 * Note: For GOT relative relocations on amd64 we discard the
915 914 * addend. It was relevant to the reference - not to the
916 915 * data item being referenced (ie: that -4 thing).
917 916 */
918 917 if ((arsp->rel_flags & FLG_REL_GOT) == 0)
919 918 value += arsp->rel_raddend;
920 919
921 920 /*
922 921 * Determine whether the value needs further adjustment. Filter
923 922 * through the attributes of the relocation to determine what
924 923 * adjustment is required. Note, many of the following cases
925 924 * are only applicable when a .got is present. As a .got is
926 925 * not generated when a relocatable object is being built,
927 926 * any adjustments that require a .got need to be skipped.
928 927 */
929 928 if ((arsp->rel_flags & FLG_REL_GOT) &&
930 929 ((flags & FLG_OF_RELOBJ) == 0)) {
931 930 Xword R1addr;
932 931 uintptr_t R2addr;
933 932 Word gotndx;
934 933 Gotndx *gnp;
935 934
936 935 /*
937 936 * Perform relocation against GOT table. Since this
938 937 * doesn't fit exactly into a relocation we place the
939 938 * appropriate byte in the GOT directly
940 939 *
941 940 * Calculate offset into GOT at which to apply
942 941 * the relocation.
943 942 */
944 943 gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, arsp);
945 944 assert(gnp);
946 945
947 946 if (arsp->rel_rtype == R_AMD64_DTPOFF64)
948 947 gotndx = gnp->gn_gotndx + 1;
949 948 else
950 949 gotndx = gnp->gn_gotndx;
951 950
952 951 R1addr = (Xword)(gotndx * M_GOT_ENTSIZE);
953 952
954 953 /*
955 954 * Add the GOTs data's offset.
956 955 */
957 956 R2addr = R1addr + (uintptr_t)osp->os_outdata->d_buf;
958 957
959 958 DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD_ACT,
960 959 M_MACH, SHT_RELA, arsp, R1addr, value,
961 960 ld_reloc_sym_name));
962 961
963 962 /*
964 963 * And do it.
965 964 */
966 965 if (ofl->ofl_flags1 & FLG_OF1_ENCDIFF)
967 966 *(Xword *)R2addr = ld_bswap_Xword(value);
968 967 else
969 968 *(Xword *)R2addr = value;
970 969 continue;
971 970
972 971 } else if (IS_GOT_BASED(arsp->rel_rtype) &&
973 972 ((flags & FLG_OF_RELOBJ) == 0)) {
974 973 value -= ofl->ofl_osgot->os_shdr->sh_addr;
975 974
976 975 } else if (IS_GOTPCREL(arsp->rel_rtype) &&
977 976 ((flags & FLG_OF_RELOBJ) == 0)) {
978 977 Gotndx *gnp;
979 978
980 979 /*
981 980 * Calculation:
982 981 * G + GOT + A - P
983 982 */
984 983 gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, arsp);
985 984 assert(gnp);
986 985 value = (Xword)(ofl->ofl_osgot->os_shdr-> sh_addr) +
987 986 ((Xword)gnp->gn_gotndx * M_GOT_ENTSIZE) +
988 987 arsp->rel_raddend - refaddr;
989 988
990 989 } else if (IS_GOT_PC(arsp->rel_rtype) &&
991 990 ((flags & FLG_OF_RELOBJ) == 0)) {
992 991 value = (Xword)(ofl->ofl_osgot->os_shdr->
993 992 sh_addr) - refaddr + arsp->rel_raddend;
994 993
995 994 } else if ((IS_PC_RELATIVE(arsp->rel_rtype)) &&
996 995 (((flags & FLG_OF_RELOBJ) == 0) ||
997 996 (osp == sdp->sd_isc->is_osdesc))) {
998 997 value -= refaddr;
999 998
1000 999 } else if (IS_TLS_INS(arsp->rel_rtype) &&
1001 1000 IS_GOT_RELATIVE(arsp->rel_rtype) &&
1002 1001 ((flags & FLG_OF_RELOBJ) == 0)) {
1003 1002 Gotndx *gnp;
1004 1003
1005 1004 gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, arsp);
1006 1005 assert(gnp);
1007 1006 value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
1008 1007
1009 1008 } else if (IS_GOT_RELATIVE(arsp->rel_rtype) &&
1010 1009 ((flags & FLG_OF_RELOBJ) == 0)) {
1011 1010 Gotndx *gnp;
1012 1011
1013 1012 gnp = ld_find_got_ndx(sdp->sd_GOTndxs, gref, ofl, arsp);
1014 1013 assert(gnp);
1015 1014 value = (Xword)gnp->gn_gotndx * M_GOT_ENTSIZE;
1016 1015
1017 1016 } else if ((arsp->rel_flags & FLG_REL_STLS) &&
1018 1017 ((flags & FLG_OF_RELOBJ) == 0)) {
1019 1018 Xword tlsstatsize;
1020 1019
1021 1020 /*
1022 1021 * This is the LE TLS reference model. Static
1023 1022 * offset is hard-coded.
1024 1023 */
1025 1024 tlsstatsize = S_ROUND(ofl->ofl_tlsphdr->p_memsz,
1026 1025 M_TLSSTATALIGN);
1027 1026 value = tlsstatsize - value;
1028 1027
1029 1028 /*
1030 1029 * Since this code is fixed up, it assumes a negative
1031 1030 * offset that can be added to the thread pointer.
1032 1031 */
1033 1032 if (arsp->rel_rtype == R_AMD64_TPOFF32)
1034 1033 value = -value;
1035 1034 }
1036 1035
1037 1036 if (arsp->rel_isdesc->is_file)
1038 1037 ifl_name = arsp->rel_isdesc->is_file->ifl_name;
1039 1038 else
1040 1039 ifl_name = MSG_INTL(MSG_STR_NULL);
1041 1040
1042 1041 /*
1043 1042 * Make sure we have data to relocate. Compiler and assembler
1044 1043 * developers have been known to generate relocations against
1045 1044 * invalid sections (normally .bss), so for their benefit give
1046 1045 * them sufficient information to help analyze the problem.
1047 1046 * End users should never see this.
1048 1047 */
1049 1048 if (arsp->rel_isdesc->is_indata->d_buf == 0) {
1050 1049 Conv_inv_buf_t inv_buf;
1051 1050
1052 1051 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_REL_EMPTYSEC),
1053 1052 conv_reloc_amd64_type(arsp->rel_rtype, 0, &inv_buf),
1054 1053 ifl_name, ld_reloc_sym_name(arsp),
1055 1054 EC_WORD(arsp->rel_isdesc->is_scnndx),
1056 1055 arsp->rel_isdesc->is_name);
1057 1056 return (S_ERROR);
1058 1057 }
1059 1058
1060 1059 /*
1061 1060 * Get the address of the data item we need to modify.
1062 1061 */
1063 1062 addr = (uchar_t *)((uintptr_t)arsp->rel_roffset +
1064 1063 (uintptr_t)_elf_getxoff(arsp->rel_isdesc->is_indata));
1065 1064
1066 1065 DBG_CALL(Dbg_reloc_doact(ofl->ofl_lml, ELF_DBG_LD_ACT,
1067 1066 M_MACH, SHT_RELA, arsp, EC_NATPTR(addr), value,
1068 1067 ld_reloc_sym_name));
1069 1068 addr += (uintptr_t)osp->os_outdata->d_buf;
1070 1069
1071 1070 if ((((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
1072 1071 ofl->ofl_size) || (arsp->rel_roffset >
1073 1072 osp->os_shdr->sh_size)) {
1074 1073 int class;
1075 1074 Conv_inv_buf_t inv_buf;
1076 1075
1077 1076 if (((uintptr_t)addr - (uintptr_t)ofl->ofl_nehdr) >
1078 1077 ofl->ofl_size)
1079 1078 class = ERR_FATAL;
1080 1079 else
1081 1080 class = ERR_WARNING;
1082 1081
1083 1082 ld_eprintf(ofl, class, MSG_INTL(MSG_REL_INVALOFFSET),
1084 1083 conv_reloc_amd64_type(arsp->rel_rtype, 0, &inv_buf),
1085 1084 ifl_name, EC_WORD(arsp->rel_isdesc->is_scnndx),
1086 1085 arsp->rel_isdesc->is_name, ld_reloc_sym_name(arsp),
1087 1086 EC_ADDR((uintptr_t)addr -
1088 1087 (uintptr_t)ofl->ofl_nehdr));
1089 1088
1090 1089 if (class == ERR_FATAL) {
1091 1090 return_code = S_ERROR;
1092 1091 continue;
1093 1092 }
1094 1093 }
1095 1094
1096 1095 /*
1097 1096 * The relocation is additive. Ignore the previous symbol
1098 1097 * value if this local partial symbol is expanded.
1099 1098 */
1100 1099 if (moved)
1101 1100 value -= *addr;
1102 1101
1103 1102 /*
1104 1103 * If '-z noreloc' is specified - skip the do_reloc_ld stage.
1105 1104 */
1106 1105 if (OFL_DO_RELOC(ofl)) {
1107 1106 /*
1108 1107 * If this is a PROGBITS section and the running linker
1109 1108 * has a different byte order than the target host,
1110 1109 * tell do_reloc_ld() to swap bytes.
1111 1110 */
1112 1111 if (do_reloc_ld(arsp, addr, &value, ld_reloc_sym_name,
1113 1112 ifl_name, OFL_SWAP_RELOC_DATA(ofl, arsp),
1114 1113 ofl->ofl_lml) == 0) {
1115 1114 ofl->ofl_flags |= FLG_OF_FATAL;
1116 1115 return_code = S_ERROR;
1117 1116 }
1118 1117 }
1119 1118 }
1120 1119 return (return_code);
1121 1120 }
1122 1121
1123 1122 static uintptr_t
1124 1123 ld_add_outrel(Word flags, Rel_desc *rsp, Ofl_desc *ofl)
1125 1124 {
1126 1125 Rel_desc *orsp;
1127 1126 Sym_desc *sdp = rsp->rel_sym;
1128 1127
1129 1128 /*
1130 1129 * Static executables *do not* want any relocations against them.
1131 1130 * Since our engine still creates relocations against a WEAK UNDEFINED
1132 1131 * symbol in a static executable, it's best to disable them here
1133 1132 * instead of through out the relocation code.
1134 1133 */
1135 1134 if (OFL_IS_STATIC_EXEC(ofl))
1136 1135 return (1);
1137 1136
1138 1137 /*
1139 1138 * If we are adding a output relocation against a section
1140 1139 * symbol (non-RELATIVE) then mark that section. These sections
1141 1140 * will be added to the .dynsym symbol table.
1142 1141 */
1143 1142 if (sdp && (rsp->rel_rtype != M_R_RELATIVE) &&
1144 1143 ((flags & FLG_REL_SCNNDX) ||
1145 1144 (ELF_ST_TYPE(sdp->sd_sym->st_info) == STT_SECTION))) {
1146 1145
1147 1146 /*
1148 1147 * If this is a COMMON symbol - no output section
1149 1148 * exists yet - (it's created as part of sym_validate()).
1150 1149 * So - we mark here that when it's created it should
1151 1150 * be tagged with the FLG_OS_OUTREL flag.
1152 1151 */
1153 1152 if ((sdp->sd_flags & FLG_SY_SPECSEC) &&
1154 1153 (sdp->sd_sym->st_shndx == SHN_COMMON)) {
1155 1154 if (ELF_ST_TYPE(sdp->sd_sym->st_info) != STT_TLS)
1156 1155 ofl->ofl_flags1 |= FLG_OF1_BSSOREL;
1157 1156 else
1158 1157 ofl->ofl_flags1 |= FLG_OF1_TLSOREL;
1159 1158 } else {
1160 1159 Os_desc *osp;
1161 1160 Is_desc *isp = sdp->sd_isc;
1162 1161
1163 1162 if (isp && ((osp = isp->is_osdesc) != NULL) &&
1164 1163 ((osp->os_flags & FLG_OS_OUTREL) == 0)) {
1165 1164 ofl->ofl_dynshdrcnt++;
1166 1165 osp->os_flags |= FLG_OS_OUTREL;
1167 1166 }
1168 1167 }
1169 1168 }
1170 1169
1171 1170 /* Enter it into the output relocation cache */
1172 1171 if ((orsp = ld_reloc_enter(ofl, &ofl->ofl_outrels, rsp, flags)) == NULL)
1173 1172 return (S_ERROR);
1174 1173
1175 1174 if (flags & FLG_REL_GOT)
1176 1175 ofl->ofl_relocgotsz += (Xword)sizeof (Rela);
1177 1176 else if (flags & FLG_REL_PLT)
1178 1177 ofl->ofl_relocpltsz += (Xword)sizeof (Rela);
1179 1178 else if (flags & FLG_REL_BSS)
1180 1179 ofl->ofl_relocbsssz += (Xword)sizeof (Rela);
1181 1180 else if (flags & FLG_REL_NOINFO)
1182 1181 ofl->ofl_relocrelsz += (Xword)sizeof (Rela);
1183 1182 else
1184 1183 RELAUX_GET_OSDESC(orsp)->os_szoutrels += (Xword)sizeof (Rela);
1185 1184
1186 1185 if (orsp->rel_rtype == M_R_RELATIVE)
1187 1186 ofl->ofl_relocrelcnt++;
1188 1187
1189 1188 /*
1190 1189 * We don't perform sorting on PLT relocations because
1191 1190 * they have already been assigned a PLT index and if we
1192 1191 * were to sort them we would have to re-assign the plt indexes.
1193 1192 */
1194 1193 if (!(flags & FLG_REL_PLT))
1195 1194 ofl->ofl_reloccnt++;
1196 1195
1197 1196 /*
1198 1197 * Insure a GLOBAL_OFFSET_TABLE is generated if required.
1199 1198 */
1200 1199 if (IS_GOT_REQUIRED(orsp->rel_rtype))
1201 1200 ofl->ofl_flags |= FLG_OF_BLDGOT;
1202 1201
1203 1202 /*
1204 1203 * Identify and possibly warn of a displacement relocation.
1205 1204 */
1206 1205 if (orsp->rel_flags & FLG_REL_DISP) {
1207 1206 ofl->ofl_dtflags_1 |= DF_1_DISPRELPND;
1208 1207
1209 1208 if (ofl->ofl_flags & FLG_OF_VERBOSE)
1210 1209 ld_disp_errmsg(MSG_INTL(MSG_REL_DISPREL4), orsp, ofl);
1211 1210 }
1212 1211 DBG_CALL(Dbg_reloc_ors_entry(ofl->ofl_lml, ELF_DBG_LD, SHT_RELA,
1213 1212 M_MACH, orsp));
1214 1213 return (1);
1215 1214 }
1216 1215
1217 1216 /*
1218 1217 * process relocation for a LOCAL symbol
1219 1218 */
1220 1219 static uintptr_t
1221 1220 ld_reloc_local(Rel_desc * rsp, Ofl_desc * ofl)
1222 1221 {
1223 1222 ofl_flag_t flags = ofl->ofl_flags;
1224 1223 Sym_desc *sdp = rsp->rel_sym;
1225 1224 Word shndx = sdp->sd_sym->st_shndx;
1226 1225 Word ortype = rsp->rel_rtype;
1227 1226
1228 1227 /*
1229 1228 * if ((shared object) and (not pc relative relocation) and
1230 1229 * (not against ABS symbol))
1231 1230 * then
1232 1231 * build R_AMD64_RELATIVE
1233 1232 * fi
1234 1233 */
1235 1234 if ((flags & FLG_OF_SHAROBJ) && (rsp->rel_flags & FLG_REL_LOAD) &&
1236 1235 !(IS_PC_RELATIVE(rsp->rel_rtype)) && !(IS_SIZE(rsp->rel_rtype)) &&
1237 1236 !(IS_GOT_BASED(rsp->rel_rtype)) &&
1238 1237 !(rsp->rel_isdesc != NULL &&
1239 1238 (rsp->rel_isdesc->is_shdr->sh_type == SHT_SUNW_dof)) &&
1240 1239 (((sdp->sd_flags & FLG_SY_SPECSEC) == 0) ||
1241 1240 (shndx != SHN_ABS) || (sdp->sd_aux && sdp->sd_aux->sa_symspec))) {
1242 1241
1243 1242 /*
1244 1243 * R_AMD64_RELATIVE updates a 64bit address, if this
1245 1244 * relocation isn't a 64bit binding then we can not
1246 1245 * simplify it to a RELATIVE relocation.
1247 1246 */
1248 1247 if (reloc_table[ortype].re_fsize != sizeof (Addr)) {
1249 1248 return (ld_add_outrel(0, rsp, ofl));
1250 1249 }
1251 1250
1252 1251 rsp->rel_rtype = R_AMD64_RELATIVE;
1253 1252 if (ld_add_outrel(FLG_REL_ADVAL, rsp, ofl) == S_ERROR)
1254 1253 return (S_ERROR);
1255 1254 rsp->rel_rtype = ortype;
1256 1255 return (1);
1257 1256 }
1258 1257
1259 1258 /*
1260 1259 * If the relocation is against a 'non-allocatable' section
1261 1260 * and we can not resolve it now - then give a warning
1262 1261 * message.
1263 1262 *
1264 1263 * We can not resolve the symbol if either:
1265 1264 * a) it's undefined
1266 1265 * b) it's defined in a shared library and a
1267 1266 * COPY relocation hasn't moved it to the executable
1268 1267 *
1269 1268 * Note: because we process all of the relocations against the
1270 1269 * text segment before any others - we know whether
1271 1270 * or not a copy relocation will be generated before
1272 1271 * we get here (see reloc_init()->reloc_segments()).
1273 1272 */
1274 1273 if (!(rsp->rel_flags & FLG_REL_LOAD) &&
1275 1274 ((shndx == SHN_UNDEF) ||
1276 1275 ((sdp->sd_ref == REF_DYN_NEED) &&
1277 1276 ((sdp->sd_flags & FLG_SY_MVTOCOMM) == 0)))) {
1278 1277 Conv_inv_buf_t inv_buf;
1279 1278 Os_desc *osp = RELAUX_GET_OSDESC(rsp);
1280 1279
1281 1280 /*
1282 1281 * If the relocation is against a SHT_SUNW_ANNOTATE
1283 1282 * section - then silently ignore that the relocation
1284 1283 * can not be resolved.
1285 1284 */
1286 1285 if (osp && (osp->os_shdr->sh_type == SHT_SUNW_ANNOTATE))
1287 1286 return (0);
1288 1287 ld_eprintf(ofl, ERR_WARNING, MSG_INTL(MSG_REL_EXTERNSYM),
1289 1288 conv_reloc_amd64_type(rsp->rel_rtype, 0, &inv_buf),
1290 1289 rsp->rel_isdesc->is_file->ifl_name,
1291 1290 ld_reloc_sym_name(rsp), osp->os_name);
1292 1291 return (1);
1293 1292 }
1294 1293
1295 1294 /*
1296 1295 * Perform relocation.
1297 1296 */
1298 1297 return (ld_add_actrel(NULL, rsp, ofl));
1299 1298 }
1300 1299
1301 1300
1302 1301 static uintptr_t
1303 1302 ld_reloc_TLS(Boolean local, Rel_desc * rsp, Ofl_desc * ofl)
1304 1303 {
1305 1304 Word rtype = rsp->rel_rtype;
1306 1305 Sym_desc *sdp = rsp->rel_sym;
1307 1306 ofl_flag_t flags = ofl->ofl_flags;
1308 1307 Gotndx *gnp;
1309 1308
1310 1309 /*
1311 1310 * If we're building an executable - use either the IE or LE access
1312 1311 * model. If we're building a shared object process any IE model.
1313 1312 */
1314 1313 if ((flags & FLG_OF_EXEC) || (IS_TLS_IE(rtype))) {
1315 1314 /*
1316 1315 * Set the DF_STATIC_TLS flag.
1317 1316 */
1318 1317 ofl->ofl_dtflags |= DF_STATIC_TLS;
1319 1318
1320 1319 if (!local || ((flags & FLG_OF_EXEC) == 0)) {
1321 1320 /*
1322 1321 * Assign a GOT entry for static TLS references.
1323 1322 */
1324 1323 if ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
1325 1324 GOT_REF_TLSIE, ofl, rsp)) == NULL) {
1326 1325
1327 1326 if (ld_assign_got_TLS(local, rsp, ofl, sdp,
1328 1327 gnp, GOT_REF_TLSIE, FLG_REL_STLS,
1329 1328 rtype, R_AMD64_TPOFF64, 0) == S_ERROR)
1330 1329 return (S_ERROR);
1331 1330 }
1332 1331
1333 1332 /*
1334 1333 * IE access model.
1335 1334 */
1336 1335 if (IS_TLS_IE(rtype))
1337 1336 return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1338 1337
1339 1338 /*
1340 1339 * Fixups are required for other executable models.
1341 1340 */
1342 1341 return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1343 1342 rsp, ofl));
1344 1343 }
1345 1344
1346 1345 /*
1347 1346 * LE access model.
1348 1347 */
1349 1348 if (IS_TLS_LE(rtype))
1350 1349 return (ld_add_actrel(FLG_REL_STLS, rsp, ofl));
1351 1350
1352 1351 return (ld_add_actrel((FLG_REL_TLSFIX | FLG_REL_STLS),
1353 1352 rsp, ofl));
1354 1353 }
1355 1354
1356 1355 /*
1357 1356 * Building a shared object.
1358 1357 *
1359 1358 * Assign a GOT entry for a dynamic TLS reference.
1360 1359 */
1361 1360 if (IS_TLS_LD(rtype) && ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs,
1362 1361 GOT_REF_TLSLD, ofl, rsp)) == NULL)) {
1363 1362
1364 1363 if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSLD,
1365 1364 FLG_REL_MTLS, rtype, R_AMD64_DTPMOD64, NULL) == S_ERROR)
1366 1365 return (S_ERROR);
1367 1366
1368 1367 } else if (IS_TLS_GD(rtype) &&
1369 1368 ((gnp = ld_find_got_ndx(sdp->sd_GOTndxs, GOT_REF_TLSGD,
1370 1369 ofl, rsp)) == NULL)) {
1371 1370
1372 1371 if (ld_assign_got_TLS(local, rsp, ofl, sdp, gnp, GOT_REF_TLSGD,
1373 1372 FLG_REL_DTLS, rtype, R_AMD64_DTPMOD64,
1374 1373 R_AMD64_DTPOFF64) == S_ERROR)
1375 1374 return (S_ERROR);
1376 1375 }
1377 1376
1378 1377 if (IS_TLS_LD(rtype))
1379 1378 return (ld_add_actrel(FLG_REL_MTLS, rsp, ofl));
1380 1379
1381 1380 return (ld_add_actrel(FLG_REL_DTLS, rsp, ofl));
1382 1381 }
1383 1382
1384 1383 /* ARGSUSED5 */
1385 1384 static uintptr_t
1386 1385 ld_assign_got_ndx(Alist **alpp, Gotndx *pgnp, Gotref gref, Ofl_desc *ofl,
1387 1386 Rel_desc *rsp, Sym_desc *sdp)
1388 1387 {
1389 1388 Xword raddend;
1390 1389 Gotndx gn, *gnp;
1391 1390 Aliste idx;
1392 1391 uint_t gotents;
1393 1392
1394 1393 raddend = rsp->rel_raddend;
1395 1394 if (pgnp && (pgnp->gn_addend == raddend) && (pgnp->gn_gotref == gref))
1396 1395 return (1);
1397 1396
1398 1397 if ((gref == GOT_REF_TLSGD) || (gref == GOT_REF_TLSLD))
1399 1398 gotents = 2;
1400 1399 else
1401 1400 gotents = 1;
1402 1401
1403 1402 gn.gn_addend = raddend;
1404 1403 gn.gn_gotndx = ofl->ofl_gotcnt;
1405 1404 gn.gn_gotref = gref;
1406 1405
1407 1406 ofl->ofl_gotcnt += gotents;
1408 1407
1409 1408 if (gref == GOT_REF_TLSLD) {
1410 1409 if (ofl->ofl_tlsldgotndx == NULL) {
1411 1410 if ((gnp = libld_malloc(sizeof (Gotndx))) == NULL)
1412 1411 return (S_ERROR);
1413 1412 (void) memcpy(gnp, &gn, sizeof (Gotndx));
1414 1413 ofl->ofl_tlsldgotndx = gnp;
1415 1414 }
1416 1415 return (1);
1417 1416 }
1418 1417
1419 1418 idx = 0;
1420 1419 for (ALIST_TRAVERSE(*alpp, idx, gnp)) {
1421 1420 if (gnp->gn_addend > raddend)
1422 1421 break;
1423 1422 }
1424 1423
1425 1424 /*
1426 1425 * GOT indexes are maintained on an Alist, where there is typically
1427 1426 * only one index. The usage of this list is to scan the list to find
1428 1427 * an index, and then apply that index immediately to a relocation.
1429 1428 * Thus there are no external references to these GOT index structures
1430 1429 * that can be compromised by the Alist being reallocated.
1431 1430 */
1432 1431 if (alist_insert(alpp, &gn, sizeof (Gotndx),
1433 1432 AL_CNT_SDP_GOT, idx) == NULL)
1434 1433 return (S_ERROR);
1435 1434
1436 1435 return (1);
1437 1436 }
1438 1437
1439 1438 static void
1440 1439 ld_assign_plt_ndx(Sym_desc * sdp, Ofl_desc *ofl)
1441 1440 {
1442 1441 sdp->sd_aux->sa_PLTndx = 1 + ofl->ofl_pltcnt++;
1443 1442 sdp->sd_aux->sa_PLTGOTndx = ofl->ofl_gotcnt++;
1444 1443 ofl->ofl_flags |= FLG_OF_BLDGOT;
1445 1444 }
1446 1445
1447 1446 static uchar_t plt0_template[M_PLT_ENTSIZE] = {
1448 1447 /* 0x00 PUSHQ GOT+8(%rip) */ 0xff, 0x35, 0x00, 0x00, 0x00, 0x00,
1449 1448 /* 0x06 JMP *GOT+16(%rip) */ 0xff, 0x25, 0x00, 0x00, 0x00, 0x00,
1450 1449 /* 0x0c NOP */ 0x90,
1451 1450 /* 0x0d NOP */ 0x90,
1452 1451 /* 0x0e NOP */ 0x90,
1453 1452 /* 0x0f NOP */ 0x90
1454 1453 };
1455 1454
1456 1455 /*
1457 1456 * Initializes .got[0] with the _DYNAMIC symbol value.
1458 1457 */
1459 1458 static uintptr_t
1460 1459 ld_fillin_gotplt(Ofl_desc *ofl)
1461 1460 {
1462 1461 int bswap = (ofl->ofl_flags1 & FLG_OF1_ENCDIFF) != 0;
1463 1462
1464 1463 if (ofl->ofl_osgot) {
1465 1464 Sym_desc *sdp;
1466 1465
1467 1466 if ((sdp = ld_sym_find(MSG_ORIG(MSG_SYM_DYNAMIC_U),
1468 1467 SYM_NOHASH, NULL, ofl)) != NULL) {
1469 1468 uchar_t *genptr;
1470 1469
1471 1470 genptr = ((uchar_t *)ofl->ofl_osgot->os_outdata->d_buf +
1472 1471 (M_GOT_XDYNAMIC * M_GOT_ENTSIZE));
1473 1472 /* LINTED */
1474 1473 *(Xword *)genptr = sdp->sd_sym->st_value;
1475 1474 if (bswap)
1476 1475 /* LINTED */
1477 1476 *(Xword *)genptr =
1478 1477 /* LINTED */
1479 1478 ld_bswap_Xword(*(Xword *)genptr);
1480 1479 }
1481 1480 }
1482 1481
1483 1482 /*
1484 1483 * Fill in the reserved slot in the procedure linkage table the first
1485 1484 * entry is:
1486 1485 * 0x00 PUSHQ GOT+8(%rip) # GOT[1]
1487 1486 * 0x06 JMP *GOT+16(%rip) # GOT[2]
1488 1487 * 0x0c NOP
1489 1488 * 0x0d NOP
1490 1489 * 0x0e NOP
1491 1490 * 0x0f NOP
1492 1491 */
1493 1492 if ((ofl->ofl_flags & FLG_OF_DYNAMIC) && ofl->ofl_osplt) {
1494 1493 uchar_t *pltent;
1495 1494 Xword val1;
1496 1495
1497 1496 pltent = (uchar_t *)ofl->ofl_osplt->os_outdata->d_buf;
1498 1497 bcopy(plt0_template, pltent, sizeof (plt0_template));
1499 1498
1500 1499 /*
1501 1500 * If '-z noreloc' is specified - skip the do_reloc_ld
1502 1501 * stage.
1503 1502 */
1504 1503 if (!OFL_DO_RELOC(ofl))
1505 1504 return (1);
1506 1505
1507 1506 /*
1508 1507 * filin:
1509 1508 * PUSHQ GOT + 8(%rip)
1510 1509 *
1511 1510 * Note: 0x06 below represents the offset to the
1512 1511 * next instruction - which is what %rip will
1513 1512 * be pointing at.
1514 1513 */
1515 1514 val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1516 1515 (M_GOT_XLINKMAP * M_GOT_ENTSIZE) -
1517 1516 ofl->ofl_osplt->os_shdr->sh_addr - 0x06;
1518 1517
1519 1518 if (do_reloc_ld(&rdesc_r_amd64_gotpcrel, &pltent[0x02],
1520 1519 &val1, syn_rdesc_sym_name, MSG_ORIG(MSG_SPECFIL_PLTENT),
1521 1520 bswap, ofl->ofl_lml) == 0) {
1522 1521 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_PLT_PLT0FAIL));
1523 1522 return (S_ERROR);
1524 1523 }
1525 1524
1526 1525 /*
1527 1526 * filin:
1528 1527 * JMP *GOT+16(%rip)
1529 1528 */
1530 1529 val1 = (ofl->ofl_osgot->os_shdr->sh_addr) +
1531 1530 (M_GOT_XRTLD * M_GOT_ENTSIZE) -
1532 1531 ofl->ofl_osplt->os_shdr->sh_addr - 0x0c;
1533 1532
1534 1533 if (do_reloc_ld(&rdesc_r_amd64_gotpcrel, &pltent[0x08],
1535 1534 &val1, syn_rdesc_sym_name, MSG_ORIG(MSG_SPECFIL_PLTENT),
1536 1535 bswap, ofl->ofl_lml) == 0) {
1537 1536 ld_eprintf(ofl, ERR_FATAL, MSG_INTL(MSG_PLT_PLT0FAIL));
1538 1537 return (S_ERROR);
1539 1538 }
1540 1539 }
1541 1540
1542 1541 return (1);
1543 1542 }
1544 1543
1545 1544
1546 1545
1547 1546 /*
1548 1547 * Template for generating "void (*)(void)" function
1549 1548 */
1550 1549 static const uchar_t nullfunc_tmpl[] = { /* amd64 */
1551 1550 /* 0x00 */ 0x55, /* pushq %rbp */
1552 1551 /* 0x01 */ 0x48, 0x8b, 0xec, /* movq %rsp,%rbp */
1553 1552 /* 0x04 */ 0x48, 0x8b, 0xe5, /* movq %rbp,%rsp */
1554 1553 /* 0x07 */ 0x5d, /* popq %rbp */
1555 1554 /* 0x08 */ 0xc3 /* ret */
1556 1555 };
1557 1556
1558 1557
1559 1558 /*
1560 1559 * Function used to provide fill padding in SHF_EXECINSTR sections
1561 1560 *
1562 1561 * entry:
1563 1562 *
1564 1563 * base - base address of section being filled
1565 1564 * offset - starting offset for fill within memory referenced by base
1566 1565 * cnt - # bytes to be filled
1567 1566 *
1568 1567 * exit:
1569 1568 * The fill has been completed.
1570 1569 */
1571 1570 static void
1572 1571 execfill(void *base, off_t off, size_t cnt)
1573 1572 {
1574 1573 /*
1575 1574 * 0x90 is an X86 NOP instruction in both 32 and 64-bit worlds.
1576 1575 * There are no alignment constraints.
1577 1576 */
1578 1577 (void) memset(off + (char *)base, 0x90, cnt);
1579 1578 }
1580 1579
1581 1580
1582 1581 /*
1583 1582 * Return the ld_targ definition for this target.
1584 1583 */
1585 1584 const Target *
1586 1585 ld_targ_init_x86(void)
1587 1586 {
1588 1587 static const Target _ld_targ = {
1589 1588 { /* Target_mach */
1590 1589 M_MACH, /* m_mach */
1591 1590 M_MACHPLUS, /* m_machplus */
1592 1591 M_FLAGSPLUS, /* m_flagsplus */
1593 1592 M_CLASS, /* m_class */
1594 1593 M_DATA, /* m_data */
1595 1594
1596 1595 M_SEGM_ALIGN, /* m_segm_align */
1597 1596 M_SEGM_ORIGIN, /* m_segm_origin */
1598 1597 M_SEGM_AORIGIN, /* m_segm_aorigin */
1599 1598 M_DATASEG_PERM, /* m_dataseg_perm */
1600 1599 M_STACK_PERM, /* m_stack_perm */
1601 1600 M_WORD_ALIGN, /* m_word_align */
1602 1601 MSG_ORIG(MSG_PTH_RTLD_AMD64), /* m_def_interp */
1603 1602
1604 1603 /* Relocation type codes */
1605 1604 M_R_ARRAYADDR, /* m_r_arrayaddr */
1606 1605 M_R_COPY, /* m_r_copy */
1607 1606 M_R_GLOB_DAT, /* m_r_glob_dat */
1608 1607 M_R_JMP_SLOT, /* m_r_jmp_slot */
1609 1608 M_R_NUM, /* m_r_num */
1610 1609 M_R_NONE, /* m_r_none */
1611 1610 M_R_RELATIVE, /* m_r_relative */
1612 1611 M_R_REGISTER, /* m_r_register */
1613 1612
1614 1613 /* Relocation related constants */
1615 1614 M_REL_DT_COUNT, /* m_rel_dt_count */
1616 1615 M_REL_DT_ENT, /* m_rel_dt_ent */
1617 1616 M_REL_DT_SIZE, /* m_rel_dt_size */
1618 1617 M_REL_DT_TYPE, /* m_rel_dt_type */
1619 1618 M_REL_SHT_TYPE, /* m_rel_sht_type */
1620 1619
1621 1620 /* GOT related constants */
1622 1621 M_GOT_ENTSIZE, /* m_got_entsize */
1623 1622 M_GOT_XNumber, /* m_got_xnumber */
1624 1623
1625 1624 /* PLT related constants */
1626 1625 M_PLT_ALIGN, /* m_plt_align */
1627 1626 M_PLT_ENTSIZE, /* m_plt_entsize */
1628 1627 M_PLT_RESERVSZ, /* m_plt_reservsz */
1629 1628 M_PLT_SHF_FLAGS, /* m_plt_shf_flags */
1630 1629
1631 1630 /* Section type of .eh_frame/.eh_frame_hdr sections */
1632 1631 SHT_AMD64_UNWIND, /* m_sht_unwind */
1633 1632
1634 1633 M_DT_REGISTER, /* m_dt_register */
1635 1634 },
1636 1635 { /* Target_machid */
1637 1636 M_ID_ARRAY, /* id_array */
1638 1637 M_ID_BSS, /* id_bss */
1639 1638 M_ID_CAP, /* id_cap */
1640 1639 M_ID_CAPINFO, /* id_capinfo */
1641 1640 M_ID_CAPCHAIN, /* id_capchain */
1642 1641 M_ID_DATA, /* id_data */
1643 1642 M_ID_DYNAMIC, /* id_dynamic */
1644 1643 M_ID_DYNSORT, /* id_dynsort */
1645 1644 M_ID_DYNSTR, /* id_dynstr */
1646 1645 M_ID_DYNSYM, /* id_dynsym */
1647 1646 M_ID_DYNSYM_NDX, /* id_dynsym_ndx */
1648 1647 M_ID_GOT, /* id_got */
1649 1648 M_ID_UNKNOWN, /* id_gotdata (unused) */
1650 1649 M_ID_HASH, /* id_hash */
1651 1650 M_ID_INTERP, /* id_interp */
1652 1651 M_ID_LBSS, /* id_lbss */
1653 1652 M_ID_LDYNSYM, /* id_ldynsym */
1654 1653 M_ID_NOTE, /* id_note */
1655 1654 M_ID_NULL, /* id_null */
1656 1655 M_ID_PLT, /* id_plt */
1657 1656 M_ID_REL, /* id_rel */
1658 1657 M_ID_STRTAB, /* id_strtab */
1659 1658 M_ID_SYMINFO, /* id_syminfo */
1660 1659 M_ID_SYMTAB, /* id_symtab */
1661 1660 M_ID_SYMTAB_NDX, /* id_symtab_ndx */
1662 1661 M_ID_TEXT, /* id_text */
1663 1662 M_ID_TLS, /* id_tls */
1664 1663 M_ID_TLSBSS, /* id_tlsbss */
1665 1664 M_ID_UNKNOWN, /* id_unknown */
1666 1665 M_ID_UNWIND, /* id_unwind */
1667 1666 M_ID_UNWINDHDR, /* id_unwindhdr */
1668 1667 M_ID_USER, /* id_user */
1669 1668 M_ID_VERSION, /* id_version */
1670 1669 },
1671 1670 { /* Target_nullfunc */
1672 1671 nullfunc_tmpl, /* nf_template */
1673 1672 sizeof (nullfunc_tmpl), /* nf_size */
1674 1673 },
1675 1674 { /* Target_fillfunc */
1676 1675 execfill /* ff_execfill */
1677 1676 },
1678 1677 { /* Target_machrel */
1679 1678 reloc_table,
1680 1679
1681 1680 ld_init_rel, /* mr_init_rel */
1682 1681 ld_mach_eflags, /* mr_mach_eflags */
1683 1682 ld_mach_make_dynamic, /* mr_mach_make_dynamic */
1684 1683 ld_mach_update_odynamic, /* mr_mach_update_odynamic */
1685 1684 ld_calc_plt_addr, /* mr_calc_plt_addr */
1686 1685 ld_perform_outreloc, /* mr_perform_outreloc */
1687 1686 ld_do_activerelocs, /* mr_do_activerelocs */
1688 1687 ld_add_outrel, /* mr_add_outrel */
1689 1688 NULL, /* mr_reloc_register */
1690 1689 ld_reloc_local, /* mr_reloc_local */
1691 1690 NULL, /* mr_reloc_GOTOP */
1692 1691 ld_reloc_TLS, /* mr_reloc_TLS */
1693 1692 NULL, /* mr_assign_got */
1694 1693 ld_find_got_ndx, /* mr_find_got_ndx */
1695 1694 ld_calc_got_offset, /* mr_calc_got_offset */
1696 1695 ld_assign_got_ndx, /* mr_assign_got_ndx */
1697 1696 ld_assign_plt_ndx, /* mr_assign_plt_ndx */
1698 1697 NULL, /* mr_allocate_got */
1699 1698 ld_fillin_gotplt, /* mr_fillin_gotplt */
1700 1699 },
1701 1700 { /* Target_machsym */
1702 1701 NULL, /* ms_reg_check */
1703 1702 NULL, /* ms_mach_sym_typecheck */
1704 1703 NULL, /* ms_is_regsym */
1705 1704 NULL, /* ms_reg_find */
1706 1705 NULL /* ms_reg_enter */
1707 1706 }
1708 1707 };
1709 1708
1710 1709 return (&_ld_targ);
1711 1710 }
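
The one-line change above removes the "arsp->rel_raddend = 0;" assignment from the R_AMD64_DTPOFF32 (LD->LE) case of tls_fixups(), so the relocation's addend survives the transition. As a hypothetical illustration (not part of machrel.amd.c; the names below are invented for the example), the following C program is the kind of source that can produce a local-dynamic DTPOFF reference with a non-zero addend, i.e. a store into the middle of a thread-local object. If the linker discarded that addend while rewriting x@dtpoff to x@tpoff, the store would land at the wrong offset within the thread's TLS block, which is consistent with the memory corruption named in the synopsis.

/*
 * Illustrative example only -- not part of machrel.amd.c.
 * With something like gcc -O2 -fPIC and the local-dynamic TLS model,
 * a reference such as buf[12] is typically emitted as
 * leaq buf@dtpoff+12(%rax), i.e. an R_AMD64_DTPOFF32 relocation
 * against "buf" with addend 12. When ld(1) links the object into an
 * executable it performs the LD->LE transition handled by
 * tls_fixups() above.
 */
#include <stdio.h>

static __thread char buf[64];	/* candidate for the local-dynamic model */

static void
touch(void)
{
	buf[12] = 'x';		/* symbol "buf" plus a non-zero addend */
}

int
main(void)
{
	touch();
	(void) printf("%c\n", buf[12]);
	return (0);
}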