smatch clean rtld
--- old/usr/src/cmd/sgs/rtld/amd64/amd64_elf.c
+++ new/usr/src/cmd/sgs/rtld/amd64/amd64_elf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 * amd64 machine dependent and ELF file class dependent functions.
29 29 * Contains routines for performing function binding and symbol relocations.
30 30 */
31 31
32 32 #include <stdio.h>
33 33 #include <sys/elf.h>
34 34 #include <sys/elf_amd64.h>
35 35 #include <sys/mman.h>
36 36 #include <dlfcn.h>
37 37 #include <synch.h>
38 38 #include <string.h>
39 39 #include <debug.h>
40 40 #include <reloc.h>
41 41 #include <conv.h>
42 42 #include "_rtld.h"
43 43 #include "_audit.h"
44 44 #include "_elf.h"
45 45 #include "_inline_gen.h"
46 46 #include "_inline_reloc.h"
47 47 #include "msg.h"
48 48
49 49 extern void elf_rtbndr(Rt_map *, ulong_t, caddr_t);
50 50
51 51 int
52 52 elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
53 53 {
54 54 /*
55 55 * Check machine type and flags.
56 56 */
57 57 if (ehdr->e_flags != 0) {
58 58 rej->rej_type = SGS_REJ_BADFLAG;
59 59 rej->rej_info = (uint_t)ehdr->e_flags;
60 60 return (0);
61 61 }
62 62 return (1);
63 63 }
64 64
65 65 void
66 66 ldso_plt_init(Rt_map *lmp)
67 67 {
68 68 /*
69 69 * There is no need to analyze ld.so because we don't map in any of
70 70 * its dependencies. However we may map these dependencies in later
71 71 * (as if ld.so had dlopened them), so initialize the plt and the
72 72 * permission information.
73 73 */
74 74 if (PLTGOT(lmp))
75 75 elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
76 76 }
77 77
78 78 static const uchar_t dyn_plt_template[] = {
79 79 /* 0x00 */ 0x55, /* pushq %rbp */
80 80 /* 0x01 */ 0x48, 0x89, 0xe5, /* movq %rsp, %rbp */
81 81 /* 0x04 */ 0x48, 0x83, 0xec, 0x10, /* subq $0x10, %rsp */
82 82 /* 0x08 */ 0x4c, 0x8d, 0x1d, 0x00, /* leaq trace_fields(%rip), %r11 */
83 83 0x00, 0x00, 0x00,
84 84 /* 0x0f */ 0x4c, 0x89, 0x5d, 0xf8, /* movq %r11, -0x8(%rbp) */
85 85 /* 0x13 */ 0x49, 0xbb, 0x00, 0x00, /* movq $elf_plt_trace, %r11 */
86 86 0x00, 0x00, 0x00,
87 87 0x00, 0x00, 0x00,
88 88 /* 0x1d */ 0x41, 0xff, 0xe3 /* jmp *%r11 */
89 89 /* 0x20 */
90 90 };
91 91
92 92 /*
93 93 * And the virtual outstanding relocations against the
94 94 * above block are:
95 95 *
96 96 * reloc offset Addend symbol
97 97 * R_AMD64_PC32 0x0b -4 trace_fields
98 98 * R_AMD64_64 0x15 0 elf_plt_trace
99 99 */
100 100
101 101 #define TRCREL1OFF 0x0b
102 102 #define TRCREL2OFF 0x15
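
The -4 addend in the R_AMD64_PC32 entry above reflects how rip-relative
addressing works: the CPU forms the address from the end of the 4-byte
displacement field, so the value stored at TRCREL1OFF must be the target
minus the field's address minus 4. A minimal sketch of that arithmetic
(hypothetical helper name; the real computation is inline in
elf_plt_trace_write() below):

	#include <stdint.h>

	/*
	 * Sketch: the displacement stored at an R_AMD64_PC32 patch site.
	 * "field" is the address of the 4-byte displacement within the
	 * leaq (dyn_plt + TRCREL1OFF); "target" is what the instruction
	 * should resolve to (the dyn_data area).
	 */
	static int32_t
	pc32_displacement(uintptr_t field, uintptr_t target)
	{
		/* rip points just past the 4-byte field, hence the -4. */
		return ((int32_t)(target - field - 4));
	}
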
103 103
104 104 int dyn_plt_ent_size = sizeof (dyn_plt_template);
105 105
106 106 /*
107 107 * the dynamic plt entry is:
108 108 *
109 109 * pushq %rbp
110 110 * movq %rsp, %rbp
111 111 * subq $0x10, %rsp
112 112 * leaq trace_fields(%rip), %r11
113 113 * movq %r11, -0x8(%rbp)
114 114 * movq $elf_plt_trace, %r11
115 115 * jmp *%r11
116 116 * dyn_data:
117 117 * .align 8
118 118 * uintptr_t reflmp
119 119 * uintptr_t deflmp
120 120 * uint_t symndx
121 121 * uint_t sb_flags
122 122 * Sym symdef
123 123 */
124 124 static caddr_t
125 125 elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
126 126 uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
127 127 {
128 128 extern int elf_plt_trace();
129 129 ulong_t got_entry;
130 130 uchar_t *dyn_plt;
131 131 uintptr_t *dyndata;
132 132
133 133 /*
134 134 * We only need to add the glue code if there is an auditing
135 135 * library that is interested in this binding.
136 136 */
137 137 dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
138 138 (pltndx * dyn_plt_ent_size));
139 139
140 140 /*
141 141 * Have we initialized this dynamic plt entry yet? If we haven't, do it
142 142 * now. Otherwise this function has been called before, but from a
143 143 * different plt (i.e. from another shared object). In that case
144 144 * we just set the plt to point to the new dyn_plt.
145 145 */
146 146 if (*dyn_plt == 0) {
147 147 Sym *symp;
148 148 Xword symvalue;
149 149 Lm_list *lml = LIST(rlmp);
150 150
151 151 (void) memcpy((void *)dyn_plt, dyn_plt_template,
152 152 sizeof (dyn_plt_template));
153 153 dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
154 154 ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));
155 155
156 156 /*
157 157 * relocate:
158 158 * leaq trace_fields(%rip), %r11
159 159 * R_AMD64_PC32 0x0b -4 trace_fields
160 160 */
161 161 symvalue = (Xword)((uintptr_t)dyndata -
162 162 (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
163 163 if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
164 164 &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
165 165 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
166 166 *fail = 1;
167 167 return (0);
168 168 }
169 169
170 170 /*
171 171 * relocating:
172 172 * movq $elf_plt_trace, %r11
173 173 * R_AMD64_64 0x15 0 elf_plt_trace
174 174 */
175 175 symvalue = (Xword)elf_plt_trace;
176 176 if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
177 177 &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
178 178 MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
179 179 *fail = 1;
180 180 return (0);
181 181 }
182 182
183 183 *dyndata++ = (uintptr_t)rlmp;
184 184 *dyndata++ = (uintptr_t)dlmp;
185 185 *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
186 186 dyndata++;
187 187 symp = (Sym *)dyndata;
188 188 *symp = *sym;
189 189 symp->st_value = (Addr)to;
190 190 }
191 191
192 192 got_entry = (ulong_t)roffset;
193 193 *(ulong_t *)got_entry = (ulong_t)dyn_plt;
194 194 return ((caddr_t)dyn_plt);
195 195 }
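
For reference, the dyn_data block that elf_plt_trace_write() fills in
word-by-word can be pictured as the following struct. This is a
hypothetical overlay for illustration only; the code stores through a
raw uintptr_t pointer and packs sb_flags and symndx into a single
64-bit word (symndx in the low half on little-endian amd64):

	/* Hypothetical view of dyn_data; not an actual rtld type. */
	typedef struct {
		uintptr_t	dd_reflmp;	/* referencing link map (rlmp) */
		uintptr_t	dd_deflmp;	/* defining link map (dlmp) */
		uint_t		dd_symndx;	/* bound symbol index */
		uint_t		dd_sb_flags;	/* audit la_symbind() flags */
		Sym		dd_symdef;	/* symbol copy, st_value = to */
	} dyn_data_t;
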
196 196
197 197 /*
198 198 * Function binding routine - invoked on the first call to a function through
199 199 * the procedure linkage table;
200 200 * passes first through an assembly language interface.
201 201 *
202 202 * Takes the offset into the relocation table of the associated
203 203 * relocation entry and the address of the link map (rt_private_map struct)
204 204 * for the entry.
205 205 *
206 206 * Returns the address of the function referenced after re-writing the PLT
207 207 * entry to invoke the function directly.
208 208 *
209 209 * On error, causes process to terminate with a signal.
210 210 */
211 211 ulong_t
212 212 elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
213 213 {
214 214 Rt_map *nlmp, *llmp;
215 215 ulong_t addr, reloff, symval, rsymndx;
216 216 char *name;
217 217 Rela *rptr;
218 218 Sym *rsym, *nsym;
219 219 uint_t binfo, sb_flags = 0, dbg_class;
220 220 Slookup sl;
221 221 Sresult sr;
222 222 int entry, lmflags;
223 223 Lm_list *lml;
224 224
225 225 /*
226 226 * For compatibility with libthread (TI_VERSION 1) we track the entry
227 227 * value. A zero value indicates we have recursed into ld.so.1 to
228 228 * further process a locking request. Under this recursion we disable
229 229 * tsort and cleanup activities.
230 230 */
231 231 entry = enter(0);
232 232
233 233 lml = LIST(lmp);
234 234 if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
235 235 dbg_class = dbg_desc->d_class;
236 236 dbg_desc->d_class = 0;
237 237 }
238 238
239 239 /*
240 - * Perform some basic sanity checks. If we didn't get a load map or
241 - * the relocation offset is invalid then its possible someone has walked
242 - * over the .got entries or jumped to plt0 out of the blue.
240 + * Perform some basic sanity checks. If the relocation offset is
241 + * invalid then it's possible someone has walked over the .got entries.
243 242 */
244 - if ((!lmp) && (pltndx <=
245 - (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
243 + if (pltndx > (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp)) {
246 244 Conv_inv_buf_t inv_buf;
247 245
248 246 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
249 247 conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
250 248 EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
251 249 rtldexit(lml, 1);
252 250 }
253 251 reloff = pltndx * (ulong_t)RELENT(lmp);
254 252
255 253 /*
256 254 * Use relocation entry to get symbol table entry and symbol name.
257 255 */
258 256 addr = (ulong_t)JMPREL(lmp);
259 257 rptr = (Rela *)(addr + reloff);
260 258 rsymndx = ELF_R_SYM(rptr->r_info);
261 259 rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
262 260 name = (char *)(STRTAB(lmp) + rsym->st_name);
263 261
264 262 /*
265 263 * Determine the last link-map of this list, this'll be the starting
266 264 * point for any tsort() processing.
267 265 */
268 266 llmp = lml->lm_tail;
269 267
270 268 /*
271 269 * Find definition for symbol. Initialize the symbol lookup, and
272 270 * symbol result, data structures.
273 271 */
274 272 SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
275 273 rsymndx, rsym, 0, LKUP_DEFT);
276 274 SRESULT_INIT(sr, name);
277 275
278 276 if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
279 277 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
280 278 demangle(name));
281 279 rtldexit(lml, 1);
282 280 }
283 281
284 282 name = (char *)sr.sr_name;
285 283 nlmp = sr.sr_dmap;
286 284 nsym = sr.sr_sym;
287 285
288 286 symval = nsym->st_value;
289 287
290 288 if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
291 289 (nsym->st_shndx != SHN_ABS))
292 290 symval += ADDR(nlmp);
293 291 if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
294 292 /*
295 293 * Record that this new link map is now bound to the caller.
296 294 */
297 295 if (bind_one(lmp, nlmp, BND_REFER) == 0)
298 296 rtldexit(lml, 1);
299 297 }
300 298
301 299 if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
302 300 LML_TFLG_AUD_SYMBIND) {
303 301 uint_t symndx = (((uintptr_t)nsym -
304 302 (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
305 303 symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
306 304 &sb_flags);
307 305 }
308 306
309 307 if (!(rtld_flags & RT_FL_NOBIND)) {
310 308 addr = rptr->r_offset;
311 309 if (!(FLAGS(lmp) & FLG_RT_FIXED))
312 310 addr += ADDR(lmp);
313 311 if (((lml->lm_tflags | AFLAGS(lmp)) &
314 312 (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
315 313 AUDINFO(lmp)->ai_dynplts) {
316 314 int fail = 0;
317 315 uint_t pltndx = reloff / sizeof (Rela);
318 316 uint_t symndx = (((uintptr_t)nsym -
319 317 (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
320 318
321 319 symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
322 320 nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
323 321 &fail);
324 322 if (fail)
325 323 rtldexit(lml, 1);
326 324 } else {
327 325 /*
328 326 * Write standard PLT entry to jump directly
329 327 * to newly bound function.
330 328 */
331 329 *(ulong_t *)addr = symval;
332 330 }
333 331 }
334 332
335 333 /*
336 334 * Print binding information and rebuild PLT entry.
337 335 */
338 336 DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
339 337 (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
340 338 nsym->st_value, name, binfo));
341 339
342 340 /*
343 341 * Complete any processing for newly loaded objects. Note we don't
344 342 * know exactly where any new objects are loaded (we know the object
345 343 * that supplied the symbol, but others may have been loaded lazily as
346 344 * we searched for the symbol), so sorting starts from the last
347 345 * link-map known on entry to this routine.
348 346 */
349 347 if (entry)
350 348 load_completion(llmp);
351 349
352 350 /*
353 351 * Some operations like dldump() or dlopen()'ing a relocatable object
354 352 * result in objects being loaded on rtld's link-map, make sure these
355 353 * objects are initialized also.
356 354 */
357 355 if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
358 356 load_completion(nlmp);
359 357
360 358 /*
361 359 * Make sure the object to which we've bound has had its .init fired.
362 360 * Cleanup before return to user code.
363 361 */
364 362 if (entry) {
365 363 is_dep_init(nlmp, lmp);
366 364 leave(lml, 0);
367 365 }
368 366
369 367 if (lmflags & LML_FLG_RTLDLM)
370 368 dbg_desc->d_class = dbg_class;
371 369
372 370 return (symval);
373 371 }
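
The chain from the incoming pltndx to a symbol name is plain pointer
arithmetic over the dynamic-section arrays. A condensed sketch of the
steps elf_bndr() performs above (assumes the same accessor macros;
all range checking omitted):

	/*
	 * Sketch: map a plt relocation index to its symbol name.
	 * Condensed from elf_bndr(); hypothetical helper, no checks.
	 */
	static const char *
	plt_sym_name(Rt_map *lmp, ulong_t pltndx)
	{
		ulong_t	reloff = pltndx * (ulong_t)RELENT(lmp);
		Rela	*rptr = (Rela *)((ulong_t)JMPREL(lmp) + reloff);
		ulong_t	rsymndx = ELF_R_SYM(rptr->r_info);
		Sym	*rsym = (Sym *)((ulong_t)SYMTAB(lmp) +
		    (rsymndx * SYMENT(lmp)));

		return ((const char *)(STRTAB(lmp) + rsym->st_name));
	}
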
374 372
375 373 /*
376 374 * Read and process the relocations for one link object, we assume all
377 375 * relocation sections for loadable segments are stored contiguously in
378 376 * the file.
379 377 */
380 378 int
381 379 elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
382 380 {
383 381 ulong_t relbgn, relend, relsiz, basebgn;
384 382 ulong_t pltbgn, pltend, _pltbgn, _pltend;
385 383 ulong_t roffset, rsymndx, psymndx = 0;
386 384 ulong_t dsymndx;
387 385 uchar_t rtype;
388 386 long reladd, value, pvalue;
389 387 Sym *symref, *psymref, *symdef, *psymdef;
390 388 Syminfo *sip;
391 389 char *name, *pname;
392 390 Rt_map *_lmp, *plmp;
393 391 int ret = 1, noplt = 0;
394 392 int relacount = RELACOUNT(lmp), plthint = 0;
395 393 Rela *rel;
396 394 uint_t binfo, pbinfo;
397 395 APlist *bound = NULL;
398 396
399 397 /*
400 398 * Although only necessary for lazy binding, initialize the first
401 399 * global offset entry to go to elf_rtbndr(). dbx(1) seems
402 400 * to find this useful.
403 401 */
404 402 if ((plt == 0) && PLTGOT(lmp)) {
405 403 mmapobj_result_t *mpp;
406 404
407 405 /*
408 406 * Make sure the segment is writable.
409 407 */
410 408 if ((((mpp =
411 409 find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
412 410 ((mpp->mr_prot & PROT_WRITE) == 0)) &&
413 411 ((set_prot(lmp, mpp, 1) == 0) ||
414 412 (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
415 413 return (0);
416 414
417 415 elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
418 416 }
419 417
420 418 /*
421 419 * Initialize the plt start and end addresses.
422 420 */
423 421 if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
424 422 pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
425 423
426 424 relsiz = (ulong_t)(RELENT(lmp));
427 425 basebgn = ADDR(lmp);
428 426
429 427 if (PLTRELSZ(lmp))
430 428 plthint = PLTRELSZ(lmp) / relsiz;
431 429
432 430 /*
433 431 * If we've been called upon to promote an RTLD_LAZY object to an
434 432 * RTLD_NOW then we're only interested in scanning the .plt table.
435 433 * An uninitialized .plt is the case where the associated got entry
436 434 * points back to the plt itself. Determine the range of the real .plt
437 435 * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
438 436 */
439 437 if (plt) {
440 438 Slookup sl;
441 439 Sresult sr;
442 440
443 441 relbgn = pltbgn;
444 442 relend = pltend;
445 443 if (!relbgn || (relbgn == relend))
446 444 return (1);
447 445
448 446 /*
449 447 * Initialize the symbol lookup, and symbol result, data
450 448 * structures.
451 449 */
452 450 SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
453 451 elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
454 452 SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));
455 453
456 454 if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
457 455 return (1);
458 456
459 457 symdef = sr.sr_sym;
460 458 _pltbgn = symdef->st_value;
461 459 if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
462 460 (symdef->st_shndx != SHN_ABS))
463 461 _pltbgn += basebgn;
464 462 _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
465 463 M_PLT_ENTSIZE) + M_PLT_RESERVSZ;
466 464
467 465 } else {
468 466 /*
469 467 * The relocation sections appear to the run-time linker as a
470 468 * single table. Determine the address of the beginning and end
471 469 * of this table. There are two different interpretations of
472 470 * the ABI at this point:
473 471 *
474 472 * o The REL table and its associated RELSZ indicate the
475 473 * concatenation of *all* relocation sections (this is the
476 474 * model our link-editor constructs).
477 475 *
478 476 * o The REL table and its associated RELSZ indicate the
479 477 * concatenation of all *but* the .plt relocations. These
480 478 * relocations are specified individually by the JMPREL and
481 479 * PLTRELSZ entries.
482 480 *
483 481 * Determine from our knowledge of the relocation range and
484 482 * .plt range, the range of the total relocation table. Note
485 483 * that one other ABI assumption seems to be that the .plt
486 484 * relocations always follow any other relocations, the
487 485 * following range checking drops that assumption.
488 486 */
489 487 relbgn = (ulong_t)(REL(lmp));
490 488 relend = relbgn + (ulong_t)(RELSZ(lmp));
491 489 if (pltbgn) {
492 490 if (!relbgn || (relbgn > pltbgn))
493 491 relbgn = pltbgn;
494 492 if (!relbgn || (relend < pltend))
495 493 relend = pltend;
496 494 }
497 495 }
498 496 if (!relbgn || (relbgn == relend)) {
499 497 DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
500 498 return (1);
501 499 }
502 500 DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
503 501
504 502 /*
505 503 * If we're processing a dynamic executable in lazy mode there is no
506 504 * need to scan the .rel.plt table, however if we're processing a shared
507 505 * object in lazy mode the .got addresses associated to each .plt must
508 506 * be relocated to reflect the location of the shared object.
509 507 */
510 508 if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
511 509 (FLAGS(lmp) & FLG_RT_FIXED))
512 510 noplt = 1;
513 511
514 512 sip = SYMINFO(lmp);
515 513 /*
516 514 * Loop through relocations.
517 515 */
518 516 while (relbgn < relend) {
519 517 mmapobj_result_t *mpp;
520 518 uint_t sb_flags = 0;
521 519
522 520 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
523 521
524 522 /*
525 523 * If this is a RELATIVE relocation in a shared object (the
526 524 * common case), and if we are not debugging, then jump into a
527 525 * tighter relocation loop (elf_reloc_relative).
528 526 */
529 527 if ((rtype == R_AMD64_RELATIVE) &&
530 528 ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
531 529 if (relacount) {
532 530 relbgn = elf_reloc_relative_count(relbgn,
533 531 relacount, relsiz, basebgn, lmp,
534 532 textrel, 0);
535 533 relacount = 0;
536 534 } else {
537 535 relbgn = elf_reloc_relative(relbgn, relend,
538 536 relsiz, basebgn, lmp, textrel, 0);
539 537 }
540 538 if (relbgn >= relend)
541 539 break;
542 540 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
543 541 }
544 542
545 543 roffset = ((Rela *)relbgn)->r_offset;
546 544
547 545 /*
548 546 * If this is a shared object, add the base address to offset.
549 547 */
550 548 if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
551 549 /*
552 550 * If we're processing lazy bindings, we have to step
553 551 * through the plt entries and add the base address
554 552 * to the corresponding got entry.
555 553 */
556 554 if (plthint && (plt == 0) &&
557 555 (rtype == R_AMD64_JUMP_SLOT) &&
558 556 ((MODE(lmp) & RTLD_NOW) == 0)) {
559 557 relbgn = elf_reloc_relative_count(relbgn,
560 558 plthint, relsiz, basebgn, lmp, textrel, 1);
561 559 plthint = 0;
562 560 continue;
563 561 }
564 562 roffset += basebgn;
565 563 }
566 564
567 565 reladd = (long)(((Rela *)relbgn)->r_addend);
568 566 rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
569 567 rel = (Rela *)relbgn;
570 568 relbgn += relsiz;
571 569
572 570 /*
573 571 * Optimizations.
574 572 */
575 573 if (rtype == R_AMD64_NONE)
576 574 continue;
577 575 if (noplt && ((ulong_t)rel >= pltbgn) &&
578 576 ((ulong_t)rel < pltend)) {
579 577 relbgn = pltend;
580 578 continue;
581 579 }
582 580
583 581 /*
584 582 * If we're promoting plts, determine if this one has already
585 583 * been written.
586 584 */
587 585 if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
588 586 (*(ulong_t *)roffset > _pltend)))
589 587 continue;
590 588
591 589 /*
592 590 * If this relocation is not against part of the image
593 591 * mapped into memory we skip it.
594 592 */
595 593 if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
596 594 elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
597 595 rsymndx);
598 596 continue;
599 597 }
600 598
601 599 binfo = 0;
602 600 /*
603 601 * If a symbol index is specified then get the symbol table
604 602 * entry, locate the symbol definition, and determine its
605 603 * address.
606 604 */
607 605 if (rsymndx) {
608 606 /*
609 607 * If a Syminfo section is provided, determine if this
610 608 * symbol is deferred, and if so, skip this relocation.
611 609 */
612 610 if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
613 611 textrel, sip, rsymndx))
614 612 continue;
615 613
616 614 /*
617 615 * Get the local symbol table entry.
618 616 */
619 617 symref = (Sym *)((ulong_t)SYMTAB(lmp) +
620 618 (rsymndx * SYMENT(lmp)));
621 619
622 620 /*
623 621 * If this is a local symbol, just use the base address.
624 622 * (we should have no local relocations in the
625 623 * executable).
626 624 */
627 625 if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
628 626 value = basebgn;
629 627 name = NULL;
630 628
631 629 /*
632 630 * Special case TLS relocations.
633 631 */
634 632 if (rtype == R_AMD64_DTPMOD64) {
635 633 /*
636 634 * Use the TLS modid.
637 635 */
638 636 value = TLSMODID(lmp);
639 637
640 638 } else if ((rtype == R_AMD64_TPOFF64) ||
641 639 (rtype == R_AMD64_TPOFF32)) {
642 640 if ((value = elf_static_tls(lmp, symref,
643 641 rel, rtype, 0, roffset, 0)) == 0) {
644 642 ret = 0;
645 643 break;
646 644 }
647 645 }
648 646 } else {
649 647 /*
650 648 * If the symbol index is equal to the previous
651 649 * symbol index relocation we processed then
652 650 * reuse the previous values. (Note that there
653 651 * have been cases where a relocation exists
654 652 * against a copy relocation symbol, our ld(1)
655 653 * should optimize this away, but make sure we
656 654 * don't use the same symbol information should
657 655 * this case exist).
658 656 */
659 657 if ((rsymndx == psymndx) &&
660 658 (rtype != R_AMD64_COPY)) {
661 659 /* LINTED */
662 660 if (psymdef == 0) {
663 661 DBG_CALL(Dbg_bind_weak(lmp,
664 662 (Addr)roffset, (Addr)
665 663 (roffset - basebgn), name));
666 664 continue;
667 665 }
668 666 /* LINTED */
669 667 value = pvalue;
670 668 /* LINTED */
671 669 name = pname;
672 670 /* LINTED */
673 671 symdef = psymdef;
674 672 /* LINTED */
675 673 symref = psymref;
676 674 /* LINTED */
677 675 _lmp = plmp;
678 676 /* LINTED */
679 677 binfo = pbinfo;
680 678
681 679 if ((LIST(_lmp)->lm_tflags |
682 680 AFLAGS(_lmp)) &
683 681 LML_TFLG_AUD_SYMBIND) {
684 682 value = audit_symbind(lmp, _lmp,
685 683 /* LINTED */
686 684 symdef, dsymndx, value,
687 685 &sb_flags);
688 686 }
689 687 } else {
690 688 Slookup sl;
691 689 Sresult sr;
692 690
693 691 /*
694 692 * Lookup the symbol definition.
695 693 * Initialize the symbol lookup, and
696 694 * symbol result, data structure.
697 695 */
698 696 name = (char *)(STRTAB(lmp) +
699 697 symref->st_name);
700 698
701 699 SLOOKUP_INIT(sl, name, lmp, 0,
702 700 ld_entry_cnt, 0, rsymndx, symref,
703 701 rtype, LKUP_STDRELOC);
704 702 SRESULT_INIT(sr, name);
705 703 symdef = NULL;
706 704
707 705 if (lookup_sym(&sl, &sr, &binfo,
708 706 in_nfavl)) {
709 707 name = (char *)sr.sr_name;
710 708 _lmp = sr.sr_dmap;
711 709 symdef = sr.sr_sym;
712 710 }
713 711
714 712 /*
715 713 * If the symbol is not found and the
716 714 * reference was not to a weak symbol,
717 715 * report an error. Weak references
718 716 * may be unresolved.
719 717 */
720 718 /* BEGIN CSTYLED */
721 719 if (symdef == 0) {
722 720 if (sl.sl_bind != STB_WEAK) {
723 721 if (elf_reloc_error(lmp, name,
724 722 rel, binfo))
725 723 continue;
726 724
727 725 ret = 0;
728 726 break;
729 727
730 728 } else {
731 729 psymndx = rsymndx;
732 730 psymdef = 0;
733 731
734 732 DBG_CALL(Dbg_bind_weak(lmp,
735 733 (Addr)roffset, (Addr)
736 734 (roffset - basebgn), name));
737 735 continue;
738 736 }
739 737 }
740 738 /* END CSTYLED */
741 739
742 740 /*
743 741 * If symbol was found in an object
744 742 * other than the referencing object
745 743 * then record the binding.
746 744 */
747 745 if ((lmp != _lmp) && ((FLAGS1(_lmp) &
748 746 FL1_RT_NOINIFIN) == 0)) {
749 747 if (aplist_test(&bound, _lmp,
750 748 AL_CNT_RELBIND) == 0) {
751 749 ret = 0;
752 750 break;
753 751 }
754 752 }
755 753
756 754 /*
757 755 * Calculate the location of definition;
758 756 * symbol value plus base address of
759 757 * containing shared object.
760 758 */
761 759 if (IS_SIZE(rtype))
762 760 value = symdef->st_size;
763 761 else
764 762 value = symdef->st_value;
765 763
766 764 if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
767 765 !(IS_SIZE(rtype)) &&
768 766 (symdef->st_shndx != SHN_ABS) &&
769 767 (ELF_ST_TYPE(symdef->st_info) !=
770 768 STT_TLS))
771 769 value += ADDR(_lmp);
772 770
773 771 /*
774 772 * Retain this symbol index and the
775 773 * value in case it can be used for the
776 774 * subsequent relocations.
777 775 */
778 776 if (rtype != R_AMD64_COPY) {
779 777 psymndx = rsymndx;
780 778 pvalue = value;
781 779 pname = name;
782 780 psymdef = symdef;
783 781 psymref = symref;
784 782 plmp = _lmp;
785 783 pbinfo = binfo;
786 784 }
787 785 if ((LIST(_lmp)->lm_tflags |
788 786 AFLAGS(_lmp)) &
789 787 LML_TFLG_AUD_SYMBIND) {
790 788 dsymndx = (((uintptr_t)symdef -
791 789 (uintptr_t)SYMTAB(_lmp)) /
792 790 SYMENT(_lmp));
793 791 value = audit_symbind(lmp, _lmp,
794 792 symdef, dsymndx, value,
795 793 &sb_flags);
796 794 }
797 795 }
798 796
799 797 /*
800 798 * If relocation is PC-relative, subtract
801 799 * offset address.
802 800 */
803 801 if (IS_PC_RELATIVE(rtype))
804 802 value -= roffset;
805 803
806 804 /*
807 805 * Special case TLS relocations.
808 806 */
809 807 if (rtype == R_AMD64_DTPMOD64) {
810 808 /*
811 809 * Relocation value is the TLS modid.
812 810 */
813 811 value = TLSMODID(_lmp);
814 812
815 813 } else if ((rtype == R_AMD64_TPOFF64) ||
816 814 (rtype == R_AMD64_TPOFF32)) {
817 815 if ((value = elf_static_tls(_lmp,
818 816 symdef, rel, rtype, name, roffset,
819 817 value)) == 0) {
820 818 ret = 0;
821 819 break;
822 820 }
823 821 }
824 822 }
825 823 } else {
826 824 /*
827 825 * Special cases.
828 826 */
829 827 if (rtype == R_AMD64_DTPMOD64) {
830 828 /*
831 829 * TLS relocation value is the TLS modid.
832 830 */
833 831 value = TLSMODID(lmp);
834 832 } else
835 833 value = basebgn;
836 834
837 835 name = NULL;
838 836 }
839 837
840 838 DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
841 839 M_REL_SHT_TYPE, rel, NULL, 0, name));
842 840
843 841 /*
844 842 * Make sure the segment is writable.
845 843 */
846 844 if (((mpp->mr_prot & PROT_WRITE) == 0) &&
847 845 ((set_prot(lmp, mpp, 1) == 0) ||
848 846 (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
849 847 ret = 0;
850 848 break;
851 849 }
852 850
853 851 /*
854 852 * Call relocation routine to perform required relocation.
855 853 */
856 854 switch (rtype) {
857 855 case R_AMD64_COPY:
858 856 if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
859 857 symdef, _lmp, (const void *)value) == 0)
860 858 ret = 0;
861 859 break;
862 860 case R_AMD64_JUMP_SLOT:
863 861 if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
864 862 (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
865 863 AUDINFO(lmp)->ai_dynplts) {
866 864 int fail = 0;
867 865 int pltndx = (((ulong_t)rel -
868 866 (uintptr_t)JMPREL(lmp)) / relsiz);
869 867 int symndx = (((uintptr_t)symdef -
870 868 (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));
871 869
872 870 (void) elf_plt_trace_write(roffset, lmp, _lmp,
873 871 symdef, symndx, pltndx, (caddr_t)value,
874 872 sb_flags, &fail);
875 873 if (fail)
876 874 ret = 0;
877 875 } else {
878 876 /*
879 877 * Write standard PLT entry to jump directly
880 878 * to newly bound function.
881 879 */
882 880 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
883 881 ELF_DBG_RTLD, (Xword)roffset,
884 882 (Xword)value));
885 883 *(ulong_t *)roffset = value;
886 884 }
887 885 break;
888 886 default:
889 887 value += reladd;
890 888 /*
891 889 * Write the relocation out.
892 890 */
893 891 if (do_reloc_rtld(rtype, (uchar_t *)roffset,
894 892 (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
895 893 ret = 0;
896 894
897 895 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
898 896 (Xword)roffset, (Xword)value));
899 897 }
900 898
901 899 if ((ret == 0) &&
902 900 ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
903 901 break;
904 902
905 903 if (binfo) {
906 904 DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
907 905 (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
908 906 _lmp, (Addr)value, symdef->st_value, name, binfo));
909 907 }
910 908 }
911 909
912 910 return (relocate_finish(lmp, bound, ret));
913 911 }
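
The R_AMD64_RELATIVE fast path above hands contiguous runs of entries
to elf_reloc_relative_count()/elf_reloc_relative(), whose inner
operation reduces to one store per entry: base address plus addend,
written at base plus r_offset. A minimal sketch under that assumption
(the real loops in _inline_reloc.h also manage segment permissions
and debug output):

	/* Sketch of a RELATIVE relocation pass; hypothetical helper. */
	static void
	apply_relative(Rela *rel, size_t cnt, ulong_t basebgn)
	{
		for (; cnt-- > 0; rel++)
			*(ulong_t *)(basebgn + rel->r_offset) =
			    basebgn + rel->r_addend;
	}
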
914 912
915 913 /*
916 914 * Initialize the first few got entries so that function calls go to
917 915 * elf_rtbndr:
918 916 *
919 917 * GOT[GOT_XLINKMAP] = the address of the link map
920 918 * GOT[GOT_XRTLD] = the address of rtbinder
921 919 */
922 920 void
923 921 elf_plt_init(void *got, caddr_t l)
924 922 {
925 923 uint64_t *_got;
926 924 /* LINTED */
927 925 Rt_map *lmp = (Rt_map *)l;
928 926
929 927 _got = (uint64_t *)got + M_GOT_XLINKMAP;
930 928 *_got = (uint64_t)lmp;
931 929 _got = (uint64_t *)got + M_GOT_XRTLD;
932 930 *_got = (uint64_t)elf_rtbndr;
933 931 }
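
These reserved slots are what the first plt entry consults on a lazy
call: PLT0 pushes the link map from one slot and jumps through the
other into elf_rtbndr, which in turn enters elf_bndr() above. A sketch
of the resulting layout, assuming the conventional amd64 indices
(M_GOT_XLINKMAP == 1, M_GOT_XRTLD == 2; treat the numbers as
illustrative):

	/*
	 * GOT[0]  _DYNAMIC     (written by the link-editor)
	 * GOT[1]  lmp          (this object's link map)
	 * GOT[2]  elf_rtbndr   (lazy-binding trampoline)
	 */
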
934 932
935 933 /*
936 934 * Plt writing interface to allow debugging initialization to be generic.
937 935 */
938 936 Pltbindtype
939 937 /* ARGSUSED1 */
940 938 elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
941 939 Xword pltndx)
942 940 {
943 941 Rela *rel = (Rela*)rptr;
944 942 uintptr_t pltaddr;
945 943
946 944 pltaddr = addr + rel->r_offset;
947 945 *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
948 946 DBG_CALL(pltcntfull++);
949 947 return (PLT_T_FULL);
950 948 }
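
elf_plt_write() is the classic S + A case: the GOT word named by the
relocation's r_offset receives the symbol value plus the addend. A
worked example with hypothetical numbers: given addr = 0x400000,
r_offset = 0x1018, symval = 0x7f0000001234 and r_addend = 0, the store
is

	*(ulong_t *)0x401018 = 0x7f0000001234;

after which calls through that plt slot jump straight to the bound
function.
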
951 949
952 950 /*
953 951 * Provide a machine specific interface to the conversion routine. By calling
954 952 * the machine specific version, rather than the generic version, we ensure that
955 953 * the data tables/strings for all known machine versions aren't dragged into
956 954 * ld.so.1.
957 955 */
958 956 const char *
959 957 _conv_reloc_type(uint_t rel)
960 958 {
961 959 static Conv_inv_buf_t inv_buf;
962 960
963 961 return (conv_reloc_amd64_type(rel, 0, &inv_buf));
964 962 }