/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 */

/*
 * amd64 machine dependent and ELF file class dependent functions.
 * Contains routines for performing function binding and symbol relocations.
 */

#include        <stdio.h>
#include        <sys/elf.h>
#include        <sys/elf_amd64.h>
#include        <sys/mman.h>
#include        <dlfcn.h>
#include        <synch.h>
#include        <string.h>
#include        <debug.h>
#include        <reloc.h>
#include        <conv.h>
#include        "_rtld.h"
#include        "_audit.h"
#include        "_elf.h"
#include        "_inline_gen.h"
#include        "_inline_reloc.h"
#include        "msg.h"

extern void     elf_rtbndr(Rt_map *, ulong_t, caddr_t);

int
elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
{
        /*
         * Check machine type and flags.
         */
        if (ehdr->e_flags != 0) {
                rej->rej_type = SGS_REJ_BADFLAG;
                rej->rej_info = (uint_t)ehdr->e_flags;
                return (0);
        }
        return (1);
}

void
ldso_plt_init(Rt_map *lmp)
{
        /*
         * There is no need to analyze ld.so because we don't map in any of
         * its dependencies.  However, we may map these dependencies in later
         * (as if ld.so had dlopened them), so initialize the plt and the
         * permission information.
         */
        if (PLTGOT(lmp))
                elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
}

static const uchar_t dyn_plt_template[] = {
/* 0x00 */  0x55,                       /* pushq %rbp */
/* 0x01 */  0x48, 0x89, 0xe5,           /* movq  %rsp, %rbp */
/* 0x04 */  0x48, 0x83, 0xec, 0x10,     /* subq  $0x10, %rsp */
/* 0x08 */  0x4c, 0x8d, 0x1d, 0x00,     /* leaq  trace_fields(%rip), %r11 */
                0x00, 0x00, 0x00,
/* 0x0f */  0x4c, 0x89, 0x5d, 0xf8,     /* movq  %r11, -0x8(%rbp) */
/* 0x13 */  0x49, 0xbb, 0x00, 0x00,     /* movq  $elf_plt_trace, %r11 */
                0x00, 0x00, 0x00,
                0x00, 0x00, 0x00,
/* 0x1d */  0x41, 0xff, 0xe3            /* jmp   *%r11 */
/* 0x20 */
};

/*
 * And the virtual outstanding relocations against the
 * above block are:
 *
 *      reloc           offset  Addend  symbol
 *      R_AMD64_PC32    0x0b    -4      trace_fields
 *      R_AMD64_64      0x15    0       elf_plt_trace
 */

#define TRCREL1OFF      0x0b
#define TRCREL2OFF      0x15

int     dyn_plt_ent_size = sizeof (dyn_plt_template);

/*
 * the dynamic plt entry is:
 *
 *      pushq   %rbp
 *      movq    %rsp, %rbp
 *      subq    $0x10, %rsp
 *      leaq    trace_fields(%rip), %r11
 *      movq    %r11, -0x8(%rbp)
 *      movq    $elf_plt_trace, %r11
 *      jmp     *%r11
 * dyn_data:
 *      .align  8
 *      uintptr_t       reflmp
 *      uintptr_t       deflmp
 *      uint_t          symndx
 *      uint_t          sb_flags
 *      Sym             symdef
 */
static caddr_t
elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
    uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
{
        extern int      elf_plt_trace();
        ulong_t         got_entry;
        uchar_t         *dyn_plt;
        uintptr_t       *dyndata;

        /*
         * We only need to add the glue code if there is an auditing
         * library that is interested in this binding.
         */
        dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
            (pltndx * dyn_plt_ent_size));

        /*
         * Have we initialized this dynamic plt entry yet?  If we haven't, do
         * it now.  Otherwise this function has been called before, but from a
         * different plt (i.e., from another shared object).  In that case
         * we just set the plt to point to the new dyn_plt.
         */
        if (*dyn_plt == 0) {
                Sym     *symp;
                Xword   symvalue;
                Lm_list *lml = LIST(rlmp);

                (void) memcpy((void *)dyn_plt, dyn_plt_template,
                    sizeof (dyn_plt_template));
                dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
                    ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));

                /*
                 * relocate:
                 *      leaq    trace_fields(%rip), %r11
                 *      R_AMD64_PC32    0x0b    -4      trace_fields
                 */
                symvalue = (Xword)((uintptr_t)dyndata -
                    (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
                if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
                    &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
                    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
                        *fail = 1;
                        return (0);
                }

                /*
                 * relocating:
                 *      movq    $elf_plt_trace, %r11
                 *      R_AMD64_64      0x15    0       elf_plt_trace
                 */
                symvalue = (Xword)elf_plt_trace;
                if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
                    &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
                    MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
                        *fail = 1;
                        return (0);
                }

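                /*
                 * Fill in the dyn_data area that follows the template (see
                 * the layout comment above): the referencing and defining
                 * link-maps, the symbol index and sb_flags packed into one
                 * word, and a copy of the symbol definition with its value
                 * set to the final destination.
                 */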
                *dyndata++ = (uintptr_t)rlmp;
                *dyndata++ = (uintptr_t)dlmp;
                *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
                dyndata++;
                symp = (Sym *)dyndata;
                *symp = *sym;
                symp->st_value = (Addr)to;
        }

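        /*
         * Redirect the original got entry to the glue code so that any
         * subsequent calls through this plt entry are routed via
         * elf_plt_trace().
         */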
        got_entry = (ulong_t)roffset;
        *(ulong_t *)got_entry = (ulong_t)dyn_plt;
        return ((caddr_t)dyn_plt);
}

/*
 * Function binding routine - invoked on the first call to a function through
 * the procedure linkage table;
 * passes first through an assembly language interface.
 *
 * Takes the offset into the relocation table of the associated
 * relocation entry and the address of the link map (rt_private_map struct)
 * for the entry.
 *
 * Returns the address of the function referenced after re-writing the PLT
 * entry to invoke the function directly.
 *
 * On error, causes the process to terminate with a signal.
 */
ulong_t
elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
{
        Rt_map          *nlmp, *llmp;
        ulong_t         addr, reloff, symval, rsymndx;
        char            *name;
        Rela            *rptr;
        Sym             *rsym, *nsym;
        uint_t          binfo, sb_flags = 0, dbg_class;
        Slookup         sl;
        Sresult         sr;
        int             entry, lmflags;
        Lm_list         *lml;

        /*
         * For compatibility with libthread (TI_VERSION 1) we track the entry
         * value.  A zero value indicates we have recursed into ld.so.1 to
         * further process a locking request.  Under this recursion we disable
         * tsort and cleanup activities.
         */
        entry = enter(0);

        lml = LIST(lmp);
        if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
                dbg_class = dbg_desc->d_class;
                dbg_desc->d_class = 0;
        }

        /*
         * Perform some basic sanity checks.  If we didn't get a load map or
         * the relocation offset is invalid then it's possible someone has
         * walked over the .got entries or jumped to plt0 out of the blue.
         */
        if ((lmp == NULL) || (pltndx >=
            (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp))) {
                Conv_inv_buf_t inv_buf;

                eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
                    conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
                    EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
                rtldexit(lml, 1);
        }
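        /*
         * Convert the plt index into a byte offset into the .rela.plt table.
         */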
        reloff = pltndx * (ulong_t)RELENT(lmp);

        /*
         * Use relocation entry to get symbol table entry and symbol name.
         */
        addr = (ulong_t)JMPREL(lmp);
        rptr = (Rela *)(addr + reloff);
        rsymndx = ELF_R_SYM(rptr->r_info);
        rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
        name = (char *)(STRTAB(lmp) + rsym->st_name);

        /*
         * Determine the last link-map of this list; this'll be the starting
         * point for any tsort() processing.
         */
        llmp = lml->lm_tail;

        /*
         * Find definition for symbol.  Initialize the symbol lookup, and
         * symbol result, data structures.
         */
        SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
            rsymndx, rsym, 0, LKUP_DEFT);
        SRESULT_INIT(sr, name);

        if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
                eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
                    demangle(name));
                rtldexit(lml, 1);
        }

        name = (char *)sr.sr_name;
        nlmp = sr.sr_dmap;
        nsym = sr.sr_sym;

        symval = nsym->st_value;

        if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
            (nsym->st_shndx != SHN_ABS))
                symval += ADDR(nlmp);
        if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
                /*
                 * Record that this new link map is now bound to the caller.
                 */
                if (bind_one(lmp, nlmp, BND_REFER) == 0)
                        rtldexit(lml, 1);
        }

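        /*
         * Give any interested auditors a chance to inspect, and possibly
         * redirect, this symbol binding.
         */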
        if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
            LML_TFLG_AUD_SYMBIND) {
                uint_t  symndx = (((uintptr_t)nsym -
                    (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
                symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
                    &sb_flags);
        }

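        /*
         * Unless binding updates have been suppressed (for example by
         * LD_BIND_NOT, typically for auditing), patch things up so that
         * later calls go straight to the resolved function.
         */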
        if (!(rtld_flags & RT_FL_NOBIND)) {
                addr = rptr->r_offset;
                if (!(FLAGS(lmp) & FLG_RT_FIXED))
                        addr += ADDR(lmp);
                if (((lml->lm_tflags | AFLAGS(lmp)) &
                    (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
                    AUDINFO(lmp)->ai_dynplts) {
                        int     fail = 0;
                        uint_t  pltndx = reloff / sizeof (Rela);
                        uint_t  symndx = (((uintptr_t)nsym -
                            (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));

                        symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
                            nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
                            &fail);
                        if (fail)
                                rtldexit(lml, 1);
                } else {
                        /*
                         * Write standard PLT entry to jump directly
                         * to newly bound function.
                         */
                        *(ulong_t *)addr = symval;
                }
        }

        /*
         * Print binding information and rebuild PLT entry.
         */
        DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
            (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
            nsym->st_value, name, binfo));

        /*
         * Complete any processing for newly loaded objects.  Note we don't
         * know exactly where any new objects are loaded (we know the object
         * that supplied the symbol, but others may have been loaded lazily as
         * we searched for the symbol), so sorting starts from the last
         * link-map known on entry to this routine.
         */
        if (entry)
                load_completion(llmp);

        /*
         * Some operations like dldump() or dlopen()'ing a relocatable object
         * result in objects being loaded on rtld's link-map; make sure these
         * objects are initialized also.
         */
        if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
                load_completion(nlmp);

        /*
         * Make sure the object to which we've bound has had its .init fired.
         * Cleanup before return to user code.
         */
        if (entry) {
                is_dep_init(nlmp, lmp);
                leave(lml, 0);
        }

        if (lmflags & LML_FLG_RTLDLM)
                dbg_desc->d_class = dbg_class;

        return (symval);
}

/*
 * Read and process the relocations for one link object; we assume all
 * relocation sections for loadable segments are stored contiguously in
 * the file.
 */
int
elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
{
        ulong_t         relbgn, relend, relsiz, basebgn;
        ulong_t         pltbgn, pltend, _pltbgn, _pltend;
        ulong_t         roffset, rsymndx, psymndx = 0;
        ulong_t         dsymndx;
        uchar_t         rtype;
        long            reladd, value, pvalue;
        Sym             *symref, *psymref, *symdef, *psymdef;
        Syminfo         *sip;
        char            *name, *pname;
        Rt_map          *_lmp, *plmp;
        int             ret = 1, noplt = 0;
        int             relacount = RELACOUNT(lmp), plthint = 0;
        Rela            *rel;
        uint_t          binfo, pbinfo;
        APlist          *bound = NULL;

        /*
         * Although only necessary for lazy binding, initialize the first
         * global offset entry to go to elf_rtbndr().  dbx(1) seems
         * to find this useful.
         */
        if ((plt == 0) && PLTGOT(lmp)) {
                mmapobj_result_t        *mpp;

                /*
                 * Make sure the segment is writable.
                 */
                if ((((mpp =
                    find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
                    ((mpp->mr_prot & PROT_WRITE) == 0)) &&
                    ((set_prot(lmp, mpp, 1) == 0) ||
                    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
                        return (0);

                elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
        }

        /*
         * Initialize the plt start and end addresses.
         */
        if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
                pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));

        relsiz = (ulong_t)(RELENT(lmp));
        basebgn = ADDR(lmp);

        if (PLTRELSZ(lmp))
                plthint = PLTRELSZ(lmp) / relsiz;

        /*
         * If we've been called upon to promote an RTLD_LAZY object to an
         * RTLD_NOW then we're only interested in scanning the .plt table.
         * An uninitialized .plt is the case where the associated got entry
         * points back to the plt itself.  Determine the range of the real .plt
         * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
         */
        if (plt) {
                Slookup sl;
                Sresult sr;

                relbgn = pltbgn;
                relend = pltend;
                if (!relbgn || (relbgn == relend))
                        return (1);

                /*
                 * Initialize the symbol lookup, and symbol result, data
                 * structures.
                 */
                SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
                    elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
                SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));

                if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
                        return (1);

                symdef = sr.sr_sym;
                _pltbgn = symdef->st_value;
                if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
                    (symdef->st_shndx != SHN_ABS))
                        _pltbgn += basebgn;
                _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
                    M_PLT_ENTSIZE) + M_PLT_RESERVSZ;

        } else {
                /*
                 * The relocation sections appear to the run-time linker as a
                 * single table.  Determine the address of the beginning and end
                 * of this table.  There are two different interpretations of
                 * the ABI at this point:
                 *
                 *   o  The REL table and its associated RELSZ indicate the
                 *      concatenation of *all* relocation sections (this is the
                 *      model our link-editor constructs).
                 *
                 *   o  The REL table and its associated RELSZ indicate the
                 *      concatenation of all *but* the .plt relocations.  These
                 *      relocations are specified individually by the JMPREL and
                 *      PLTRELSZ entries.
                 *
                 * Determine from our knowledge of the relocation range and
                 * .plt range, the range of the total relocation table.  Note
                 * that one other ABI assumption seems to be that the .plt
                 * relocations always follow any other relocations; the
                 * following range checking drops that assumption.
                 */
                relbgn = (ulong_t)(REL(lmp));
                relend = relbgn + (ulong_t)(RELSZ(lmp));
                if (pltbgn) {
                        if (!relbgn || (relbgn > pltbgn))
                                relbgn = pltbgn;
                        if (!relbgn || (relend < pltend))
                                relend = pltend;
                }
        }
        if (!relbgn || (relbgn == relend)) {
                DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
                return (1);
        }
        DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));

        /*
         * If we're processing a dynamic executable in lazy mode there is no
         * need to scan the .rel.plt table; however, if we're processing a
         * shared object in lazy mode the .got addresses associated with each
         * .plt must be relocated to reflect the location of the shared object.
         */
        if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
            (FLAGS(lmp) & FLG_RT_FIXED))
                noplt = 1;

        sip = SYMINFO(lmp);
        /*
         * Loop through relocations.
         */
        while (relbgn < relend) {
                mmapobj_result_t        *mpp;
                uint_t                  sb_flags = 0;

                rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);

                /*
                 * If this is a RELATIVE relocation in a shared object (the
                 * common case), and if we are not debugging, then jump into a
                 * tighter relocation loop (elf_reloc_relative).
                 */
                if ((rtype == R_AMD64_RELATIVE) &&
                    ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
                        if (relacount) {
                                relbgn = elf_reloc_relative_count(relbgn,
                                    relacount, relsiz, basebgn, lmp,
                                    textrel, 0);
                                relacount = 0;
                        } else {
                                relbgn = elf_reloc_relative(relbgn, relend,
                                    relsiz, basebgn, lmp, textrel, 0);
                        }
                        if (relbgn >= relend)
                                break;
                        rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
                }

                roffset = ((Rela *)relbgn)->r_offset;

                /*
                 * If this is a shared object, add the base address to offset.
                 */
                if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
                        /*
                         * If we're processing lazy bindings, we have to step
                         * through the plt entries and add the base address
                         * to the corresponding got entry.
                         */
                        if (plthint && (plt == 0) &&
                            (rtype == R_AMD64_JUMP_SLOT) &&
                            ((MODE(lmp) & RTLD_NOW) == 0)) {
                                relbgn = elf_reloc_relative_count(relbgn,
                                    plthint, relsiz, basebgn, lmp, textrel, 1);
                                plthint = 0;
                                continue;
                        }
                        roffset += basebgn;
                }

                reladd = (long)(((Rela *)relbgn)->r_addend);
                rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
                rel = (Rela *)relbgn;
                relbgn += relsiz;

                /*
                 * Optimizations.
                 */
                if (rtype == R_AMD64_NONE)
                        continue;
                if (noplt && ((ulong_t)rel >= pltbgn) &&
                    ((ulong_t)rel < pltend)) {
                        relbgn = pltend;
                        continue;
                }

                /*
                 * If we're promoting plts, determine if this one has already
                 * been written.
                 */
                if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
                    (*(ulong_t *)roffset > _pltend)))
                        continue;

                /*
                 * If this relocation is not against part of the image
                 * mapped into memory we skip it.
                 */
                if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
                        elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
                            rsymndx);
                        continue;
                }

                binfo = 0;
                /*
                 * If a symbol index is specified then get the symbol table
                 * entry, locate the symbol definition, and determine its
                 * address.
                 */
                if (rsymndx) {
                        /*
                         * If a Syminfo section is provided, determine if this
                         * symbol is deferred, and if so, skip this relocation.
                         */
                        if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
                            textrel, sip, rsymndx))
                                continue;

                        /*
                         * Get the local symbol table entry.
                         */
                        symref = (Sym *)((ulong_t)SYMTAB(lmp) +
                            (rsymndx * SYMENT(lmp)));

                        /*
                         * If this is a local symbol, just use the base address.
                         * (we should have no local relocations in the
                         * executable).
                         */
                        if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
                                value = basebgn;
                                name = NULL;

                                /*
                                 * Special case TLS relocations.
                                 */
                                if (rtype == R_AMD64_DTPMOD64) {
                                        /*
                                         * Use the TLS modid.
                                         */
                                        value = TLSMODID(lmp);

                                } else if ((rtype == R_AMD64_TPOFF64) ||
                                    (rtype == R_AMD64_TPOFF32)) {
                                        if ((value = elf_static_tls(lmp, symref,
                                            rel, rtype, 0, roffset, 0)) == 0) {
                                                ret = 0;
                                                break;
                                        }
                                }
                        } else {
                                /*
                                 * If the symbol index is equal to the previous
                                 * symbol index relocation we processed then
                                 * reuse the previous values. (Note that there
                                 * have been cases where a relocation exists
                                 * against a copy relocation symbol, our ld(1)
                                 * should optimize this away, but make sure we
                                 * don't use the same symbol information should
                                 * this case exist).
                                 */
                                if ((rsymndx == psymndx) &&
                                    (rtype != R_AMD64_COPY)) {
                                        /* LINTED */
                                        if (psymdef == 0) {
                                                DBG_CALL(Dbg_bind_weak(lmp,
                                                    (Addr)roffset, (Addr)
                                                    (roffset - basebgn), name));
                                                continue;
                                        }
                                        /* LINTED */
                                        value = pvalue;
                                        /* LINTED */
                                        name = pname;
                                        /* LINTED */
                                        symdef = psymdef;
                                        /* LINTED */
                                        symref = psymref;
                                        /* LINTED */
                                        _lmp = plmp;
                                        /* LINTED */
                                        binfo = pbinfo;

                                        if ((LIST(_lmp)->lm_tflags |
                                            AFLAGS(_lmp)) &
                                            LML_TFLG_AUD_SYMBIND) {
                                                value = audit_symbind(lmp, _lmp,
                                                    /* LINTED */
                                                    symdef, dsymndx, value,
                                                    &sb_flags);
                                        }
                                } else {
                                        Slookup         sl;
                                        Sresult         sr;

                                        /*
                                         * Lookup the symbol definition.
                                         * Initialize the symbol lookup, and
                                         * symbol result, data structures.
                                         */
                                        name = (char *)(STRTAB(lmp) +
                                            symref->st_name);

                                        SLOOKUP_INIT(sl, name, lmp, 0,
                                            ld_entry_cnt, 0, rsymndx, symref,
                                            rtype, LKUP_STDRELOC);
                                        SRESULT_INIT(sr, name);
                                        symdef = NULL;

                                        if (lookup_sym(&sl, &sr, &binfo,
                                            in_nfavl)) {
                                                name = (char *)sr.sr_name;
                                                _lmp = sr.sr_dmap;
                                                symdef = sr.sr_sym;
                                        }

                                        /*
                                         * If the symbol is not found and the
                                         * reference was not to a weak symbol,
                                         * report an error.  Weak references
                                         * may be unresolved.
                                         */
                                        /* BEGIN CSTYLED */
                                        if (symdef == 0) {
                                            if (sl.sl_bind != STB_WEAK) {
                                                if (elf_reloc_error(lmp, name,
                                                    rel, binfo))
                                                        continue;

                                                ret = 0;
                                                break;

                                            } else {
                                                psymndx = rsymndx;
                                                psymdef = 0;

                                                DBG_CALL(Dbg_bind_weak(lmp,
                                                    (Addr)roffset, (Addr)
                                                    (roffset - basebgn), name));
                                                continue;
                                            }
                                        }
                                        /* END CSTYLED */

                                        /*
                                         * If symbol was found in an object
                                         * other than the referencing object
                                         * then record the binding.
                                         */
                                        if ((lmp != _lmp) && ((FLAGS1(_lmp) &
                                            FL1_RT_NOINIFIN) == 0)) {
                                                if (aplist_test(&bound, _lmp,
                                                    AL_CNT_RELBIND) == 0) {
                                                        ret = 0;
                                                        break;
                                                }
                                        }

                                        /*
                                         * Calculate the location of definition;
                                         * symbol value plus base address of
                                         * containing shared object.
                                         */
                                        if (IS_SIZE(rtype))
                                                value = symdef->st_size;
                                        else
                                                value = symdef->st_value;

                                        if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
                                            !(IS_SIZE(rtype)) &&
                                            (symdef->st_shndx != SHN_ABS) &&
                                            (ELF_ST_TYPE(symdef->st_info) !=
                                            STT_TLS))
                                                value += ADDR(_lmp);

                                        /*
                                         * Retain this symbol index and the
                                         * value in case it can be used for the
                                         * subsequent relocations.
                                         */
                                        if (rtype != R_AMD64_COPY) {
                                                psymndx = rsymndx;
                                                pvalue = value;
                                                pname = name;
                                                psymdef = symdef;
                                                psymref = symref;
                                                plmp = _lmp;
                                                pbinfo = binfo;
                                        }
                                        if ((LIST(_lmp)->lm_tflags |
                                            AFLAGS(_lmp)) &
                                            LML_TFLG_AUD_SYMBIND) {
                                                dsymndx = (((uintptr_t)symdef -
                                                    (uintptr_t)SYMTAB(_lmp)) /
                                                    SYMENT(_lmp));
                                                value = audit_symbind(lmp, _lmp,
                                                    symdef, dsymndx, value,
                                                    &sb_flags);
                                        }
                                }

                                /*
                                 * If relocation is PC-relative, subtract
                                 * offset address.
                                 */
                                if (IS_PC_RELATIVE(rtype))
                                        value -= roffset;

                                /*
                                 * Special case TLS relocations.
                                 */
                                if (rtype == R_AMD64_DTPMOD64) {
                                        /*
                                         * Relocation value is the TLS modid.
                                         */
                                        value = TLSMODID(_lmp);

                                } else if ((rtype == R_AMD64_TPOFF64) ||
                                    (rtype == R_AMD64_TPOFF32)) {
                                        if ((value = elf_static_tls(_lmp,
                                            symdef, rel, rtype, name, roffset,
                                            value)) == 0) {
                                                ret = 0;
                                                break;
                                        }
                                }
                        }
                } else {
                        /*
                         * Special cases.
                         */
                        if (rtype == R_AMD64_DTPMOD64) {
                                /*
                                 * TLS relocation value is the TLS modid.
                                 */
                                value = TLSMODID(lmp);
                        } else
                                value = basebgn;

                        name = NULL;
                }

                DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
                    M_REL_SHT_TYPE, rel, NULL, 0, name));

                /*
                 * Make sure the segment is writable.
                 */
                if (((mpp->mr_prot & PROT_WRITE) == 0) &&
                    ((set_prot(lmp, mpp, 1) == 0) ||
                    (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
                        ret = 0;
                        break;
                }

                /*
                 * Call relocation routine to perform required relocation.
                 */
                switch (rtype) {
                case R_AMD64_COPY:
                        if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
                            symdef, _lmp, (const void *)value) == 0)
                                ret = 0;
                        break;
                case R_AMD64_JUMP_SLOT:
                        if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
                            (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
                            AUDINFO(lmp)->ai_dynplts) {
                                int     fail = 0;
                                int     pltndx = (((ulong_t)rel -
                                    (uintptr_t)JMPREL(lmp)) / relsiz);
                                int     symndx = (((uintptr_t)symdef -
                                    (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));

                                (void) elf_plt_trace_write(roffset, lmp, _lmp,
                                    symdef, symndx, pltndx, (caddr_t)value,
                                    sb_flags, &fail);
                                if (fail)
                                        ret = 0;
                        } else {
                                /*
                                 * Write standard PLT entry to jump directly
                                 * to newly bound function.
                                 */
                                DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
                                    ELF_DBG_RTLD, (Xword)roffset,
                                    (Xword)value));
                                *(ulong_t *)roffset = value;
                        }
                        break;
                default:
                        value += reladd;
                        /*
                         * Write the relocation out.
                         */
                        if (do_reloc_rtld(rtype, (uchar_t *)roffset,
                            (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
                                ret = 0;

                        DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
                            (Xword)roffset, (Xword)value));
                }

                if ((ret == 0) &&
                    ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
                        break;

                if (binfo) {
                        DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
                            (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
                            _lmp, (Addr)value, symdef->st_value, name, binfo));
                }
        }

        return (relocate_finish(lmp, bound, ret));
}

/*
 * Initialize the first few got entries so that function calls go to
 * elf_rtbndr:
 *
 *      GOT[GOT_XLINKMAP] =     the address of the link map
 *      GOT[GOT_XRTLD] =        the address of rtbinder
 */
void
elf_plt_init(void *got, caddr_t l)
{
        uint64_t        *_got;
        /* LINTED */
        Rt_map          *lmp = (Rt_map *)l;

        _got = (uint64_t *)got + M_GOT_XLINKMAP;
        *_got = (uint64_t)lmp;
        _got = (uint64_t *)got + M_GOT_XRTLD;
        *_got = (uint64_t)elf_rtbndr;
}

/*
 * Plt writing interface to allow debugging initialization to be generic.
 */
Pltbindtype
/* ARGSUSED1 */
elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
    Xword pltndx)
{
        Rela            *rel = (Rela *)rptr;
        uintptr_t       pltaddr;

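        /*
         * A full plt binding on amd64 simply deposits the resolved symbol
         * value (plus the relocation addend) into the got slot named by the
         * relocation entry; no instruction patching is required.
         */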
        pltaddr = addr + rel->r_offset;
        *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
        DBG_CALL(pltcntfull++);
        return (PLT_T_FULL);
}

/*
 * Provide a machine specific interface to the conversion routine.  By calling
 * the machine specific version, rather than the generic version, we ensure
 * that the data tables/strings for all known machine versions aren't dragged
 * into ld.so.1.
 */
const char *
_conv_reloc_type(uint_t rel)
{
        static Conv_inv_buf_t   inv_buf;

        return (conv_reloc_amd64_type(rel, 0, &inv_buf));
}