1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
  24  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  25  */
  26 
  27 /*
  28  * amd64 machine dependent and ELF file class dependent functions.
  29  * Contains routines for performing function binding and symbol relocations.
  30  */
  31 
  32 #include        <stdio.h>
  33 #include        <sys/elf.h>
  34 #include        <sys/elf_amd64.h>
  35 #include        <sys/mman.h>
  36 #include        <dlfcn.h>
  37 #include        <synch.h>
  38 #include        <string.h>
  39 #include        <debug.h>
  40 #include        <reloc.h>
  41 #include        <conv.h>
  42 #include        "_rtld.h"
  43 #include        "_audit.h"
  44 #include        "_elf.h"
  45 #include        "_inline_gen.h"
  46 #include        "_inline_reloc.h"
  47 #include        "msg.h"
  48 
  49 extern void     elf_rtbndr(Rt_map *, ulong_t, caddr_t);
  50 
  51 int
  52 elf_mach_flags_check(Rej_desc *rej, Ehdr *ehdr)
  53 {
  54         /*
  55          * Check machine type and flags.
  56          */
  57         if (ehdr->e_flags != 0) {
  58                 rej->rej_type = SGS_REJ_BADFLAG;
  59                 rej->rej_info = (uint_t)ehdr->e_flags;
  60                 return (0);
  61         }
  62         return (1);
  63 }
  64 
  65 void
  66 ldso_plt_init(Rt_map *lmp)
  67 {
  68         /*
  69          * There is no need to analyze ld.so because we don't map in any of
  70          * its dependencies.  However we may map these dependencies in later
  71          * (as if ld.so had dlopened them), so initialize the plt and the
  72          * permission information.
  73          */
  74         if (PLTGOT(lmp))
  75                 elf_plt_init((PLTGOT(lmp)), (caddr_t)lmp);
  76 }
  77 
  78 static const uchar_t dyn_plt_template[] = {
  79 /* 0x00 */  0x55,                       /* pushq %rbp */
  80 /* 0x01 */  0x48, 0x89, 0xe5,           /* movq  %rsp, %rbp */
  81 /* 0x04 */  0x48, 0x83, 0xec, 0x10,     /* subq  $0x10, %rsp */
  82 /* 0x08 */  0x4c, 0x8d, 0x1d, 0x00,     /* leaq  trace_fields(%rip), %r11 */
  83                 0x00, 0x00, 0x00,
  84 /* 0x0f */  0x4c, 0x89, 0x5d, 0xf8,     /* movq  %r11, -0x8(%rbp) */
  85 /* 0x13 */  0x49, 0xbb, 0x00, 0x00,     /* movq  $elf_plt_trace, %r11 */
  86                 0x00, 0x00, 0x00,
  87                 0x00, 0x00, 0x00,
  88 /* 0x1d */  0x41, 0xff, 0xe3            /* jmp   *%r11 */
  89 /* 0x20 */
  90 };
  91 
  92 /*
  93  * And the virutal outstanding relocations against the
  94  * above block are:
  95  *
  96  *      reloc           offset  Addend  symbol
  97  *      R_AMD64_PC32    0x0b    -4      trace_fields
  98  *      R_AMD64_64      0x15    0       elf_plt_trace
  99  */
 100 
 101 #define TRCREL1OFF      0x0b
 102 #define TRCREL2OFF      0x15
 103 
 104 int     dyn_plt_ent_size = sizeof (dyn_plt_template);
 105 
 106 /*
 107  * the dynamic plt entry is:
 108  *
 109  *      pushq   %rbp
 110  *      movq    %rsp, %rbp
 111  *      subq    $0x10, %rsp
 112  *      leaq    trace_fields(%rip), %r11
 113  *      movq    %r11, -0x8(%rbp)
 114  *      movq    $elf_plt_trace, %r11
 115  *      jmp     *%r11
 116  * dyn_data:
 117  *      .align  8
 118  *      uintptr_t       reflmp
 119  *      uintptr_t       deflmp
 120  *      uint_t          symndx
 121  *      uint_t          sb_flags
 122  *      Sym             symdef
 123  */
 124 static caddr_t
 125 elf_plt_trace_write(ulong_t roffset, Rt_map *rlmp, Rt_map *dlmp, Sym *sym,
 126     uint_t symndx, uint_t pltndx, caddr_t to, uint_t sb_flags, int *fail)
 127 {
 128         extern int      elf_plt_trace();
 129         ulong_t         got_entry;
 130         uchar_t         *dyn_plt;
 131         uintptr_t       *dyndata;
 132 
 133         /*
 134          * We only need to add the glue code if there is an auditing
 135          * library that is interested in this binding.
 136          */
 137         dyn_plt = (uchar_t *)((uintptr_t)AUDINFO(rlmp)->ai_dynplts +
 138             (pltndx * dyn_plt_ent_size));
 139 
 140         /*
 141          * Have we initialized this dynamic plt entry yet?  If we haven't do it
 142          * now.  Otherwise this function has been called before, but from a
 143          * different plt (ie. from another shared object).  In that case
 144          * we just set the plt to point to the new dyn_plt.
 145          */
 146         if (*dyn_plt == 0) {
 147                 Sym     *symp;
 148                 Xword   symvalue;
 149                 Lm_list *lml = LIST(rlmp);
 150 
 151                 (void) memcpy((void *)dyn_plt, dyn_plt_template,
 152                     sizeof (dyn_plt_template));
 153                 dyndata = (uintptr_t *)((uintptr_t)dyn_plt +
 154                     ROUND(sizeof (dyn_plt_template), M_WORD_ALIGN));
 155 
 156                 /*
 157                  * relocate:
 158                  *      leaq    trace_fields(%rip), %r11
 159                  *      R_AMD64_PC32    0x0b    -4      trace_fields
 160                  */
 161                 symvalue = (Xword)((uintptr_t)dyndata -
 162                     (uintptr_t)(&dyn_plt[TRCREL1OFF]) - 4);
 163                 if (do_reloc_rtld(R_AMD64_PC32, &dyn_plt[TRCREL1OFF],
 164                     &symvalue, MSG_ORIG(MSG_SYM_LADYNDATA),
 165                     MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
 166                         *fail = 1;
 167                         return (0);
 168                 }
 169 
 170                 /*
 171                  * relocating:
 172                  *      movq    $elf_plt_trace, %r11
 173                  *      R_AMD64_64      0x15    0       elf_plt_trace
 174                  */
 175                 symvalue = (Xword)elf_plt_trace;
 176                 if (do_reloc_rtld(R_AMD64_64, &dyn_plt[TRCREL2OFF],
 177                     &symvalue, MSG_ORIG(MSG_SYM_ELFPLTTRACE),
 178                     MSG_ORIG(MSG_SPECFIL_DYNPLT), lml) == 0) {
 179                         *fail = 1;
 180                         return (0);
 181                 }
 182 
 183                 *dyndata++ = (uintptr_t)rlmp;
 184                 *dyndata++ = (uintptr_t)dlmp;
 185                 *dyndata = (uintptr_t)(((uint64_t)sb_flags << 32) | symndx);
 186                 dyndata++;
 187                 symp = (Sym *)dyndata;
 188                 *symp = *sym;
 189                 symp->st_value = (Addr)to;
 190         }
 191 
 192         got_entry = (ulong_t)roffset;
 193         *(ulong_t *)got_entry = (ulong_t)dyn_plt;
 194         return ((caddr_t)dyn_plt);
 195 }
 196 
 197 /*
 198  * Function binding routine - invoked on the first call to a function through
 199  * the procedure linkage table;
 200  * passes first through an assembly language interface.
 201  *
 202  * Takes the offset into the relocation table of the associated
 203  * relocation entry and the address of the link map (rt_private_map struct)
 204  * for the entry.
 205  *
 206  * Returns the address of the function referenced after re-writing the PLT
 207  * entry to invoke the function directly.
 208  *
 209  * On error, causes process to terminate with a signal.
 210  */
 211 ulong_t
 212 elf_bndr(Rt_map *lmp, ulong_t pltndx, caddr_t from)
 213 {
 214         Rt_map          *nlmp, *llmp;
 215         ulong_t         addr, reloff, symval, rsymndx;
 216         char            *name;
 217         Rela            *rptr;
 218         Sym             *rsym, *nsym;
 219         uint_t          binfo, sb_flags = 0, dbg_class;
 220         Slookup         sl;
 221         Sresult         sr;
 222         int             entry, lmflags;
 223         Lm_list         *lml;
 224 
 225         /*
 226          * For compatibility with libthread (TI_VERSION 1) we track the entry
 227          * value.  A zero value indicates we have recursed into ld.so.1 to
 228          * further process a locking request.  Under this recursion we disable
 229          * tsort and cleanup activities.
 230          */
 231         entry = enter(0);
 232 
 233         lml = LIST(lmp);
 234         if ((lmflags = lml->lm_flags) & LML_FLG_RTLDLM) {
 235                 dbg_class = dbg_desc->d_class;
 236                 dbg_desc->d_class = 0;
 237         }
 238 
 239         /*
 240          * Perform some basic sanity checks.  If the relocation offset is
 241          * invalid then its possible someone has walked over the .got entries.
 242          */
 243         if (pltndx > (ulong_t)PLTRELSZ(lmp) / (ulong_t)RELENT(lmp)) {
 244                 Conv_inv_buf_t inv_buf;
 245 
 246                 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_PLTREF),
 247                     conv_reloc_amd64_type(R_AMD64_JUMP_SLOT, 0, &inv_buf),
 248                     EC_NATPTR(lmp), EC_XWORD(pltndx), EC_NATPTR(from));
 249                 rtldexit(lml, 1);
 250         }
 251         reloff = pltndx * (ulong_t)RELENT(lmp);
 252 
 253         /*
 254          * Use relocation entry to get symbol table entry and symbol name.
 255          */
 256         addr = (ulong_t)JMPREL(lmp);
 257         rptr = (Rela *)(addr + reloff);
 258         rsymndx = ELF_R_SYM(rptr->r_info);
 259         rsym = (Sym *)((ulong_t)SYMTAB(lmp) + (rsymndx * SYMENT(lmp)));
 260         name = (char *)(STRTAB(lmp) + rsym->st_name);
 261 
 262         /*
 263          * Determine the last link-map of this list, this'll be the starting
 264          * point for any tsort() processing.
 265          */
 266         llmp = lml->lm_tail;
 267 
 268         /*
 269          * Find definition for symbol.  Initialize the symbol lookup, and
 270          * symbol result, data structures.
 271          */
 272         SLOOKUP_INIT(sl, name, lmp, lml->lm_head, ld_entry_cnt, 0,
 273             rsymndx, rsym, 0, LKUP_DEFT);
 274         SRESULT_INIT(sr, name);
 275 
 276         if (lookup_sym(&sl, &sr, &binfo, NULL) == 0) {
 277                 eprintf(lml, ERR_FATAL, MSG_INTL(MSG_REL_NOSYM), NAME(lmp),
 278                     demangle(name));
 279                 rtldexit(lml, 1);
 280         }
 281 
 282         name = (char *)sr.sr_name;
 283         nlmp = sr.sr_dmap;
 284         nsym = sr.sr_sym;
 285 
 286         symval = nsym->st_value;
 287 
 288         if (!(FLAGS(nlmp) & FLG_RT_FIXED) &&
 289             (nsym->st_shndx != SHN_ABS))
 290                 symval += ADDR(nlmp);
 291         if ((lmp != nlmp) && ((FLAGS1(nlmp) & FL1_RT_NOINIFIN) == 0)) {
 292                 /*
 293                  * Record that this new link map is now bound to the caller.
 294                  */
 295                 if (bind_one(lmp, nlmp, BND_REFER) == 0)
 296                         rtldexit(lml, 1);
 297         }
 298 
 299         if ((lml->lm_tflags | AFLAGS(lmp) | AFLAGS(nlmp)) &
 300             LML_TFLG_AUD_SYMBIND) {
 301                 uint_t  symndx = (((uintptr_t)nsym -
 302                     (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
 303                 symval = audit_symbind(lmp, nlmp, nsym, symndx, symval,
 304                     &sb_flags);
 305         }
 306 
 307         if (!(rtld_flags & RT_FL_NOBIND)) {
 308                 addr = rptr->r_offset;
 309                 if (!(FLAGS(lmp) & FLG_RT_FIXED))
 310                         addr += ADDR(lmp);
 311                 if (((lml->lm_tflags | AFLAGS(lmp)) &
 312                     (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
 313                     AUDINFO(lmp)->ai_dynplts) {
 314                         int     fail = 0;
 315                         uint_t  pltndx = reloff / sizeof (Rela);
 316                         uint_t  symndx = (((uintptr_t)nsym -
 317                             (uintptr_t)SYMTAB(nlmp)) / SYMENT(nlmp));
 318 
 319                         symval = (ulong_t)elf_plt_trace_write(addr, lmp, nlmp,
 320                             nsym, symndx, pltndx, (caddr_t)symval, sb_flags,
 321                             &fail);
 322                         if (fail)
 323                                 rtldexit(lml, 1);
 324                 } else {
 325                         /*
 326                          * Write standard PLT entry to jump directly
 327                          * to newly bound function.
 328                          */
 329                         *(ulong_t *)addr = symval;
 330                 }
 331         }
 332 
 333         /*
 334          * Print binding information and rebuild PLT entry.
 335          */
 336         DBG_CALL(Dbg_bind_global(lmp, (Addr)from, (Off)(from - ADDR(lmp)),
 337             (Xword)(reloff / sizeof (Rela)), PLT_T_FULL, nlmp, (Addr)symval,
 338             nsym->st_value, name, binfo));
 339 
 340         /*
 341          * Complete any processing for newly loaded objects.  Note we don't
 342          * know exactly where any new objects are loaded (we know the object
 343          * that supplied the symbol, but others may have been loaded lazily as
 344          * we searched for the symbol), so sorting starts from the last
 345          * link-map know on entry to this routine.
 346          */
 347         if (entry)
 348                 load_completion(llmp);
 349 
 350         /*
 351          * Some operations like dldump() or dlopen()'ing a relocatable object
 352          * result in objects being loaded on rtld's link-map, make sure these
 353          * objects are initialized also.
 354          */
 355         if ((LIST(nlmp)->lm_flags & LML_FLG_RTLDLM) && LIST(nlmp)->lm_init)
 356                 load_completion(nlmp);
 357 
 358         /*
 359          * Make sure the object to which we've bound has had it's .init fired.
 360          * Cleanup before return to user code.
 361          */
 362         if (entry) {
 363                 is_dep_init(nlmp, lmp);
 364                 leave(lml, 0);
 365         }
 366 
 367         if (lmflags & LML_FLG_RTLDLM)
 368                 dbg_desc->d_class = dbg_class;
 369 
 370         return (symval);
 371 }
 372 
 373 /*
 374  * Read and process the relocations for one link object, we assume all
 375  * relocation sections for loadable segments are stored contiguously in
 376  * the file.
 377  */
 378 int
 379 elf_reloc(Rt_map *lmp, uint_t plt, int *in_nfavl, APlist **textrel)
 380 {
 381         ulong_t         relbgn, relend, relsiz, basebgn;
 382         ulong_t         pltbgn, pltend, _pltbgn, _pltend;
 383         ulong_t         roffset, rsymndx, psymndx = 0;
 384         ulong_t         dsymndx;
 385         uchar_t         rtype;
 386         long            reladd, value, pvalue;
 387         Sym             *symref, *psymref, *symdef, *psymdef;
 388         Syminfo         *sip;
 389         char            *name, *pname;
 390         Rt_map          *_lmp, *plmp;
 391         int             ret = 1, noplt = 0;
 392         int             relacount = RELACOUNT(lmp), plthint = 0;
 393         Rela            *rel;
 394         uint_t          binfo, pbinfo;
 395         APlist          *bound = NULL;
 396 
 397         /*
 398          * Although only necessary for lazy binding, initialize the first
 399          * global offset entry to go to elf_rtbndr().  dbx(1) seems
 400          * to find this useful.
 401          */
 402         if ((plt == 0) && PLTGOT(lmp)) {
 403                 mmapobj_result_t        *mpp;
 404 
 405                 /*
 406                  * Make sure the segment is writable.
 407                  */
 408                 if ((((mpp =
 409                     find_segment((caddr_t)PLTGOT(lmp), lmp)) != NULL) &&
 410                     ((mpp->mr_prot & PROT_WRITE) == 0)) &&
 411                     ((set_prot(lmp, mpp, 1) == 0) ||
 412                     (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL)))
 413                         return (0);
 414 
 415                 elf_plt_init(PLTGOT(lmp), (caddr_t)lmp);
 416         }
 417 
 418         /*
 419          * Initialize the plt start and end addresses.
 420          */
 421         if ((pltbgn = (ulong_t)JMPREL(lmp)) != 0)
 422                 pltend = pltbgn + (ulong_t)(PLTRELSZ(lmp));
 423 
 424         relsiz = (ulong_t)(RELENT(lmp));
 425         basebgn = ADDR(lmp);
 426 
 427         if (PLTRELSZ(lmp))
 428                 plthint = PLTRELSZ(lmp) / relsiz;
 429 
 430         /*
 431          * If we've been called upon to promote an RTLD_LAZY object to an
 432          * RTLD_NOW then we're only interested in scaning the .plt table.
 433          * An uninitialized .plt is the case where the associated got entry
 434          * points back to the plt itself.  Determine the range of the real .plt
 435          * entries using the _PROCEDURE_LINKAGE_TABLE_ symbol.
 436          */
 437         if (plt) {
 438                 Slookup sl;
 439                 Sresult sr;
 440 
 441                 relbgn = pltbgn;
 442                 relend = pltend;
 443                 if (!relbgn || (relbgn == relend))
 444                         return (1);
 445 
 446                 /*
 447                  * Initialize the symbol lookup, and symbol result, data
 448                  * structures.
 449                  */
 450                 SLOOKUP_INIT(sl, MSG_ORIG(MSG_SYM_PLT), lmp, lmp, ld_entry_cnt,
 451                     elf_hash(MSG_ORIG(MSG_SYM_PLT)), 0, 0, 0, LKUP_DEFT);
 452                 SRESULT_INIT(sr, MSG_ORIG(MSG_SYM_PLT));
 453 
 454                 if (elf_find_sym(&sl, &sr, &binfo, NULL) == 0)
 455                         return (1);
 456 
 457                 symdef = sr.sr_sym;
 458                 _pltbgn = symdef->st_value;
 459                 if (!(FLAGS(lmp) & FLG_RT_FIXED) &&
 460                     (symdef->st_shndx != SHN_ABS))
 461                         _pltbgn += basebgn;
 462                 _pltend = _pltbgn + (((PLTRELSZ(lmp) / relsiz)) *
 463                     M_PLT_ENTSIZE) + M_PLT_RESERVSZ;
 464 
 465         } else {
 466                 /*
 467                  * The relocation sections appear to the run-time linker as a
 468                  * single table.  Determine the address of the beginning and end
 469                  * of this table.  There are two different interpretations of
 470                  * the ABI at this point:
 471                  *
 472                  *   o  The REL table and its associated RELSZ indicate the
 473                  *      concatenation of *all* relocation sections (this is the
 474                  *      model our link-editor constructs).
 475                  *
 476                  *   o  The REL table and its associated RELSZ indicate the
 477                  *      concatenation of all *but* the .plt relocations.  These
 478                  *      relocations are specified individually by the JMPREL and
 479                  *      PLTRELSZ entries.
 480                  *
 481                  * Determine from our knowledege of the relocation range and
 482                  * .plt range, the range of the total relocation table.  Note
 483                  * that one other ABI assumption seems to be that the .plt
 484                  * relocations always follow any other relocations, the
 485                  * following range checking drops that assumption.
 486                  */
 487                 relbgn = (ulong_t)(REL(lmp));
 488                 relend = relbgn + (ulong_t)(RELSZ(lmp));
 489                 if (pltbgn) {
 490                         if (!relbgn || (relbgn > pltbgn))
 491                                 relbgn = pltbgn;
 492                         if (!relbgn || (relend < pltend))
 493                                 relend = pltend;
 494                 }
 495         }
 496         if (!relbgn || (relbgn == relend)) {
 497                 DBG_CALL(Dbg_reloc_run(lmp, 0, plt, DBG_REL_NONE));
 498                 return (1);
 499         }
 500         DBG_CALL(Dbg_reloc_run(lmp, M_REL_SHT_TYPE, plt, DBG_REL_START));
 501 
 502         /*
 503          * If we're processing a dynamic executable in lazy mode there is no
 504          * need to scan the .rel.plt table, however if we're processing a shared
 505          * object in lazy mode the .got addresses associated to each .plt must
 506          * be relocated to reflect the location of the shared object.
 507          */
 508         if (pltbgn && ((MODE(lmp) & RTLD_NOW) == 0) &&
 509             (FLAGS(lmp) & FLG_RT_FIXED))
 510                 noplt = 1;
 511 
 512         sip = SYMINFO(lmp);
 513         /*
 514          * Loop through relocations.
 515          */
 516         while (relbgn < relend) {
 517                 mmapobj_result_t        *mpp;
 518                 uint_t                  sb_flags = 0;
 519 
 520                 rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
 521 
 522                 /*
 523                  * If this is a RELATIVE relocation in a shared object (the
 524                  * common case), and if we are not debugging, then jump into a
 525                  * tighter relocation loop (elf_reloc_relative).
 526                  */
 527                 if ((rtype == R_AMD64_RELATIVE) &&
 528                     ((FLAGS(lmp) & FLG_RT_FIXED) == 0) && (DBG_ENABLED == 0)) {
 529                         if (relacount) {
 530                                 relbgn = elf_reloc_relative_count(relbgn,
 531                                     relacount, relsiz, basebgn, lmp,
 532                                     textrel, 0);
 533                                 relacount = 0;
 534                         } else {
 535                                 relbgn = elf_reloc_relative(relbgn, relend,
 536                                     relsiz, basebgn, lmp, textrel, 0);
 537                         }
 538                         if (relbgn >= relend)
 539                                 break;
 540                         rtype = ELF_R_TYPE(((Rela *)relbgn)->r_info, M_MACH);
 541                 }
 542 
 543                 roffset = ((Rela *)relbgn)->r_offset;
 544 
 545                 /*
 546                  * If this is a shared object, add the base address to offset.
 547                  */
 548                 if (!(FLAGS(lmp) & FLG_RT_FIXED)) {
 549                         /*
 550                          * If we're processing lazy bindings, we have to step
 551                          * through the plt entries and add the base address
 552                          * to the corresponding got entry.
 553                          */
 554                         if (plthint && (plt == 0) &&
 555                             (rtype == R_AMD64_JUMP_SLOT) &&
 556                             ((MODE(lmp) & RTLD_NOW) == 0)) {
 557                                 relbgn = elf_reloc_relative_count(relbgn,
 558                                     plthint, relsiz, basebgn, lmp, textrel, 1);
 559                                 plthint = 0;
 560                                 continue;
 561                         }
 562                         roffset += basebgn;
 563                 }
 564 
 565                 reladd = (long)(((Rela *)relbgn)->r_addend);
 566                 rsymndx = ELF_R_SYM(((Rela *)relbgn)->r_info);
 567                 rel = (Rela *)relbgn;
 568                 relbgn += relsiz;
 569 
 570                 /*
 571                  * Optimizations.
 572                  */
 573                 if (rtype == R_AMD64_NONE)
 574                         continue;
 575                 if (noplt && ((ulong_t)rel >= pltbgn) &&
 576                     ((ulong_t)rel < pltend)) {
 577                         relbgn = pltend;
 578                         continue;
 579                 }
 580 
 581                 /*
 582                  * If we're promoting plts, determine if this one has already
 583                  * been written.
 584                  */
 585                 if (plt && ((*(ulong_t *)roffset < _pltbgn) ||
 586                     (*(ulong_t *)roffset > _pltend)))
 587                         continue;
 588 
 589                 /*
 590                  * If this relocation is not against part of the image
 591                  * mapped into memory we skip it.
 592                  */
 593                 if ((mpp = find_segment((caddr_t)roffset, lmp)) == NULL) {
 594                         elf_reloc_bad(lmp, (void *)rel, rtype, roffset,
 595                             rsymndx);
 596                         continue;
 597                 }
 598 
 599                 binfo = 0;
 600                 /*
 601                  * If a symbol index is specified then get the symbol table
 602                  * entry, locate the symbol definition, and determine its
 603                  * address.
 604                  */
 605                 if (rsymndx) {
 606                         /*
 607                          * If a Syminfo section is provided, determine if this
 608                          * symbol is deferred, and if so, skip this relocation.
 609                          */
 610                         if (sip && is_sym_deferred((ulong_t)rel, basebgn, lmp,
 611                             textrel, sip, rsymndx))
 612                                 continue;
 613 
 614                         /*
 615                          * Get the local symbol table entry.
 616                          */
 617                         symref = (Sym *)((ulong_t)SYMTAB(lmp) +
 618                             (rsymndx * SYMENT(lmp)));
 619 
 620                         /*
 621                          * If this is a local symbol, just use the base address.
 622                          * (we should have no local relocations in the
 623                          * executable).
 624                          */
 625                         if (ELF_ST_BIND(symref->st_info) == STB_LOCAL) {
 626                                 value = basebgn;
 627                                 name = NULL;
 628 
 629                                 /*
 630                                  * Special case TLS relocations.
 631                                  */
 632                                 if (rtype == R_AMD64_DTPMOD64) {
 633                                         /*
 634                                          * Use the TLS modid.
 635                                          */
 636                                         value = TLSMODID(lmp);
 637 
 638                                 } else if ((rtype == R_AMD64_TPOFF64) ||
 639                                     (rtype == R_AMD64_TPOFF32)) {
 640                                         if ((value = elf_static_tls(lmp, symref,
 641                                             rel, rtype, 0, roffset, 0)) == 0) {
 642                                                 ret = 0;
 643                                                 break;
 644                                         }
 645                                 }
 646                         } else {
 647                                 /*
 648                                  * If the symbol index is equal to the previous
 649                                  * symbol index relocation we processed then
 650                                  * reuse the previous values. (Note that there
 651                                  * have been cases where a relocation exists
 652                                  * against a copy relocation symbol, our ld(1)
 653                                  * should optimize this away, but make sure we
 654                                  * don't use the same symbol information should
 655                                  * this case exist).
 656                                  */
 657                                 if ((rsymndx == psymndx) &&
 658                                     (rtype != R_AMD64_COPY)) {
 659                                         /* LINTED */
 660                                         if (psymdef == 0) {
 661                                                 DBG_CALL(Dbg_bind_weak(lmp,
 662                                                     (Addr)roffset, (Addr)
 663                                                     (roffset - basebgn), name));
 664                                                 continue;
 665                                         }
 666                                         /* LINTED */
 667                                         value = pvalue;
 668                                         /* LINTED */
 669                                         name = pname;
 670                                         /* LINTED */
 671                                         symdef = psymdef;
 672                                         /* LINTED */
 673                                         symref = psymref;
 674                                         /* LINTED */
 675                                         _lmp = plmp;
 676                                         /* LINTED */
 677                                         binfo = pbinfo;
 678 
 679                                         if ((LIST(_lmp)->lm_tflags |
 680                                             AFLAGS(_lmp)) &
 681                                             LML_TFLG_AUD_SYMBIND) {
 682                                                 value = audit_symbind(lmp, _lmp,
 683                                                     /* LINTED */
 684                                                     symdef, dsymndx, value,
 685                                                     &sb_flags);
 686                                         }
 687                                 } else {
 688                                         Slookup         sl;
 689                                         Sresult         sr;
 690 
 691                                         /*
 692                                          * Lookup the symbol definition.
 693                                          * Initialize the symbol lookup, and
 694                                          * symbol result, data structure.
 695                                          */
 696                                         name = (char *)(STRTAB(lmp) +
 697                                             symref->st_name);
 698 
 699                                         SLOOKUP_INIT(sl, name, lmp, 0,
 700                                             ld_entry_cnt, 0, rsymndx, symref,
 701                                             rtype, LKUP_STDRELOC);
 702                                         SRESULT_INIT(sr, name);
 703                                         symdef = NULL;
 704 
 705                                         if (lookup_sym(&sl, &sr, &binfo,
 706                                             in_nfavl)) {
 707                                                 name = (char *)sr.sr_name;
 708                                                 _lmp = sr.sr_dmap;
 709                                                 symdef = sr.sr_sym;
 710                                         }
 711 
 712                                         /*
 713                                          * If the symbol is not found and the
 714                                          * reference was not to a weak symbol,
 715                                          * report an error.  Weak references
 716                                          * may be unresolved.
 717                                          */
 718                                         /* BEGIN CSTYLED */
 719                                         if (symdef == 0) {
 720                                             if (sl.sl_bind != STB_WEAK) {
 721                                                 if (elf_reloc_error(lmp, name,
 722                                                     rel, binfo))
 723                                                         continue;
 724 
 725                                                 ret = 0;
 726                                                 break;
 727 
 728                                             } else {
 729                                                 psymndx = rsymndx;
 730                                                 psymdef = 0;
 731 
 732                                                 DBG_CALL(Dbg_bind_weak(lmp,
 733                                                     (Addr)roffset, (Addr)
 734                                                     (roffset - basebgn), name));
 735                                                 continue;
 736                                             }
 737                                         }
 738                                         /* END CSTYLED */
 739 
 740                                         /*
 741                                          * If symbol was found in an object
 742                                          * other than the referencing object
 743                                          * then record the binding.
 744                                          */
 745                                         if ((lmp != _lmp) && ((FLAGS1(_lmp) &
 746                                             FL1_RT_NOINIFIN) == 0)) {
 747                                                 if (aplist_test(&bound, _lmp,
 748                                                     AL_CNT_RELBIND) == 0) {
 749                                                         ret = 0;
 750                                                         break;
 751                                                 }
 752                                         }
 753 
 754                                         /*
 755                                          * Calculate the location of definition;
 756                                          * symbol value plus base address of
 757                                          * containing shared object.
 758                                          */
 759                                         if (IS_SIZE(rtype))
 760                                                 value = symdef->st_size;
 761                                         else
 762                                                 value = symdef->st_value;
 763 
 764                                         if (!(FLAGS(_lmp) & FLG_RT_FIXED) &&
 765                                             !(IS_SIZE(rtype)) &&
 766                                             (symdef->st_shndx != SHN_ABS) &&
 767                                             (ELF_ST_TYPE(symdef->st_info) !=
 768                                             STT_TLS))
 769                                                 value += ADDR(_lmp);
 770 
 771                                         /*
 772                                          * Retain this symbol index and the
 773                                          * value in case it can be used for the
 774                                          * subsequent relocations.
 775                                          */
 776                                         if (rtype != R_AMD64_COPY) {
 777                                                 psymndx = rsymndx;
 778                                                 pvalue = value;
 779                                                 pname = name;
 780                                                 psymdef = symdef;
 781                                                 psymref = symref;
 782                                                 plmp = _lmp;
 783                                                 pbinfo = binfo;
 784                                         }
 785                                         if ((LIST(_lmp)->lm_tflags |
 786                                             AFLAGS(_lmp)) &
 787                                             LML_TFLG_AUD_SYMBIND) {
 788                                                 dsymndx = (((uintptr_t)symdef -
 789                                                     (uintptr_t)SYMTAB(_lmp)) /
 790                                                     SYMENT(_lmp));
 791                                                 value = audit_symbind(lmp, _lmp,
 792                                                     symdef, dsymndx, value,
 793                                                     &sb_flags);
 794                                         }
 795                                 }
 796 
 797                                 /*
 798                                  * If relocation is PC-relative, subtract
 799                                  * offset address.
 800                                  */
 801                                 if (IS_PC_RELATIVE(rtype))
 802                                         value -= roffset;
 803 
 804                                 /*
 805                                  * Special case TLS relocations.
 806                                  */
 807                                 if (rtype == R_AMD64_DTPMOD64) {
 808                                         /*
 809                                          * Relocation value is the TLS modid.
 810                                          */
 811                                         value = TLSMODID(_lmp);
 812 
 813                                 } else if ((rtype == R_AMD64_TPOFF64) ||
 814                                     (rtype == R_AMD64_TPOFF32)) {
 815                                         if ((value = elf_static_tls(_lmp,
 816                                             symdef, rel, rtype, name, roffset,
 817                                             value)) == 0) {
 818                                                 ret = 0;
 819                                                 break;
 820                                         }
 821                                 }
 822                         }
 823                 } else {
 824                         /*
 825                          * Special cases.
 826                          */
 827                         if (rtype == R_AMD64_DTPMOD64) {
 828                                 /*
 829                                  * TLS relocation value is the TLS modid.
 830                                  */
 831                                 value = TLSMODID(lmp);
 832                         } else
 833                                 value = basebgn;
 834 
 835                         name = NULL;
 836                 }
 837 
 838                 DBG_CALL(Dbg_reloc_in(LIST(lmp), ELF_DBG_RTLD, M_MACH,
 839                     M_REL_SHT_TYPE, rel, NULL, 0, name));
 840 
 841                 /*
 842                  * Make sure the segment is writable.
 843                  */
 844                 if (((mpp->mr_prot & PROT_WRITE) == 0) &&
 845                     ((set_prot(lmp, mpp, 1) == 0) ||
 846                     (aplist_append(textrel, mpp, AL_CNT_TEXTREL) == NULL))) {
 847                         ret = 0;
 848                         break;
 849                 }
 850 
 851                 /*
 852                  * Call relocation routine to perform required relocation.
 853                  */
 854                 switch (rtype) {
 855                 case R_AMD64_COPY:
 856                         if (elf_copy_reloc(name, symref, lmp, (void *)roffset,
 857                             symdef, _lmp, (const void *)value) == 0)
 858                                 ret = 0;
 859                         break;
 860                 case R_AMD64_JUMP_SLOT:
 861                         if (((LIST(lmp)->lm_tflags | AFLAGS(lmp)) &
 862                             (LML_TFLG_AUD_PLTENTER | LML_TFLG_AUD_PLTEXIT)) &&
 863                             AUDINFO(lmp)->ai_dynplts) {
 864                                 int     fail = 0;
 865                                 int     pltndx = (((ulong_t)rel -
 866                                     (uintptr_t)JMPREL(lmp)) / relsiz);
 867                                 int     symndx = (((uintptr_t)symdef -
 868                                     (uintptr_t)SYMTAB(_lmp)) / SYMENT(_lmp));
 869 
 870                                 (void) elf_plt_trace_write(roffset, lmp, _lmp,
 871                                     symdef, symndx, pltndx, (caddr_t)value,
 872                                     sb_flags, &fail);
 873                                 if (fail)
 874                                         ret = 0;
 875                         } else {
 876                                 /*
 877                                  * Write standard PLT entry to jump directly
 878                                  * to newly bound function.
 879                                  */
 880                                 DBG_CALL(Dbg_reloc_apply_val(LIST(lmp),
 881                                     ELF_DBG_RTLD, (Xword)roffset,
 882                                     (Xword)value));
 883                                 *(ulong_t *)roffset = value;
 884                         }
 885                         break;
 886                 default:
 887                         value += reladd;
 888                         /*
 889                          * Write the relocation out.
 890                          */
 891                         if (do_reloc_rtld(rtype, (uchar_t *)roffset,
 892                             (Xword *)&value, name, NAME(lmp), LIST(lmp)) == 0)
 893                                 ret = 0;
 894 
 895                         DBG_CALL(Dbg_reloc_apply_val(LIST(lmp), ELF_DBG_RTLD,
 896                             (Xword)roffset, (Xword)value));
 897                 }
 898 
 899                 if ((ret == 0) &&
 900                     ((LIST(lmp)->lm_flags & LML_FLG_TRC_WARN) == 0))
 901                         break;
 902 
 903                 if (binfo) {
 904                         DBG_CALL(Dbg_bind_global(lmp, (Addr)roffset,
 905                             (Off)(roffset - basebgn), (Xword)(-1), PLT_T_FULL,
 906                             _lmp, (Addr)value, symdef->st_value, name, binfo));
 907                 }
 908         }
 909 
 910         return (relocate_finish(lmp, bound, ret));
 911 }
 912 
 913 /*
 914  * Initialize the first few got entries so that function calls go to
 915  * elf_rtbndr:
 916  *
 917  *      GOT[GOT_XLINKMAP] =     the address of the link map
 918  *      GOT[GOT_XRTLD] =        the address of rtbinder
 919  */
 920 void
 921 elf_plt_init(void *got, caddr_t l)
 922 {
 923         uint64_t        *_got;
 924         /* LINTED */
 925         Rt_map          *lmp = (Rt_map *)l;
 926 
 927         _got = (uint64_t *)got + M_GOT_XLINKMAP;
 928         *_got = (uint64_t)lmp;
 929         _got = (uint64_t *)got + M_GOT_XRTLD;
 930         *_got = (uint64_t)elf_rtbndr;
 931 }
 932 
 933 /*
 934  * Plt writing interface to allow debugging initialization to be generic.
 935  */
 936 Pltbindtype
 937 /* ARGSUSED1 */
 938 elf_plt_write(uintptr_t addr, uintptr_t vaddr, void *rptr, uintptr_t symval,
 939         Xword pltndx)
 940 {
 941         Rela            *rel = (Rela*)rptr;
 942         uintptr_t       pltaddr;
 943 
 944         pltaddr = addr + rel->r_offset;
 945         *(ulong_t *)pltaddr = (ulong_t)symval + rel->r_addend;
 946         DBG_CALL(pltcntfull++);
 947         return (PLT_T_FULL);
 948 }
 949 
 950 /*
 951  * Provide a machine specific interface to the conversion routine.  By calling
 952  * the machine specific version, rather than the generic version, we insure that
 953  * the data tables/strings for all known machine versions aren't dragged into
 954  * ld.so.1.
 955  */
 956 const char *
 957 _conv_reloc_type(uint_t rel)
 958 {
 959         static Conv_inv_buf_t   inv_buf;
 960 
 961         return (conv_reloc_amd64_type(rel, 0, &inv_buf));
 962 }