Print this page
3625 we only need one thread_create_intr

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/disp/thread.c
          +++ new/usr/src/uts/common/disp/thread.c
↓ open down ↓ 311 lines elided ↑ open up ↑
 312  312  
 313  313  /*
 314  314   * Create a thread.
 315  315   *
 316  316   * thread_create() blocks for memory if necessary.  It never fails.
 317  317   *
 318  318   * If stk is NULL, the thread is created at the base of the stack
 319  319   * and cannot be swapped.
 320  320   */
 321  321  kthread_t *
 322      -thread_create(
 323      -        caddr_t stk,
 324      -        size_t  stksize,
 325      -        void    (*proc)(),
 326      -        void    *arg,
 327      -        size_t  len,
 328      -        proc_t   *pp,
 329      -        int     state,
 330      -        pri_t   pri)
      322 +thread_create(caddr_t stk, size_t stksize, void (*proc)(), void *arg,
      323 +    size_t len, proc_t *pp, int state, pri_t pri)
 331  324  {
 332  325          kthread_t *t;
 333  326          extern struct classfuncs sys_classfuncs;
 334  327          turnstile_t *ts;
 335  328  
 336  329          /*
 337  330           * Every thread keeps a turnstile around in case it needs to block.
 338  331           * The only reason the turnstile is not simply part of the thread
 339  332           * structure is that we may have to break the association whenever
 340  333           * more than one thread blocks on a given synchronization object.
↓ open down ↓ 543 lines elided ↑ open up ↑
 884  877          /*
 885  878           * Reap lwps
 886  879           */
 887  880          thread_reap_list(l);
 888  881  }
 889  882  
 890  883  /*
 891  884   * cleanup zombie threads that are on deathrow.
 892  885   */
 893  886  void
 894      -thread_reaper()
      887 +thread_reaper(void)
 895  888  {
 896  889          kthread_t *t, *l;
 897  890          callb_cpr_t cprinfo;
 898  891  
 899  892          /*
 900  893           * Register callback to clean up threads when zone is destroyed.
 901  894           */
 902  895          zone_key_create(&zone_thread_key, NULL, NULL, thread_zone_destroy);
 903  896  
 904  897          CALLB_CPR_INIT(&cprinfo, &reaplock, callb_generic_cpr, "t_reaper");
↓ open down ↓ 116 lines elided ↑ open up ↑
1021 1014          thread_lock(t);
1022 1015          thread_unlock(t);
1023 1016  
1024 1017          mutex_exit(&reaplock);
1025 1018  }
1026 1019  
1027 1020  /*
1028 1021   * Install thread context ops for the current thread.
1029 1022   */
1030 1023  void
1031      -installctx(
1032      -        kthread_t *t,
1033      -        void    *arg,
1034      -        void    (*save)(void *),
1035      -        void    (*restore)(void *),
1036      -        void    (*fork)(void *, void *),
1037      -        void    (*lwp_create)(void *, void *),
1038      -        void    (*exit)(void *),
1039      -        void    (*free)(void *, int))
     1024 +installctx(kthread_t *t, void *arg, void (*save)(void *),
     1025 +    void (*restore)(void *), void (*fork)(void *, void *),
     1026 +    void (*lwp_create)(void *, void *), void (*exit)(void *),
     1027 +    void (*free)(void *, int))
1040 1028  {
1041 1029          struct ctxop *ctx;
1042 1030  
1043 1031          ctx = kmem_alloc(sizeof (struct ctxop), KM_SLEEP);
1044 1032          ctx->save_op = save;
1045 1033          ctx->restore_op = restore;
1046 1034          ctx->fork_op = fork;
1047 1035          ctx->lwp_create_op = lwp_create;
1048 1036          ctx->exit_op = exit;
1049 1037          ctx->free_op = free;
1050 1038          ctx->arg = arg;
1051 1039          ctx->next = t->t_ctx;
1052 1040          t->t_ctx = ctx;
1053 1041  }
1054 1042  
1055 1043  /*
1056 1044   * Remove the thread context ops from a thread.
1057 1045   */
1058 1046  int
1059      -removectx(
1060      -        kthread_t *t,
1061      -        void    *arg,
1062      -        void    (*save)(void *),
1063      -        void    (*restore)(void *),
1064      -        void    (*fork)(void *, void *),
1065      -        void    (*lwp_create)(void *, void *),
1066      -        void    (*exit)(void *),
1067      -        void    (*free)(void *, int))
     1047 +removectx(kthread_t *t, void *arg, void (*save)(void *),
     1048 +    void (*restore)(void *), void (*fork)(void *, void *),
     1049 +    void (*lwp_create)(void *, void *), void (*exit)(void *),
     1050 +    void (*free)(void *, int))
1068 1051  {
1069 1052          struct ctxop *ctx, *prev_ctx;
1070 1053  
1071 1054          /*
1072 1055           * The incoming kthread_t (which is the thread for which the
1073 1056           * context ops will be removed) should be one of the following:
1074 1057           *
1075 1058           * a) the current thread,
1076 1059           *
1077 1060           * b) a thread of a process that's being forked (SIDL),
↓ open down ↓ 215 lines elided ↑ open up ↑
1293 1276   *      of an interrupt thread, taken from a pool linked to the CPU structure.
1294 1277   *
1295 1278   *      When swtch() is switching away from an interrupt thread because it
1296 1279   *      blocked or was preempted, this routine is called to complete the
1297 1280   *      saving of the interrupted thread state, and returns the interrupted
1298 1281   *      thread pointer so it may be resumed.
1299 1282   *
1300 1283   *      Called by swtch() only at high spl.
1301 1284   */
1302 1285  kthread_t *
1303      -thread_unpin()
     1286 +thread_unpin(void)
1304 1287  {
1305 1288          kthread_t       *t = curthread; /* current thread */
1306 1289          kthread_t       *itp;           /* interrupted thread */
1307 1290          int             i;              /* interrupt level */
1308 1291          extern int      intr_passivate();
1309 1292  
1310 1293          ASSERT(t->t_intr != NULL);
1311 1294  
1312 1295          itp = t->t_intr;                /* interrupted thread */
1313 1296          t->t_intr = NULL;               /* clear interrupt ptr */
↓ open down ↓ 27 lines elided ↑ open up ↑
1341 1324           * Compute the CPU's base interrupt level based on the active
1342 1325           * interrupts.
1343 1326           */
1344 1327          ASSERT(CPU->cpu_intr_actv & (1 << i));
1345 1328          set_base_spl();
1346 1329  
1347 1330          return (itp);
1348 1331  }
1349 1332  
1350 1333  /*
 * Create and initialize an interrupt thread for CPU cp and link it
 * onto the CPU's private pool of interrupt threads.
 *	Called at spl7() or better.
 *
 *	(The previous version of this comment claimed "Returns non-zero
 *	on error", but the function is void and thread_create() never
 *	fails -- it blocks for memory if necessary.)
 */
void
thread_create_intr(struct cpu *cp)
{
	kthread_t *tp;

	/*
	 * NOTE(review): thread_create_intr itself is passed as the start
	 * routine only as a placeholder; presumably an interrupt thread
	 * is dispatched directly by the interrupt machinery and never
	 * actually runs this routine -- confirm before relying on it.
	 */
	tp = thread_create(NULL, 0,
	    (void (*)())thread_create_intr, NULL, 0, &p0, TS_ONPROC, 0);

	/*
	 * Set the thread in the TS_FREE state.  The state will change
	 * to TS_ONPROC only while the interrupt is active.  Think of these
	 * as being on a private free list for the CPU.  Being TS_FREE keeps
	 * inactive interrupt threads out of debugger thread lists.
	 *
	 * We cannot call thread_create with TS_FREE because of the current
	 * checks there for ONPROC.  Fix this when thread_create takes flags.
	 */
	THREAD_FREEINTR(tp, cp);

	/*
	 * Nobody should ever reference the credentials of an interrupt
	 * thread so make it NULL to catch any such references.
	 */
	tp->t_cred = NULL;
	tp->t_flag |= T_INTR_THREAD;
	/* Bind the thread to this CPU and its dispatch queue. */
	tp->t_cpu = cp;
	tp->t_bound_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;
	tp->t_affinitycnt = 1;
	tp->t_preempt = 1;

	/*
	 * Don't make a user-requested binding on this thread so that
	 * the processor can be offlined.
	 */
	tp->t_bind_cpu = PBIND_NONE;	/* no USER-requested binding */
	tp->t_bind_pset = PS_NONE;

#if defined(__i386) || defined(__amd64)
	tp->t_stk -= STACK_ALIGN;
	*(tp->t_stk) = 0;		/* terminate intr thread stack */
#endif

	/*
	 * Link onto CPU's interrupt pool.
	 */
	tp->t_link = cp->cpu_intr_thread;
	cp->cpu_intr_thread = tp;
}
1404      -
1405      -/*
1406 1334   * TSD -- THREAD SPECIFIC DATA
1407 1335   */
1408 1336  static kmutex_t         tsd_mutex;       /* linked list spin lock */
1409 1337  static uint_t           tsd_nkeys;       /* size of destructor array */
1410 1338  /* per-key destructor funcs */
1411 1339  static void             (**tsd_destructor)(void *);
1412 1340  /* list of tsd_thread's */
1413 1341  static struct tsd_thread        *tsd_list;
1414 1342  
1415 1343  /*
↓ open down ↓ 706 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX