XXXX adding PID information to netstat output

*** 649,663 ****
--- 649,666 ----
  	stdata_t *stp = buf;
  
  	mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
  	mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
  	mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
+ 	mutex_init(&stp->sd_pid_list_lock, NULL, MUTEX_DEFAULT, NULL);
  	cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
  	cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
  	cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
  	cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
  	cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
+ 	list_create(&stp->sd_pid_list, sizeof (pid_node_t),
+ 	    offsetof(pid_node_t, pn_ref_link));
  	stp->sd_wrq = NULL;
  
  	return (0);
  }
  
*** 668,682 ****
--- 671,687 ----
  	stdata_t *stp = buf;
  
  	mutex_destroy(&stp->sd_lock);
  	mutex_destroy(&stp->sd_reflock);
  	mutex_destroy(&stp->sd_qlock);
+ 	mutex_destroy(&stp->sd_pid_list_lock);
  	cv_destroy(&stp->sd_monitor);
  	cv_destroy(&stp->sd_iocmonitor);
  	cv_destroy(&stp->sd_refmonitor);
  	cv_destroy(&stp->sd_qcv);
  	cv_destroy(&stp->sd_zcopy_wait);
+ 	list_destroy(&stp->sd_pid_list);
  }
  
  /*
   * Constructor/destructor routines for the queue cache
   */
*** 3314,3323 ****
--- 3319,3330 ----
   * Free a stream head.
   */
  void
  shfree(stdata_t *stp)
  {
+ 	pid_node_t *pn;
+ 
  	ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
  
  	stp->sd_wrq = NULL;
  
  	mutex_enter(&stp->sd_qlock);
*** 3337,3349 ****
--- 3344,3445 ----
  		stp->sd_nciputctrl = 0;
  	}
  	ASSERT(stp->sd_qhead == NULL);
  	ASSERT(stp->sd_qtail == NULL);
  	ASSERT(stp->sd_nqueues == 0);
+ 
+ 	mutex_enter(&stp->sd_pid_list_lock);
+ 	while ((pn = list_head(&stp->sd_pid_list)) != NULL) {
+ 		list_remove(&stp->sd_pid_list, pn);
+ 		kmem_free(pn, sizeof (*pn));
+ 	}
+ 	mutex_exit(&stp->sd_pid_list_lock);
+ 
  	kmem_cache_free(stream_head_cache, stp);
  }
  
+ void
+ sh_insert_pid(struct stdata *stp, pid_t pid)
+ {
+ 	pid_node_t *pn;
+ 
+ 	mutex_enter(&stp->sd_pid_list_lock);
+ 	for (pn = list_head(&stp->sd_pid_list);
+ 	    pn != NULL && pn->pn_pid != pid;
+ 	    pn = list_next(&stp->sd_pid_list, pn))
+ 		;
+ 
+ 	if (pn != NULL) {
+ 		pn->pn_count++;
+ 	} else {
+ 		pn = kmem_zalloc(sizeof (*pn), KM_SLEEP);
+ 		list_link_init(&pn->pn_ref_link);
+ 		pn->pn_pid = pid;
+ 		pn->pn_count = 1;
+ 		list_insert_tail(&stp->sd_pid_list, pn);
+ 	}
+ 	mutex_exit(&stp->sd_pid_list_lock);
+ }
+ 
+ void
+ sh_remove_pid(struct stdata *stp, pid_t pid)
+ {
+ 	pid_node_t *pn;
+ 
+ 	mutex_enter(&stp->sd_pid_list_lock);
+ 	for (pn = list_head(&stp->sd_pid_list);
+ 	    pn != NULL && pn->pn_pid != pid;
+ 	    pn = list_next(&stp->sd_pid_list, pn))
+ 		;
+ 
+ 	if (pn != NULL) {
+ 		if (pn->pn_count > 1) {
+ 			pn->pn_count--;
+ 		} else {
+ 			list_remove(&stp->sd_pid_list, pn);
+ 			kmem_free(pn, sizeof (*pn));
+ 		}
+ 	}
+ 	mutex_exit(&stp->sd_pid_list_lock);
+ }
+ 
+ mblk_t *
+ sh_get_pid_mblk(struct stdata *stp)
+ {
+ 	mblk_t *mblk;
+ 	int sz, n = 0;
+ 	pid_t *pids;
+ 	pid_node_t *pn;
+ 	conn_pid_info_t *cpi;
+ 
+ 	mutex_enter(&stp->sd_pid_list_lock);
+ 
+ 	n = list_numnodes(&stp->sd_pid_list);
+ 	sz = sizeof (conn_pid_info_t);
+ 	sz += (n > 1) ? ((n - 1) * sizeof (pid_t)) : 0;
+ 	if ((mblk = allocb(sz, BPRI_HI)) == NULL) {
+ 		mutex_exit(&stp->sd_pid_list_lock);
+ 		return (NULL);
+ 	}
+ 	mblk->b_wptr += sz;
+ 	cpi = (conn_pid_info_t *)mblk->b_datap->db_base;
+ 	cpi->cpi_magic = CONN_PID_INFO_MGC;
+ 	cpi->cpi_contents = CONN_PID_INFO_XTI;
+ 	cpi->cpi_pids_cnt = n;
+ 	cpi->cpi_tot_size = sz;
+ 	cpi->cpi_pids[0] = 0;
+ 
+ 	if (cpi->cpi_pids_cnt > 0) {
+ 		pids = cpi->cpi_pids;
+ 		for (pn = list_head(&stp->sd_pid_list); pn != NULL;
+ 		    pids++, pn = list_next(&stp->sd_pid_list, pn))
+ 			*pids = pn->pn_pid;
+ 	}
+ 	mutex_exit(&stp->sd_pid_list_lock);
+ 	return (mblk);
+ }
+ 
  /*
   * Allocate a pair of queues and a syncq for the pair
   */
  queue_t *
  allocq(void)
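
For reference, a minimal userland-style sketch of how a netstat-like consumer might walk the conn_pid_info_t payload that sh_get_pid_mblk() builds. The structure layout and magic value below are assumptions reconstructed from the fields used in the hunk above, not definitions taken from the real headers, and print_stream_pids()/main() are hypothetical demo code, not part of this change.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>

/* Placeholder value; the real CONN_PID_INFO_MGC is defined in a kernel header. */
#define	CONN_PID_INFO_MGC	0xc011fefe

/* Assumed layout, reconstructed from the fields referenced in sh_get_pid_mblk(). */
typedef struct conn_pid_info {
	uint32_t	cpi_magic;	/* consumer sanity check */
	uint32_t	cpi_contents;	/* XTI vs. socket contents tag */
	uint32_t	cpi_pids_cnt;	/* number of valid entries in cpi_pids[] */
	uint32_t	cpi_tot_size;	/* total payload size in bytes */
	pid_t		cpi_pids[1];	/* first PID; cnt - 1 more follow in line */
} conn_pid_info_t;

/* Hypothetical consumer: check the magic and print every PID on the stream. */
static void
print_stream_pids(const conn_pid_info_t *cpi)
{
	uint32_t i;

	if (cpi->cpi_magic != CONN_PID_INFO_MGC) {
		(void) fprintf(stderr, "bad conn_pid_info magic\n");
		return;
	}
	for (i = 0; i < cpi->cpi_pids_cnt; i++)
		(void) printf("pid %ld\n", (long)cpi->cpi_pids[i]);
}

int
main(void)
{
	uint32_t n = 2;		/* made-up PID count for the demo */
	/* Mirror the kernel's sizing: one pid_t already lives inside the struct. */
	size_t sz = sizeof (conn_pid_info_t) + (n - 1) * sizeof (pid_t);
	conn_pid_info_t *cpi = malloc(sz);

	if (cpi == NULL)
		return (1);
	cpi->cpi_magic = CONN_PID_INFO_MGC;
	cpi->cpi_contents = 0;		/* contents tag unused in this demo */
	cpi->cpi_pids_cnt = n;
	cpi->cpi_tot_size = (uint32_t)sz;
	cpi->cpi_pids[0] = 1234;	/* made-up PIDs */
	cpi->cpi_pids[1] = 5678;
	print_stream_pids(cpi);
	free(cpi);
	return (0);
}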