XXXX adding PID information to netstat output

*** 649,663 **** --- 649,666 ----
          stdata_t *stp = buf;

          mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
          mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
          mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
+         mutex_init(&stp->sd_pid_list_lock, NULL, MUTEX_DEFAULT, NULL);
          cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
          cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
          cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
          cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
          cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
+         list_create(&stp->sd_pid_list, sizeof (pid_node_t),
+             offsetof(pid_node_t, pn_ref_link));
          stp->sd_wrq = NULL;

          return (0);
  }
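
The new sd_pid_list created above holds one pid_node_t per process referencing the
stream head, linked through pn_ref_link. The pid_node_t definition itself is not
part of this webrev section, so the sketch below is only an assumed layout inferred
from the fields the diff uses (pn_ref_link, pn_pid, pn_count); the actual header may
differ. With a layout like this, list_create() links nodes through pn_ref_link, and
the sh_insert_pid()/sh_remove_pid() routines added later in this file keep a per-PID
reference count rather than one node per open.

/*
 * Assumed sketch of pid_node_t (types are guesses; names come from the diff).
 * Requires <sys/types.h> and <sys/list.h>.
 */
typedef struct pid_node {
        list_node_t     pn_ref_link;    /* linkage on stp->sd_pid_list */
        pid_t           pn_pid;         /* PID holding a reference on the stream */
        uint32_t        pn_count;       /* number of references held by that PID */
} pid_node_t;
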
*** 668,682 **** --- 671,687 ----
          stdata_t *stp = buf;

          mutex_destroy(&stp->sd_lock);
          mutex_destroy(&stp->sd_reflock);
          mutex_destroy(&stp->sd_qlock);
+         mutex_destroy(&stp->sd_pid_list_lock);
          cv_destroy(&stp->sd_monitor);
          cv_destroy(&stp->sd_iocmonitor);
          cv_destroy(&stp->sd_refmonitor);
          cv_destroy(&stp->sd_qcv);
          cv_destroy(&stp->sd_zcopy_wait);
+         list_destroy(&stp->sd_pid_list);
  }

  /*
   * Constructor/destructor routines for the queue cache
   */
*** 3304,3323 **** --- 3309,3331 ----
          stp->sd_qtail = NULL;
          stp->sd_servid = NULL;
          stp->sd_nqueues = 0;
          stp->sd_svcflags = 0;
          stp->sd_copyflag = 0;
+         sh_insert_pid(stp, curproc);

          return (stp);
  }

  /*
   * Free a stream head.
   */
  void
  shfree(stdata_t *stp)
  {
+         pid_node_t *pn;
+
          ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));

          stp->sd_wrq = NULL;

          mutex_enter(&stp->sd_qlock);
*** 3337,3349 **** --- 3345,3446 ----
                  stp->sd_nciputctrl = 0;
          }
          ASSERT(stp->sd_qhead == NULL);
          ASSERT(stp->sd_qtail == NULL);
          ASSERT(stp->sd_nqueues == 0);
+
+         mutex_enter(&stp->sd_pid_list_lock);
+         while ((pn = list_head(&stp->sd_pid_list)) != NULL) {
+                 list_remove(&stp->sd_pid_list, pn);
+                 kmem_free(pn, sizeof (*pn));
+         }
+         mutex_exit(&stp->sd_pid_list_lock);
+
          kmem_cache_free(stream_head_cache, stp);
  }

+ void
+ sh_insert_pid(struct stdata *stp, proc_t *p)
+ {
+         pid_node_t *pn;
+
+         mutex_enter(&stp->sd_pid_list_lock);
+         pn = list_head(&stp->sd_pid_list);
+         while (pn != NULL && pn->pn_pid != p->p_pidp->pid_id) {
+                 pn = list_next(&stp->sd_pid_list, pn);
+         }
+
+         if (pn != NULL) {
+                 pn->pn_count++;
+         } else {
+                 pn = kmem_zalloc(sizeof (*pn), KM_SLEEP);
+                 list_link_init(&pn->pn_ref_link);
+                 pn->pn_pid = p->p_pidp->pid_id;
+                 pn->pn_count = 1;
+                 list_insert_tail(&stp->sd_pid_list, pn);
+         }
+         mutex_exit(&stp->sd_pid_list_lock);
+ }
+ void
+ sh_remove_pid(struct stdata *stp, proc_t *p)
+ {
+         pid_node_t *pn;
+
+         mutex_enter(&stp->sd_pid_list_lock);
+         pn = list_head(&stp->sd_pid_list);
+         while (pn != NULL && pn->pn_pid != p->p_pidp->pid_id) {
+                 pn = list_next(&stp->sd_pid_list, pn);
+         }
+
+         if (pn != NULL) {
+                 if (pn->pn_count > 1)
+                         pn->pn_count--;
+                 else {
+                         list_remove(&stp->sd_pid_list, pn);
+                         kmem_free(pn, sizeof (*pn));
+                 }
+         }
+         mutex_exit(&stp->sd_pid_list_lock);
+ }
+
+ conn_pid_node_list_hdr_t *
+ sh_get_pid_list(struct stdata *stp)
+ {
+         int sz, n = 0;
+         pid_node_t *pn;
+         conn_pid_node_t *cpn;
+         conn_pid_node_list_hdr_t *cph;
+
+         mutex_enter(&stp->sd_pid_list_lock);
+
+         n = list_size(&stp->sd_pid_list);
+         sz = sizeof (conn_pid_node_list_hdr_t);
+         sz += (n > 1)?((n - 1) * sizeof (conn_pid_node_t)):0;
+
+         cph = kmem_zalloc(sz, KM_SLEEP);
+         cph->cph_magic = CONN_PID_NODE_LIST_HDR_MAGIC;
+         cph->cph_contents = CONN_PID_NODE_LIST_HDR_XTI;
+         cph->cph_pn_cnt = n;
+         cph->cph_tot_size = sz;
+         cph->cph_flags = 0;
+         cph->cph_optional1 = 0;
+         cph->cph_optional2 = 0;
+
+         if (cph->cph_pn_cnt > 0) {
+                 cpn = cph->cph_cpns;
+                 pn = list_head(&stp->sd_pid_list);
+                 while (pn != NULL) {
+                         PIDNODE2CONNPIDNODE(pn, cpn);
+                         pn = list_next(&stp->sd_pid_list, pn);
+                         cpn++;
+                 }
+         }
+
+         mutex_exit(&stp->sd_pid_list_lock);
+         return (cph);
+ }
+
  /*
   * Allocate a pair of queues and a syncq for the pair
   */
  queue_t *
  allocq(void)
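
To show how the new list is meant to be consumed, here is a minimal, hypothetical
caller of sh_get_pid_list(). Only the header fields set above (cph_pn_cnt,
cph_tot_size, cph_cpns) are taken from the diff; the cpn_pid member of
conn_pid_node_t and the dump_stream_pids() wrapper are assumptions for
illustration. The contract visible in sh_get_pid_list() is that the header is
allocated with room for cph_pn_cnt entries and records its own allocation size,
so the caller must free it using cph_tot_size.

/* Hypothetical consumer; assumes conn_pid_node_t exposes a cpn_pid field. */
static void
dump_stream_pids(struct stdata *stp)
{
        conn_pid_node_list_hdr_t *cph = sh_get_pid_list(stp);
        int i;

        /* cph_cpns holds cph_pn_cnt entries copied out of sd_pid_list */
        for (i = 0; i < cph->cph_pn_cnt; i++) {
                cmn_err(CE_CONT, "stream pid: %d\n",
                    (int)cph->cph_cpns[i].cpn_pid);
        }

        /* sh_get_pid_list() recorded the allocation size in cph_tot_size */
        kmem_free(cph, cph->cph_tot_size);
}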