Print this page
XXXX: add PID information to netstat output (XXXX is a bug-ID placeholder — fill in the issue number before integration)
@@ -649,15 +649,18 @@
stdata_t *stp = buf;
mutex_init(&stp->sd_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&stp->sd_reflock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&stp->sd_qlock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&stp->sd_pid_tree_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&stp->sd_monitor, NULL, CV_DEFAULT, NULL);
cv_init(&stp->sd_iocmonitor, NULL, CV_DEFAULT, NULL);
cv_init(&stp->sd_refmonitor, NULL, CV_DEFAULT, NULL);
cv_init(&stp->sd_qcv, NULL, CV_DEFAULT, NULL);
cv_init(&stp->sd_zcopy_wait, NULL, CV_DEFAULT, NULL);
+ avl_create(&stp->sd_pid_tree, pid_node_comparator, sizeof (pid_node_t),
+ offsetof(pid_node_t, pn_ref_link));
stp->sd_wrq = NULL;
return (0);
}
@@ -668,15 +671,17 @@
stdata_t *stp = buf;
mutex_destroy(&stp->sd_lock);
mutex_destroy(&stp->sd_reflock);
mutex_destroy(&stp->sd_qlock);
+ mutex_destroy(&stp->sd_pid_tree_lock);
cv_destroy(&stp->sd_monitor);
cv_destroy(&stp->sd_iocmonitor);
cv_destroy(&stp->sd_refmonitor);
cv_destroy(&stp->sd_qcv);
cv_destroy(&stp->sd_zcopy_wait);
+ avl_destroy(&stp->sd_pid_tree);
}
/*
* Constructor/destructor routines for the queue cache
*/
@@ -3314,10 +3319,12 @@
* Free a stream head.
*/
void
shfree(stdata_t *stp)
{
+ pid_node_t *pn;
+
ASSERT(MUTEX_NOT_HELD(&stp->sd_lock));
stp->sd_wrq = NULL;
mutex_enter(&stp->sd_qlock);
@@ -3337,13 +3344,97 @@
stp->sd_nciputctrl = 0;
}
ASSERT(stp->sd_qhead == NULL);
ASSERT(stp->sd_qtail == NULL);
ASSERT(stp->sd_nqueues == 0);
+
+ mutex_enter(&stp->sd_pid_tree_lock);
+ while ((pn = avl_first(&stp->sd_pid_tree)) != NULL) {
+ avl_remove(&stp->sd_pid_tree, pn);
+ kmem_free(pn, sizeof (*pn));
+ }
+ mutex_exit(&stp->sd_pid_tree_lock);
+
kmem_cache_free(stream_head_cache, stp);
}
+/*
+ * Record that the process with ID `pid' holds a reference to stream
+ * head `stp'.  If the PID is already present in sd_pid_tree, its
+ * per-stream reference count is bumped; otherwise a new pid_node_t is
+ * allocated and inserted at the position returned by avl_find().
+ *
+ * NOTE(review): kmem_zalloc(KM_SLEEP) is called while sd_pid_tree_lock
+ * is held — legal for an adaptive kernel mutex, but confirm no caller
+ * needs this path to be non-blocking.
+ */
+void
+sh_insert_pid(struct stdata *stp, pid_t pid)
+{
+ pid_node_t *pn, lookup_pn;
+ avl_index_t idx_pn;
+
+ lookup_pn.pn_pid = pid;
+ mutex_enter(&stp->sd_pid_tree_lock);
+ pn = avl_find(&stp->sd_pid_tree, &lookup_pn, &idx_pn);
+
+ if (pn != NULL) {
+ /* PID already tracked: just count the additional reference. */
+ pn->pn_count++;
+ } else {
+ pn = kmem_zalloc(sizeof (*pn), KM_SLEEP);
+ pn->pn_pid = pid;
+ pn->pn_count = 1;
+ /* idx_pn was filled in by the failed avl_find() above. */
+ avl_insert(&stp->sd_pid_tree, pn, idx_pn);
+ }
+ mutex_exit(&stp->sd_pid_tree_lock);
+}
+
+/*
+ * Drop one reference held by process `pid' on stream head `stp'.
+ * The pid_node_t is removed from sd_pid_tree and freed only when its
+ * reference count reaches zero.  A pid that is not present in the
+ * tree is silently ignored (no-op), so unbalanced removes are benign.
+ */
+void
+sh_remove_pid(struct stdata *stp, pid_t pid)
+{
+ pid_node_t *pn, lookup_pn;
+
+ lookup_pn.pn_pid = pid;
+ mutex_enter(&stp->sd_pid_tree_lock);
+ pn = avl_find(&stp->sd_pid_tree, &lookup_pn, NULL);
+
+ if (pn != NULL) {
+ if (pn->pn_count > 1) {
+ pn->pn_count--;
+ } else {
+ /* Last reference for this PID: discard the node. */
+ avl_remove(&stp->sd_pid_tree, pn);
+ kmem_free(pn, sizeof (*pn));
+ }
+ }
+ mutex_exit(&stp->sd_pid_tree_lock);
+}
+
+/*
+ * Allocate and fill an mblk containing a conn_pid_info_t that lists
+ * every PID currently referencing stream head `stp' (consumed by the
+ * netstat/PID-reporting path).  Returns NULL if allocb() fails.
+ *
+ * Sizing: the (n - 1) term assumes conn_pid_info_t declares one
+ * cpi_pids[] element inline — TODO confirm against the struct
+ * definition.  allocb() does not sleep, so holding sd_pid_tree_lock
+ * across it is safe; the lock also keeps the node count and the copy
+ * loop consistent.
+ */
+mblk_t *
+sh_get_pid_mblk(struct stdata *stp)
+{
+ mblk_t *mblk;
+ ulong_t sz, n;
+ pid_t *pids;
+ pid_node_t *pn;
+ conn_pid_info_t *cpi;
+
+ mutex_enter(&stp->sd_pid_tree_lock);
+
+ n = avl_numnodes(&stp->sd_pid_tree);
+ sz = sizeof (conn_pid_info_t);
+ sz += (n > 1) ? ((n - 1) * sizeof (pid_t)) : 0;
+ if ((mblk = allocb(sz, BPRI_HI)) == NULL) {
+ mutex_exit(&stp->sd_pid_tree_lock);
+ return (NULL);
+ }
+ /* Mark the whole structure as valid data in the message block. */
+ mblk->b_wptr += sz;
+ /* Fresh allocb: b_rptr == db_base, so cpi overlays the data start. */
+ cpi = (conn_pid_info_t *)mblk->b_datap->db_base;
+ cpi->cpi_contents = CONN_PID_INFO_XTI;
+ cpi->cpi_pids_cnt = n;
+ cpi->cpi_tot_size = sz;
+ /* Zero the inline slot so an empty tree yields a well-defined pid. */
+ cpi->cpi_pids[0] = 0;
+
+ if (cpi->cpi_pids_cnt > 0) {
+ /* In-order AVL walk: PIDs are emitted in ascending order. */
+ pids = cpi->cpi_pids;
+ for (pn = avl_first(&stp->sd_pid_tree); pn != NULL;
+ pids++, pn = AVL_NEXT(&stp->sd_pid_tree, pn))
+ *pids = pn->pn_pid;
+ }
+ mutex_exit(&stp->sd_pid_tree_lock);
+ return (mblk);
+}
+
/*
* Allocate a pair of queues and a syncq for the pair
*/
queue_t *
allocq(void)
@@ -8083,12 +8174,12 @@
flushq(_RD(stp->sd_wrq), flag);
mutex_exit(&stp->sd_lock);
}
void
-strsetrputhooks(vnode_t *vp, uint_t flags,
- msgfunc_t protofunc, msgfunc_t miscfunc)
+strsetrputhooks(vnode_t *vp, uint_t flags, msgfunc_t protofunc,
+ msgfunc_t miscfunc)
{
struct stdata *stp = vp->v_stream;
mutex_enter(&stp->sd_lock);