/*
 * Queue of pending cross-thread method calls, protected by
 * _v8plus_callq_mtx.  The uv_async_t handle is used to wake the event
 * loop thread so that it drains the queue (see v8plus_async_callback).
 */
static STAILQ_HEAD(v8plus_callq_head, v8plus_async_call) _v8plus_callq =
    STAILQ_HEAD_INITIALIZER(_v8plus_callq);
static pthread_mutex_t _v8plus_callq_mtx;
static pthread_t _v8plus_uv_event_thread;	/* thread running the uv loop */
static uv_async_t _v8plus_uv_async;

/*
 * A single queued cross-thread method call.  Instances are allocated on
 * the stack of the originating (non-event-loop) thread, which sleeps on
 * vac_cv until the event loop thread has run the call, stored the result
 * in vac_return and set vac_run.
 */
typedef struct v8plus_async_call {
	void *vac_cop;			/* object to invoke the method on */
	const char *vac_name;		/* method name */
	const nvlist_t *vac_lp;		/* argument list */

	pthread_cond_t vac_cv;		/* signalled when the call completes */
	pthread_mutex_t vac_mtx;	/* protects vac_run and vac_return */

	boolean_t vac_run;		/* B_TRUE once the call has been made */
	nvlist_t *vac_return;		/* result of the call */

	STAILQ_ENTRY(v8plus_async_call) vac_callq_entry;
} v8plus_async_call_t;

nvlist_t *v8plus_method_call_direct(void *, const char *, const nvlist_t *);
49
50 boolean_t
51 v8plus_in_event_thread(void)
52 {
53 return (_v8plus_uv_event_thread == pthread_self() ? B_TRUE : B_FALSE);
54 }
55
/*
 * libuv async callback, invoked on the event loop thread after a
 * uv_async_send() from v8plus_method_call().  Drains the queue of
 * pending cross-thread method calls, runs each via
 * v8plus_method_call_direct(), and wakes the originating thread.
 *
 * uv_async_send() coalesces wakeups, so a single invocation may need to
 * service any number of queued calls; hence the loop until empty.
 */
static void
v8plus_async_callback(uv_async_t *async, __attribute__((unused)) int status)
{
	if (v8plus_in_event_thread() != B_TRUE)
		v8plus_panic("async callback called outside of event loop");

	for (;;) {
		v8plus_async_call_t *vac = NULL;

		/*
		 * Fetch the next queued method:
		 */
		if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not lock async queue mutex");
		if (!STAILQ_EMPTY(&_v8plus_callq)) {
			vac = STAILQ_FIRST(&_v8plus_callq);
			STAILQ_REMOVE_HEAD(&_v8plus_callq, vac_callq_entry);
		}
		if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not unlock async queue mutex");

		if (vac == NULL)
			break;

		/*
		 * Run the queued method:
		 *
		 * NOTE(review): vac_mtx is held across the call into
		 * v8plus_method_call_direct() (i.e. into V8/JS).  The only
		 * other contender for this mutex is the originating thread,
		 * which cannot proceed until vac_run is set, so this should
		 * not deadlock -- but narrowing the critical section to the
		 * vac_run/vac_cv update alone would be safer; confirm
		 * against the later revision of this file.
		 */
		if (pthread_mutex_lock(&vac->vac_mtx) != 0)
			v8plus_panic("could not lock async call mutex");

		if (vac->vac_run == B_TRUE)
			v8plus_panic("async call already run");

		vac->vac_return = v8plus_method_call_direct(vac->vac_cop,
		    vac->vac_name, vac->vac_lp);
		vac->vac_run = B_TRUE;

		/* Wake the thread sleeping in v8plus_method_call(). */
		if (pthread_cond_broadcast(&vac->vac_cv) != 0)
			v8plus_panic("could not signal async call condvar");
		if (pthread_mutex_unlock(&vac->vac_mtx) != 0)
			v8plus_panic("could not unlock async call mutex");
	}
}
99
100 nvlist_t *
101 v8plus_method_call(void *cop, const char *name, const nvlist_t *lp)
102 {
103 v8plus_async_call_t vac;
104
105 if (v8plus_in_event_thread() == B_TRUE) {
106 /*
107 * We're running in the event loop thread, so we can make the
108 * call directly.
109 */
110 return (v8plus_method_call_direct(cop, name, lp));
111 }
112
113 /*
114 * As we cannot manipulate v8plus/V8/Node structures directly from
115 * outside the event loop thread, we push the call arguments onto a
116 * queue and post to the event loop thread. We then sleep on our
117 * condition variable until the event loop thread makes the call
118 * for us and wakes us up.
119 */
120 vac.vac_cop = cop;
121 vac.vac_name = name;
122 vac.vac_lp = lp;
123 if (pthread_mutex_init(&vac.vac_mtx, NULL) != 0)
124 v8plus_panic("could not init async call mutex");
125 if (pthread_cond_init(&vac.vac_cv, NULL) != 0)
126 v8plus_panic("could not init async call condvar");
127 vac.vac_run = B_FALSE;
128 vac.vac_return = NULL;
129
130 /*
131 * Post request to queue:
132 */
133 if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
134 v8plus_panic("could not lock async queue mutex");
135 STAILQ_INSERT_TAIL(&_v8plus_callq, &vac, vac_callq_entry);
136 if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
137 v8plus_panic("could not unlock async queue mutex");
138 uv_async_send(&_v8plus_uv_async);
139
140 /*
141 * Wait for our request to be serviced on the Event Loop thread:
142 */
143 if (pthread_mutex_lock(&vac.vac_mtx) != 0)
144 v8plus_panic("could not lock async call mutex");
145 while (vac.vac_run == B_FALSE) {
146 if (pthread_cond_wait(&vac.vac_cv, &vac.vac_mtx) != 0)
147 v8plus_panic("could not wait on async call condvar");
148 }
149
150 return (vac.vac_return);
151 }
152
153
154 /*
155 * Initialise structures for off-event-loop method calls.
156 *
157 * Note that uv_async_init() must be called inside the libuv Event Loop, so we
158 * do it here. We also want to record the thread ID of the Event Loop thread
159 * so as to determine what kind of method calls to make later.
160 */
161 void
162 v8plus_crossthread_init(void)
163 {
164 _v8plus_uv_event_thread = pthread_self();
165 if (uv_async_init(uv_default_loop(), &_v8plus_uv_async,
166 v8plus_async_callback) != 0)
167 v8plus_panic("unable to initialise uv_async_t");
168 if (pthread_mutex_init(&_v8plus_callq_mtx, NULL) != 0)
169 v8plus_panic("unable to initialise mutex");
170 }
171
172 nvlist_t *
173 v8plus_verror(v8plus_errno_t e, const char *fmt, va_list ap)
174 {
175 if (fmt == NULL) {
176 if (e == V8PLUSERR_NOERROR) {
177 *_v8plus_errmsg = '\0';
|
/*
 * Queue of pending cross-thread method calls, protected by
 * _v8plus_callq_mtx.  The uv_async_t handle is used to wake the event
 * loop thread so that it drains the queue (see v8plus_async_callback).
 */
static STAILQ_HEAD(v8plus_callq_head, v8plus_async_call) _v8plus_callq =
    STAILQ_HEAD_INITIALIZER(_v8plus_callq);
static pthread_mutex_t _v8plus_callq_mtx;
static pthread_t _v8plus_uv_event_thread;	/* thread running the uv loop */
static uv_async_t _v8plus_uv_async;

/*
 * A single queued cross-thread method call.  Instances are allocated on
 * the stack of the originating (non-event-loop) thread, which sleeps on
 * vac_cv until the event loop thread has run the call, stored the result
 * in vac_return and set vac_run.
 */
typedef struct v8plus_async_call {
	void *vac_cop;			/* object to invoke the method on */
	const char *vac_name;		/* method name */
	const nvlist_t *vac_lp;		/* argument list */

	pthread_cond_t vac_cv;		/* signalled when the call completes */
	pthread_mutex_t vac_mtx;	/* protects vac_run and vac_return */

	boolean_t vac_run;		/* B_TRUE once the call has been made */
	nvlist_t *vac_return;		/* result of the call */

	STAILQ_ENTRY(v8plus_async_call) vac_callq_entry;
} v8plus_async_call_t;
47
48 boolean_t
49 v8plus_in_event_thread(void)
50 {
51 return (_v8plus_uv_event_thread == pthread_self() ? B_TRUE : B_FALSE);
52 }
53
/*
 * libuv async callback, invoked on the event loop thread after a
 * uv_async_send() from v8plus_method_call().  Drains the queue of
 * pending cross-thread method calls, runs each via
 * v8plus_method_call_direct(), and wakes the originating thread.
 *
 * uv_async_send() coalesces wakeups, so a single invocation may need to
 * service any number of queued calls; hence the loop until empty.
 */
static void
v8plus_async_callback(uv_async_t *async, int status __UNUSED)
{
	if (v8plus_in_event_thread() != B_TRUE)
		v8plus_panic("async callback called outside of event loop");

	for (;;) {
		v8plus_async_call_t *vac = NULL;

		/*
		 * Fetch the next queued method:
		 */
		if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not lock async queue mutex");
		if (!STAILQ_EMPTY(&_v8plus_callq)) {
			vac = STAILQ_FIRST(&_v8plus_callq);
			STAILQ_REMOVE_HEAD(&_v8plus_callq, vac_callq_entry);
		}
		if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not unlock async queue mutex");

		if (vac == NULL)
			break;

		/*
		 * Run the queued method:
		 *
		 * vac_run and vac_return are accessed here without vac_mtx
		 * held; this appears safe because the producer published
		 * them (via the queue mutex) before we dequeued vac, and
		 * only this thread touches them until vac_run is set below.
		 * Taking vac_mtx around the vac_run update ensures the
		 * waiter observes vac_return before it wakes.
		 */
		if (vac->vac_run == B_TRUE)
			v8plus_panic("async call already run");
		vac->vac_return = v8plus_method_call_direct(vac->vac_cop,
		    vac->vac_name, vac->vac_lp);

		if (pthread_mutex_lock(&vac->vac_mtx) != 0)
			v8plus_panic("could not lock async call mutex");
		vac->vac_run = B_TRUE;
		/* Wake the thread sleeping in v8plus_method_call(). */
		if (pthread_cond_broadcast(&vac->vac_cv) != 0)
			v8plus_panic("could not signal async call condvar");
		if (pthread_mutex_unlock(&vac->vac_mtx) != 0)
			v8plus_panic("could not unlock async call mutex");
	}
}
95
/*
 * Call the named method on the given object.  If the caller is already
 * on the event loop thread the call is made directly; otherwise it is
 * queued for the event loop thread and this thread sleeps until the
 * call has been serviced.
 *
 * Returns the nvlist produced by the method call; ownership follows
 * v8plus_method_call_direct().
 */
nvlist_t *
v8plus_method_call(void *cop, const char *name, const nvlist_t *lp)
{
	v8plus_async_call_t vac;

	if (v8plus_in_event_thread() == B_TRUE) {
		/*
		 * We're running in the event loop thread, so we can make the
		 * call directly.
		 */
		return (v8plus_method_call_direct(cop, name, lp));
	}

	/*
	 * As we cannot manipulate v8plus/V8/Node structures directly from
	 * outside the event loop thread, we push the call arguments onto a
	 * queue and post to the event loop thread.  We then sleep on our
	 * condition variable until the event loop thread makes the call
	 * for us and wakes us up.
	 */
	bzero(&vac, sizeof (vac));
	vac.vac_cop = cop;
	vac.vac_name = name;
	vac.vac_lp = lp;
	if (pthread_mutex_init(&vac.vac_mtx, NULL) != 0)
		v8plus_panic("could not init async call mutex");
	if (pthread_cond_init(&vac.vac_cv, NULL) != 0)
		v8plus_panic("could not init async call condvar");
	vac.vac_run = B_FALSE;

	/*
	 * Post request to queue:
	 */
	if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
		v8plus_panic("could not lock async queue mutex");
	STAILQ_INSERT_TAIL(&_v8plus_callq, &vac, vac_callq_entry);
	if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
		v8plus_panic("could not unlock async queue mutex");
	uv_async_send(&_v8plus_uv_async);

	/*
	 * Wait for our request to be serviced on the event loop thread:
	 */
	if (pthread_mutex_lock(&vac.vac_mtx) != 0)
		v8plus_panic("could not lock async call mutex");
	while (vac.vac_run == B_FALSE) {
		if (pthread_cond_wait(&vac.vac_cv, &vac.vac_mtx) != 0)
			v8plus_panic("could not wait on async call condvar");
	}
	if (pthread_mutex_unlock(&vac.vac_mtx) != 0)
		v8plus_panic("could not unlock async call mutex");

	/* The call has completed; release the stack-local synch objects. */
	if (pthread_cond_destroy(&vac.vac_cv) != 0)
		v8plus_panic("could not destroy async call condvar");
	if (pthread_mutex_destroy(&vac.vac_mtx) != 0)
		v8plus_panic("could not destroy async call mutex");

	return (vac.vac_return);
}
155
156
157 /*
158 * Initialise structures for off-event-loop method calls.
159 *
160 * Note that uv_async_init() must be called inside the libuv event loop, so we
161 * do it here. We also want to record the thread ID of the Event Loop thread
162 * so as to determine what kind of method calls to make later.
163 */
164 void
165 v8plus_crossthread_init(void)
166 {
167 _v8plus_uv_event_thread = pthread_self();
168 if (uv_async_init(uv_default_loop(), &_v8plus_uv_async,
169 v8plus_async_callback) != 0)
170 v8plus_panic("unable to initialise uv_async_t");
171 if (pthread_mutex_init(&_v8plus_callq_mtx, NULL) != 0)
172 v8plus_panic("unable to initialise mutex");
173 }
174
175 nvlist_t *
176 v8plus_verror(v8plus_errno_t e, const char *fmt, va_list ap)
177 {
178 if (fmt == NULL) {
179 if (e == V8PLUSERR_NOERROR) {
180 *_v8plus_errmsg = '\0';
|