Print this page
keith IIa
XXX keith II
Split |
Close |
Expand all |
Collapse all |
--- old/./v8plus_csup.c
+++ new/./v8plus_csup.c
1 1 /*
2 2 * Copyright (c) 2012 Joyent, Inc. All rights reserved.
3 3 */
4 4
5 5 #include <sys/ccompile.h>
6 6 #include <sys/debug.h>
7 7 #include <sys/queue.h>
8 8 #include <sys/types.h>
9 9 #include <stdarg.h>
10 10 #include <string.h>
11 11 #include <strings.h>
12 12 #include <errno.h>
13 13 #include <uv.h>
14 14 #include <pthread.h>
15 15 #include "v8plus_glue.h"
16 16
17 17 __thread v8plus_errno_t _v8plus_errno;
18 18 __thread char _v8plus_errmsg[V8PLUS_ERRMSG_LEN];
19 19
/*
 * Per-request state for a deferred call made via v8plus_defer(); carried
 * through libuv's work queue in uv_work_t.data.
 */
typedef struct v8plus_uv_ctx {
	void *vuc_obj;		/* object passed to v8plus_defer(); held for */
				/* the life of the request */
	void *vuc_ctx;		/* opaque caller context, passed through */
	void *vuc_result;	/* return value of vuc_worker */
	v8plus_worker_f vuc_worker;	/* runs on a worker thread */
	v8plus_completion_f vuc_completion; /* runs after the worker finishes */
} v8plus_uv_ctx_t;
27 27
28 28 static STAILQ_HEAD(v8plus_callq_head, v8plus_async_call) _v8plus_callq =
29 29 STAILQ_HEAD_INITIALIZER(_v8plus_callq);
30 30 static pthread_mutex_t _v8plus_callq_mtx;
31 31 static pthread_t _v8plus_uv_event_thread;
32 32 static uv_async_t _v8plus_uv_async;
33 33
34 34 typedef struct v8plus_async_call {
35 35 void *vac_cop;
36 36 const char *vac_name;
37 37 const nvlist_t *vac_lp;
↓ open down ↓ |
37 lines elided |
↑ open up ↑ |
38 38
39 39 pthread_cond_t vac_cv;
40 40 pthread_mutex_t vac_mtx;
41 41
42 42 boolean_t vac_run;
43 43 nvlist_t *vac_return;
44 44
45 45 STAILQ_ENTRY(v8plus_async_call) vac_callq_entry;
46 46 } v8plus_async_call_t;
47 47
48 -nvlist_t *v8plus_method_call_direct(void *, const char *, const nvlist_t *);
49 -
50 48 boolean_t
51 49 v8plus_in_event_thread(void)
52 50 {
53 51 return (_v8plus_uv_event_thread == pthread_self() ? B_TRUE : B_FALSE);
54 52 }
55 53
56 54 static void
57 -v8plus_async_callback(uv_async_t *async, __attribute__((unused)) int status)
55 +v8plus_async_callback(uv_async_t *async, int status __UNUSED)
58 56 {
59 57 if (v8plus_in_event_thread() != B_TRUE)
60 58 v8plus_panic("async callback called outside of event loop");
61 59
62 60 for (;;) {
63 61 v8plus_async_call_t *vac = NULL;
64 62
65 63 /*
66 64 * Fetch the next queued method:
67 65 */
68 66 if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
69 67 v8plus_panic("could not lock async queue mutex");
70 68 if (!STAILQ_EMPTY(&_v8plus_callq)) {
71 69 vac = STAILQ_FIRST(&_v8plus_callq);
72 70 STAILQ_REMOVE_HEAD(&_v8plus_callq, vac_callq_entry);
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
73 71 }
74 72 if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
75 73 v8plus_panic("could not unlock async queue mutex");
76 74
77 75 if (vac == NULL)
78 76 break;
79 77
80 78 /*
81 79 * Run the queued method:
82 80 */
83 - if (pthread_mutex_lock(&vac->vac_mtx) != 0)
84 - v8plus_panic("could not lock async call mutex");
85 -
86 81 if (vac->vac_run == B_TRUE)
87 82 v8plus_panic("async call already run");
88 -
89 83 vac->vac_return = v8plus_method_call_direct(vac->vac_cop,
90 84 vac->vac_name, vac->vac_lp);
91 - vac->vac_run = B_TRUE;
92 85
86 + if (pthread_mutex_lock(&vac->vac_mtx) != 0)
87 + v8plus_panic("could not lock async call mutex");
88 + vac->vac_run = B_TRUE;
93 89 if (pthread_cond_broadcast(&vac->vac_cv) != 0)
94 90 v8plus_panic("could not signal async call condvar");
95 91 if (pthread_mutex_unlock(&vac->vac_mtx) != 0)
96 92 v8plus_panic("could not unlock async call mutex");
97 93 }
98 94 }
99 95
100 96 nvlist_t *
101 97 v8plus_method_call(void *cop, const char *name, const nvlist_t *lp)
102 98 {
103 99 v8plus_async_call_t vac;
104 100
105 101 if (v8plus_in_event_thread() == B_TRUE) {
106 102 /*
107 103 * We're running in the event loop thread, so we can make the
108 104 * call directly.
109 105 */
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
110 106 return (v8plus_method_call_direct(cop, name, lp));
111 107 }
112 108
113 109 /*
114 110 * As we cannot manipulate v8plus/V8/Node structures directly from
115 111 * outside the event loop thread, we push the call arguments onto a
116 112 * queue and post to the event loop thread. We then sleep on our
117 113 * condition variable until the event loop thread makes the call
118 114 * for us and wakes us up.
119 115 */
116 + bzero(&vac, sizeof (vac));
120 117 vac.vac_cop = cop;
121 118 vac.vac_name = name;
122 119 vac.vac_lp = lp;
123 120 if (pthread_mutex_init(&vac.vac_mtx, NULL) != 0)
124 121 v8plus_panic("could not init async call mutex");
125 122 if (pthread_cond_init(&vac.vac_cv, NULL) != 0)
126 123 v8plus_panic("could not init async call condvar");
127 124 vac.vac_run = B_FALSE;
128 - vac.vac_return = NULL;
129 125
130 126 /*
131 127 * Post request to queue:
132 128 */
133 129 if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
134 130 v8plus_panic("could not lock async queue mutex");
135 131 STAILQ_INSERT_TAIL(&_v8plus_callq, &vac, vac_callq_entry);
136 132 if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
137 133 v8plus_panic("could not unlock async queue mutex");
138 134 uv_async_send(&_v8plus_uv_async);
139 135
140 136 /*
141 - * Wait for our request to be serviced on the Event Loop thread:
137 + * Wait for our request to be serviced on the event loop thread:
142 138 */
143 139 if (pthread_mutex_lock(&vac.vac_mtx) != 0)
144 140 v8plus_panic("could not lock async call mutex");
145 141 while (vac.vac_run == B_FALSE) {
146 142 if (pthread_cond_wait(&vac.vac_cv, &vac.vac_mtx) != 0)
147 143 v8plus_panic("could not wait on async call condvar");
148 144 }
145 + if (pthread_mutex_unlock(&vac.vac_mtx) != 0)
146 + v8plus_panic("could not unlock async call mutex");
147 +
148 + if (pthread_cond_destroy(&vac.vac_cv) != 0)
149 + v8plus_panic("could not destroy async call condvar");
150 + if (pthread_mutex_destroy(&vac.vac_mtx) != 0)
151 + v8plus_panic("could not destroy async call mutex");
149 152
150 153 return (vac.vac_return);
151 154 }
152 155
153 156
154 157 /*
155 158 * Initialise structures for off-event-loop method calls.
156 159 *
157 - * Note that uv_async_init() must be called inside the libuv Event Loop, so we
160 + * Note that uv_async_init() must be called inside the libuv event loop, so we
158 161 * do it here. We also want to record the thread ID of the Event Loop thread
159 162 * so as to determine what kind of method calls to make later.
160 163 */
161 164 void
162 165 v8plus_crossthread_init(void)
163 166 {
164 167 _v8plus_uv_event_thread = pthread_self();
165 168 if (uv_async_init(uv_default_loop(), &_v8plus_uv_async,
166 169 v8plus_async_callback) != 0)
167 170 v8plus_panic("unable to initialise uv_async_t");
168 171 if (pthread_mutex_init(&_v8plus_callq_mtx, NULL) != 0)
169 172 v8plus_panic("unable to initialise mutex");
170 173 }
171 174
172 175 nvlist_t *
173 176 v8plus_verror(v8plus_errno_t e, const char *fmt, va_list ap)
174 177 {
175 178 if (fmt == NULL) {
176 179 if (e == V8PLUSERR_NOERROR) {
177 180 *_v8plus_errmsg = '\0';
178 181 } else {
179 182 (void) snprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN,
180 183 "%s", v8plus_strerror(e));
181 184 }
182 185 } else {
183 186 (void) vsnprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN, fmt, ap);
184 187 }
185 188 _v8plus_errno = e;
186 189
187 190 return (NULL);
188 191 }
189 192
190 193 nvlist_t *
191 194 v8plus_error(v8plus_errno_t e, const char *fmt, ...)
192 195 {
193 196 va_list ap;
194 197
195 198 va_start(ap, fmt);
196 199 (void) v8plus_verror(e, fmt, ap);
197 200 va_end(ap);
198 201
199 202 return (NULL);
200 203 }
201 204
/*
 * Print the formatted panic message to stderr, flush it, and abort the
 * process.  Never returns; used throughout this file for fatal internal
 * errors (e.g. mutex or condvar failures) from which no recovery is
 * possible.
 */
static void __NORETURN
v8plus_vpanic(const char *fmt, va_list ap)
{
	(void) vfprintf(stderr, fmt, ap);
	(void) fflush(stderr);
	abort();
}
209 212
/*
 * Printf-style front end to v8plus_vpanic(): abort the process with a
 * formatted message on stderr.  Does not return (the va_end() below is
 * unreachable but kept for form's sake).
 */
void
v8plus_panic(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	v8plus_vpanic(fmt, args);
	va_end(args);
}
219 222
220 223 nvlist_t *
221 224 v8plus_nverr(int nverr, const char *member)
222 225 {
223 226 (void) snprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN,
224 227 "nvlist manipulation error on member %s: %s",
225 228 member == NULL ? "<none>" : member, strerror(nverr));
226 229
227 230 switch (nverr) {
228 231 case ENOMEM:
229 232 _v8plus_errno = V8PLUSERR_NOMEM;
230 233 break;
231 234 case EINVAL:
232 235 _v8plus_errno = V8PLUSERR_YOUSUCK;
233 236 break;
234 237 default:
235 238 _v8plus_errno = V8PLUSERR_UNKNOWN;
236 239 break;
237 240 }
238 241
239 242 return (NULL);
240 243 }
241 244
242 245 nvlist_t *
243 246 v8plus_syserr(int syserr, const char *fmt, ...)
244 247 {
245 248 v8plus_errno_t e;
246 249 va_list ap;
247 250
248 251 switch (syserr) {
249 252 case ENOMEM:
250 253 e = V8PLUSERR_NOMEM;
251 254 break;
252 255 case EBADF:
253 256 e = V8PLUSERR_BADF;
254 257 break;
255 258 default:
256 259 e = V8PLUSERR_UNKNOWN;
257 260 break;
258 261 }
259 262
260 263 va_start(ap, fmt);
261 264 (void) v8plus_verror(e, fmt, ap);
262 265 va_end(ap);
263 266
264 267 return (NULL);
265 268 }
266 269
267 270 /*
268 271 * The NULL nvlist with V8PLUSERR_NOERROR means we are returning void.
269 272 */
270 273 nvlist_t *
271 274 v8plus_void(void)
272 275 {
273 276 return (v8plus_error(V8PLUSERR_NOERROR, NULL));
274 277 }
275 278
276 279 v8plus_type_t
277 280 v8plus_typeof(const nvpair_t *pp)
278 281 {
279 282 data_type_t t = nvpair_type((nvpair_t *)pp);
280 283
281 284 switch (t) {
282 285 case DATA_TYPE_DOUBLE:
283 286 return (V8PLUS_TYPE_NUMBER);
284 287 case DATA_TYPE_STRING:
285 288 return (V8PLUS_TYPE_STRING);
286 289 case DATA_TYPE_NVLIST:
287 290 return (V8PLUS_TYPE_OBJECT);
288 291 case DATA_TYPE_BOOLEAN_VALUE:
289 292 return (V8PLUS_TYPE_BOOLEAN);
290 293 case DATA_TYPE_BOOLEAN:
291 294 return (V8PLUS_TYPE_UNDEFINED);
292 295 case DATA_TYPE_BYTE:
293 296 {
294 297 uchar_t v;
295 298 if (nvpair_value_byte((nvpair_t *)pp, &v) != 0 || v != 0)
296 299 return (V8PLUS_TYPE_INVALID);
297 300 return (V8PLUS_TYPE_NULL);
298 301 }
299 302 case DATA_TYPE_UINT64_ARRAY:
300 303 {
301 304 uint64_t *vp;
302 305 uint_t nv;
303 306 if (nvpair_value_uint64_array((nvpair_t *)pp, &vp, &nv) != 0 ||
304 307 nv != 1) {
305 308 return (V8PLUS_TYPE_INVALID);
306 309 }
307 310 return (V8PLUS_TYPE_JSFUNC);
308 311 }
309 312 default:
310 313 return (V8PLUS_TYPE_INVALID);
311 314 }
312 315 }
313 316
/*
 * Check that nvpair pp holds a value of expected v8plus type t; if it
 * does and vp is non-NULL, decode the value through vp.  Returns 0 on a
 * type match, -1 on mismatch (or for V8PLUS_TYPE_NONE).  Callers pass a
 * NULL vp to validate without storing (see v8plus_args()'s first pass).
 */
static int
v8plus_arg_value(v8plus_type_t t, const nvpair_t *pp, void *vp)
{
	data_type_t dt = nvpair_type((nvpair_t *)pp);

	switch (t) {
	case V8PLUS_TYPE_NONE:
		return (-1);
	case V8PLUS_TYPE_STRING:
		if (dt == DATA_TYPE_STRING) {
			if (vp != NULL) {
				(void) nvpair_value_string((nvpair_t *)pp,
				    (char **)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_NUMBER:
		if (dt == DATA_TYPE_DOUBLE) {
			if (vp != NULL) {
				(void) nvpair_value_double((nvpair_t *)pp,
				    (double *)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_BOOLEAN:
		if (dt == DATA_TYPE_BOOLEAN_VALUE) {
			if (vp != NULL) {
				(void) nvpair_value_boolean_value(
				    (nvpair_t *)pp, (boolean_t *)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_JSFUNC:
		/* A JS function handle is encoded as a 1-element uint64 array. */
		if (dt == DATA_TYPE_UINT64_ARRAY) {
			uint_t nv;
			uint64_t *vpp;

			if (nvpair_value_uint64_array((nvpair_t *)pp,
			    &vpp, &nv) == 0 && nv == 1) {
				if (vp != NULL)
					*(v8plus_jsfunc_t *)vp = vpp[0];
				return (0);
			}
		}
		return (-1);
	case V8PLUS_TYPE_OBJECT:
		if (dt == DATA_TYPE_NVLIST) {
			if (vp != NULL) {
				(void) nvpair_value_nvlist((nvpair_t *)pp,
				    (nvlist_t **)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_NULL:
		/* JS null is encoded as a single zero byte; no value out. */
		if (dt == DATA_TYPE_BYTE) {
			uchar_t v;

			if (nvpair_value_byte((nvpair_t *)pp, &v) == 0 &&
			    v == 0)
				return (0);
		}
		return (-1);
	case V8PLUS_TYPE_UNDEFINED:
		/* JS undefined is encoded as a valueless boolean. */
		return (dt == DATA_TYPE_BOOLEAN ? 0 : -1);
	case V8PLUS_TYPE_ANY:
		/* Matches anything; hands back the raw nvpair. */
		if (vp != NULL)
			*(const nvpair_t **)vp = pp;
		return (0);
	case V8PLUS_TYPE_INVALID:
		/* Matches anything; hands back the underlying data type. */
		if (vp != NULL)
			*(data_type_t *)vp = dt;
		return (0);
	case V8PLUS_TYPE_STRNUMBER64:
		/* A uint64 carried as a decimal/hex/octal string. */
		if (dt == DATA_TYPE_STRING) {
			char *s;
			uint64_t v;

			(void) nvpair_value_string((nvpair_t *)pp, &s);
			errno = 0;
			v = (uint64_t)strtoull(s, NULL, 0);
			if (errno != 0)
				return (-1);
			if (vp != NULL)
				*(uint64_t *)vp = v;
			return (0);
		}
		return (-1);
	default:
		return (-1);
	}
}
409 412
410 413 int
411 414 v8plus_args(const nvlist_t *lp, uint_t flags, v8plus_type_t t, ...)
412 415 {
413 416 v8plus_type_t nt;
414 417 nvpair_t *pp;
415 418 void *vp;
416 419 va_list ap;
417 420 uint_t i;
418 421 char buf[32];
419 422
420 423 va_start(ap, t);
421 424
422 425 for (i = 0, nt = t; nt != V8PLUS_TYPE_NONE; i++) {
423 426 switch (nt) {
424 427 case V8PLUS_TYPE_UNDEFINED:
425 428 case V8PLUS_TYPE_NULL:
426 429 break;
427 430 default:
428 431 (void) va_arg(ap, void *);
429 432 }
430 433
431 434 (void) snprintf(buf, sizeof (buf), "%u", i);
432 435 if (nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) != 0) {
433 436 (void) v8plus_error(V8PLUSERR_MISSINGARG,
434 437 "argument %u is required", i);
435 438 return (-1);
436 439 }
437 440
438 441 if (v8plus_arg_value(nt, pp, NULL) != 0) {
439 442 (void) v8plus_error(V8PLUSERR_BADARG,
440 443 "argument %u is of incorrect type", i);
441 444 return (-1);
442 445 }
443 446
444 447 nt = va_arg(ap, data_type_t);
445 448 }
446 449
447 450 va_end(ap);
448 451
449 452 if (flags & V8PLUS_ARG_F_NOEXTRA) {
450 453 (void) snprintf(buf, sizeof (buf), "%u", i);
451 454 if (nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) == 0) {
452 455 (void) v8plus_error(V8PLUSERR_EXTRAARG,
453 456 "superfluous extra argument(s) detected");
454 457 return (-1);
455 458 }
456 459 }
457 460
458 461 va_start(ap, t);
459 462
460 463 for (i = 0, nt = t; nt != V8PLUS_TYPE_NONE; i++) {
461 464 switch (nt) {
462 465 case V8PLUS_TYPE_UNDEFINED:
463 466 case V8PLUS_TYPE_NULL:
464 467 vp = NULL;
465 468 break;
466 469 default:
467 470 vp = va_arg(ap, void *);
468 471 }
469 472
470 473 (void) snprintf(buf, sizeof (buf), "%u", i);
471 474 VERIFY(nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) == 0);
472 475 VERIFY(v8plus_arg_value(nt, pp, vp) == 0);
473 476
474 477 nt = va_arg(ap, data_type_t);
475 478 }
476 479
477 480 va_end(ap);
478 481
479 482 return (0);
480 483 }
481 484
482 485 static int
483 486 v8plus_obj_vsetprops(nvlist_t *lp, v8plus_type_t t, va_list *ap)
484 487 {
485 488 v8plus_type_t nt = t;
486 489 char *name;
487 490 int err;
488 491
489 492 /*
490 493 * Do not call va_start() or va_end() in this function! We are limited
491 494 * to a single traversal of the arguments so that we can recurse to
492 495 * handle embedded object definitions.
493 496 */
494 497
495 498 while (nt != V8PLUS_TYPE_NONE) {
496 499 name = va_arg(*ap, char *);
497 500
498 501 switch (nt) {
499 502 case V8PLUS_TYPE_STRING:
500 503 {
501 504 char *s = va_arg(*ap, char *);
502 505 if ((err = nvlist_add_string(lp, name, s)) != 0) {
503 506 (void) v8plus_nverr(err, name);
504 507 return (-1);
505 508 }
506 509 break;
507 510 }
508 511 case V8PLUS_TYPE_NUMBER:
509 512 {
510 513 double d = va_arg(*ap, double);
511 514 if ((err = nvlist_add_double(lp, name, d)) != 0) {
512 515 (void) v8plus_nverr(err, name);
513 516 return (-1);
514 517 }
515 518 break;
516 519 }
517 520 case V8PLUS_TYPE_BOOLEAN:
518 521 {
519 522 boolean_t b = va_arg(*ap, boolean_t);
520 523 if ((err = nvlist_add_boolean_value(lp,
521 524 name, b)) != 0) {
522 525 (void) v8plus_nverr(err, name);
523 526 return (-1);
524 527 }
525 528 break;
526 529 }
527 530 case V8PLUS_TYPE_JSFUNC:
528 531 {
529 532 v8plus_jsfunc_t j = va_arg(*ap, v8plus_jsfunc_t);
530 533 if ((err = nvlist_add_uint64_array(lp,
531 534 name, &j, 1)) != 0) {
532 535 (void) v8plus_nverr(err, name);
533 536 return (-1);
534 537 }
535 538 if ((err = nvlist_add_string_array(lp,
536 539 V8PLUS_JSF_COOKIE, NULL, 0)) != 0) {
537 540 (void) v8plus_nverr(err, V8PLUS_JSF_COOKIE);
538 541 return (-1);
539 542 }
540 543 v8plus_jsfunc_hold(j);
541 544 break;
542 545 }
543 546 case V8PLUS_TYPE_OBJECT:
544 547 {
545 548 const nvlist_t *op = va_arg(*ap, const nvlist_t *);
546 549 if ((err = nvlist_add_nvlist(lp, name,
547 550 (nvlist_t *)op)) != 0) {
548 551 (void) v8plus_nverr(err, name);
549 552 return (-1);
550 553 }
551 554 break;
552 555 }
553 556 case V8PLUS_TYPE_NULL:
554 557 if ((err = nvlist_add_byte(lp, name, 0)) != 0) {
555 558 (void) v8plus_nverr(err, name);
556 559 return (-1);
557 560 }
558 561 break;
559 562 case V8PLUS_TYPE_UNDEFINED:
560 563 if ((err = nvlist_add_boolean(lp, name)) != 0) {
561 564 (void) v8plus_nverr(err, name);
562 565 return (-1);
563 566 }
564 567 break;
565 568 case V8PLUS_TYPE_ANY:
566 569 {
567 570 nvpair_t *pp = va_arg(*ap, nvpair_t *);
568 571 if ((err = nvlist_add_nvpair(lp, pp)) != 0) {
569 572 (void) v8plus_nverr(err, name);
570 573 return (-1);
571 574 }
572 575 break;
573 576 }
574 577 case V8PLUS_TYPE_STRNUMBER64:
575 578 {
576 579 uint64_t v = va_arg(*ap, uint64_t);
577 580 char s[32];
578 581 (void) snprintf(s, sizeof (s), "%" PRIu64, v);
579 582 if ((err = nvlist_add_string(lp, name, s)) != 0) {
580 583 (void) v8plus_nverr(err, name);
581 584 return (-1);
582 585 }
583 586 break;
584 587 }
585 588 case V8PLUS_TYPE_INL_OBJECT:
586 589 {
587 590 nvlist_t *slp;
588 591
589 592 nt = va_arg(*ap, v8plus_type_t);
590 593 err = nvlist_alloc(&slp, NV_UNIQUE_NAME, 0);
591 594 if (err != 0) {
592 595 (void) v8plus_nverr(err, name);
593 596 return (-1);
594 597 }
595 598 if (v8plus_obj_vsetprops(slp, nt, ap) != 0)
596 599 return (-1);
597 600
598 601 err = nvlist_add_nvlist(lp, name, slp);
599 602 nvlist_free(slp);
600 603 if (err != 0) {
601 604 (void) v8plus_nverr(err, name);
602 605 return (-1);
603 606 }
604 607 break;
605 608 }
606 609 case V8PLUS_TYPE_INVALID:
607 610 default:
608 611 (void) v8plus_error(V8PLUSERR_YOUSUCK,
609 612 "invalid property type %d", nt);
610 613 return (-1);
611 614 }
612 615
613 616 nt = va_arg(*ap, v8plus_type_t);
614 617 }
615 618
616 619 return (0);
617 620 }
618 621
619 622 nvlist_t *
620 623 v8plus_obj(v8plus_type_t t, ...)
621 624 {
622 625 nvlist_t *rp;
623 626 va_list ap;
624 627 int err;
625 628
626 629 if ((err = nvlist_alloc(&rp, NV_UNIQUE_NAME, 0)) != 0)
627 630 return (v8plus_nverr(err, NULL));
628 631
629 632 va_start(ap, t);
630 633 err = v8plus_obj_vsetprops(rp, t, &ap);
631 634 va_end(ap);
632 635
633 636 if (err != 0) {
634 637 nvlist_free(rp);
635 638 rp = NULL;
636 639 }
637 640
638 641 return (rp);
639 642 }
640 643
641 644 int
642 645 v8plus_obj_setprops(nvlist_t *lp, v8plus_type_t t, ...)
643 646 {
644 647 va_list ap;
645 648 int err;
646 649
647 650 va_start(ap, t);
648 651 err = v8plus_obj_vsetprops(lp, t, &ap);
649 652 va_end(ap);
650 653
651 654 return (err);
652 655 }
653 656
654 657 static void
655 658 v8plus_uv_worker(uv_work_t *wp)
656 659 {
657 660 v8plus_uv_ctx_t *cp = wp->data;
658 661
659 662 cp->vuc_result = cp->vuc_worker(cp->vuc_obj, cp->vuc_ctx);
660 663 }
661 664
662 665 static void
663 666 v8plus_uv_completion(uv_work_t *wp)
664 667 {
665 668 v8plus_uv_ctx_t *cp = wp->data;
666 669
667 670 cp->vuc_completion(cp->vuc_obj, cp->vuc_ctx, cp->vuc_result);
668 671 v8plus_obj_rele(cp->vuc_obj);
669 672 free(cp);
670 673 free(wp);
671 674 }
672 675
673 676 void
674 677 v8plus_defer(void *cop, void *ctxp, v8plus_worker_f worker,
675 678 v8plus_completion_f completion)
676 679 {
677 680 uv_work_t *wp = malloc(sizeof (uv_work_t));
678 681 v8plus_uv_ctx_t *cp = malloc(sizeof (v8plus_uv_ctx_t));
679 682
680 683 bzero(wp, sizeof (uv_work_t));
681 684 bzero(cp, sizeof (v8plus_uv_ctx_t));
682 685
683 686 v8plus_obj_hold(cop);
684 687 cp->vuc_obj = cop;
685 688 cp->vuc_ctx = ctxp;
686 689 cp->vuc_worker = worker;
687 690 cp->vuc_completion = completion;
688 691 wp->data = cp;
689 692
690 693 uv_queue_work(uv_default_loop(), wp, v8plus_uv_worker,
691 694 v8plus_uv_completion);
692 695 }
↓ open down ↓ |
525 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX