/*
 * Copyright (c) 2012 Joyent, Inc. All rights reserved.
 */

#include <sys/ccompile.h>
#include <sys/debug.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <uv.h>
#include <pthread.h>
#include "v8plus_glue.h"

__thread v8plus_errno_t _v8plus_errno;
__thread char _v8plus_errmsg[V8PLUS_ERRMSG_LEN];

typedef struct v8plus_uv_ctx {
	void *vuc_obj;
	void *vuc_ctx;
	void *vuc_result;
	v8plus_worker_f vuc_worker;
	v8plus_completion_f vuc_completion;
} v8plus_uv_ctx_t;

static STAILQ_HEAD(v8plus_callq_head, v8plus_async_call) _v8plus_callq =
    STAILQ_HEAD_INITIALIZER(_v8plus_callq);
static pthread_mutex_t _v8plus_callq_mtx;
static pthread_t _v8plus_uv_event_thread;
static uv_async_t _v8plus_uv_async;

typedef enum v8plus_async_call_type {
	ACT_OBJECT_CALL = 1,
	ACT_OBJECT_RELEASE,
	ACT_JSFUNC_CALL,
	ACT_JSFUNC_RELEASE,
} v8plus_async_call_type_t;

typedef struct v8plus_async_call {
	v8plus_async_call_type_t vac_type;
	boolean_t vac_noreply;

	/*
	 * For ACT_OBJECT_{CALL,RELEASE}:
	 */
	void *vac_cop;
	const char *vac_name;
	/*
	 * For ACT_JSFUNC_{CALL,RELEASE}:
	 */
	v8plus_jsfunc_t vac_func;

	/*
	 * Common call arguments:
	 */
	const nvlist_t *vac_lp;

	pthread_cond_t vac_cv;
	pthread_mutex_t vac_mtx;

	boolean_t vac_run;
	nvlist_t *vac_return;

	STAILQ_ENTRY(v8plus_async_call) vac_callq_entry;
} v8plus_async_call_t;

boolean_t
v8plus_in_event_thread(void)
{
	return (_v8plus_uv_event_thread == pthread_self() ? B_TRUE : B_FALSE);
}

static void
v8plus_async_callback(uv_async_t *async __UNUSED, int status __UNUSED)
{
	if (v8plus_in_event_thread() != B_TRUE)
		v8plus_panic("async callback called outside of event loop");

	for (;;) {
		v8plus_async_call_t *vac = NULL;

		/*
		 * Fetch the next queued method:
		 */
		if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not lock async queue mutex");
		if (!STAILQ_EMPTY(&_v8plus_callq)) {
			vac = STAILQ_FIRST(&_v8plus_callq);
			STAILQ_REMOVE_HEAD(&_v8plus_callq, vac_callq_entry);
		}
		if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
			v8plus_panic("could not unlock async queue mutex");

		if (vac == NULL)
			break;

		/*
		 * Run the queued method:
		 */
		if (vac->vac_run == B_TRUE)
			v8plus_panic("async call already run");

		switch (vac->vac_type) {
		case ACT_OBJECT_CALL:
			vac->vac_return = v8plus_method_call_direct(
			    vac->vac_cop, vac->vac_name, vac->vac_lp);
			break;
		case ACT_OBJECT_RELEASE:
			v8plus_obj_rele(vac->vac_cop);
			break;
		case ACT_JSFUNC_CALL:
			vac->vac_return = v8plus_call_direct(
			    vac->vac_func, vac->vac_lp);
			break;
		case ACT_JSFUNC_RELEASE:
			v8plus_jsfunc_rele(vac->vac_func);
			break;
		}

		if (vac->vac_noreply == B_TRUE) {
			/*
			 * The caller posted this event and is not sleeping
			 * on a reply.  Free the arguments and the call
			 * structure and move on.
			 */
			if (vac->vac_lp != NULL)
				nvlist_free((nvlist_t *)vac->vac_lp);
			free(vac);
			continue;
		}

		if (pthread_mutex_lock(&vac->vac_mtx) != 0)
			v8plus_panic("could not lock async call mutex");
		vac->vac_run = B_TRUE;
		if (pthread_cond_broadcast(&vac->vac_cv) != 0)
			v8plus_panic("could not signal async call condvar");
		if (pthread_mutex_unlock(&vac->vac_mtx) != 0)
			v8plus_panic("could not unlock async call mutex");
	}
}

/*
 * As we cannot manipulate v8plus/V8/Node structures directly from outside the
 * event loop thread, we push the call arguments onto a queue and post to the
 * event loop thread.  We then sleep on our condition variable until the event
 * loop thread makes the call for us and wakes us up.
 *
 * This routine implements the parts of this interaction common to all
 * variants.
 */
static nvlist_t *
v8plus_cross_thread_call(v8plus_async_call_t *vac)
{
	/*
	 * Common call structure initialisation:
	 */
	if (pthread_mutex_init(&vac->vac_mtx, NULL) != 0)
		v8plus_panic("could not init async call mutex");
	if (pthread_cond_init(&vac->vac_cv, NULL) != 0)
		v8plus_panic("could not init async call condvar");
	vac->vac_run = B_FALSE;

	/*
	 * Post request to queue:
	 */
	if (pthread_mutex_lock(&_v8plus_callq_mtx) != 0)
		v8plus_panic("could not lock async queue mutex");
	STAILQ_INSERT_TAIL(&_v8plus_callq, vac, vac_callq_entry);
	if (pthread_mutex_unlock(&_v8plus_callq_mtx) != 0)
		v8plus_panic("could not unlock async queue mutex");
	uv_async_send(&_v8plus_uv_async);

	if (vac->vac_noreply == B_TRUE) {
		/*
		 * The caller does not care about the reply, and has allocated
		 * the v8plus_async_call_t structure from the heap.  The
		 * async callback will free the storage when it completes.
		 */
		return (NULL);
	}

	/*
	 * Wait for our request to be serviced on the event loop thread:
	 */
	if (pthread_mutex_lock(&vac->vac_mtx) != 0)
		v8plus_panic("could not lock async call mutex");
	while (vac->vac_run == B_FALSE) {
		if (pthread_cond_wait(&vac->vac_cv, &vac->vac_mtx) != 0)
			v8plus_panic("could not wait on async call condvar");
	}
	if (pthread_mutex_unlock(&vac->vac_mtx) != 0)
		v8plus_panic("could not unlock async call mutex");

	if (pthread_cond_destroy(&vac->vac_cv) != 0)
		v8plus_panic("could not destroy async call condvar");
	if (pthread_mutex_destroy(&vac->vac_mtx) != 0)
		v8plus_panic("could not destroy async call mutex");

	return (vac->vac_return);
}

nvlist_t *
v8plus_method_call(void *cop, const char *name, const nvlist_t *lp)
{
	v8plus_async_call_t vac;

	if (v8plus_in_event_thread() == B_TRUE) {
		/*
		 * We're running in the event loop thread, so we can make the
		 * call directly.
		 */
		return (v8plus_method_call_direct(cop, name, lp));
	}

	bzero(&vac, sizeof (vac));
	vac.vac_type = ACT_OBJECT_CALL;
	vac.vac_noreply = B_FALSE;
	vac.vac_cop = cop;
	vac.vac_name = name;
	vac.vac_lp = lp;

	return (v8plus_cross_thread_call(&vac));
}

nvlist_t *
v8plus_call(v8plus_jsfunc_t func, const nvlist_t *lp)
{
	v8plus_async_call_t vac;

	if (v8plus_in_event_thread() == B_TRUE) {
		/*
		 * We're running in the event loop thread, so we can make the
		 * call directly.
		 */
		return (v8plus_call_direct(func, lp));
	}

	bzero(&vac, sizeof (vac));
	vac.vac_type = ACT_JSFUNC_CALL;
	vac.vac_noreply = B_FALSE;
	vac.vac_func = func;
	vac.vac_lp = lp;

	return (v8plus_cross_thread_call(&vac));
}
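
/*
 * Example (illustrative only, not part of the library): a native thread that
 * has previously taken a hold on a JavaScript callback with
 * v8plus_jsfunc_hold() could invoke it from outside the event loop roughly
 * like this; the "cb" handle and the "progress" value are hypothetical, and
 * the argument nvlist is keyed by decimal argument index as in v8plus_args()
 * below:
 *
 *	nvlist_t *ap = v8plus_obj(
 *	    V8PLUS_TYPE_NUMBER, "0", (double)progress,
 *	    V8PLUS_TYPE_NONE);
 *	nvlist_t *rp = v8plus_call(cb, ap);
 *	nvlist_free(ap);
 *
 * v8plus_call() blocks the calling thread until the event loop thread has
 * run the JavaScript function, then returns the encoded result ("rp" above).
 */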

void
v8plus_obj_rele(const void *cop)
{
	v8plus_async_call_t *vac;

	if (v8plus_in_event_thread() == B_TRUE) {
		return (v8plus_obj_rele_direct(cop));
	}

	vac = calloc(1, sizeof (*vac));
	if (vac == NULL)
		v8plus_panic("could not allocate async call structure");

	vac->vac_type = ACT_OBJECT_RELEASE;
	vac->vac_noreply = B_TRUE;
	vac->vac_cop = (void *)cop;

	(void) v8plus_cross_thread_call(vac);
}

void
v8plus_jsfunc_rele(v8plus_jsfunc_t f)
{
	v8plus_async_call_t *vac;

	if (v8plus_in_event_thread() == B_TRUE) {
		return (v8plus_jsfunc_rele_direct(f));
	}

	vac = calloc(1, sizeof (*vac));
	if (vac == NULL)
		v8plus_panic("could not allocate async call structure");

	vac->vac_type = ACT_JSFUNC_RELEASE;
	vac->vac_noreply = B_TRUE;
	vac->vac_func = f;

	(void) v8plus_cross_thread_call(vac);
}

/*
 * Initialise structures for off-event-loop method calls.
 *
 * Note that uv_async_init() must be called from within the libuv event loop,
 * so we do it here.  We also record the thread ID of the event loop thread so
 * that we can later decide whether a call may be made directly or must be
 * queued across threads.
 */
void
v8plus_crossthread_init(void)
{
	_v8plus_uv_event_thread = pthread_self();
	if (uv_async_init(uv_default_loop(), &_v8plus_uv_async,
	    v8plus_async_callback) != 0)
		v8plus_panic("unable to initialise uv_async_t");
	if (pthread_mutex_init(&_v8plus_callq_mtx, NULL) != 0)
		v8plus_panic("unable to initialise mutex");
}

nvlist_t *
v8plus_verror(v8plus_errno_t e, const char *fmt, va_list ap)
{
	if (fmt == NULL) {
		if (e == V8PLUSERR_NOERROR) {
			*_v8plus_errmsg = '\0';
		} else {
			(void) snprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN,
			    "%s", v8plus_strerror(e));
		}
	} else {
		(void) vsnprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN, fmt, ap);
	}
	_v8plus_errno = e;

	return (NULL);
}

nvlist_t *
v8plus_error(v8plus_errno_t e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	(void) v8plus_verror(e, fmt, ap);
	va_end(ap);

	return (NULL);
}

static void __NORETURN
v8plus_vpanic(const char *fmt, va_list ap)
{
	(void) vfprintf(stderr, fmt, ap);
	(void) fflush(stderr);
	abort();
}

void
v8plus_panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	v8plus_vpanic(fmt, ap);
	va_end(ap);
}

nvlist_t *
v8plus_nverr(int nverr, const char *member)
{
	(void) snprintf(_v8plus_errmsg, V8PLUS_ERRMSG_LEN,
	    "nvlist manipulation error on member %s: %s",
	    member == NULL ? "<none>" : member, strerror(nverr));

	switch (nverr) {
	case ENOMEM:
		_v8plus_errno = V8PLUSERR_NOMEM;
		break;
	case EINVAL:
		_v8plus_errno = V8PLUSERR_YOUSUCK;
		break;
	default:
		_v8plus_errno = V8PLUSERR_UNKNOWN;
		break;
	}

	return (NULL);
}

nvlist_t *
v8plus_syserr(int syserr, const char *fmt, ...)
{
	v8plus_errno_t e;
	va_list ap;

	switch (syserr) {
	case ENOMEM:
		e = V8PLUSERR_NOMEM;
		break;
	case EBADF:
		e = V8PLUSERR_BADF;
		break;
	default:
		e = V8PLUSERR_UNKNOWN;
		break;
	}

	va_start(ap, fmt);
	(void) v8plus_verror(e, fmt, ap);
	va_end(ap);

	return (NULL);
}

/*
 * The NULL nvlist with V8PLUSERR_NOERROR means we are returning void.
 */
nvlist_t *
v8plus_void(void)
{
	return (v8plus_error(V8PLUSERR_NOERROR, NULL));
}

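/*
 * JavaScript values are encoded in nvpairs as follows; this table simply
 * summarises the behaviour of v8plus_typeof() and v8plus_arg_value() below:
 *
 *	number		DATA_TYPE_DOUBLE
 *	string		DATA_TYPE_STRING
 *	object		DATA_TYPE_NVLIST
 *	boolean		DATA_TYPE_BOOLEAN_VALUE
 *	undefined	DATA_TYPE_BOOLEAN (a valueless pair)
 *	null		DATA_TYPE_BYTE with value 0
 *	function	DATA_TYPE_UINT64_ARRAY of length 1 (a jsfunc handle)
 *
 * Any other nvpair type is reported as V8PLUS_TYPE_INVALID.
 */
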
v8plus_type_t
v8plus_typeof(const nvpair_t *pp)
{
	data_type_t t = nvpair_type((nvpair_t *)pp);

	switch (t) {
	case DATA_TYPE_DOUBLE:
		return (V8PLUS_TYPE_NUMBER);
	case DATA_TYPE_STRING:
		return (V8PLUS_TYPE_STRING);
	case DATA_TYPE_NVLIST:
		return (V8PLUS_TYPE_OBJECT);
	case DATA_TYPE_BOOLEAN_VALUE:
		return (V8PLUS_TYPE_BOOLEAN);
	case DATA_TYPE_BOOLEAN:
		return (V8PLUS_TYPE_UNDEFINED);
	case DATA_TYPE_BYTE:
	{
		uchar_t v;
		if (nvpair_value_byte((nvpair_t *)pp, &v) != 0 || v != 0)
			return (V8PLUS_TYPE_INVALID);
		return (V8PLUS_TYPE_NULL);
	}
	case DATA_TYPE_UINT64_ARRAY:
	{
		uint64_t *vp;
		uint_t nv;
		if (nvpair_value_uint64_array((nvpair_t *)pp, &vp, &nv) != 0 ||
		    nv != 1) {
			return (V8PLUS_TYPE_INVALID);
		}
		return (V8PLUS_TYPE_JSFUNC);
	}
	default:
		return (V8PLUS_TYPE_INVALID);
	}
}

static int
v8plus_arg_value(v8plus_type_t t, const nvpair_t *pp, void *vp)
{
	data_type_t dt = nvpair_type((nvpair_t *)pp);

	switch (t) {
	case V8PLUS_TYPE_NONE:
		return (-1);
	case V8PLUS_TYPE_STRING:
		if (dt == DATA_TYPE_STRING) {
			if (vp != NULL) {
				(void) nvpair_value_string((nvpair_t *)pp,
				    (char **)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_NUMBER:
		if (dt == DATA_TYPE_DOUBLE) {
			if (vp != NULL) {
				(void) nvpair_value_double((nvpair_t *)pp,
				    (double *)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_BOOLEAN:
		if (dt == DATA_TYPE_BOOLEAN_VALUE) {
			if (vp != NULL) {
				(void) nvpair_value_boolean_value(
				    (nvpair_t *)pp, (boolean_t *)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_JSFUNC:
		if (dt == DATA_TYPE_UINT64_ARRAY) {
			uint_t nv;
			uint64_t *vpp;

			if (nvpair_value_uint64_array((nvpair_t *)pp,
			    &vpp, &nv) == 0 && nv == 1) {
				if (vp != NULL)
					*(v8plus_jsfunc_t *)vp = vpp[0];
				return (0);
			}
		}
		return (-1);
	case V8PLUS_TYPE_OBJECT:
		if (dt == DATA_TYPE_NVLIST) {
			if (vp != NULL) {
				(void) nvpair_value_nvlist((nvpair_t *)pp,
				    (nvlist_t **)vp);
			}
			return (0);
		}
		return (-1);
	case V8PLUS_TYPE_NULL:
		if (dt == DATA_TYPE_BYTE) {
			uchar_t v;

			if (nvpair_value_byte((nvpair_t *)pp, &v) == 0 &&
			    v == 0)
				return (0);
		}
		return (-1);
	case V8PLUS_TYPE_UNDEFINED:
		return (dt == DATA_TYPE_BOOLEAN ? 0 : -1);
	case V8PLUS_TYPE_ANY:
		if (vp != NULL)
			*(const nvpair_t **)vp = pp;
		return (0);
	case V8PLUS_TYPE_INVALID:
		if (vp != NULL)
			*(data_type_t *)vp = dt;
		return (0);
	case V8PLUS_TYPE_STRNUMBER64:
		if (dt == DATA_TYPE_STRING) {
			char *s;
			uint64_t v;

			(void) nvpair_value_string((nvpair_t *)pp, &s);
			errno = 0;
			v = (uint64_t)strtoull(s, NULL, 0);
			if (errno != 0)
				return (-1);
			if (vp != NULL)
				*(uint64_t *)vp = v;
			return (0);
		}
		return (-1);
	default:
		return (-1);
	}
}

int
v8plus_args(const nvlist_t *lp, uint_t flags, v8plus_type_t t, ...)
{
	v8plus_type_t nt;
	nvpair_t *pp;
	void *vp;
	va_list ap;
	uint_t i;
	char buf[32];

	va_start(ap, t);

	for (i = 0, nt = t; nt != V8PLUS_TYPE_NONE; i++) {
		switch (nt) {
		case V8PLUS_TYPE_UNDEFINED:
		case V8PLUS_TYPE_NULL:
			break;
		default:
			(void) va_arg(ap, void *);
		}

		(void) snprintf(buf, sizeof (buf), "%u", i);
		if (nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) != 0) {
			(void) v8plus_error(V8PLUSERR_MISSINGARG,
			    "argument %u is required", i);
			va_end(ap);
			return (-1);
		}

		if (v8plus_arg_value(nt, pp, NULL) != 0) {
			(void) v8plus_error(V8PLUSERR_BADARG,
			    "argument %u is of incorrect type", i);
			va_end(ap);
			return (-1);
		}

		nt = va_arg(ap, v8plus_type_t);
	}

	va_end(ap);

	if (flags & V8PLUS_ARG_F_NOEXTRA) {
		(void) snprintf(buf, sizeof (buf), "%u", i);
		if (nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) == 0) {
			(void) v8plus_error(V8PLUSERR_EXTRAARG,
			    "superfluous argument(s) detected");
			return (-1);
		}
	}

	va_start(ap, t);

	for (i = 0, nt = t; nt != V8PLUS_TYPE_NONE; i++) {
		switch (nt) {
		case V8PLUS_TYPE_UNDEFINED:
		case V8PLUS_TYPE_NULL:
			vp = NULL;
			break;
		default:
			vp = va_arg(ap, void *);
		}

		(void) snprintf(buf, sizeof (buf), "%u", i);
		VERIFY(nvlist_lookup_nvpair((nvlist_t *)lp, buf, &pp) == 0);
		VERIFY(v8plus_arg_value(nt, pp, vp) == 0);

		nt = va_arg(ap, v8plus_type_t);
	}

	va_end(ap);

	return (0);
}
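
/*
 * Example (illustrative only; the method and variable names are
 * hypothetical): a native method expecting a string, a number, and a
 * callback could decode its argument list like this:
 *
 *	char *name;
 *	double timeout;
 *	v8plus_jsfunc_t cb;
 *
 *	if (v8plus_args(ap, V8PLUS_ARG_F_NOEXTRA,
 *	    V8PLUS_TYPE_STRING, &name,
 *	    V8PLUS_TYPE_NUMBER, &timeout,
 *	    V8PLUS_TYPE_JSFUNC, &cb,
 *	    V8PLUS_TYPE_NONE) != 0)
 *		return (NULL);
 *
 * On failure, v8plus_error() has already recorded the error in _v8plus_errno
 * and _v8plus_errmsg, so the method can simply propagate the NULL return.
 */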

static int
v8plus_obj_vsetprops(nvlist_t *lp, v8plus_type_t t, va_list *ap)
{
	v8plus_type_t nt = t;
	char *name;
	int err;

	/*
	 * Do not call va_start() or va_end() in this function!  We are limited
	 * to a single traversal of the arguments so that we can recurse to
	 * handle embedded object definitions.
	 */

	while (nt != V8PLUS_TYPE_NONE) {
		name = va_arg(*ap, char *);

		switch (nt) {
		case V8PLUS_TYPE_STRING:
		{
			char *s = va_arg(*ap, char *);
			if ((err = nvlist_add_string(lp, name, s)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_NUMBER:
		{
			double d = va_arg(*ap, double);
			if ((err = nvlist_add_double(lp, name, d)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_BOOLEAN:
		{
			boolean_t b = va_arg(*ap, boolean_t);
			if ((err = nvlist_add_boolean_value(lp,
			    name, b)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_JSFUNC:
		{
			v8plus_jsfunc_t j = va_arg(*ap, v8plus_jsfunc_t);
			if ((err = nvlist_add_uint64_array(lp,
			    name, &j, 1)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			if ((err = nvlist_add_string_array(lp,
			    V8PLUS_JSF_COOKIE, NULL, 0)) != 0) {
				(void) v8plus_nverr(err, V8PLUS_JSF_COOKIE);
				return (-1);
			}
			v8plus_jsfunc_hold(j);
			break;
		}
		case V8PLUS_TYPE_OBJECT:
		{
			const nvlist_t *op = va_arg(*ap, const nvlist_t *);
			if ((err = nvlist_add_nvlist(lp, name,
			    (nvlist_t *)op)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_NULL:
			if ((err = nvlist_add_byte(lp, name, 0)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		case V8PLUS_TYPE_UNDEFINED:
			if ((err = nvlist_add_boolean(lp, name)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		case V8PLUS_TYPE_ANY:
		{
			nvpair_t *pp = va_arg(*ap, nvpair_t *);
			if ((err = nvlist_add_nvpair(lp, pp)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_STRNUMBER64:
		{
			uint64_t v = va_arg(*ap, uint64_t);
			char s[32];
			(void) snprintf(s, sizeof (s), "%" PRIu64, v);
			if ((err = nvlist_add_string(lp, name, s)) != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_INL_OBJECT:
		{
			nvlist_t *slp;

			nt = va_arg(*ap, v8plus_type_t);
			err = nvlist_alloc(&slp, NV_UNIQUE_NAME, 0);
			if (err != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			if (v8plus_obj_vsetprops(slp, nt, ap) != 0) {
				nvlist_free(slp);
				return (-1);
			}

			err = nvlist_add_nvlist(lp, name, slp);
			nvlist_free(slp);
			if (err != 0) {
				(void) v8plus_nverr(err, name);
				return (-1);
			}
			break;
		}
		case V8PLUS_TYPE_INVALID:
		default:
			(void) v8plus_error(V8PLUSERR_YOUSUCK,
			    "invalid property type %d", nt);
			return (-1);
		}

		nt = va_arg(*ap, v8plus_type_t);
	}

	return (0);
}

nvlist_t *
v8plus_obj(v8plus_type_t t, ...)
{
	nvlist_t *rp;
	va_list ap;
	int err;

	if ((err = nvlist_alloc(&rp, NV_UNIQUE_NAME, 0)) != 0)
		return (v8plus_nverr(err, NULL));

	va_start(ap, t);
	err = v8plus_obj_vsetprops(rp, t, &ap);
	va_end(ap);

	if (err != 0) {
		nvlist_free(rp);
		rp = NULL;
	}

	return (rp);
}

int
v8plus_obj_setprops(nvlist_t *lp, v8plus_type_t t, ...)
{
	va_list ap;
	int err;

	va_start(ap, t);
	err = v8plus_obj_vsetprops(lp, t, &ap);
	va_end(ap);

	return (err);
}
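
/*
 * Example (illustrative only; the property names and values are
 * hypothetical): constructing a nested object to return to JavaScript:
 *
 *	nvlist_t *rp = v8plus_obj(
 *	    V8PLUS_TYPE_STRING, "name", "eth0",
 *	    V8PLUS_TYPE_INL_OBJECT, "stats",
 *		V8PLUS_TYPE_NUMBER, "rx_bytes", (double)rx,
 *		V8PLUS_TYPE_NUMBER, "tx_bytes", (double)tx,
 *		V8PLUS_TYPE_NONE,
 *	    V8PLUS_TYPE_NONE);
 *
 * Each V8PLUS_TYPE_INL_OBJECT definition is terminated by its own
 * V8PLUS_TYPE_NONE; the final V8PLUS_TYPE_NONE ends the outer list.  On
 * failure, v8plus_obj() returns NULL with _v8plus_errno already set.
 */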

static void
v8plus_uv_worker(uv_work_t *wp)
{
	v8plus_uv_ctx_t *cp = wp->data;

	cp->vuc_result = cp->vuc_worker(cp->vuc_obj, cp->vuc_ctx);
}

static void
v8plus_uv_completion(uv_work_t *wp)
{
	v8plus_uv_ctx_t *cp = wp->data;

	cp->vuc_completion(cp->vuc_obj, cp->vuc_ctx, cp->vuc_result);
	v8plus_obj_rele(cp->vuc_obj);
	free(cp);
	free(wp);
}

void
v8plus_defer(void *cop, void *ctxp, v8plus_worker_f worker,
    v8plus_completion_f completion)
{
	uv_work_t *wp = malloc(sizeof (uv_work_t));
	v8plus_uv_ctx_t *cp = malloc(sizeof (v8plus_uv_ctx_t));

	if (wp == NULL || cp == NULL)
		v8plus_panic("could not allocate deferred work structures");

	bzero(wp, sizeof (uv_work_t));
	bzero(cp, sizeof (v8plus_uv_ctx_t));

	v8plus_obj_hold(cop);
	cp->vuc_obj = cop;
	cp->vuc_ctx = ctxp;
	cp->vuc_worker = worker;
	cp->vuc_completion = completion;
	wp->data = cp;

	uv_queue_work(uv_default_loop(), wp, v8plus_uv_worker,
	    v8plus_uv_completion);
}
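
/*
 * Example (illustrative only; my_worker, my_completion and do_blocking_work
 * are hypothetical): a method that must perform blocking work off the event
 * loop could use v8plus_defer() as follows:
 *
 *	static void *
 *	my_worker(void *op, void *ctx)
 *	{
 *		void *result = do_blocking_work(ctx);	(no V8/Node access)
 *		return (result);
 *	}
 *
 *	static void
 *	my_completion(void *op, void *ctx, void *result)
 *	{
 *		(back on the event loop; safe to call into JavaScript here)
 *	}
 *
 *	v8plus_defer(op, ctx, my_worker, my_completion);
 *
 * v8plus_defer() takes a hold on the object before queueing the work and
 * releases it after the completion callback has run, so the object cannot
 * disappear while the work is outstanding.
 */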