35 #include "atexit.h"
36 #include "stdiom.h"
37
38 /*
39 * Note that memory is managed by lmalloc()/lfree().
40 *
41 * Among other reasons, this is occasioned by the insistence of our
42 * brothers sh(1) and csh(1) that they can do malloc, etc., better than
43 * libc can. Those programs define their own malloc routines, and
44 * initialize the underlying mechanism in main(). This means that calls
45  * to malloc occurring before main will crash.  The loader calls atexit(3C)
46 * before calling main, so we'd better avoid malloc() when it does.
47 *
48 * Another reason for using lmalloc()/lfree() is that the atexit()
49 * list must transcend all link maps. See the Linker and Libraries
50 * Guide for information on alternate link maps.
51 *
52 * See "thr_uberdata.h" for the definitions of structures used here.
53 */
54
55 static int in_range(_exithdlr_func_t, Lc_addr_range_t[], uint_t count);
56
57 extern caddr_t _getfp(void);
58
59 /*
60 * exitfns_lock is declared to be a recursive mutex so that we
61 * can hold it while calling out to the registered functions.
62 * If they call back to us, we are self-consistent and everything
63 * works, even the case of calling exit() from functions called
64 * by _exithandle() (recursive exit()). All that is required is
65 * that the registered functions actually return (no longjmp()s).
66 *
67 * Because exitfns_lock is declared to be a recursive mutex, we
68 * cannot use it with lmutex_lock()/lmutex_unlock() and we must
69 * use mutex_lock()/mutex_unlock(). This means that atexit()
70 * and exit() are not async-signal-safe. We make them fork1-safe
71 * via the atexit_locks()/atexit_unlocks() functions, called from
72 * libc_prepare_atfork()/libc_child_atfork()/libc_parent_atfork()
73 */
74
75 /*
76 * atexit_locks() and atexit_unlocks() are called on every link map.
77 * Do not use curthread->ul_uberdata->atexit_root for these.
78 */
79 void
80 atexit_locks()
81 {
82 (void) mutex_lock(&__uberdata.atexit_root.exitfns_lock);
83 }
84
85 void
86 atexit_unlocks()
87 {
88 (void) mutex_unlock(&__uberdata.atexit_root.exitfns_lock);
89 }
90
91 /*
92 * atexit() is called before the primordial thread is fully set up.
93 * Be careful about dereferencing self->ul_uberdata->atexit_root.
94 */
95 int
96 atexit(void (*func)(void))
97 {
98 ulwp_t *self;
99 atexit_root_t *arp;
100 _exthdlr_t *p;
101
102 if ((p = lmalloc(sizeof (_exthdlr_t))) == NULL)
103 return (-1);
104
105 if ((self = __curthread()) == NULL)
106 arp = &__uberdata.atexit_root;
107 else {
108 arp = &self->ul_uberdata->atexit_root;
109 (void) mutex_lock(&arp->exitfns_lock);
110 }
111 p->hdlr = func;
112 p->next = arp->head;
113 arp->head = p;
114 if (self != NULL)
115 (void) mutex_unlock(&arp->exitfns_lock);
116 return (0);
117 }
118
119 void
120 _exithandle(void)
121 {
122 atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
123 _exthdlr_t *p;
124 int cancel_state;
125
126 /* disable cancellation while running atexit handlers */
127 (void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
128 (void) mutex_lock(&arp->exitfns_lock);
129 arp->exit_frame_monitor = _getfp() + STACK_BIAS;
130 p = arp->head;
131 while (p != NULL) {
132 arp->head = p->next;
133 p->hdlr();
134 lfree(p, sizeof (_exthdlr_t));
135 p = arp->head;
136 }
137 (void) mutex_unlock(&arp->exitfns_lock);
138 (void) pthread_setcancelstate(cancel_state, NULL);
139 }
140
141 /*
142 * _get_exit_frame_monitor is called by the C++ runtimes.
143 */
144 void *
145 _get_exit_frame_monitor(void)
146 {
147 atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
148 return (&arp->exit_frame_monitor);
149 }
150
151 /*
152 * The following is a routine which the loader (ld.so.1) calls when it
153 * processes a dlclose call on an object. It resets all signal handlers
154 * which fall within the union of the ranges specified by the elements
155 * of the array range to SIG_DFL.
156 */
static void
_preexec_sig_unload(Lc_addr_range_t range[], uint_t count)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int sig;
	rwlock_t *rwlp;
	struct sigaction *sap;	/* libc's recorded user action for sig */
	struct sigaction oact;
	void (*handler)();

	for (sig = 1; sig < NSIG; sig++) {
		sap = (struct sigaction *)&udp->siguaction[sig].sig_uaction;
again:
		/*
		 * Optimistic, unlocked check first; if the handler lies
		 * inside the object being unloaded, take the write lock
		 * and verify nothing changed in the window.
		 */
		handler = sap->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    in_range(handler, range, count)) {
			rwlp = &udp->siguaction[sig].sig_lock;
			lrw_wrlock(rwlp);
			if (handler != sap->sa_handler) {
				/* raced with a sigaction(); re-evaluate */
				lrw_unlock(rwlp);
				goto again;
			}
			sap->sa_handler = SIG_DFL;
			sap->sa_flags = SA_SIGINFO;
			(void) sigemptyset(&sap->sa_mask);
			/*
			 * NOTE(review): the kernel disposition is only
			 * rewritten when it is currently neither SIG_DFL
			 * nor SIG_IGN -- presumably to avoid disturbing
			 * dispositions not routed through libc; confirm
			 * against sigacthandler in signal handling code.
			 */
			if (__sigaction(sig, NULL, &oact) == 0 &&
			    oact.sa_handler != SIG_DFL &&
			    oact.sa_handler != SIG_IGN)
				(void) __sigaction(sig, sap, NULL);
			lrw_unlock(rwlp);
		}
	}
}
190
191 /*
192 * The following is a routine which the loader (ld.so.1) calls when it
196 */
197 static void
198 _preexec_atfork_unload(Lc_addr_range_t range[], uint_t count)
199 {
200 ulwp_t *self = curthread;
201 uberdata_t *udp = self->ul_uberdata;
202 atfork_t *atfork_q;
203 atfork_t *atfp;
204 atfork_t *next;
205 void (*func)(void);
206 int start_again;
207
208 (void) mutex_lock(&udp->atfork_lock);
209 if ((atfork_q = udp->atforklist) != NULL) {
210 atfp = atfork_q;
211 do {
212 next = atfp->forw;
213 start_again = 0;
214
215 if (((func = atfp->prepare) != NULL &&
216 in_range(func, range, count)) ||
217 ((func = atfp->parent) != NULL &&
218 in_range(func, range, count)) ||
219 ((func = atfp->child) != NULL &&
220 in_range(func, range, count))) {
221 if (self->ul_fork) {
222 /*
223 * dlclose() called from a fork handler.
224 * Deleting the entry would wreak havoc.
225 * Just null out the function pointers
226 * and leave the entry in place.
227 */
228 atfp->prepare = NULL;
229 atfp->parent = NULL;
230 atfp->child = NULL;
231 continue;
232 }
233 if (atfp == atfork_q) {
234 /* deleting the list head member */
235 udp->atforklist = atfork_q = next;
236 start_again = 1;
237 }
238 atfp->forw->back = atfp->back;
239 atfp->back->forw = atfp->forw;
240 lfree(atfp, sizeof (atfork_t));
251
252 /*
253 * The following is a routine which the loader (ld.so.1) calls when it
254 * processes a dlclose call on an object. It sets the destructor
255 * function pointer to NULL for all keys whose destructors fall within
256 * the union of the ranges specified by the elements of the array range.
257 * We don't assign TSD_UNALLOCATED (the equivalent of pthread_key_destroy())
258 * because the thread may use the key's TSD further on in fini processing.
259 */
260 static void
261 _preexec_tsd_unload(Lc_addr_range_t range[], uint_t count)
262 {
263 tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
264 void (*func)(void *);
265 int key;
266
267 lmutex_lock(&tsdm->tsdm_lock);
268 for (key = 1; key < tsdm->tsdm_nused; key++) {
269 if ((func = tsdm->tsdm_destro[key]) != NULL &&
270 func != TSD_UNALLOCATED &&
271 in_range((_exithdlr_func_t)func, range, count))
272 tsdm->tsdm_destro[key] = NULL;
273 }
274 lmutex_unlock(&tsdm->tsdm_lock);
275 }
276
277 /*
278 * The following is a routine which the loader (ld.so.1) calls when it
279 * processes dlclose calls on objects with atexit registrations. It
280 * executes the exit handlers that fall within the union of the ranges
281 * specified by the elements of the array range in the REVERSE ORDER of
282 * their registration. Do not change this characteristic; it is REQUIRED
283 * BEHAVIOR.
284 */
int
_preexec_exit_handlers(Lc_addr_range_t range[], uint_t count)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *o;	/* previous node */
	_exthdlr_t *p;	/* this node */
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);
	o = NULL;
	p = arp->head;
	while (p != NULL) {
		if (in_range(p->hdlr, range, count)) {
			/* We need to execute this one */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;
			p->hdlr();
			lfree(p, sizeof (_exthdlr_t));
			/*
			 * The handler may have altered the list (e.g. by
			 * registering new handlers), so restart the scan
			 * from the current head rather than continuing
			 * past the node we just unlinked.
			 */
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}
	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	/* then purge TSD destructors, fork handlers, signal handlers */
	_preexec_tsd_unload(range, count);
	_preexec_atfork_unload(range, count);
	_preexec_sig_unload(range, count);

	return (0);
}
323
324 static int
325 in_range(_exithdlr_func_t addr, Lc_addr_range_t ranges[], uint_t count)
326 {
327 uint_t idx;
328
329 for (idx = 0; idx < count; idx++) {
330 if ((void *)addr >= ranges[idx].lb &&
331 (void *)addr < ranges[idx].ub) {
332 return (1);
333 }
334 }
335
336 return (0);
337 }
|
35 #include "atexit.h"
36 #include "stdiom.h"
37
38 /*
39 * Note that memory is managed by lmalloc()/lfree().
40 *
41 * Among other reasons, this is occasioned by the insistence of our
42 * brothers sh(1) and csh(1) that they can do malloc, etc., better than
43 * libc can. Those programs define their own malloc routines, and
44 * initialize the underlying mechanism in main(). This means that calls
45  * to malloc occurring before main will crash.  The loader calls atexit(3C)
46 * before calling main, so we'd better avoid malloc() when it does.
47 *
48 * Another reason for using lmalloc()/lfree() is that the atexit()
49 * list must transcend all link maps. See the Linker and Libraries
50 * Guide for information on alternate link maps.
51 *
52 * See "thr_uberdata.h" for the definitions of structures used here.
53 */
54
55 static int in_range(void *, Lc_addr_range_t[], uint_t count);
56
57 extern caddr_t _getfp(void);
58
59 /*
60 * exitfns_lock is declared to be a recursive mutex so that we
61 * can hold it while calling out to the registered functions.
62 * If they call back to us, we are self-consistent and everything
63 * works, even the case of calling exit() from functions called
64 * by _exithandle() (recursive exit()). All that is required is
65 * that the registered functions actually return (no longjmp()s).
66 *
67 * Because exitfns_lock is declared to be a recursive mutex, we
68 * cannot use it with lmutex_lock()/lmutex_unlock() and we must
69 * use mutex_lock()/mutex_unlock(). This means that atexit()
70 * and exit() are not async-signal-safe. We make them fork1-safe
71 * via the atexit_locks()/atexit_unlocks() functions, called from
72 * libc_prepare_atfork()/libc_child_atfork()/libc_parent_atfork()
73 */
74
75 /*
76 * atexit_locks() and atexit_unlocks() are called on every link map.
77 * Do not use curthread->ul_uberdata->atexit_root for these.
78 */
79 void
80 atexit_locks()
81 {
82 (void) mutex_lock(&__uberdata.atexit_root.exitfns_lock);
83 }
84
85 void
86 atexit_unlocks()
87 {
88 (void) mutex_unlock(&__uberdata.atexit_root.exitfns_lock);
89 }
90
91
92 /*
93 * atexit() is called before the primordial thread is fully set up.
94 * Be careful about dereferencing self->ul_uberdata->atexit_root.
95 */
96 int
97 __cxa_atexit(void (*hdlr)(void *), void *arg, void *dso)
98 {
99 ulwp_t *self;
100 atexit_root_t *arp;
101 _exthdlr_t *p;
102
103 if ((p = lmalloc(sizeof (_exthdlr_t))) == NULL)
104 return (-1);
105
106 if ((self = __curthread()) == NULL)
107 arp = &__uberdata.atexit_root;
108 else {
109 arp = &self->ul_uberdata->atexit_root;
110 (void) mutex_lock(&arp->exitfns_lock);
111 }
112 p->hdlr = hdlr;
113 p->arg = arg;
114 p->dso = dso;
115 p->next = arp->head;
116 arp->head = p;
117
118 if (self != NULL)
119 (void) mutex_unlock(&arp->exitfns_lock);
120 return (0);
121 }
122
/*
 * ISO C atexit(3C): registered through __cxa_atexit() with no argument
 * and no owning DSO, so it runs only at process exit.
 * NOTE(review): the cast assumes a void (*)(void) can be invoked
 * through _exithdlr_func_t with an ignored NULL argument -- an
 * ABI-specific convention, not portable ISO C; confirm per platform.
 */
int
atexit(void (*func)(void))
{
	return (__cxa_atexit((_exithdlr_func_t)func, NULL, NULL));
}
128
129 /*
130 * Note that we may be entered recursively, as we'll call __cxa_finalize(0) at
131 * exit, one of our handlers is ld.so.1`atexit_fini, and libraries may call
132 * __cxa_finalize(__dso_handle) from their _fini.
133 */
void
__cxa_finalize(void *dso)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *p, *o;	/* current node, previous node */
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);

	o = NULL;
	p = arp->head;
	while (p != NULL) {
		/* dso == NULL (from _exithandle) means run everything */
		if ((dso == NULL) || (p->dso == dso)) {
			/*
			 * Unlink before calling: the handler may itself
			 * register handlers or re-enter __cxa_finalize()
			 * (exitfns_lock is recursive), so restart the
			 * scan from the possibly-changed head afterward.
			 */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;

			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}

	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);
}
167
168 void
169 _exithandle(void)
170 {
171 atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
172
173 arp->exit_frame_monitor = _getfp() + STACK_BIAS;
174 __cxa_finalize(NULL);
175 }
176
177 /*
178 * _get_exit_frame_monitor is called by the C++ runtimes.
179 */
180 void *
181 _get_exit_frame_monitor(void)
182 {
183 atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
184 return (&arp->exit_frame_monitor);
185 }
186
187 /*
188 * The following is a routine which the loader (ld.so.1) calls when it
189 * processes a dlclose call on an object. It resets all signal handlers
190 * which fall within the union of the ranges specified by the elements
191 * of the array range to SIG_DFL.
192 */
static void
_preexec_sig_unload(Lc_addr_range_t range[], uint_t count)
{
	uberdata_t *udp = curthread->ul_uberdata;
	int sig;
	rwlock_t *rwlp;
	struct sigaction *sap;	/* libc's recorded user action for sig */
	struct sigaction oact;
	void (*handler)();

	for (sig = 1; sig < NSIG; sig++) {
		sap = (struct sigaction *)&udp->siguaction[sig].sig_uaction;
again:
		/*
		 * Optimistic, unlocked check first; if the handler lies
		 * inside the object being unloaded, take the write lock
		 * and verify nothing changed in the window.
		 */
		handler = sap->sa_handler;
		if (handler != SIG_DFL && handler != SIG_IGN &&
		    in_range((void *)handler, range, count)) {
			rwlp = &udp->siguaction[sig].sig_lock;
			lrw_wrlock(rwlp);
			if (handler != sap->sa_handler) {
				/* raced with a sigaction(); re-evaluate */
				lrw_unlock(rwlp);
				goto again;
			}
			sap->sa_handler = SIG_DFL;
			sap->sa_flags = SA_SIGINFO;
			(void) sigemptyset(&sap->sa_mask);
			/*
			 * NOTE(review): the kernel disposition is only
			 * rewritten when it is currently neither SIG_DFL
			 * nor SIG_IGN -- presumably to avoid disturbing
			 * dispositions not routed through libc; confirm
			 * against sigacthandler in signal handling code.
			 */
			if (__sigaction(sig, NULL, &oact) == 0 &&
			    oact.sa_handler != SIG_DFL &&
			    oact.sa_handler != SIG_IGN)
				(void) __sigaction(sig, sap, NULL);
			lrw_unlock(rwlp);
		}
	}
}
226
227 /*
228 * The following is a routine which the loader (ld.so.1) calls when it
232 */
233 static void
234 _preexec_atfork_unload(Lc_addr_range_t range[], uint_t count)
235 {
236 ulwp_t *self = curthread;
237 uberdata_t *udp = self->ul_uberdata;
238 atfork_t *atfork_q;
239 atfork_t *atfp;
240 atfork_t *next;
241 void (*func)(void);
242 int start_again;
243
244 (void) mutex_lock(&udp->atfork_lock);
245 if ((atfork_q = udp->atforklist) != NULL) {
246 atfp = atfork_q;
247 do {
248 next = atfp->forw;
249 start_again = 0;
250
251 if (((func = atfp->prepare) != NULL &&
252 in_range((void *)func, range, count)) ||
253 ((func = atfp->parent) != NULL &&
254 in_range((void *)func, range, count)) ||
255 ((func = atfp->child) != NULL &&
256 in_range((void *)func, range, count))) {
257 if (self->ul_fork) {
258 /*
259 * dlclose() called from a fork handler.
260 * Deleting the entry would wreak havoc.
261 * Just null out the function pointers
262 * and leave the entry in place.
263 */
264 atfp->prepare = NULL;
265 atfp->parent = NULL;
266 atfp->child = NULL;
267 continue;
268 }
269 if (atfp == atfork_q) {
270 /* deleting the list head member */
271 udp->atforklist = atfork_q = next;
272 start_again = 1;
273 }
274 atfp->forw->back = atfp->back;
275 atfp->back->forw = atfp->forw;
276 lfree(atfp, sizeof (atfork_t));
287
288 /*
289 * The following is a routine which the loader (ld.so.1) calls when it
290 * processes a dlclose call on an object. It sets the destructor
291 * function pointer to NULL for all keys whose destructors fall within
292 * the union of the ranges specified by the elements of the array range.
293 * We don't assign TSD_UNALLOCATED (the equivalent of pthread_key_destroy())
294 * because the thread may use the key's TSD further on in fini processing.
295 */
296 static void
297 _preexec_tsd_unload(Lc_addr_range_t range[], uint_t count)
298 {
299 tsd_metadata_t *tsdm = &curthread->ul_uberdata->tsd_metadata;
300 void (*func)(void *);
301 int key;
302
303 lmutex_lock(&tsdm->tsdm_lock);
304 for (key = 1; key < tsdm->tsdm_nused; key++) {
305 if ((func = tsdm->tsdm_destro[key]) != NULL &&
306 func != TSD_UNALLOCATED &&
307 in_range((void *)func, range, count))
308 tsdm->tsdm_destro[key] = NULL;
309 }
310 lmutex_unlock(&tsdm->tsdm_lock);
311 }
312
313 /*
314 * The following is a routine which the loader (ld.so.1) calls when it
315 * processes dlclose calls on objects with atexit registrations. It
316 * executes the exit handlers that fall within the union of the ranges
317 * specified by the elements of the array range in the REVERSE ORDER of
318 * their registration. Do not change this characteristic; it is REQUIRED
319 * BEHAVIOR.
320 */
int
_preexec_exit_handlers(Lc_addr_range_t range[], uint_t count)
{
	atexit_root_t *arp = &curthread->ul_uberdata->atexit_root;
	_exthdlr_t *o;	/* previous node */
	_exthdlr_t *p;	/* this node */
	int cancel_state;

	/* disable cancellation while running atexit handlers */
	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cancel_state);
	(void) mutex_lock(&arp->exitfns_lock);
	o = NULL;
	p = arp->head;
	while (p != NULL) {
		/*
		 * We call even CXA handlers of functions present in the
		 * library being unloaded. The specification isn't
		 * particularly clear on this, and this seems the most sane.
		 * This is the behaviour of FreeBSD 9.1 (GNU libc leaves the
		 * handler on the exit list, and crashes at exit time).
		 *
		 * This won't cause handlers to be called twice, because
		 * anything called from a __cxa_finalize call from the
		 * language runtime will have been removed from the list.
		 */
		if (in_range((void *)p->hdlr, range, count)) {
			/* We need to execute this one */
			if (o != NULL)
				o->next = p->next;
			else
				arp->head = p->next;
			p->hdlr(p->arg);
			lfree(p, sizeof (_exthdlr_t));
			/*
			 * The handler may have altered the list, so restart
			 * the scan from the current head rather than
			 * continuing past the node we just unlinked.
			 */
			o = NULL;
			p = arp->head;
		} else {
			o = p;
			p = p->next;
		}
	}
	(void) mutex_unlock(&arp->exitfns_lock);
	(void) pthread_setcancelstate(cancel_state, NULL);

	/* then purge TSD destructors, fork handlers, signal handlers */
	_preexec_tsd_unload(range, count);
	_preexec_atfork_unload(range, count);
	_preexec_sig_unload(range, count);

	return (0);
}
370
371 static int
372 in_range(void *addr, Lc_addr_range_t ranges[], uint_t count)
373 {
374 uint_t idx;
375
376 for (idx = 0; idx < count; idx++) {
377 if (addr >= ranges[idx].lb &&
378 addr < ranges[idx].ub) {
379 return (1);
380 }
381 }
382
383 return (0);
384 }
|