Print this page
3605 Xen HVM hangs during boot if apix is enabled
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/i86hvm/io/xpv/evtchn.c
+++ new/usr/src/uts/i86pc/i86hvm/io/xpv/evtchn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2014 by Delphix. All rights reserved.
29 29 */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/xpv_support.h>
33 33 #include <sys/hypervisor.h>
34 34 #include <sys/machsystm.h>
35 35 #include <sys/mutex.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/dditypes.h>
38 38 #include <sys/atomic.h>
39 39 #include <sys/sysmacros.h>
40 40 #include <sys/cpu.h>
41 41 #include <sys/psw.h>
42 42 #include <sys/psm.h>
43 43 #include <sys/sdt.h>
44 44
45 45 extern dev_info_t *xpv_dip;
46 46 static ddi_intr_handle_t *evtchn_ihp = NULL;
47 47 static ddi_softint_handle_t evtchn_to_handle[NR_EVENT_CHANNELS];
48 48 kmutex_t ec_lock;
49 49
50 50 static int evtchn_callback_irq = -1;
51 51
52 52 static volatile ulong_t *pending_events;
53 53 static volatile ulong_t *masked_events;
54 54
/* log2(NBBY * sizeof (ulong)) */
#ifdef __amd64
#define	EVTCHN_SHIFT	6
#else /* __i386 */
#define	EVTCHN_SHIFT	5
#endif

/* Number of bits in one bitmap word (64 on amd64, 32 on i386). */
#define	EVTCHN_BITS	(1 << EVTCHN_SHIFT)

/* Atomically get and clear a ulong from memory. */
#define	GET_AND_CLEAR(src, targ) { \
	membar_enter(); \
	do { \
		targ = *src; \
	} while (atomic_cas_ulong(src, targ, 0) != targ); \
}

/*
 * Get the first and last bits set in a bitmap; leaves low == high == -1
 * when no bit is set.  The loop must visit every *bit* of the word: the
 * previous bound (_i <= sizeof (ulong_t)) only scanned bit indices 0..8,
 * so selector bits above 8 were missed, and a selector with only high
 * bits set left low/high at -1, sending the caller to pending_events[-1].
 */
#define	GET_BOUNDS(bitmap, low, high)	{ \
	int _i; \
	low = high = -1; \
	for (_i = 0; _i < EVTCHN_BITS; _i++) \
		if (bitmap & (1UL << _i)) { \
			if (low == -1) \
				low = _i; \
			high = _i; \
		} \
}
81 81
82 82 void
83 83 ec_bind_evtchn_to_handler(int evtchn, pri_t pri, ec_handler_fcn_t handler,
84 84 void *arg1)
85 85 {
86 86 ddi_softint_handle_t hdl;
87 87
88 88 if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
89 89 cmn_err(CE_WARN, "Binding invalid event channel: %d", evtchn);
90 90 return;
91 91 }
92 92
93 93 (void) ddi_intr_add_softint(xpv_dip, &hdl, pri, handler, (caddr_t)arg1);
94 94 mutex_enter(&ec_lock);
95 95 ASSERT(evtchn_to_handle[evtchn] == NULL);
96 96 evtchn_to_handle[evtchn] = hdl;
97 97 mutex_exit(&ec_lock);
98 98
99 99 /* Let the hypervisor know we're prepared to handle this event */
100 100 hypervisor_unmask_event(evtchn);
101 101 }
102 102
103 103 void
104 104 ec_unbind_evtchn(int evtchn)
105 105 {
106 106 evtchn_close_t close;
107 107 ddi_softint_handle_t hdl;
108 108
109 109 if (evtchn < 0 || evtchn >= NR_EVENT_CHANNELS) {
110 110 cmn_err(CE_WARN, "Unbinding invalid event channel: %d", evtchn);
111 111 return;
112 112 }
113 113
114 114 /*
115 115 * Let the hypervisor know we're no longer prepared to handle this
116 116 * event
117 117 */
118 118 hypervisor_mask_event(evtchn);
119 119
120 120 /* Cleanup the event handler metadata */
121 121 mutex_enter(&ec_lock);
122 122 hdl = evtchn_to_handle[evtchn];
123 123 evtchn_to_handle[evtchn] = NULL;
124 124 mutex_exit(&ec_lock);
125 125
126 126 close.port = evtchn;
127 127 (void) HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
128 128 (void) ddi_intr_remove_softint(hdl);
129 129 }
130 130
131 131 void
132 132 ec_notify_via_evtchn(unsigned int port)
133 133 {
134 134 evtchn_send_t send;
135 135
136 136 if ((int)port == -1)
137 137 return;
138 138 send.port = port;
139 139 (void) HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
140 140 }
141 141
142 142 void
143 143 hypervisor_unmask_event(unsigned int ev)
144 144 {
145 145 int index = ev >> EVTCHN_SHIFT;
146 146 ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
147 147 volatile ulong_t *maskp;
148 148 evtchn_unmask_t unmask;
149 149
150 150 /*
151 151 * index,bit contain the event number as an index into the
152 152 * masked-events bitmask. Set it to 0.
153 153 */
154 154 maskp = &masked_events[index];
155 155 atomic_and_ulong(maskp, ~bit);
156 156
157 157 /* Let the hypervisor know the event has been unmasked */
158 158 unmask.port = ev;
159 159 if (HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask) != 0)
160 160 panic("xen_evtchn_unmask() failed");
161 161 }
162 162
163 163 /* Set a bit in an evtchan mask word */
164 164 void
165 165 hypervisor_mask_event(uint_t ev)
166 166 {
167 167 int index = ev >> EVTCHN_SHIFT;
168 168 ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
169 169 volatile ulong_t *maskp;
170 170
171 171 maskp = &masked_events[index];
172 172 atomic_or_ulong(maskp, bit);
173 173 }
174 174
175 175 void
176 176 hypervisor_clear_event(uint_t ev)
177 177 {
178 178 int index = ev >> EVTCHN_SHIFT;
179 179 ulong_t bit = 1UL << (ev & ((1UL << EVTCHN_SHIFT) - 1));
180 180 volatile ulong_t *maskp;
181 181
182 182 maskp = &pending_events[index];
183 183 atomic_and_ulong(maskp, ~bit);
184 184 }
185 185
186 186 int
187 187 xen_alloc_unbound_evtchn(int domid, int *evtchnp)
188 188 {
189 189 evtchn_alloc_unbound_t alloc;
190 190 int err;
191 191
192 192 alloc.dom = DOMID_SELF;
193 193 alloc.remote_dom = (domid_t)domid;
194 194
195 195 if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
196 196 &alloc)) == 0) {
197 197 *evtchnp = alloc.port;
198 198 /* ensure evtchn is masked till we're ready to use it */
199 199 (void) hypervisor_mask_event(*evtchnp);
200 200 } else {
201 201 err = xen_xlate_errcode(err);
202 202 }
203 203
204 204 return (err);
205 205 }
206 206
207 207 int
208 208 xen_bind_interdomain(int domid, int remote_port, int *port)
209 209 {
210 210 evtchn_bind_interdomain_t bind;
211 211 int err;
212 212
213 213 bind.remote_dom = (domid_t)domid;
214 214 bind.remote_port = remote_port;
215 215 if ((err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
216 216 &bind)) == 0)
217 217 *port = bind.local_port;
218 218 else
219 219 err = xen_xlate_errcode(err);
220 220 return (err);
221 221 }
222 222
/*ARGSUSED*/
/*
 * Interrupt handler for the hypervisor's event-channel callback IRQ.
 * Scans the shared-info pending bitmaps and triggers the softint
 * registered for each pending, unmasked channel.  Returns
 * DDI_INTR_CLAIMED if at least one handler was triggered.
 *
 * NOTE(review): lock-free against the hypervisor; the ack/snapshot
 * ordering below (clear upcall flag, then read-and-clear the selector
 * and pending words) matters for not losing events.
 */
uint_t
evtchn_callback_fcn(caddr_t arg0, caddr_t arg1)
{
	ulong_t pending_word;
	int i, j, port;
	volatile struct vcpu_info *vci;
	uint_t rv = DDI_INTR_UNCLAIMED;
	ddi_softint_handle_t hdl;
	int low, high;	/* bounds (word indices) of the pending scan */
	ulong_t sels;	/* snapshot of evtchn_pending_sel */

	/*
	 * Xen hard-codes all notifications to VCPU0, so we bind
	 * ourselves via xpv.conf. Note that this also assumes that all
	 * evtchns are bound to VCPU0, which is true by default.
	 */
	ASSERT(CPU->cpu_id == 0);

	vci = &HYPERVISOR_shared_info->vcpu_info[0];

again:
	DTRACE_PROBE2(evtchn__scan__start, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

	/* Ack the upcall before scanning so a new event forces a rescan. */
	atomic_and_8(&vci->evtchn_upcall_pending, 0);

	/*
	 * Find the upper and lower bounds in which we need to search for
	 * pending events.
	 */
	GET_AND_CLEAR(&vci->evtchn_pending_sel, sels);

	/* sels == 1 is by far the most common case. Make it fast */
	if (sels == 1)
		low = high = 0;
	else if (sels == 0)
		return (rv);
	else
		GET_BOUNDS(sels, low, high);

	/* Scan the port list, looking for words with bits set */
	for (i = low; i <= high; i++) {
		ulong_t tmp;

		/* Snapshot-and-clear the word, then drop masked events. */
		GET_AND_CLEAR(&pending_events[i], tmp);
		pending_word = tmp & ~(masked_events[i]);

		/* Scan the bits in the word, looking for pending events */
		while (pending_word != 0) {
			j = lowbit(pending_word) - 1;
			port = (i << EVTCHN_SHIFT) + j;
			pending_word = pending_word & ~(1UL << j);

			/*
			 * If there is a handler registered for this event,
			 * schedule a softint of the appropriate priority
			 * to execute it.
			 */
			if ((hdl = evtchn_to_handle[port]) != NULL) {
				(void) ddi_intr_trigger_softint(hdl, NULL);
				rv = DDI_INTR_CLAIMED;
			}
		}
	}
	DTRACE_PROBE2(evtchn__scan__end, int, vci->evtchn_upcall_pending,
	    ulong_t, vci->evtchn_pending_sel);

	/* Events may have arrived during the scan; rescan rather than miss. */
	if ((volatile uint8_t)vci->evtchn_upcall_pending ||
	    ((volatile ulong_t)vci->evtchn_pending_sel))
		goto again;

	return (rv);
}
297 297
298 298 static int
299 299 set_hvm_callback(int irq)
300 300 {
301 301 struct xen_hvm_param xhp;
302 302
303 303 xhp.domid = DOMID_SELF;
304 304 xhp.index = HVM_PARAM_CALLBACK_IRQ;
305 305 xhp.value = irq;
306 306 return (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp));
307 307 }
308 308
309 309 void
310 310 ec_fini()
311 311 {
312 312 int i;
313 313
314 314 for (i = 0; i < NR_EVENT_CHANNELS; i++)
315 315 ec_unbind_evtchn(i);
316 316
317 317 evtchn_callback_irq = -1;
318 318 if (evtchn_ihp != NULL) {
319 319 (void) ddi_intr_disable(*evtchn_ihp);
320 320 (void) ddi_intr_remove_handler(*evtchn_ihp);
321 321 (void) ddi_intr_free(*evtchn_ihp);
322 322 kmem_free(evtchn_ihp, sizeof (ddi_intr_handle_t));
323 323 evtchn_ihp = NULL;
324 324 }
325 325 }
326 326
327 327 int
328 328 ec_init(void)
329 329 {
330 330 int i;
331 331 int rv, actual;
332 332 ddi_intr_handle_t *ihp;
333 333
334 334 /*
335 335 * Translate the variable-sized pending and masked event bitmasks
336 336 * into constant-sized arrays of uint32_t's.
337 337 */
338 338 pending_events = &HYPERVISOR_shared_info->evtchn_pending[0];
339 339 masked_events = &HYPERVISOR_shared_info->evtchn_mask[0];
340 340
341 341 /*
342 342 * Clear our event handler structures and prevent the hypervisor
343 343 * from triggering any events.
344 344 */
345 345 mutex_init(&ec_lock, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
346 346 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
347 347 evtchn_to_handle[i] = NULL;
348 348 (void) hypervisor_mask_event(i);
349 349 }
350 350
351 351 /*
352 352 * Allocate and initialize an interrupt handler to process the
353 353 * hypervisor's "hey you have events pending!" interrupt.
354 354 */
355 355 ihp = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
356 356 rv = ddi_intr_alloc(xpv_dip, ihp, DDI_INTR_TYPE_FIXED, 0, 1, &actual,
357 357 DDI_INTR_ALLOC_NORMAL);
358 358 if (rv < 0 || actual != 1) {
359 359 cmn_err(CE_WARN, "Could not allocate evtchn interrupt: %d",
360 360 rv);
361 361 return (-1);
362 362 }
363 363
364 364 rv = ddi_intr_add_handler(*ihp, evtchn_callback_fcn, NULL, NULL);
365 365 if (rv < 0) {
366 366 (void) ddi_intr_free(*ihp);
367 367 cmn_err(CE_WARN, "Could not attach evtchn handler");
↓ open down ↓ |
367 lines elided |
↑ open up ↑ |
368 368 return (-1);
369 369 }
370 370 evtchn_ihp = ihp;
371 371
372 372 if (ddi_intr_enable(*ihp) != DDI_SUCCESS) {
373 373 cmn_err(CE_WARN, "Could not enable evtchn interrupts\n");
374 374 return (-1);
375 375 }
376 376
377 377 /* Tell the hypervisor which interrupt we're waiting on. */
378 - evtchn_callback_irq = ((ddi_intr_handle_impl_t *)*ihp)->ih_vector;
378 + evtchn_callback_irq = ((ddi_intr_handle_impl_t *)*ihp)->ih_irq;
379 379
380 380 if (set_hvm_callback(evtchn_callback_irq) != 0) {
381 381 cmn_err(CE_WARN, "Couldn't register evtchn callback");
382 382 return (-1);
383 383 }
384 384 return (0);
385 385 }
386 386
387 387 void
388 388 ec_resume(void)
389 389 {
390 390 int i;
391 391
392 392 /* New event-channel space is not 'live' yet. */
393 393 for (i = 0; i < NR_EVENT_CHANNELS; i++)
394 394 (void) hypervisor_mask_event(i);
395 395 if (set_hvm_callback(evtchn_callback_irq) != 0)
396 396 cmn_err(CE_WARN, "Couldn't register evtchn callback");
397 397
398 398 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX