Print this page
10597 would like a way to set NMI behavior at boot
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Andy Fiddaman <andy@omniosce.org>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/os/mlsetup.c
+++ new/usr/src/uts/i86pc/os/mlsetup.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2012 Gary Mills
23 23 *
24 24 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
25 25 * Copyright (c) 2011 by Delphix. All rights reserved.
26 - * Copyright 2018 Joyent, Inc.
26 + * Copyright 2019, Joyent, Inc.
27 27 */
28 28 /*
29 29 * Copyright (c) 2010, Intel Corporation.
30 30 * All rights reserved.
31 31 */
32 32
33 33 #include <sys/types.h>
34 34 #include <sys/sysmacros.h>
35 35 #include <sys/disp.h>
36 36 #include <sys/promif.h>
37 37 #include <sys/clock.h>
38 38 #include <sys/cpuvar.h>
39 39 #include <sys/stack.h>
40 40 #include <vm/as.h>
41 41 #include <vm/hat.h>
42 42 #include <sys/reboot.h>
43 43 #include <sys/avintr.h>
44 44 #include <sys/vtrace.h>
45 45 #include <sys/proc.h>
46 46 #include <sys/thread.h>
47 47 #include <sys/cpupart.h>
48 48 #include <sys/pset.h>
49 49 #include <sys/copyops.h>
50 50 #include <sys/pg.h>
51 51 #include <sys/disp.h>
52 52 #include <sys/debug.h>
53 53 #include <sys/sunddi.h>
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
54 54 #include <sys/x86_archext.h>
55 55 #include <sys/privregs.h>
56 56 #include <sys/machsystm.h>
57 57 #include <sys/ontrap.h>
58 58 #include <sys/bootconf.h>
59 59 #include <sys/boot_console.h>
60 60 #include <sys/kdi_machimpl.h>
61 61 #include <sys/archsystm.h>
62 62 #include <sys/promif.h>
63 63 #include <sys/pci_cfgspace.h>
64 +#include <sys/apic.h>
65 +#include <sys/apic_common.h>
64 66 #include <sys/bootvfs.h>
65 67 #include <sys/tsc.h>
66 68 #ifdef __xpv
67 69 #include <sys/hypervisor.h>
68 70 #else
69 71 #include <sys/xpv_support.h>
70 72 #endif
71 73
/*
 * some globals for patching the result of cpuid
 * to solve problems w/ creative cpu vendors
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * Boot-time NMI disposition.  This is set in mlsetup() from the optional
 * "nmi" boot property ("ignore", "panic", or "kmdb") and remains
 * NMI_ACTION_UNSET when no valid property was supplied, in which case the
 * NMI handler presumably falls back to its default behavior (consumer is
 * elsewhere in the tree — not visible in this file).
 */
nmi_action_t nmi_action = NMI_ACTION_UNSET;
/*
 * Set console mode.
 *
 * Issues a firmware video service call (int 0x10) through the boot
 * loader's BOP_DOINT interface: %ah = 0x00 is the classic BIOS
 * "set video mode" function, %al carries the requested mode (the
 * caller in mlsetup() passes 0x3, i.e. 80x25 color text).
 * Only meaningful on BIOS (non-EFI) systems; the caller checks for
 * the absence of "efi-systab" before using it.
 */
static void
set_console_mode(uint8_t val)
{
	struct bop_regs rp = {0};

	rp.eax.byte.ah = 0x0;	/* BIOS video fn 0x00: set mode */
	rp.eax.byte.al = val;	/* requested video mode */
	rp.ebx.word.bx = 0x0;

	BOP_DOINT(bootops, 0x10, &rp);
}
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
96 100
97 101
/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
	u_longlong_t prop_value;
	char prop_str[BP_MAX_STRLEN];
	extern struct classfuncs sys_classfuncs;
	extern disp_t cpu0_disp;
	extern char t0stack[];
	extern int post_fastreboot;
	extern uint64_t plat_dr_options;

	ASSERT_STACK_ALIGNED();

	/*
	 * initialize cpu_self
	 */
	cpu[0]->cpu_self = cpu[0];

#if defined(__xpv)
	/*
	 * Point at the hypervisor's virtual cpu structure
	 */
	cpu[0]->cpu_m.mcpu_vcpu_info = &HYPERVISOR_shared_info->vcpu_info[0];
#endif

	/*
	 * check if we've got special bits to clear or set
	 * when checking cpu features
	 */

	if (bootprop_getval("cpuid_feature_ecx_include", &prop_value) != 0)
		cpuid_feature_ecx_include = 0;
	else
		cpuid_feature_ecx_include = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_ecx_exclude", &prop_value) != 0)
		cpuid_feature_ecx_exclude = 0;
	else
		cpuid_feature_ecx_exclude = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_edx_include", &prop_value) != 0)
		cpuid_feature_edx_include = 0;
	else
		cpuid_feature_edx_include = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_edx_exclude", &prop_value) != 0)
		cpuid_feature_edx_exclude = 0;
	else
		cpuid_feature_edx_exclude = (uint32_t)prop_value;

#if !defined(__xpv)
	/*
	 * Parse the optional "nmi" boot property, which selects how the
	 * kernel reacts to a non-maskable interrupt: ignore it, panic,
	 * or drop into kmdb.  An unrecognized value is reported on the
	 * console and otherwise ignored, leaving nmi_action at
	 * NMI_ACTION_UNSET (the built-in default behavior).
	 */
	if (bootprop_getstr("nmi", prop_str, sizeof (prop_str)) == 0) {
		if (strcmp(prop_str, "ignore") == 0) {
			nmi_action = NMI_ACTION_IGNORE;
		} else if (strcmp(prop_str, "panic") == 0) {
			nmi_action = NMI_ACTION_PANIC;
		} else if (strcmp(prop_str, "kmdb") == 0) {
			nmi_action = NMI_ACTION_KMDB;
		} else {
			prom_printf("unix: ignoring unknown nmi=%s\n",
			    prop_str);
		}
	}

	/*
	 * Check to see if KPTI has been explicitly enabled or disabled.
	 * We have to check this before init_desctbls().
	 */
	if (bootprop_getval("kpti", &prop_value) == 0) {
		kpti_enable = (uint64_t)(prop_value == 1);
		prom_printf("unix: forcing kpti to %s due to boot argument\n",
		    (kpti_enable == 1) ? "ON" : "OFF");
	} else {
		kpti_enable = 1;
	}

	/* PCID is only useful with KPTI enabled; disable it otherwise. */
	if (bootprop_getval("pcid", &prop_value) == 0 && prop_value == 0) {
		prom_printf("unix: forcing pcid to OFF due to boot argument\n");
		x86_use_pcid = 0;
	} else if (kpti_enable != 1) {
		x86_use_pcid = 0;
	}
#endif

	/*
	 * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
	 */
	init_desctbls();

	/*
	 * lgrp_init() and possibly cpuid_pass1() need PCI config
	 * space access
	 */
#if defined(__xpv)
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		pci_cfgspace_init();
#else
	pci_cfgspace_init();
	/*
	 * Initialize the platform type from CPU 0 to ensure that
	 * determine_platform() is only ever called once.
	 */
	determine_platform();
#endif

	/*
	 * The first lightweight pass (pass0) through the cpuid data
	 * was done in locore before mlsetup was called. Do the next
	 * pass in C code.
	 *
	 * The x86_featureset is initialized here based on the capabilities
	 * of the boot CPU. Note that if we choose to support CPUs that have
	 * different feature sets (at which point we would almost certainly
	 * want to set the feature bits to correspond to the feature
	 * minimum) this value may be altered.
	 */
	cpuid_pass1(cpu[0], x86_featureset);

#if !defined(__xpv)
	if ((get_hwenv() & HW_XEN_HVM) != 0)
		xen_hvm_init();

	/*
	 * Before we do anything with the TSCs, we need to work around
	 * Intel erratum BT81. On some CPUs, warm reset does not
	 * clear the TSC. If we are on such a CPU, we will clear TSC ourselves
	 * here. Other CPUs will clear it when we boot them later, and the
	 * resulting skew will be handled by tsc_sync_master()/_slave();
	 * note that such skew already exists and has to be handled anyway.
	 *
	 * We do this only on metal. This same problem can occur with a
	 * hypervisor that does not happen to virtualise a TSC that starts from
	 * zero, regardless of CPU type; however, we do not expect hypervisors
	 * that do not virtualise TSC that way to handle writes to TSC
	 * correctly, either.
	 */
	if (get_hwenv() == HW_NATIVE &&
	    cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) == 6 &&
	    (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
	    is_x86_feature(x86_featureset, X86FSET_TSC)) {
		(void) wrmsr(REG_TSC, 0UL);
	}

	/*
	 * Patch the tsc_read routine with appropriate set of instructions,
	 * depending on the processor family and architecure, to read the
	 * time-stamp counter while ensuring no out-of-order execution.
	 * Patch it while the kernel text is still writable.
	 *
	 * Note: tsc_read is not patched for intel processors whose family
	 * is >6 and for amd whose family >f (in case they don't support rdtscp
	 * instruction, unlikely). By default tsc_read will use cpuid for
	 * serialization in such cases. The following code needs to be
	 * revisited if intel processors of family >= f retains the
	 * instruction serialization nature of mfence instruction.
	 * Note: tsc_read is not patched for x86 processors which do
	 * not support "mfence". By default tsc_read will use cpuid for
	 * serialization in such cases.
	 *
	 * The Xen hypervisor does not correctly report whether rdtscp is
	 * supported or not, so we must assume that it is not.
	 */
	if ((get_hwenv() & HW_XEN_HVM) == 0 &&
	    is_x86_feature(x86_featureset, X86FSET_TSCP))
		patch_tsc_read(TSC_TSCP);
	else if (cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
	    cpuid_getfamily(CPU) <= 0xf &&
	    is_x86_feature(x86_featureset, X86FSET_SSE2))
		patch_tsc_read(TSC_RDTSC_MFENCE);
	else if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) <= 6 &&
	    is_x86_feature(x86_featureset, X86FSET_SSE2))
		patch_tsc_read(TSC_RDTSC_LFENCE);

#endif	/* !__xpv */

#if defined(__i386) && !defined(__xpv)
	/*
	 * Some i386 processors do not implement the rdtsc instruction,
	 * or at least they do not implement it correctly. Patch them to
	 * return 0.
	 */
	if (!is_x86_feature(x86_featureset, X86FSET_TSC))
		patch_tsc_read(TSC_NONE);
#endif	/* __i386 && !__xpv */

#if defined(__amd64) && !defined(__xpv)
	patch_memops(cpuid_getvendor(CPU));
#endif	/* __amd64 && !__xpv */

#if !defined(__xpv)
	/* XXPV	what, if anything, should be dorked with here under xen? */

	/*
	 * While we're thinking about the TSC, let's set up %cr4 so that
	 * userland can issue rdtsc, and initialize the TSC_AUX value
	 * (the cpuid) for the rdtscp instruction on appropriately
	 * capable hardware.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_TSC))
		setcr4(getcr4() & ~CR4_TSD);

	if (is_x86_feature(x86_featureset, X86FSET_TSCP))
		(void) wrmsr(MSR_AMD_TSCAUX, 0);

	/*
	 * Let's get the other %cr4 stuff while we're here. Note, we defer
	 * enabling CR4_SMAP until startup_end(); however, that's importantly
	 * before we start other CPUs. That ensures that it will be synced out
	 * to other CPUs.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_DE))
		setcr4(getcr4() | CR4_DE);

	if (is_x86_feature(x86_featureset, X86FSET_SMEP))
		setcr4(getcr4() | CR4_SMEP);
#endif /* __xpv */

	/*
	 * initialize t0
	 */
	t0.t_stk = (caddr_t)rp - MINFRAME;
	t0.t_stkbase = t0stack;
	t0.t_pri = maxclsyspri - 3;
	t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t0.t_procp = &p0;
	t0.t_plockp = &p0lock.pl_lock;
	t0.t_lwp = &lwp0;
	/* t0 is the sole thread: all of its queue links point back at itself */
	t0.t_forw = &t0;
	t0.t_back = &t0;
	t0.t_next = &t0;
	t0.t_prev = &t0;
	t0.t_cpu = cpu[0];
	t0.t_disp_queue = &cpu0_disp;
	t0.t_bind_cpu = PBIND_NONE;
	t0.t_bind_pset = PS_NONE;
	t0.t_bindflag = (uchar_t)default_binding_mode;
	t0.t_cpupart = &cp_default;
	t0.t_clfuncs = &sys_classfuncs.thread;
	t0.t_copyops = NULL;
	THREAD_ONPROC(&t0, CPU);

	lwp0.lwp_thread = &t0;
	lwp0.lwp_regs = (void *)rp;
	lwp0.lwp_procp = &p0;
	t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

	/* initialize p0, the kernel's own process */
	p0.p_exec = NULL;
	p0.p_stat = SRUN;
	p0.p_flag = SSYS;
	p0.p_tlist = &t0;
	p0.p_stksize = 2*PAGESIZE;
	p0.p_stkpageszc = 0;
	p0.p_as = &kas;
	p0.p_lockp = &p0lock;
	p0.p_brkpageszc = 0;
	p0.p_t1_lgrpid = LGRP_NONE;
	p0.p_tr_lgrpid = LGRP_NONE;
	psecflags_default(&p0.p_secflags);

	sigorset(&p0.p_ignore, &ignoredefault);

	/* wire t0 in as the boot CPU's current, dispatch and idle thread */
	CPU->cpu_thread = &t0;
	bzero(&cpu0_disp, sizeof (disp_t));
	CPU->cpu_disp = &cpu0_disp;
	CPU->cpu_disp->disp_cpu = CPU;
	CPU->cpu_dispthread = &t0;
	CPU->cpu_idle_thread = &t0;
	CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
	CPU->cpu_dispatch_pri = t0.t_pri;

	CPU->cpu_id = 0;

	CPU->cpu_pri = 12;		/* initial PIL for the boot CPU */

	/*
	 * Initialize thread/cpu microstate accounting
	 */
	init_mstate(&t0, LMS_SYSTEM);
	init_cpu_mstate(CPU, CMS_SYSTEM);

	/*
	 * Initialize lists of available and active CPUs.
	 */
	cpu_list_init(CPU);

	pg_cpu_bootstrap(CPU);

	/*
	 * Now that we have taken over the GDT, IDT and have initialized
	 * active CPU list it's time to inform kmdb if present.
	 */
	if (boothowto & RB_DEBUG)
		kdi_idt_sync();

	if (BOP_GETPROPLEN(bootops, "efi-systab") < 0) {
		/*
		 * In BIOS system, explicitly set console to text mode (0x3)
		 * if this is a boot post Fast Reboot, and the console is set
		 * to CONS_SCREEN_TEXT.
		 */
		if (post_fastreboot &&
		    boot_console_type(NULL) == CONS_SCREEN_TEXT) {
			set_console_mode(0x3);
		}
	}

	/*
	 * If requested (boot -d) drop into kmdb.
	 *
	 * This must be done after cpu_list_init() on the 64-bit kernel
	 * since taking a trap requires that we re-compute gsbase based
	 * on the cpu list.
	 */
	if (boothowto & RB_DEBUGENTER)
		kmdb_enter();

	cpu_vm_data_init(CPU);

	rp->r_fp = 0;	/* terminate kernel stack traces! */

	prom_init("kernel", (void *)NULL);

	/* User-set option overrides firmware value. */
	if (bootprop_getval(PLAT_DR_OPTIONS_NAME, &prop_value) == 0) {
		plat_dr_options = (uint64_t)prop_value;
	}
#if defined(__xpv)
	/* No support of DR operations on xpv */
	plat_dr_options = 0;
#else	/* __xpv */
	/* Flag PLAT_DR_FEATURE_ENABLED should only be set by DR driver. */
	plat_dr_options &= ~PLAT_DR_FEATURE_ENABLED;
#ifndef	__amd64
	/* Only enable CPU/memory DR on 64 bits kernel. */
	plat_dr_options &= ~PLAT_DR_FEATURE_MEMORY;
	plat_dr_options &= ~PLAT_DR_FEATURE_CPU;
#endif	/* __amd64 */
#endif	/* __xpv */

	/*
	 * Get value of "plat_dr_physmax" boot option.
	 * It overrides values calculated from MSCT or SRAT table.
	 */
	if (bootprop_getval(PLAT_DR_PHYSMAX_NAME, &prop_value) == 0) {
		plat_dr_physmax = ((uint64_t)prop_value) >> PAGESHIFT;
	}

	/* Get value of boot_ncpus; clamp out-of-range values to NCPU. */
	if (bootprop_getval(BOOT_NCPUS_NAME, &prop_value) != 0) {
		boot_ncpus = NCPU;
	} else {
		boot_ncpus = (int)prop_value;
		if (boot_ncpus <= 0 || boot_ncpus > NCPU)
			boot_ncpus = NCPU;
	}

	/*
	 * Set max_ncpus and boot_max_ncpus to boot_ncpus if platform doesn't
	 * support CPU DR operations.
	 */
	if (plat_dr_support_cpu() == 0) {
		max_ncpus = boot_max_ncpus = boot_ncpus;
	} else {
		if (bootprop_getval(PLAT_MAX_NCPUS_NAME, &prop_value) != 0) {
			max_ncpus = NCPU;
		} else {
			max_ncpus = (int)prop_value;
			if (max_ncpus <= 0 || max_ncpus > NCPU) {
				max_ncpus = NCPU;
			}
			if (boot_ncpus > max_ncpus) {
				boot_ncpus = max_ncpus;
			}
		}

		if (bootprop_getval(BOOT_MAX_NCPUS_NAME, &prop_value) != 0) {
			boot_max_ncpus = boot_ncpus;
		} else {
			boot_max_ncpus = (int)prop_value;
			if (boot_max_ncpus <= 0 || boot_max_ncpus > NCPU) {
				boot_max_ncpus = boot_ncpus;
			} else if (boot_max_ncpus > max_ncpus) {
				boot_max_ncpus = max_ncpus;
			}
		}
	}

	/*
	 * Initialize the lgrp framework
	 */
	lgrp_init(LGRP_INIT_STAGE1);

	if (boothowto & RB_HALT) {
		prom_printf("unix: kernel halted by -h flag\n");
		prom_enter_mon();
	}

	ASSERT_STACK_ALIGNED();

	/*
	 * Fill out cpu_ucode_info. Update microcode if necessary.
	 */
	ucode_check(CPU);
	cpuid_pass_ucode(CPU, x86_featureset);

	if (workaround_errata(CPU) != 0)
		panic("critical workaround(s) missing for boot cpu");
}
498 516
499 517
void
mach_modpath(char *path, const char *filename)
{
	/*
	 * Construct the directory path from the filename.
	 *
	 * The result written into 'path' is a space-separated module
	 * search path: SYSTEM_BOOT_PATH "/kernel", then the directory
	 * component of 'filename' (with any trailing "/amd64" stripped —
	 * the module subsystem re-appends the isa directory itself).
	 * NOTE(review): the final strncpy() does not NUL-terminate and
	 * the computed 'len' is unused here — this appears to rely on the
	 * caller providing a zeroed buffer sized via the same arithmetic;
	 * confirm against the caller before changing.
	 */

	int len;
	char *p;
	const char isastr[] = "/amd64";
	size_t isalen = strlen(isastr);

	/*
	 * Copy includes a trailing space (the path separator); advance
	 * 'path' past it so the next component lands after the separator.
	 */
	len = strlen(SYSTEM_BOOT_PATH "/kernel");
	(void) strcpy(path, SYSTEM_BOOT_PATH "/kernel ");
	path += len + 1;

	/* No '/' in filename: nothing further to append. */
	if ((p = strrchr(filename, '/')) == NULL)
		return;

	while (p > filename && *(p - 1) == '/')
		p--;	/* remove trailing '/' characters */
	if (p == filename)
		p++;	/* so "/" -is- the modpath in this case */

	/*
	 * Remove optional isa-dependent directory name - the module
	 * subsystem will put this back again (!)
	 */
	len = p - filename;
	if (len > isalen &&
	    strncmp(&filename[len - isalen], isastr, isalen) == 0)
		p -= isalen;

	/*
	 * "/platform/mumblefrotz" + " " + MOD_DEFPATH
	 */
	len += (p - filename) + 1 + strlen(MOD_DEFPATH) + 1;
	(void) strncpy(path, filename, p - filename);
}
↓ open down ↓ |
377 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX