/*
 * cpu private vm data - accessed thru CPU->cpu_vm_data
 * vc_pnum_memseg: tracks last memseg visited in page_numtopp_nolock()
 * vc_pnext_memseg: tracks last memseg visited in page_nextn()
 * vc_kmptr: original unaligned kmem pointer for this vm_cpu_data_t
 * vc_kmsize: original kmem size for this vm_cpu_data_t
 */

typedef struct {
	struct memseg	*vc_pnum_memseg;
	struct memseg	*vc_pnext_memseg;
	void		*vc_kmptr;
	size_t		vc_kmsize;
} vm_cpu_data_t;
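
/*
 * A minimal sketch (not part of this header) of how the hint fields are
 * meant to be used, modeled on page_numtopp_nolock(): consult the per-cpu
 * memseg hint first, fall back to walking the memseg list, then refresh
 * the hint for the next lookup. The surrounding code is illustrative only.
 *
 *	vm_cpu_data_t *vc = CPU->cpu_vm_data;
 *	struct memseg *seg = vc->vc_pnum_memseg;
 *
 *	if (seg == NULL || pfnum < seg->pages_base ||
 *	    pfnum >= seg->pages_end) {
 *		for (seg = memsegs; seg != NULL; seg = seg->next)
 *			if (pfnum >= seg->pages_base &&
 *			    pfnum < seg->pages_end)
 *				break;
 *		if (seg == NULL)
 *			return (NULL);
 *	}
 *	vc->vc_pnum_memseg = seg;	(remember for the next call)
 *	return (seg->pages + (pfnum - seg->pages_base));
 */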

/* allocation size to ensure vm_cpu_data_t resides in its own cache line */
#define	VM_CPU_DATA_PADSIZE	\
	(P2ROUNDUP(sizeof (vm_cpu_data_t), L2CACHE_ALIGN_MAX))
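
/*
 * A sketch (assumed allocation path, not part of this header) of how
 * VM_CPU_DATA_PADSIZE and the vc_kmptr/vc_kmsize fields fit together:
 * over-allocate by the alignment, round the pointer up so the structure
 * owns its cache line, and keep the raw pointer and size for kmem_free().
 *
 *	size_t kmsize = VM_CPU_DATA_PADSIZE + L2CACHE_ALIGN_MAX;
 *	void *kmptr = kmem_zalloc(kmsize, KM_SLEEP);
 *	vm_cpu_data_t *vc = (vm_cpu_data_t *)P2ROUNDUP(
 *	    (uintptr_t)kmptr, L2CACHE_ALIGN_MAX);
 *
 *	vc->vc_kmptr = kmptr;
 *	vc->vc_kmsize = kmsize;
 *	cp->cpu_vm_data = vc;
 *
 * and on teardown:
 *
 *	kmem_free(vc->vc_kmptr, vc->vc_kmsize);
 */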

/* for boot cpu before kmem is initialized */
extern char	vm_cpu_data0[];

/*
 * When a bin is empty, and we can't satisfy a color request correctly,
 * we scan. If we assume that the programs have reasonable spatial
 * behavior, then it is not a good idea to use the adjacent color.
 * Using the adjacent color would result in virtually adjacent addresses
 * mapping into the same spot in the cache. So, if we stumble across
 * an empty bin, skip a bunch before looking. After the first skip,
 * just look one bin at a time so we don't miss our cache on
 * every look. Be sure to check every bin; page_create() will panic
 * if we miss a page.
 *
 * This also explains the `<=' in the for loops in both page_get_freelist()
 * and page_get_cachelist(). Since we checked the target bin, skipped
 * a bunch, then continued one at a time, we wind up checking the target
 * bin twice to make sure we get all of the bins.
 */
#define	BIN_STEP	19
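
/*
 * A sketch of the scan just described (try_bin() is a hypothetical
 * stand-in for the real freelist/cachelist lookup). The `<=' gives one
 * extra trip through the loop, so after the initial BIN_STEP skip every
 * bin is still visited and the target bin ends up checked twice.
 *
 *	uint_t i, bin = target_bin;
 *	page_t *pp = NULL;
 *
 *	for (i = 0; i <= page_colors; i++) {
 *		if ((pp = try_bin(bin)) != NULL)
 *			break;
 *		bin += (i == 0) ? BIN_STEP : 1;
 *		bin &= page_colors - 1;	(page_colors is a power of two)
 *	}
 */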

#ifdef VM_STATS
struct vmm_vmstats_str {