/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2017 RackTop Systems.
 */

#ifndef _LGRP_H
#define _LGRP_H

/*
 * locality group definitions for kernel
 */

#include <sys/types.h>

#ifdef  __cplusplus
extern "C" {
#endif

#define LGRP_NONE       (-1)            /* non-existent lgroup ID */

#if !defined(_KERNEL) && !defined(_FAKE_KERNEL) && !defined(_KMEMUSER)
typedef struct lgrp_mem_policy_info { int opaque[2]; } lgrp_mem_policy_info_t;
#endif  /* !_KERNEL && !_FAKE_KERNEL && !_KMEMUSER */

#if defined(_KERNEL) || defined(_FAKE_KERNEL) || defined(_KMEMUSER)
#include <sys/cpuvar.h>
#include <sys/bitmap.h>
#include <sys/vnode.h>
#include <vm/anon.h>
#include <vm/seg.h>
#include <sys/lgrp_user.h>
#include <sys/param.h>

typedef uint32_t        lgrp_load_t;    /* lgrp_loadavg type */
typedef uintptr_t       lgrp_handle_t;  /* lgrp handle */

#define LGRP_NONE_SUCH          LGRP_NONE       /* non-existent lgroup ID */
/* null platform handle */
#define LGRP_NULL_HANDLE        ((lgrp_handle_t)0xbadbad)
#define LGRP_DEFAULT_HANDLE     ((lgrp_handle_t)0xbabecafe) /* uma handle */
#define LGRP_ROOTID             (0)             /* root lgroup ID */

/*
 * Maximum number of lgrps a platform may define.
 */
#define NLGRPS_MAX              64
#define LGRP_LOADAVG_MAX        UINT32_MAX

/*
 * The load-average we expect for one cpu-bound thread's worth of load
 */
#define LGRP_LOADAVG_THREAD_MAX         65516

/*
 * The input to the load-average generating function for one cpu-bound thread's
 * worth of load
 */

#define LGRP_LOADAVG_IN_THREAD_MAX      128

/*
 * LPL actions
 */

typedef enum {
        LPL_INCREMENT,
        LPL_DECREMENT
} lpl_act_t;

/*
 * lgroup statistics.  Most of these are counters that are updated
 * dynamically so they are hashed to CPU buckets to reduce cache
 * interference.  The remaining statistics are snapshots of kernel
 * data, so they aren't stored in the array of counter stats.
 *
 * For the hashed stats to make sense, you have to sum all the buckets for
 * that stat, hence macros are provided to read the stats.
 */

#define LGRP_NUM_CPU_BUCKETS    8       /* must be power of 2 */
#define LGRP_CPU_BUCKET_MASK    (LGRP_NUM_CPU_BUCKETS - 1)

/*
 * Flags for what to do with lgroup memory policy
 * Used for heap and stack where policy is extended to new segments added to
 * the end
 */
#define LGRP_MP_FLAG_EXTEND_UP          0x1     /* policy should extend up */
#define LGRP_MP_FLAG_EXTEND_DOWN        0x2     /* policy should extend down */

#define LGRP_STAT(stats, bucket, whichstat) \
        ((stats)->ls_data[bucket][whichstat])

/* Return a pointer suitable for an atomic 64-bit op on the bucket */
#define LGRP_STAT_WRITE_PTR(stats, whichstat) \
        (&LGRP_STAT(stats, (CPU->cpu_id) & LGRP_CPU_BUCKET_MASK, \
            whichstat))

/* Sum up all the buckets and return the value in 'val' */
#define LGRP_STAT_READ(stats, whichstat, val) {                         \
        int bkt;                                                        \
        for (val = 0, bkt = 0; bkt < LGRP_NUM_CPU_BUCKETS; bkt++)      \
                val += LGRP_STAT(stats, bkt, whichstat);                \
}

/* Reset all buckets for the stat to 0 */
#define LGRP_STAT_RESET(stats, stat) {                                  \
        int i;                                                          \
        for (i = 0; i < LGRP_NUM_CPU_BUCKETS; i++)                      \
                LGRP_STAT(stats, i, stat) = 0;                          \
}

/*
 * Define all of the statistics that are kept for lgrp kstats,
 * and their corresponding text names.
 */

typedef enum lgrp_stat_types {
        LGRP_NUM_MIGR,          /* # migrations away from this lgrp */
        LGRP_NUM_ALLOC_FAIL,    /* # times alloc fails for chosen lgrp */
        LGRP_PM_SRC_PGS,        /* # pages migrated from this lgrp */
        LGRP_PM_DEST_PGS,       /* # pages migrated to this lgrp */
        LGRP_PM_FAIL_ALLOC_PGS, /* # pages failed to migrate to this lgrp */
        LGRP_PM_FAIL_LOCK_PGS,  /* # pages failed to migrate from this lgrp */
        LGRP_PMM_PGS,           /* # pages marked to migrate from this lgrp */
        LGRP_PMM_FAIL_PGS,      /* # pages that failed to be marked to migrate */
        LGRP_NUM_DEFAULT,       /* # of times default policy applied */
        LGRP_NUM_NEXT,          /* # of times next touch policy applied */
        LGRP_NUM_RANDOM,        /* # of times random policy applied */
        LGRP_NUM_RANDOM_PROC,   /* # of times random proc policy applied */
        LGRP_NUM_RANDOM_PSET,   /* # of times random pset policy applied */
        LGRP_NUM_ROUNDROBIN,    /* # of times round robin policy applied */
        LGRP_NUM_NEXT_SEG,      /* # of times next to seg policy applied */
        LGRP_NUM_COUNTER_STATS, /* always last */
        LGRP_CTR_STATS_ALLOC = 16       /* cache-align pad - multiple of 8 */
                                /* always keep >= LGRP_NUM_COUNTER_STATS */
} lgrp_stat_t;
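
/*
 * Illustrative sketch (not an interface defined here) of how the hashed
 * counter stats might be updated and read with the macros above.  The names
 * "st" and "total" are hypothetical; kernel code normally goes through
 * lgrp_stat_add() and lgrp_stat_read(), declared later in this header.  An
 * update adds atomically to the bucket hashed from the current CPU; a read
 * sums every bucket for the chosen stat:
 *
 *      struct lgrp_stats       st;
 *      int64_t                 total;
 *
 *      atomic_add_64((uint64_t *)LGRP_STAT_WRITE_PTR(&st, LGRP_NUM_MIGR), 1);
 *      LGRP_STAT_READ(&st, LGRP_NUM_MIGR, total);
 */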

typedef enum lgrp_snap_stat_types {
        LGRP_NUM_CPUS,          /* number of CPUs */
        LGRP_NUM_PG_FREE,       /* # of free pages */
        LGRP_NUM_PG_AVAIL,      /* # of allocatable physical pages */
        LGRP_NUM_PG_INSTALL,    /* # of installed physical pages */
        LGRP_LOADAVG,           /* unscaled load average of this lgrp */
        LGRP_LOADAVG_SCALE,     /* load unit of one CPU bound thread */
        LGRP_NUM_SNAPSHOT_STATS /* always last */
} lgrp_snap_stat_t;

#define LGRP_KSTAT_NAMES                \
static char *lgrp_kstat_names[] = {     \
                                        \
        /* Counter stats */             \
        "lwp migrations",               \
        "alloc fail",                   \
        "pages migrated from",          \
        "pages migrated to",            \
        "pages failed to migrate to",   \
        "pages failed to migrate from", \
        "pages marked for migration",   \
        "pages failed to mark",         \
        "default policy",               \
        "next-touch policy",            \
        "random policy",                \
        "span process policy",          \
        "span psrset policy",           \
        "round robin policy",           \
        "next-seg policy",              \
                                        \
        /* Snapshot stats */            \
        "cpus",                         \
        "pages free",                   \
        "pages avail",                  \
        "pages installed",              \
        "load average",                 \
        "loadscale"                     \
}

#define LGRP_NUM_STATS  ((int)LGRP_NUM_COUNTER_STATS +                  \
        (int)LGRP_NUM_SNAPSHOT_STATS)

/*
 * The contents of this structure are opaque and should only be
 * accessed through the LGRP_STAT macro.
 */
struct lgrp_stats {
        int64_t ls_data[LGRP_NUM_CPU_BUCKETS][LGRP_CTR_STATS_ALLOC];
};

/* The kernel's version of a bitmap of lgroups */
typedef uint64_t klgrpset_t;

/*
 * This really belongs in memnode.h, but it must be defined here to avoid
 * recursive inclusion problems. Note that memnode.h includes this header.
 */
typedef uint64_t        mnodeset_t;

/*
 * lgroup structure
 *
 * Visible to generic code and contains the lgroup ID, CPUs in this lgroup,
 * and a platform handle used to identify this lgroup to the lgroup platform
 * support code
 */
typedef struct lgrp {

        lgrp_id_t       lgrp_id;        /* which lgroup */
        int             lgrp_latency;   /* latency of this lgroup */
        lgrp_handle_t   lgrp_plathand;  /* handle for platform calls */
        struct lgrp     *lgrp_parent;   /* parent lgroup */
        uint_t          lgrp_reserved1; /* filler */
        uint_t          lgrp_childcnt;  /* number of children lgroups */
        klgrpset_t      lgrp_children;  /* children lgroups */
        klgrpset_t      lgrp_leaves;    /* (direct descendant) leaf lgroups */

        /*
         * set of lgroups containing a given type of resource
         * at this level of locality
         */
        klgrpset_t      lgrp_set[LGRP_RSRC_COUNT];

        mnodeset_t      lgrp_mnodes;    /* set of memory nodes in this lgroup */
        uint_t          lgrp_nmnodes;   /* number of memnodes */
        uint_t          lgrp_reserved2; /* filler */

        struct cpu      *lgrp_cpu;      /* pointer to a cpu; may be NULL */
        uint_t          lgrp_cpucnt;    /* number of cpus in this lgrp  */
        kstat_t         *lgrp_kstat;    /* per-lgrp kstats */
} lgrp_t;

/*
 * lgroup load average structure
 */

typedef struct lgrp_ld {
        lgrp_load_t     lpl_loadavg;    /* load average         */
        uint_t          lpl_ncpu;       /* how many cpus        */
        lgrp_id_t       lpl_lgrpid;     /* which group this lpl part of */
        lgrp_t          *lpl_lgrp;      /* ptr to lpl's lgrp */
        struct lgrp_ld  *lpl_parent;    /* lpl of parent lgrp */
        struct cpu      *lpl_cpus;      /* list of cpus in lpl */
                                        /* NULL for non-leaf lgrps */
        uint_t          lpl_nrset;      /* no. of leaf lpls for lgrp */
        hrtime_t        lpl_homed_time; /* time of last homing to this lpl */
        uint_t          lpl_rset_sz;    /* Resource set capacity */
        struct lgrp_ld  **lpl_rset;     /* leaf lpls for lgrp */
                                        /* contains ptr to self for leaf lgrp */
        int             *lpl_id2rset;   /* mapping of lgrpid to rset index */
} lpl_t;

/*
 * 1 << LGRP_MAX_EFFECT_SHFT == lgrp_loadavg_max_effect
 */
#define LGRP_MAX_EFFECT_SHFT 16

/*
 * Operations handled by lgrp_config()
 */
typedef enum lgrp_config_flag {
        LGRP_CONFIG_NOP,
        LGRP_CONFIG_CPU_ADD,
        LGRP_CONFIG_CPU_DEL,
        LGRP_CONFIG_CPU_ONLINE,
        LGRP_CONFIG_CPU_OFFLINE,
        LGRP_CONFIG_CPUPART_ADD,
        LGRP_CONFIG_CPUPART_DEL,
        LGRP_CONFIG_MEM_ADD,
        LGRP_CONFIG_MEM_DEL,
        LGRP_CONFIG_MEM_RENAME,
        LGRP_CONFIG_GEN_UPDATE,
        LGRP_CONFIG_FLATTEN,
        LGRP_CONFIG_LAT_CHANGE_ALL,
        LGRP_CONFIG_LAT_CHANGE
} lgrp_config_flag_t;

/*
 * Stages of lgroup framework initialization (done through lgrp_init()):
 *
 * 1) Initialize common and platform-specific code (called in mlsetup())
 *
 * 2) Set up the root lgroup and add CPU 0 to lgroup(s) (called near the
 *    beginning of main() before startup())
 *
 * 3) Probe from CPU 0, then copy and release any BOP_ALLOC-ed memory that was
 *    temporarily allocated before the kernel memory allocator was set up
 *    (called in main() after startup(), once gethrtime() is set up, and
 *    before interrupts are enabled)
 *
 * 4) Check for a null proc LPA on Starcat, collapse the lgroup topology (if
 *    necessary), set up lgroup kstats, etc. (called before start_other_cpus())
 *
 * 5) Finish any remaining lgroup initialization, including updating the lgroup
 *    topology after all CPUs have started (called after start_other_cpus())
 */
typedef enum lgrp_init_stages {
        LGRP_INIT_STAGE1,
        LGRP_INIT_STAGE2,
        LGRP_INIT_STAGE3,
        LGRP_INIT_STAGE4,
        LGRP_INIT_STAGE5
} lgrp_init_stages_t;
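
/*
 * A sketch of how the stages above map onto calls; the exact call sites live
 * in the startup code described in the preceding comment:
 *
 *      lgrp_init(LGRP_INIT_STAGE1);    (in mlsetup())
 *      lgrp_init(LGRP_INIT_STAGE2);    (early in main(), before startup())
 *      ...
 *      lgrp_init(LGRP_INIT_STAGE5);    (after start_other_cpus())
 */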

/*
 * Memory allocation policies
 */
typedef enum lgrp_mem_policy {
        LGRP_MEM_POLICY_DEFAULT,
        LGRP_MEM_POLICY_NEXT,           /* near LWP to next touch */
        LGRP_MEM_POLICY_RANDOM_PROC,    /* randomly across process */
        LGRP_MEM_POLICY_RANDOM_PSET,    /* randomly across processor set */
        LGRP_MEM_POLICY_RANDOM,         /* randomly across all lgroups */
        LGRP_MEM_POLICY_ROUNDROBIN,     /* round robin across all lgroups */
        LGRP_MEM_POLICY_NEXT_CPU,       /* near next CPU to touch memory */
        LGRP_MEM_POLICY_NEXT_SEG,       /* lgrp specified directly by seg */
        LGRP_NUM_MEM_POLICIES
} lgrp_mem_policy_t;

/*
 * Search scopes for finding resources
 */
typedef enum lgrp_res_ss {
        LGRP_SRCH_LOCAL,                /* Search local lgroup only */
        LGRP_SRCH_HIER                  /* Search entire hierarchy */
} lgrp_res_ss_t;

/*
 * Cookie used for lgrp mnode selection
 */
typedef struct lgrp_mnode_cookie {
        lgrp_t          *lmc_lgrp;      /* lgrp under consideration */
        mnodeset_t      lmc_nodes;      /* nodes not yet tried in lgrp */
        int             lmc_cnt;        /* how many nodes in untried set */
        mnodeset_t      lmc_tried;      /* nodes already tried */
        int             lmc_ntried;     /* how many nodes in tried set */
        lgrp_res_ss_t   lmc_scope;      /* consider non-local nodes? */
        ushort_t        lmc_rand;       /* a "random" number */
} lgrp_mnode_cookie_t;

/*
 * Information needed to implement memory allocation policy
 */
typedef struct lgrp_mem_policy_info {
        int             mem_policy;             /* memory allocation policy */
        lgrp_id_t       mem_lgrpid;             /* lgroup id */
} lgrp_mem_policy_info_t;

/*
 * Shared memory policy segment
 */
typedef struct lgrp_shm_policy_seg {
        u_offset_t              shm_off;        /* offset into shared object */
        size_t                  shm_size;       /* size of segment */
        lgrp_mem_policy_info_t  shm_policy;     /* memory allocation policy */
        avl_node_t              shm_tree;       /* AVL tree */
} lgrp_shm_policy_seg_t;

/*
 * Shared memory locality info
 */
typedef struct lgrp_shm_locality {
        size_t          loc_count;              /* reference count */
        avl_tree_t      *loc_tree;              /* policy segment tree */
        krwlock_t       loc_lock;               /* protects tree */
} lgrp_shm_locality_t;

/*
 * Queries that may be made to determine lgroup memory size
 */
typedef enum {
        LGRP_MEM_SIZE_FREE,             /* number of free pages */
        LGRP_MEM_SIZE_AVAIL,            /* number of pages in phys_avail */
        LGRP_MEM_SIZE_INSTALL           /* number of pages in phys_install */
} lgrp_mem_query_t;

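/*
 * For example (a hypothetical fragment), the free memory of an lgroup can be
 * queried through lgrp_mem_size(), declared later in this header:
 *
 *      pgcnt_t free_pgs = lgrp_mem_size(lgrpid, LGRP_MEM_SIZE_FREE);
 */
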
/*
 * Argument for the memory copy-rename operation; contains the source and
 * destination platform handles.
 */
typedef struct lgrp_config_mem_rename {
        lgrp_handle_t lmem_rename_from;
        lgrp_handle_t lmem_rename_to;
} lgrp_config_mem_rename_t;
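
/*
 * Hypothetical sketch of passing this argument through lgrp_config() (whose
 * last two parameters are uintptr_t).  The real call sites are in the
 * platform dynamic-reconfiguration code, and the use of a memory node as the
 * second argument here is an assumption of this sketch:
 *
 *      lgrp_config_mem_rename_t        lmr;
 *
 *      lmr.lmem_rename_from = src_hand;
 *      lmr.lmem_rename_to = dst_hand;
 *      lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)mnode, (uintptr_t)&lmr);
 */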

/* Macro to clear an lgroup bitmap */
#define klgrpset_clear(klgrpset) \
        (klgrpset) = (klgrpset_t)0

/* Macro to fill an lgroup bitmap */
#define klgrpset_fill(klgrpset) \
        (klgrpset) = (klgrpset_t)(-1)

/* Macro to add an lgroup to an lgroup bitmap */
#define klgrpset_add(klgrpset, lgrpid) \
        (klgrpset) |= ((klgrpset_t)1 << (lgrpid))

/* Macro to delete an lgroup from an lgroup bitmap */
#define klgrpset_del(klgrpset, lgrpid) \
        (klgrpset) &= ~((klgrpset_t)1 << (lgrpid))

/* Macro to copy a klgrpset into another klgrpset */
#define klgrpset_copy(klgrpset_to, klgrpset_from) \
        (klgrpset_to) = (klgrpset_from)

/* Macro to perform an 'and' operation on a pair of lgroup bitmaps */
#define klgrpset_and(klgrpset_rslt, klgrpset_arg) \
        (klgrpset_rslt) &= (klgrpset_arg)

/* Macro to perform an 'or' operation on a pair of lgroup bitmaps */
#define klgrpset_or(klgrpset_rslt, klgrpset_arg) \
        (klgrpset_rslt) |= (klgrpset_arg)

/* Macro to perform a 'diff' operation on a pair of lgroup bitmaps */
#define klgrpset_diff(klgrpset_rslt, klgrpset_arg) \
        (klgrpset_rslt) &= ~(klgrpset_arg)

/* Macro to check if an lgroup is a member of an lgrpset */
#define klgrpset_ismember(klgrpset, lgrpid) \
        ((klgrpset) & ((klgrpset_t)1 << (lgrpid)))

/* Macro to check if an lgroup bitmap is empty */
#define klgrpset_isempty(klgrpset) \
        ((klgrpset) == (klgrpset_t)0)

/* Macro to check if two lgrpsets intersect */
#define klgrpset_intersects(klgrpset1, klgrpset2) \
        ((klgrpset1) & (klgrpset2))

/* Macro to count the number of members in an lgrpset */
#define klgrpset_nlgrps(klgrpset, count)                                \
{                                                                       \
        lgrp_id_t       lgrpid;                                         \
        for (lgrpid = 0, count = 0; lgrpid <= lgrp_alloc_max; lgrpid++) {\
                if (klgrpset_ismember(klgrpset, lgrpid))                \
                        count++;                                        \
        }                                                               \
}
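
/*
 * Illustrative (hypothetical) use of the klgrpset macros above: build a set,
 * test membership, and count its members.
 *
 *      klgrpset_t      set;
 *      lgrp_id_t       id;
 *      int             count;
 *
 *      klgrpset_clear(set);
 *      klgrpset_add(set, id);
 *      if (klgrpset_ismember(set, id))
 *              klgrpset_nlgrps(set, count);    (count is 1 when id is valid)
 */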

/* Macro to get total memory size (in bytes) of a given set of lgroups */
#define klgrpset_totalsize(klgrpset, size)                              \
{                                                                       \
        lgrp_handle_t   hand;                                           \
        lgrp_id_t       lgrpid;                                         \
                                                                        \
        for (lgrpid = 0, size = 0; lgrpid <= lgrp_alloc_max; lgrpid++) {\
                if (klgrpset_ismember(klgrpset, lgrpid) &&              \
                    lgrp_table[lgrpid]) {                               \
                        hand = lgrp_table[lgrpid]->lgrp_plathand;       \
                        size += lgrp_plat_mem_size(hand,                \
                            LGRP_MEM_SIZE_AVAIL) * PAGESIZE;            \
                }                                                       \
        }                                                               \
}

/*
 * Does this lgroup exist?
 */
#define LGRP_EXISTS(lgrp)       \
        (lgrp != NULL && lgrp->lgrp_id != LGRP_NONE)

/*
 * Macro for testing if a CPU is contained in an lgrp.
 */
#define LGRP_CONTAINS_CPU(lgrp, cpu)    \
        (klgrpset_ismember(lgrp->lgrp_set[LGRP_RSRC_CPU],       \
            cpu->cpu_lpl->lpl_lgrpid))

/*
 * Initialize an lgrp_mnode_cookie
 */
#define LGRP_MNODE_COOKIE_INIT(c, lgrp, scope)  \
{                                                       \
        bzero(&(c), sizeof (lgrp_mnode_cookie_t));      \
        (&(c))->lmc_lgrp = lgrp;                        \
        (&(c))->lmc_nodes = lgrp->lgrp_mnodes;          \
        (&(c))->lmc_cnt = lgrp->lgrp_nmnodes;           \
        (&(c))->lmc_scope = scope;                      \
        (&(c))->lmc_rand = (ushort_t)gethrtime_unscaled() >> 4;        \
}

/*
 * Upgrade cookie scope from LGRP_SRCH_LOCAL to LGRP_SRCH_HIER.
 */
#define LGRP_MNODE_COOKIE_UPGRADE(c)    \
{                                                       \
        ASSERT((&(c))->lmc_scope == LGRP_SRCH_LOCAL);   \
        (&(c))->lmc_scope = LGRP_SRCH_HIER;             \
}
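
/*
 * Typical consumer pattern (a sketch; the in-tree users are in the physical
 * page allocator): initialize a cookie for an lgroup, then pull memory nodes
 * from it until none remain, optionally widening the search scope.  Here
 * "lgrp" is assumed to be an lgrp_t pointer:
 *
 *      lgrp_mnode_cookie_t     c;
 *      int                     mnode;
 *
 *      LGRP_MNODE_COOKIE_INIT(c, lgrp, LGRP_SRCH_LOCAL);
 *      while ((mnode = lgrp_memnode_choose(&c)) >= 0) {
 *              (try allocating from mnode)
 *      }
 *      (on failure, LGRP_MNODE_COOKIE_UPGRADE(c) and retry hierarchy-wide)
 */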

/*
 * Macro to see whether memory allocation policy can be reapplied
 */
#define LGRP_MEM_POLICY_REAPPLICABLE(p) \
        (p == LGRP_MEM_POLICY_NEXT)

/*
 * Return true if lgrp has CPU resources in the cpupart
 */
#define LGRP_CPUS_IN_PART(lgrpid, cpupart) \
        (cpupart->cp_lgrploads[lgrpid].lpl_ncpu > 0)

extern int      lgrp_alloc_max;
extern lgrp_t   *lgrp_table[NLGRPS_MAX];        /* indexed by lgrp_id */
extern int              nlgrps;         /* number of lgroups in machine */
extern int              nlgrpsmax;      /* max number of lgroups on platform */
extern lgrp_gen_t       lgrp_gen;       /* generation of lgroup hierarchy */
extern int              lgrp_initialized; /* single-CPU initialization done */
extern int              lgrp_topo_initialized; /* lgrp topology constructed */
extern lgrp_t           *lgrp_root;     /* root lgroup */
extern unsigned int     lgrp_topo_levels;
extern lpl_t            *lpl_bootstrap; /* bootstrap lpl for non-active CPUs */


/* generic interfaces */

/*
 * lgroup management
 */
int     lgrp_optimizations(void);
void    lgrp_init(lgrp_init_stages_t);
lgrp_t  *lgrp_create(void);
void    lgrp_destroy(lgrp_t *);
void    lgrp_config(lgrp_config_flag_t, uintptr_t, uintptr_t);
lgrp_t  *lgrp_hand_to_lgrp(lgrp_handle_t);

/*
 * lgroup stats
 */
void    lgrp_kstat_create(struct cpu *);
void    lgrp_kstat_destroy(struct cpu *);
void    lgrp_stat_add(lgrp_id_t, lgrp_stat_t, int64_t);
int64_t lgrp_stat_read(lgrp_id_t, lgrp_stat_t);

/*
 * lgroup memory
 */
lgrp_mem_policy_t       lgrp_madv_to_policy(uchar_t, size_t, int);
pgcnt_t lgrp_mem_size(lgrp_id_t, lgrp_mem_query_t);
lgrp_t  *lgrp_mem_choose(struct seg *, caddr_t, size_t);
int     lgrp_memnode_choose(lgrp_mnode_cookie_t *);
lgrp_mem_policy_t       lgrp_mem_policy_default(size_t, int);
int     lgrp_mnode_update(klgrpset_t, klgrpset_t *);
lgrp_t  *lgrp_pfn_to_lgrp(pfn_t);
lgrp_t  *lgrp_phys_to_lgrp(u_longlong_t);       /* used by numat driver */
int     lgrp_privm_policy_set(lgrp_mem_policy_t, lgrp_mem_policy_info_t *,
    size_t);
void    lgrp_shm_policy_init(struct anon_map *, vnode_t *);
void    lgrp_shm_policy_fini(struct anon_map *, vnode_t *);
lgrp_mem_policy_info_t  *lgrp_shm_policy_get(struct anon_map *, ulong_t,
    vnode_t *, u_offset_t);
int     lgrp_shm_policy_set(lgrp_mem_policy_t, struct anon_map *, ulong_t,
    vnode_t *, u_offset_t, size_t);

/*
 * Used by numat driver
 */
int     lgrp_query_cpu(processorid_t, lgrp_id_t *);
int     lgrp_query_load(processorid_t, lgrp_load_t *);

/*
 * lgroup thread placement
 */
lpl_t   *lgrp_affinity_best(kthread_t *, struct cpupart *, lgrp_id_t,
    boolean_t);
void    lgrp_affinity_init(lgrp_affinity_t **);
void    lgrp_affinity_free(lgrp_affinity_t **);
lpl_t   *lgrp_choose(kthread_t *t, struct cpupart *);
lgrp_t  *lgrp_home_lgrp(void);
lgrp_id_t       lgrp_home_id(kthread_t *);
void    lgrp_loadavg(lpl_t *, uint_t, int);
void    lgrp_move_thread(kthread_t *, lpl_t *, int);
uint64_t lgrp_get_trthr_migrations(void);
void    lgrp_update_trthr_migrations(uint64_t);

/*
 * lgroup topology
 */
int     lgrp_leaf_add(lgrp_t *, lgrp_t **, int, klgrpset_t *);
int     lgrp_leaf_delete(lgrp_t *, lgrp_t **, int, klgrpset_t *);
int     lgrp_rsets_empty(klgrpset_t *);
int     lgrp_rsets_member(klgrpset_t *, lgrp_id_t);
int     lgrp_topo_flatten(int, lgrp_t **, int, klgrpset_t *);
int     lgrp_topo_ht_limit(void);
int     lgrp_topo_ht_limit_default(void);
int     lgrp_topo_ht_limit_set(int);
int     lgrp_topo_update(lgrp_t **, int, klgrpset_t *);

/*
 * lpl topology
 */
void    lpl_topo_bootstrap(lpl_t *, int);
int     lpl_topo_flatten(int);
int     lpl_topo_verify(struct cpupart *);


/* platform interfaces */
void    lgrp_plat_init(lgrp_init_stages_t);
lgrp_t  *lgrp_plat_alloc(lgrp_id_t lgrpid);
void    lgrp_plat_config(lgrp_config_flag_t, uintptr_t);
lgrp_handle_t   lgrp_plat_cpu_to_hand(processorid_t);
lgrp_handle_t   lgrp_plat_pfn_to_hand(pfn_t);
int     lgrp_plat_max_lgrps(void);
pgcnt_t lgrp_plat_mem_size(lgrp_handle_t, lgrp_mem_query_t);
int     lgrp_plat_latency(lgrp_handle_t, lgrp_handle_t);
lgrp_handle_t   lgrp_plat_root_hand(void);

extern uint32_t         lgrp_expand_proc_thresh;
extern uint32_t         lgrp_expand_proc_diff;
extern pgcnt_t          lgrp_mem_free_thresh;
extern uint32_t         lgrp_loadavg_tolerance;
extern uint32_t         lgrp_loadavg_max_effect;
extern uint32_t         lgrp_load_thresh;
extern lgrp_mem_policy_t lgrp_mem_policy_root;

#endif  /* _KERNEL || _FAKE_KERNEL || _KMEMUSER */

#ifdef  __cplusplus
}
#endif

#endif /* _LGRP_H */