1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*      Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T     */
  22 /*        All Rights Reserved   */
  23 
  24 /*
  25  * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
  26  * Copyright (c) 2011, Joyent, Inc. All rights reserved.
  27  */
  28 
  29 #include <sys/param.h>
  30 #include <sys/t_lock.h>
  31 #include <sys/types.h>
  32 #include <sys/tuneable.h>
  33 #include <sys/sysmacros.h>
  34 #include <sys/systm.h>
  35 #include <sys/cpuvar.h>
  36 #include <sys/lgrp.h>
  37 #include <sys/user.h>
  38 #include <sys/proc.h>
  39 #include <sys/callo.h>
  40 #include <sys/kmem.h>
  41 #include <sys/var.h>
  42 #include <sys/cmn_err.h>
  43 #include <sys/swap.h>
  44 #include <sys/vmsystm.h>
  45 #include <sys/class.h>
  46 #include <sys/time.h>
  47 #include <sys/debug.h>
  48 #include <sys/vtrace.h>
  49 #include <sys/spl.h>
  50 #include <sys/atomic.h>
  51 #include <sys/dumphdr.h>
  52 #include <sys/archsystm.h>
  53 #include <sys/fs/swapnode.h>
  54 #include <sys/panic.h>
  55 #include <sys/disp.h>
  56 #include <sys/msacct.h>
  57 #include <sys/mem_cage.h>
  58 
  59 #include <vm/page.h>
  60 #include <vm/anon.h>
  61 #include <vm/rm.h>
  62 #include <sys/cyclic.h>
  63 #include <sys/cpupart.h>
  64 #include <sys/rctl.h>
  65 #include <sys/task.h>
  66 #include <sys/sdt.h>
  67 #include <sys/ddi_timer.h>
  68 #include <sys/random.h>
  69 #include <sys/modctl.h>
  70 #include <sys/zone.h>
  71 
  72 /*
  73  * for NTP support
  74  */
  75 #include <sys/timex.h>
  76 #include <sys/inttypes.h>
  77 
  78 #include <sys/sunddi.h>
  79 #include <sys/clock_impl.h>
  80 
  81 /*
  82  * clock() is called straight from the clock cyclic; see clock_init().
  83  *
  84  * Functions:
  85  *      reprime clock
  86  *      maintain date
  87  *      jab the scheduler
  88  */
  89 
  90 extern kcondvar_t       fsflush_cv;
  91 extern sysinfo_t        sysinfo;
  92 extern vminfo_t vminfo;
  93 extern int      idleswtch;      /* flag set while idle in pswtch() */
  94 extern hrtime_t volatile devinfo_freeze;
  95 
  96 /*
  97  * high-precision avenrun values.  These are needed to make the
  98  * regular avenrun values accurate.
  99  */
 100 static uint64_t hp_avenrun[3];
 101 int     avenrun[3];             /* FSCALED average run queue lengths */
 102 time_t  time;   /* time in seconds since 1970 - for compatibility only */
 103 
 104 static struct loadavg_s loadavg;
 105 /*
 106  * Phase/frequency-lock loop (PLL/FLL) definitions
 107  *
 108  * The following variables are read and set by the ntp_adjtime() system
 109  * call.
 110  *
 111  * time_state shows the state of the system clock, with values defined
 112  * in the timex.h header file.
 113  *
 114  * time_status shows the status of the system clock, with bits defined
 115  * in the timex.h header file.
 116  *
 117  * time_offset is used by the PLL/FLL to adjust the system time in small
 118  * increments.
 119  *
 120  * time_constant determines the bandwidth or "stiffness" of the PLL.
 121  *
 122  * time_tolerance determines maximum frequency error or tolerance of the
 123  * CPU clock oscillator and is a property of the architecture; however,
 124  * in principle it could change as result of the presence of external
 125  * discipline signals, for instance.
 126  *
 127  * time_precision is usually equal to the kernel tick variable; however,
 128  * in cases where a precision clock counter or external clock is
 129  * available, the resolution can be much less than this and depend on
 130  * whether the external clock is working or not.
 131  *
 132  * time_maxerror is initialized by a ntp_adjtime() call and increased by
 133  * the kernel once each second to reflect the maximum error bound
 134  * growth.
 135  *
 136  * time_esterror is set and read by the ntp_adjtime() call, but
 137  * otherwise not used by the kernel.
 138  */
 139 int32_t time_state = TIME_OK;   /* clock state */
 140 int32_t time_status = STA_UNSYNC;       /* clock status bits */
 141 int32_t time_offset = 0;                /* time offset (us) */
 142 int32_t time_constant = 0;              /* pll time constant */
 143 int32_t time_tolerance = MAXFREQ;       /* frequency tolerance (scaled ppm) */
 144 int32_t time_precision = 1;     /* clock precision (us) */
 145 int32_t time_maxerror = MAXPHASE;       /* maximum error (us) */
 146 int32_t time_esterror = MAXPHASE;       /* estimated error (us) */
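
     /*
      * For reference only (an illustrative userland sketch, not part of this
      * file): a synchronization daemon reads and adjusts the variables above
      * through the ntp_adjtime(2) interface.  With tx.modes == 0 the call is
      * a read-only query of the current values:
      *
      *         struct timex tx = { 0 };
      *         if (ntp_adjtime(&tx) >= 0)
      *                 (void) printf("freq %d offset %d status 0x%x\n",
      *                     tx.freq, tx.offset, tx.status);
      */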
 147 
 148 /*
 149  * The following variables establish the state of the PLL/FLL and the
 150  * residual time and frequency offset of the local clock. The scale
 151  * factors are defined in the timex.h header file.
 152  *
 153  * time_phase and time_freq are the phase increment and the frequency
 154  * increment, respectively, of the kernel time variable.
 155  *
 156  * time_freq is set via ntp_adjtime() from a value stored in a file when
 157  * the synchronization daemon is first started. Its value is retrieved
 158  * via ntp_adjtime() and written to the file about once per hour by the
 159  * daemon.
 160  *
 161  * time_adj is the adjustment added to the value of tick at each timer
 162  * interrupt and is recomputed from time_phase and time_freq at each
 163  * seconds rollover.
 164  *
 165  * time_reftime is the second's portion of the system time at the last
 166  * call to ntp_adjtime(). It is used to adjust the time_freq variable
 167  * and to increase the time_maxerror as the time since last update
 168  * increases.
 169  */
 170 int32_t time_phase = 0;         /* phase offset (scaled us) */
 171 int32_t time_freq = 0;          /* frequency offset (scaled ppm) */
 172 int32_t time_adj = 0;           /* tick adjust (scaled 1 / hz) */
 173 int32_t time_reftime = 0;               /* time at last adjustment (s) */
 174 
 175 /*
 176  * The scale factors of the following variables are defined in the
 177  * timex.h header file.
 178  *
 179  * pps_time contains the time at each calibration interval, as read by
 180  * microtime(). pps_count counts the seconds of the calibration
 181  * interval, the duration of which is nominally pps_shift in powers of
 182  * two.
 183  *
 184  * pps_offset is the time offset produced by the time median filter
 185  * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
 186  * this filter.
 187  *
 188  * pps_freq is the frequency offset produced by the frequency median
 189  * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
 190  * by this filter.
 191  *
 192  * pps_usec is latched from a high resolution counter or external clock
 193  * at pps_time. Here we want the hardware counter contents only, not the
 194  * contents plus the time_tv.usec as usual.
 195  *
 196  * pps_valid counts the number of seconds since the last PPS update. It
 197  * is used as a watchdog timer to disable the PPS discipline should the
 198  * PPS signal be lost.
 199  *
 200  * pps_glitch counts the number of seconds since the beginning of an
 201  * offset burst more than tick/2 from current nominal offset. It is used
 202  * mainly to suppress error bursts due to priority conflicts between the
 203  * PPS interrupt and timer interrupt.
 204  *
 205  * pps_intcnt counts the calibration intervals for use in the interval-
 206  * adaptation algorithm. It's just too complicated for words.
 207  */
 208 struct timeval pps_time;        /* kernel time at last interval */
 209 int32_t pps_tf[] = {0, 0, 0};   /* pps time offset median filter (us) */
 210 int32_t pps_offset = 0;         /* pps time offset (us) */
 211 int32_t pps_jitter = MAXTIME;   /* time dispersion (jitter) (us) */
 212 int32_t pps_ff[] = {0, 0, 0};   /* pps frequency offset median filter */
 213 int32_t pps_freq = 0;           /* frequency offset (scaled ppm) */
 214 int32_t pps_stabil = MAXFREQ;   /* frequency dispersion (scaled ppm) */
 215 int32_t pps_usec = 0;           /* microsec counter at last interval */
 216 int32_t pps_valid = PPS_VALID;  /* pps signal watchdog counter */
 217 int32_t pps_glitch = 0;         /* pps signal glitch counter */
 218 int32_t pps_count = 0;          /* calibration interval counter (s) */
 219 int32_t pps_shift = PPS_SHIFT;  /* interval duration (s) (shift) */
 220 int32_t pps_intcnt = 0;         /* intervals at current duration */
 221 
 222 /*
 223  * PPS signal quality monitors
 224  *
 225  * pps_jitcnt counts the seconds that have been discarded because the
 226  * jitter measured by the time median filter exceeds the limit MAXTIME
 227  * (100 us).
 228  *
 229  * pps_calcnt counts the frequency calibration intervals, which are
 230  * variable from 4 s to 256 s.
 231  *
 232  * pps_errcnt counts the calibration intervals which have been discarded
 233  * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 234  * calibration interval jitter exceeds two ticks.
 235  *
 236  * pps_stbcnt counts the calibration intervals that have been discarded
 237  * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
 238  */
 239 int32_t pps_jitcnt = 0;         /* jitter limit exceeded */
 240 int32_t pps_calcnt = 0;         /* calibration intervals */
 241 int32_t pps_errcnt = 0;         /* calibration errors */
 242 int32_t pps_stbcnt = 0;         /* stability limit exceeded */
 243 
 244 kcondvar_t lbolt_cv;
 245 
 246 /*
 247  * Hybrid lbolt implementation:
 248  *
 249  * The service historically provided by the lbolt and lbolt64 variables has
 250  * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and the
 251  * original symbols removed from the system. The once clock-driven variables are
 252  * now implemented in an event-driven fashion, backed by gethrtime() coarsened to
 253  * the appropriate clock resolution. The default event-driven implementation is
 254  * complemented by a cyclic-driven one, active only during periods of intense
 255  * activity around the DDI lbolt routines, when an lbolt-specific cyclic is
 256  * reprogrammed to fire at a clock tick interval to serve consumers of lbolt who
 257  * rely on the original low cost of consulting a memory position.
 258  *
 259  * The implementation uses the number of calls to these routines and the
 260  * frequency of these to determine when to transition from event to cyclic
 261  * driven and vice-versa. These values are kept on a per CPU basis for
 262  * scalability reasons and to prevent CPUs from constantly invalidating a single
 263  * cache line when modifying a global variable. The transition from event to
 264  * cyclic mode happens once the thresholds are crossed, and activity on any CPU
 265  * can cause such a transition.
 266  *
 267  * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
 268  * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
 269  * lbolt_cyclic_driven() according to the current mode. When the thresholds
 270  * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
 271  * fire at a nsec_per_tick interval and increment an internal variable at
 272  * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 273  * will simply return the value of such variable. lbolt_cyclic() will attempt
 274  * to shut itself off at each threshold interval (sampling period for calls
 275  * to the DDI lbolt routines), and return to the event driven mode, but will
 276  * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
 277  *
 278  * lbolt_bootstrap is used during boot to serve lbolt consumers who don't wait
 279  * for the cyclic subsystem to be initialized.
 280  *
 281  */
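
     /*
      * An illustrative sketch (not part of the implementation): a typical DDI
      * consumer simply calls ddi_get_lbolt(), which dispatches through the
      * lbolt_hybrid pointer declared below; cv, lock and tmo_usec stand in
      * for hypothetical driver state:
      *
      *         clock_t deadline = ddi_get_lbolt() + drv_usectohz(tmo_usec);
      *         (void) cv_timedwait(&cv, &lock, deadline);
      */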
 282 int64_t lbolt_bootstrap(void);
 283 int64_t lbolt_event_driven(void);
 284 int64_t lbolt_cyclic_driven(void);
 285 int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
 286 uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
 287 
 288 /*
 289  * lbolt's cyclic, installed by clock_init().
 290  */
 291 static void lbolt_cyclic(void);
 292 
 293 /*
 294  * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 295  * from switching back to event driven, once it reaches cyclic mode.
 296  */
 297 static boolean_t lbolt_cyc_only = B_FALSE;
 298 
 299 /*
 300  * Cache aligned, per CPU structure with lbolt usage statistics.
 301  */
 302 static lbolt_cpu_t *lb_cpu;
 303 
 304 /*
 305  * Single, cache aligned, structure with all the information required by
 306  * the lbolt implementation.
 307  */
 308 lbolt_info_t *lb_info;
 309 
 310 
 311 int one_sec = 1; /* turned on once every second */
 312 static int fsflushcnt;  /* counter for t_fsflushr */
 313 int     dosynctodr = 1; /* patchable; enable/disable sync to TOD chip */
 314 int     tod_needsync = 0;       /* need to sync tod chip with software time */
 315 static int tod_broken = 0;      /* clock chip doesn't work */
 316 time_t  boot_time = 0;          /* Boot time in seconds since 1970 */
 317 cyclic_id_t clock_cyclic;       /* clock()'s cyclic_id */
 318 cyclic_id_t deadman_cyclic;     /* deadman()'s cyclic_id */
 319 cyclic_id_t ddi_timer_cyclic;   /* cyclic_timer()'s cyclic_id */
 320 
 321 extern void     clock_tick_schedule(int);
 322 
 323 static int lgrp_ticks;          /* counter to schedule lgrp load calcs */
 324 
 325 /*
 326  * for tod fault detection
 327  */
 328 #define TOD_REF_FREQ            ((longlong_t)(NANOSEC))
 329 #define TOD_STALL_THRESHOLD     (TOD_REF_FREQ * 3 / 2)
 330 #define TOD_JUMP_THRESHOLD      (TOD_REF_FREQ / 2)
 331 #define TOD_FILTER_N            4
 332 #define TOD_FILTER_SETTLE       (4 * TOD_FILTER_N)
 333 static int tod_faulted = TOD_NOFAULT;
 334 
 335 static int tod_status_flag = 0;         /* used by tod_validate() */
 336 
 337 static hrtime_t prev_set_tick = 0;      /* gethrtime() prior to tod_set() */
 338 static time_t prev_set_tod = 0;         /* tv_sec value passed to tod_set() */
 339 
 340 /* patchable via /etc/system */
 341 int tod_validate_enable = 1;
 342 
 343 /* Diagnose/Limit messages about delay(9F) called from interrupt context */
 344 int                     delay_from_interrupt_diagnose = 0;
 345 volatile uint32_t       delay_from_interrupt_msg = 20;
 346 
 347 /*
 348  * On non-SPARC systems, TOD validation must be deferred until gethrtime
 349  * returns non-zero values (after mach_clkinit's execution).
 350  * On SPARC systems, it must be deferred until after hrtime_base
 351  * and hres_last_tick are set (in the first invocation of hres_tick).
 352  * Since in both cases the prerequisites occur before the invocation of
 353  * tod_get() in clock(), the deferment is lifted there.
 354  */
 355 static boolean_t tod_validate_deferred = B_TRUE;
 356 
 357 /*
 358  * tod_fault_table[] must be aligned with
 359  * enum tod_fault_type in systm.h
 360  */
 361 static char *tod_fault_table[] = {
 362         "Reversed",                     /* TOD_REVERSED */
 363         "Stalled",                      /* TOD_STALLED */
 364         "Jumped",                       /* TOD_JUMPED */
 365         "Changed in Clock Rate",        /* TOD_RATECHANGED */
 366         "Is Read-Only"                  /* TOD_RDONLY */
 367         /*
 368          * no strings needed for TOD_NOFAULT
 369          */
 370 };
 371 
 372 /*
 373  * test hook for tod broken detection in tod_validate
 374  */
 375 int tod_unit_test = 0;
 376 time_t tod_test_injector;
 377 
 378 #define CLOCK_ADJ_HIST_SIZE     4
 379 
 380 static int      adj_hist_entry;
 381 
 382 int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];
 383 
 384 static void calcloadavg(int, uint64_t *);
 385 static int genloadavg(struct loadavg_s *);
 386 static void loadavg_update(void);
 387 
 388 void (*cmm_clock_callout)() = NULL;
 389 void (*cpucaps_clock_callout)() = NULL;
 390 
 391 extern clock_t clock_tick_proc_max;
 392 
 393 static int64_t deadman_counter = 0;
 394 
 395 static void
 396 clock(void)
 397 {
 398         kthread_t       *t;
 399         uint_t  nrunnable;
 400         uint_t  w_io;
 401         cpu_t   *cp;
 402         cpupart_t *cpupart;
 403         extern  void    set_freemem();
 404         void    (*funcp)();
 405         int32_t ltemp;
 406         int64_t lltemp;
 407         int s;
 408         int do_lgrp_load;
 409         int i;
 410         clock_t now = LBOLT_NO_ACCOUNT; /* current tick */
 411 
 412         if (panicstr)
 413                 return;
 414 
 415         /*
 416          * Make sure that 'freemem' does not drift too far from the truth
 417          */
 418         set_freemem();
 419 
 420 
 421         /*
 422          * Before the section which is repeated is executed, we do
 423          * the time delta processing which occurs every clock tick.
 424          *
 425          * There is additional processing which happens every time
 426          * the nanosecond counter rolls over; it is described
 427          * below - see the section which begins with: if (one_sec)
 428          *
 429          * This section marks the beginning of the precision-kernel
 430          * code fragment.
 431          *
 432          * First, compute the phase adjustment. If the low-order bits
 433          * (time_phase) of the update overflow, bump the higher order
 434          * bits (time_update).
 435          */
 436         time_phase += time_adj;
 437         if (time_phase <= -FINEUSEC) {
 438                 ltemp = -time_phase / SCALE_PHASE;
 439                 time_phase += ltemp * SCALE_PHASE;
 440                 s = hr_clock_lock();
 441                 timedelta -= ltemp * (NANOSEC/MICROSEC);
 442                 hr_clock_unlock(s);
 443         } else if (time_phase >= FINEUSEC) {
 444                 ltemp = time_phase / SCALE_PHASE;
 445                 time_phase -= ltemp * SCALE_PHASE;
 446                 s = hr_clock_lock();
 447                 timedelta += ltemp * (NANOSEC/MICROSEC);
 448                 hr_clock_unlock(s);
 449         }
 450 
 451         /*
 452          * End of precision-kernel code fragment which is processed
 453          * every timer interrupt.
 454          *
 455          * Continue with the interrupt processing as scheduled.
 456          */
 457         /*
 458          * Count the number of runnable threads and the number waiting
 459          * for some form of I/O to complete -- gets added to
 460          * sysinfo.waiting.  To know the state of the system, we must add
 461          * wait counts from all CPUs.  Also add up the per-partition
 462          * statistics.
 463          */
 464         w_io = 0;
 465         nrunnable = 0;
 466 
 467         /*
 468          * keep track of when to update lgrp/part loads
 469          */
 470 
 471         do_lgrp_load = 0;
 472         if (lgrp_ticks++ >= hz / 10) {
 473                 lgrp_ticks = 0;
 474                 do_lgrp_load = 1;
 475         }
 476 
 477         if (one_sec) {
 478                 loadavg_update();
 479                 deadman_counter++;
 480         }
 481 
 482         /*
 483          * First count the threads waiting on kpreempt queues in each
 484          * CPU partition.
 485          */
 486 
 487         cpupart = cp_list_head;
 488         do {
 489                 uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;
 490 
 491                 cpupart->cp_updates++;
 492                 nrunnable += cpupart_nrunnable;
 493                 cpupart->cp_nrunnable_cum += cpupart_nrunnable;
 494                 if (one_sec) {
 495                         cpupart->cp_nrunning = 0;
 496                         cpupart->cp_nrunnable = cpupart_nrunnable;
 497                 }
 498         } while ((cpupart = cpupart->cp_next) != cp_list_head);
 499 
 500 
 501         /* Now count the per-CPU statistics. */
 502         cp = cpu_list;
 503         do {
 504                 uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;
 505 
 506                 nrunnable += cpu_nrunnable;
 507                 cpupart = cp->cpu_part;
 508                 cpupart->cp_nrunnable_cum += cpu_nrunnable;
 509                 if (one_sec) {
 510                         cpupart->cp_nrunnable += cpu_nrunnable;
 511                         /*
 512                          * Update user, system, and idle cpu times.
 513                          */
 514                         cpupart->cp_nrunning++;
 515                         /*
 516                          * w_io is used to update sysinfo.waiting during
 517                          * one_second processing below.  Only gather w_io
 518                          * information when we walk the list of cpus if we're
 519                          * going to perform one_second processing.
 520                          */
 521                         w_io += CPU_STATS(cp, sys.iowait);
 522                 }
 523 
 524                 if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
 525                         int i, load, change;
 526                         hrtime_t intracct, intrused;
 527                         const hrtime_t maxnsec = 1000000000;
 528                         const int precision = 100;
 529 
 530                         /*
 531                          * Estimate interrupt load on this cpu each second.
 532                          * Computes cpu_intrload as %utilization (0-99).
 533                          */
 534 
 535                         /* add up interrupt time from all micro states */
 536                         for (intracct = 0, i = 0; i < NCMSTATES; i++)
 537                                 intracct += cp->cpu_intracct[i];
 538                         scalehrtime(&intracct);
 539 
 540                         /* compute nsec used in the past second */
 541                         intrused = intracct - cp->cpu_intrlast;
 542                         cp->cpu_intrlast = intracct;
 543 
 544                         /* limit the value for safety (and the first pass) */
 545                         if (intrused >= maxnsec)
 546                                 intrused = maxnsec - 1;
 547 
 548                         /* calculate %time in interrupt */
 549                         load = (precision * intrused) / maxnsec;
 550                         ASSERT(load >= 0 && load < precision);
 551                         change = cp->cpu_intrload - load;
 552 
 553                         /* jump to new max, or decay the old max */
 554                         if (change < 0)
 555                                 cp->cpu_intrload = load;
 556                         else if (change > 0)
 557                                 cp->cpu_intrload -= (change + 3) / 4;
 558 
 559                         DTRACE_PROBE3(cpu_intrload,
 560                             cpu_t *, cp,
 561                             hrtime_t, intracct,
 562                             hrtime_t, intrused);
 563                 }
 564 
 565                 if (do_lgrp_load &&
 566                     (cp->cpu_flags & CPU_EXISTS)) {
 567                         /*
 568                          * When updating the lgroup's load average,
 569                          * account for the thread running on the CPU.
 570                          * If the CPU is the current one, then we need
 571                          * to account for the underlying thread which
 572                          * got the clock interrupt, not the thread that is
 573                          * handling the interrupt and calculating the load
 574                          * average
 575                          */
 576                         t = cp->cpu_thread;
 577                         if (CPU == cp)
 578                                 t = t->t_intr;
 579 
 580                         /*
 581                          * Account for the load average for this thread if
 582                          * it isn't the idle thread or it is on the interrupt
 583                          * stack and not the current CPU handling the clock
 584                          * interrupt
 585                          */
 586                         if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
 587                             CPU_ON_INTR(cp))) {
 588                                 if (t->t_lpl == cp->cpu_lpl) {
 589                                         /* local thread */
 590                                         cpu_nrunnable++;
 591                                 } else {
 592                                         /*
 593                                          * This is a remote thread, charge it
 594                                          * against its home lgroup.  Note that
 595                                          * we notice that a thread is remote
 596                                          * only if it's currently executing.
 597                                          * This is a reasonable approximation,
 598                                          * since queued remote threads are rare.
 599                                          * Note also that if we didn't charge
 600                                          * it to its home lgroup, remote
 601                                          * execution would often make a system
 602                                          * appear balanced even though it was
 603                                          * not, and thread placement/migration
 604                                          * would often not be done correctly.
 605                                          */
 606                                         lgrp_loadavg(t->t_lpl,
 607                                             LGRP_LOADAVG_IN_THREAD_MAX, 0);
 608                                 }
 609                         }
 610                         lgrp_loadavg(cp->cpu_lpl,
 611                             cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
 612                 }
 613         } while ((cp = cp->cpu_next) != cpu_list);
 614 
 615         clock_tick_schedule(one_sec);
 616 
 617         /*
 618          * Check for a callout that needs to be called from the clock
 619          * thread to support the membership protocol in a clustered
 620          * system.  Copy the function pointer so that we can reset
 621          * this to NULL if needed.
 622          */
 623         if ((funcp = cmm_clock_callout) != NULL)
 624                 (*funcp)();
 625 
 626         if ((funcp = cpucaps_clock_callout) != NULL)
 627                 (*funcp)();
 628 
 629         /*
 630          * Wakeup the cageout thread waiters once per second.
 631          */
 632         if (one_sec)
 633                 kcage_tick();
 634 
 635         if (one_sec) {
 636 
 637                 int drift, absdrift;
 638                 timestruc_t tod;
 639                 int s;
 640 
 641                 /*
 642                  * Beginning of precision-kernel code fragment executed
 643                  * every second.
 644                  *
 645                  * On rollover of the second the phase adjustment to be
 646                  * used for the next second is calculated.  Also, the
 647                  * maximum error is increased by the tolerance.  If the
 648                  * PPS frequency discipline code is present, the phase is
 649                  * increased to compensate for the CPU clock oscillator
 650                  * frequency error.
 651                  *
 652                  * On a 32-bit machine and given parameters in the timex.h
 653                  * header file, the maximum phase adjustment is +-512 ms
 654                  * and maximum frequency offset is (a tad less than)
 655                  * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
 656                  */
 657                 time_maxerror += time_tolerance / SCALE_USEC;
 658 
 659                 /*
 660                  * Leap second processing. If in leap-insert state at
 661                  * the end of the day, the system clock is set back one
 662                  * second; if in leap-delete state, the system clock is
 663                  * set ahead one second. The microtime() routine or
 664                  * external clock driver will ensure that reported time
 665                  * is always monotonic. The ugly divides should be
 666                  * replaced.
 667                  */
 668                 switch (time_state) {
 669 
 670                 case TIME_OK:
 671                         if (time_status & STA_INS)
 672                                 time_state = TIME_INS;
 673                         else if (time_status & STA_DEL)
 674                                 time_state = TIME_DEL;
 675                         break;
 676 
 677                 case TIME_INS:
 678                         if (hrestime.tv_sec % 86400 == 0) {
 679                                 s = hr_clock_lock();
 680                                 hrestime.tv_sec--;
 681                                 hr_clock_unlock(s);
 682                                 time_state = TIME_OOP;
 683                         }
 684                         break;
 685 
 686                 case TIME_DEL:
 687                         if ((hrestime.tv_sec + 1) % 86400 == 0) {
 688                                 s = hr_clock_lock();
 689                                 hrestime.tv_sec++;
 690                                 hr_clock_unlock(s);
 691                                 time_state = TIME_WAIT;
 692                         }
 693                         break;
 694 
 695                 case TIME_OOP:
 696                         time_state = TIME_WAIT;
 697                         break;
 698 
 699                 case TIME_WAIT:
 700                         if (!(time_status & (STA_INS | STA_DEL)))
 701                                 time_state = TIME_OK;
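                             /* FALLTHROUGH */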
 702                 default:
 703                         break;
 704                 }
 705 
 706                 /*
 707                  * Compute the phase adjustment for the next second. In
 708                  * PLL mode, the offset is reduced by a fixed factor
 709                  * times the time constant. In FLL mode the offset is
 710                  * used directly. In either mode, the maximum phase
 711                  * adjustment for each second is clamped so as to spread
 712                  * the adjustment over not more than the number of
 713                  * seconds between updates.
 714                  */
 715                 if (time_offset == 0)
 716                         time_adj = 0;
 717                 else if (time_offset < 0) {
 718                         lltemp = -time_offset;
 719                         if (!(time_status & STA_FLL)) {
 720                                 if ((1 << time_constant) >= SCALE_KG)
 721                                         lltemp *= (1 << time_constant) /
 722                                             SCALE_KG;
 723                                 else
 724                                         lltemp = (lltemp / SCALE_KG) >>
 725                                             time_constant;
 726                         }
 727                         if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
 728                                 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
 729                         time_offset += lltemp;
 730                         time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
 731                 } else {
 732                         lltemp = time_offset;
 733                         if (!(time_status & STA_FLL)) {
 734                                 if ((1 << time_constant) >= SCALE_KG)
 735                                         lltemp *= (1 << time_constant) /
 736                                             SCALE_KG;
 737                                 else
 738                                         lltemp = (lltemp / SCALE_KG) >>
 739                                             time_constant;
 740                         }
 741                         if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
 742                                 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
 743                         time_offset -= lltemp;
 744                         time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
 745                 }
 746 
 747                 /*
 748                  * Compute the frequency estimate and additional phase
 749                  * adjustment due to frequency error for the next
 750                  * second. When the PPS signal is engaged, gnaw on the
 751                  * watchdog counter and update the frequency computed by
 752                  * the pll and the PPS signal.
 753                  */
 754                 pps_valid++;
 755                 if (pps_valid == PPS_VALID) {
 756                         pps_jitter = MAXTIME;
 757                         pps_stabil = MAXFREQ;
 758                         time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
 759                             STA_PPSWANDER | STA_PPSERROR);
 760                 }
 761                 lltemp = time_freq + pps_freq;
 762 
 763                 if (lltemp)
 764                         time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
 765 
 766                 /*
 767                  * End of precision kernel-code fragment
 768                  *
 769                  * The section below should be modified if we are planning
 770                  * to use NTP for synchronization.
 771                  *
 772                  * Note: the clock synchronization code now assumes
 773                  * the following:
 774                  *   - if dosynctodr is 1, then compute the drift between
 775                  *      the tod chip and software time and adjust one or
 776                  *      the other depending on the circumstances
 777                  *
 778                  *   - if dosynctodr is 0, then the tod chip is independent
 779                  *      of the software clock and should not be adjusted,
 780                  *      but allowed to free run.  This allows NTP to sync
 781                  *      hrestime without any interference from the tod chip.
 782                  */
 783 
 784                 tod_validate_deferred = B_FALSE;
 785                 mutex_enter(&tod_lock);
 786                 tod = tod_get();
 787                 drift = tod.tv_sec - hrestime.tv_sec;
 788                 absdrift = (drift >= 0) ? drift : -drift;
 789                 if (tod_needsync || absdrift > 1) {
 790                         int s;
 791                         if (absdrift > 2) {
 792                                 if (!tod_broken && tod_faulted == TOD_NOFAULT) {
 793                                         s = hr_clock_lock();
 794                                         hrestime = tod;
 795                                         membar_enter(); /* hrestime visible */
 796                                         timedelta = 0;
 797                                         timechanged++;
 798                                         tod_needsync = 0;
 799                                         hr_clock_unlock(s);
 800                                         callout_hrestime();
 801 
 802                                 }
 803                         } else {
 804                                 if (tod_needsync || !dosynctodr) {
 805                                         gethrestime(&tod);
 806                                         tod_set(tod);
 807                                         s = hr_clock_lock();
 808                                         if (timedelta == 0)
 809                                                 tod_needsync = 0;
 810                                         hr_clock_unlock(s);
 811                                 } else {
 812                                         /*
 813                                          * If the drift is 2 seconds on the
 814                                          * money, then the TOD is adjusting
 815                                          * the clock;  record that.
 816                                          */
 817                                         clock_adj_hist[adj_hist_entry++ %
 818                                             CLOCK_ADJ_HIST_SIZE] = now;
 819                                         s = hr_clock_lock();
 820                                         timedelta = (int64_t)drift * NANOSEC;
 821                                         hr_clock_unlock(s);
 822                                 }
 823                         }
 824                 }
 825                 one_sec = 0;
 826                 time = gethrestime_sec();  /* for crusty old kmem readers */
 827                 mutex_exit(&tod_lock);
 828 
 829                 /*
 830                  * Some drivers still depend on this... XXX
 831                  */
 832                 cv_broadcast(&lbolt_cv);
 833 
 834                 vminfo.freemem += freemem;
 835                 {
 836                         pgcnt_t maxswap, resv, free;
 837                         pgcnt_t avail =
 838                             MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
 839 
 840                         maxswap = k_anoninfo.ani_mem_resv +
 841                             k_anoninfo.ani_max + avail;
 842                         /* Update ani_free */
 843                         set_anoninfo();
 844                         free = k_anoninfo.ani_free + avail;
 845                         resv = k_anoninfo.ani_phys_resv +
 846                             k_anoninfo.ani_mem_resv;
 847 
 848                         vminfo.swap_resv += resv;
 849                         /* number of reserved and allocated pages */
 850 #ifdef  DEBUG
 851                         if (maxswap < free)
 852                                 cmn_err(CE_WARN, "clock: maxswap < free");
 853                         if (maxswap < resv)
 854                                 cmn_err(CE_WARN, "clock: maxswap < resv");
 855 #endif
 856                         vminfo.swap_alloc += maxswap - free;
 857                         vminfo.swap_avail += maxswap - resv;
 858                         vminfo.swap_free += free;
 859                 }
 860                 vminfo.updates++;
 861                 if (nrunnable) {
 862                         sysinfo.runque += nrunnable;
 863                         sysinfo.runocc++;
 864                 }
 865                 if (nswapped) {
 866                         sysinfo.swpque += nswapped;
 867                         sysinfo.swpocc++;
 868                 }
 869                 sysinfo.waiting += w_io;
 870                 sysinfo.updates++;
 871 
 872                 /*
 873                  * Wake up fsflush to write out DELWRI
 874                  * buffers, dirty pages and other cached
 875                  * administrative data, e.g. inodes.
 876                  */
 877                 if (--fsflushcnt <= 0) {
 878                         fsflushcnt = tune.t_fsflushr;
 879                         cv_signal(&fsflush_cv);
 880                 }
 881 
 882                 vmmeter();
 883                 calcloadavg(genloadavg(&loadavg), hp_avenrun);
 884                 for (i = 0; i < 3; i++)
 885                         /*
 886                          * At the moment avenrun[] can only hold 31
 887                          * bits of load average as it is a signed
 888                          * int in the API. We need to ensure that
 889                          * hp_avenrun[i] >> (16 - FSHIFT) will not be
 890                          * too large. If it is, we put the largest value
 891                          * that we can use into avenrun[i]. This is
 892                          * kludgey, but about all we can do until
 893                          * avenrun[] is declared as an array of uint64_t.
 894                          */
 895                         if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
 896                                 avenrun[i] = (int32_t)(hp_avenrun[i] >>
 897                                     (16 - FSHIFT));
 898                         else
 899                                 avenrun[i] = 0x7fffffff;
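                     /*
                      * For reference (illustrative only, not used here):
                      * user-visible load averages are these fixed-point
                      * values scaled back down, e.g. the one-minute
                      * average is (double)avenrun[0] / FSCALE.
                      */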
 900 
 901                 cpupart = cp_list_head;
 902                 do {
 903                         calcloadavg(genloadavg(&cpupart->cp_loadavg),
 904                             cpupart->cp_hp_avenrun);
 905                 } while ((cpupart = cpupart->cp_next) != cp_list_head);
 906 
 907                 /*
 908                  * Wake up the swapper thread if necessary.
 909                  */
 910                 if (runin ||
 911                     (runout && (avefree < desfree || wake_sched_sec))) {
 912                         t = &t0;
 913                         thread_lock(t);
 914                         if (t->t_state == TS_STOPPED) {
 915                                 runin = runout = 0;
 916                                 wake_sched_sec = 0;
 917                                 t->t_whystop = 0;
 918                                 t->t_whatstop = 0;
 919                                 t->t_schedflag &= ~TS_ALLSTART;
 920                                 THREAD_TRANSITION(t);
 921                                 setfrontdq(t);
 922                         }
 923                         thread_unlock(t);
 924                 }
 925         }
 926 
 927         /*
 928          * Wake up the swapper if any high priority swapped-out threads
 929          * became runnable during the last tick.
 930          */
 931         if (wake_sched) {
 932                 t = &t0;
 933                 thread_lock(t);
 934                 if (t->t_state == TS_STOPPED) {
 935                         runin = runout = 0;
 936                         wake_sched = 0;
 937                         t->t_whystop = 0;
 938                         t->t_whatstop = 0;
 939                         t->t_schedflag &= ~TS_ALLSTART;
 940                         THREAD_TRANSITION(t);
 941                         setfrontdq(t);
 942                 }
 943                 thread_unlock(t);
 944         }
 945 }
 946 
 947 void
 948 clock_init(void)
 949 {
 950         cyc_handler_t clk_hdlr, timer_hdlr, lbolt_hdlr;
 951         cyc_time_t clk_when, lbolt_when;
 952         int i, sz;
 953         intptr_t buf;
 954 
 955         /*
 956          * Setup handler and timer for the clock cyclic.
 957          */
 958         clk_hdlr.cyh_func = (cyc_func_t)clock;
 959         clk_hdlr.cyh_level = CY_LOCK_LEVEL;
 960         clk_hdlr.cyh_arg = NULL;
 961 
 962         clk_when.cyt_when = 0;
 963         clk_when.cyt_interval = nsec_per_tick;
 964 
 965         /*
 966          * cyclic_timer is dedicated to the ddi interface, which
 967          * uses the same clock resolution as the system one.
 968          */
 969         timer_hdlr.cyh_func = (cyc_func_t)cyclic_timer;
 970         timer_hdlr.cyh_level = CY_LOCK_LEVEL;
 971         timer_hdlr.cyh_arg = NULL;
 972 
 973         /*
 974          * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
 975          * interval to satisfy performance needs of the DDI lbolt consumers.
 976          * It is off by default.
 977          */
 978         lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
 979         lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
 980         lbolt_hdlr.cyh_arg = NULL;
 981 
 982         lbolt_when.cyt_interval = nsec_per_tick;
 983 
 984         /*
 985          * Allocate cache line aligned space for the per CPU lbolt data and
 986          * lbolt info structures, and initialize them with their default
 987          * values. Note that these structures are also cache line sized.
 988          */
 989         sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
 990         buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
 991         lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
 992 
 993         if (hz != HZ_DEFAULT)
 994                 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
 995                     hz/HZ_DEFAULT;
 996         else
 997                 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;
 998 
 999         lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
1000 
1001         sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
1002         buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
1003         lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
1004 
1005         for (i = 0; i < max_ncpus; i++)
1006                 lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
1007 
1008         /*
1009          * Install the softint used to switch between event and cyclic driven
1010          * lbolt. We use a soft interrupt to make sure the context of the
1011          * cyclic reprogram call is safe.
1012          */
1013         lbolt_softint_add();
1014 
1015         /*
1016          * Since the hybrid lbolt implementation is based on a hardware counter
1017          * that is reset at every hardware reboot, and we'd like to have
1018          * the lbolt value starting at zero after both a hardware and a fast
1019          * reboot, we calculate the number of clock ticks the system's been up
1020          * and store it in the lbi_debug_time field of the lbolt info structure.
1021          * The value of this field will be subtracted from lbolt before
1022          * returning it.
1023          */
1024         lb_info->lbi_internal = lb_info->lbi_debug_time =
1025             (gethrtime()/nsec_per_tick);
1026 
1027         /*
1028          * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
1029          * and lbolt_debug_{enter,return} use this value as an indication that
1030          * the initialization above hasn't been completed. Setting lbolt_hybrid
1031          * to either lbolt_{cyclic,event}_driven here signals those code paths
1032          * that the lbolt related structures can be used.
1033          */
1034         if (lbolt_cyc_only) {
1035                 lbolt_when.cyt_when = 0;
1036                 lbolt_hybrid = lbolt_cyclic_driven;
1037         } else {
1038                 lbolt_when.cyt_when = CY_INFINITY;
1039                 lbolt_hybrid = lbolt_event_driven;
1040         }
1041 
1042         /*
1043          * Grab cpu_lock and install all three cyclics.
1044          */
1045         mutex_enter(&cpu_lock);
1046 
1047         clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
1048         ddi_timer_cyclic = cyclic_add(&timer_hdlr, &clk_when);
1049         lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);
1050 
1051         mutex_exit(&cpu_lock);
1052 }
1053 
1054 /*
1055  * Called before calcloadavg to compute the 10-second moving loadavg.
1056  */
1057 
1058 static int
1059 genloadavg(struct loadavg_s *avgs)
1060 {
1061         int avg;
1062         int spos; /* starting position */
1063         int cpos; /* moving current position */
1064         int i;
1065         int slen;
1066         hrtime_t hr_avg;
1067 
1068         /* 10-second snapshot, calculate first position */
1069         if (avgs->lg_len == 0) {
1070                 return (0);
1071         }
1072         slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;
1073 
1074         spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
1075             S_LOADAVG_SZ + (avgs->lg_cur - 1);
1076         for (i = hr_avg = 0; i < slen; i++) {
1077                 cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
1078                 hr_avg += avgs->lg_loads[cpos];
1079         }
1080 
1081         hr_avg = hr_avg / slen;
1082         avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
1083 
1084         return (avg);
1085 }
1086 
1087 /*
1088  * Run every second from clock() to update the loadavg count available to the
1089  * system and cpu-partitions.
1090  *
1091  * This works by sampling each cpu's cumulative usr, sys and wait times,
1092  * computing the delta against the previous sample, and adding that delta
1093  * to the running system-wide and per-partition load totals.
1094  */
1095 
1096 static void
1097 loadavg_update(void)
1098 {
1099         cpu_t *cp;
1100         cpupart_t *cpupart;
1101         hrtime_t cpu_total;
1102         int prev;
1103 
1104         cp = cpu_list;
1105         loadavg.lg_total = 0;
1106 
1107         /*
1108          * first pass totals up per-cpu statistics for system and cpu
1109          * partitions
1110          */
1111 
1112         do {
1113                 struct loadavg_s *lavg;
1114 
1115                 lavg = &cp->cpu_loadavg;
1116 
1117                 cpu_total = cp->cpu_acct[CMS_USER] +
1118                     cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
1119                 /* compute delta against last total */
1120                 scalehrtime(&cpu_total);
1121                 prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
1122                     S_LOADAVG_SZ + (lavg->lg_cur - 1);
1123                 if (lavg->lg_loads[prev] <= 0) {
1124                         lavg->lg_loads[lavg->lg_cur] = cpu_total;
1125                         cpu_total = 0;
1126                 } else {
1127                         lavg->lg_loads[lavg->lg_cur] = cpu_total;
1128                         cpu_total = cpu_total - lavg->lg_loads[prev];
1129                         if (cpu_total < 0)
1130                                 cpu_total = 0;
1131                 }
1132 
1133                 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1134                 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1135                     lavg->lg_len + 1 : S_LOADAVG_SZ;
1136 
1137                 loadavg.lg_total += cpu_total;
1138                 cp->cpu_part->cp_loadavg.lg_total += cpu_total;
1139 
1140         } while ((cp = cp->cpu_next) != cpu_list);
1141 
1142         loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
1143         loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
1144         loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
1145             loadavg.lg_len + 1 : S_LOADAVG_SZ;
1146         /*
1147          * Second pass updates counts
1148          */
1149         cpupart = cp_list_head;
1150 
1151         do {
1152                 struct loadavg_s *lavg;
1153 
1154                 lavg = &cpupart->cp_loadavg;
1155                 lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
1156                 lavg->lg_total = 0;
1157                 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1158                 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1159                     lavg->lg_len + 1 : S_LOADAVG_SZ;
1160 
1161         } while ((cpupart = cpupart->cp_next) != cp_list_head);
1162 
1163         /*
1164          * Third pass totals up per-zone statistics.
1165          */
1166         zone_loadavg_update();
1167 }
1168 
1169 /*
1170  * clock_update() - local clock update
1171  *
1172  * This routine is called by ntp_adjtime() to update the local clock
1173  * phase and frequency. The implementation is of an
1174  * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1175  * routine computes new time and frequency offset estimates for each
1176  * call.  If STA_PPSTIME is set and the PPS signal is valid, the PPS
1177  * signal itself determines the new time offset, instead of the
1178  * calling argument.  Presumably, calls to ntp_adjtime() occur only
1179  * when the caller believes the local clock is valid within some bound
1180  * (+-128 ms with NTP). If the caller's time is far different from the
1181  * PPS time, an argument will ensue, and it's not clear who will lose.
1182  *
1183  * For uncompensated quartz crystal oscillators and nominal update
1184  * intervals less than 1024 s, operation should be in phase-lock mode
1185  * (STA_FLL = 0), where the loop is disciplined to phase. For update
1186  * intervals greater than this, operation should be in frequency-lock
1187  * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1188  *
1189  * Note: mutex(&tod_lock) is in effect.
1190  */
1191 void
1192 clock_update(int offset)
1193 {
1194         int ltemp, mtemp, s;
1195 
1196         ASSERT(MUTEX_HELD(&tod_lock));
1197 
1198         if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1199                 return;
1200         ltemp = offset;
1201         if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
1202                 ltemp = pps_offset;
1203 
1204         /*
1205          * Scale the phase adjustment and clamp to the operating range.
1206          */
1207         if (ltemp > MAXPHASE)
1208                 time_offset = MAXPHASE * SCALE_UPDATE;
1209         else if (ltemp < -MAXPHASE)
1210                 time_offset = -(MAXPHASE * SCALE_UPDATE);
1211         else
1212                 time_offset = ltemp * SCALE_UPDATE;
1213 
1214         /*
1215          * Select whether the frequency is to be controlled and in which
1216          * mode (PLL or FLL). Clamp to the operating range. Ugly
1217          * multiply/divide should be replaced someday.
1218          */
1219         if (time_status & STA_FREQHOLD || time_reftime == 0)
1220                 time_reftime = hrestime.tv_sec;
1221 
1222         mtemp = hrestime.tv_sec - time_reftime;
1223         time_reftime = hrestime.tv_sec;
1224 
1225         if (time_status & STA_FLL) {
1226                 if (mtemp >= MINSEC) {
1227                         ltemp = ((time_offset / mtemp) * (SCALE_USEC /
1228                             SCALE_UPDATE));
1229                         if (ltemp)
1230                                 time_freq += ltemp / SCALE_KH;
1231                 }
1232         } else {
1233                 if (mtemp < MAXSEC) {
1234                         ltemp *= mtemp;
1235                         if (ltemp)
1236                                 time_freq += (int)(((int64_t)ltemp *
1237                                     SCALE_USEC) / SCALE_KF)
1238                                     / (1 << (time_constant * 2));
1239                 }
1240         }
1241         if (time_freq > time_tolerance)
1242                 time_freq = time_tolerance;
1243         else if (time_freq < -time_tolerance)
1244                 time_freq = -time_tolerance;
1245 
1246         s = hr_clock_lock();
1247         tod_needsync = 1;
1248         hr_clock_unlock(s);
1249 }
1250 
1251 /*
1252  * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1253  *
1254  * This routine is called at each PPS interrupt in order to discipline
1255  * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1256  * and leaves it in a handy spot for the clock() routine. It
1257  * integrates successive PPS phase differences and calculates the
1258  * frequency offset. This is used in clock() to discipline the CPU
1259  * clock oscillator so that intrinsic frequency error is cancelled out.
1260  * The code requires the caller to capture the time and hardware counter
1261  * value at the on-time PPS signal transition.
1262  *
1263  * Note that, on some Unix systems, this routine runs at an interrupt
1264  * priority level higher than the timer interrupt routine clock().
1265  * Therefore, the variables used are distinct from the clock()
1266  * variables, except for certain exceptions: The PPS frequency pps_freq
1267  * and phase pps_offset variables are determined by this routine and
1268  * updated atomically. The time_tolerance variable can be considered a
1269  * constant, since it is infrequently changed, and then only when the
1270  * PPS signal is disabled. The watchdog counter pps_valid is updated
1271  * once per second by clock() and is atomically cleared in this
1272  * routine.
1273  *
1274  * tvp is the time of the last tick; usec is a microsecond count since the
1275  * last tick.
1276  *
1277  * Note: In Solaris systems, the tick value is actually given by
1278  *       usec_per_tick.  This is called from the serial driver cdintr(),
1279  *       or equivalent, at a high PIL.  Because the kernel keeps a
1280  *       high-resolution time, the following code can accept either
1281  *       the traditional argument pair, or the current highres timestamp
1282  *       in tvp and zero in usec.
1283  */
1284 void
1285 ddi_hardpps(struct timeval *tvp, int usec)
1286 {
1287         int u_usec, v_usec, bigtick;
1288         time_t cal_sec;
1289         int cal_usec;
1290 
1291         /*
1292          * An occasional glitch can be produced when the PPS interrupt
1293          * occurs in the clock() routine before the time variable is
1294          * updated. Here the offset is discarded when the difference
1295          * between it and the last one is greater than tick/2, but not
1296          * if the interval since the first discard exceeds 30 s.
1297          */
1298         time_status |= STA_PPSSIGNAL;
1299         time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1300         pps_valid = 0;
1301         u_usec = -tvp->tv_usec;
1302         if (u_usec < -(MICROSEC/2))
1303                 u_usec += MICROSEC;
1304         v_usec = pps_offset - u_usec;
1305         if (v_usec < 0)
1306                 v_usec = -v_usec;
1307         if (v_usec > (usec_per_tick >> 1)) {
1308                 if (pps_glitch > MAXGLITCH) {
1309                         pps_glitch = 0;
1310                         pps_tf[2] = u_usec;
1311                         pps_tf[1] = u_usec;
1312                 } else {
1313                         pps_glitch++;
1314                         u_usec = pps_offset;
1315                 }
1316         } else
1317                 pps_glitch = 0;
1318 
1319         /*
1320          * A three-stage median filter is used to help deglitch the pps
1321          * time. The median sample becomes the time offset estimate; the
1322          * difference between the other two samples becomes the time
1323          * dispersion (jitter) estimate.
1324          */
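              /*
               * For example (illustrative numbers only): with the three most
               * recent samples pps_tf[0] == 7, pps_tf[1] == 3 and
               * pps_tf[2] == 5, the code below picks the median, 5, as
               * pps_offset and uses the span of the other two samples,
               * 7 - 3 = 4, as the jitter estimate v_usec.
               */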
1325         pps_tf[2] = pps_tf[1];
1326         pps_tf[1] = pps_tf[0];
1327         pps_tf[0] = u_usec;
1328         if (pps_tf[0] > pps_tf[1]) {
1329                 if (pps_tf[1] > pps_tf[2]) {
1330                         pps_offset = pps_tf[1];         /* 0 1 2 */
1331                         v_usec = pps_tf[0] - pps_tf[2];
1332                 } else if (pps_tf[2] > pps_tf[0]) {
1333                         pps_offset = pps_tf[0];         /* 2 0 1 */
1334                         v_usec = pps_tf[2] - pps_tf[1];
1335                 } else {
1336                         pps_offset = pps_tf[2];         /* 0 2 1 */
1337                         v_usec = pps_tf[0] - pps_tf[1];
1338                 }
1339         } else {
1340                 if (pps_tf[1] < pps_tf[2]) {
1341                         pps_offset = pps_tf[1];         /* 2 1 0 */
1342                         v_usec = pps_tf[2] - pps_tf[0];
1343                 } else  if (pps_tf[2] < pps_tf[0]) {
1344                         pps_offset = pps_tf[0];         /* 1 0 2 */
1345                         v_usec = pps_tf[1] - pps_tf[2];
1346                 } else {
1347                         pps_offset = pps_tf[2];         /* 1 2 0 */
1348                         v_usec = pps_tf[1] - pps_tf[0];
1349                 }
1350         }
1351         if (v_usec > MAXTIME)
1352                 pps_jitcnt++;
1353         v_usec = (v_usec << PPS_AVG) - pps_jitter;
1354         pps_jitter += v_usec / (1 << PPS_AVG);
1355         if (pps_jitter > (MAXTIME >> 1))
1356                 time_status |= STA_PPSJITTER;
1357 
1358         /*
1359          * During the calibration interval adjust the starting time when
1360          * the tick overflows. At the end of the interval compute the
1361          * duration of the interval and the difference of the hardware
1362          * counters at the beginning and end of the interval. This code
1363          * is deliciously complicated by the fact that valid differences may
1364          * exceed the value of tick when using long calibration
1365          * intervals and small ticks. Note that the counter can be
1366          * greater than tick if caught at just the wrong instant, but
1367          * the values returned and used here are correct.
1368          */
1369         bigtick = (int)usec_per_tick * SCALE_USEC;
1370         pps_usec -= pps_freq;
1371         if (pps_usec >= bigtick)
1372                 pps_usec -= bigtick;
1373         if (pps_usec < 0)
1374                 pps_usec += bigtick;
1375         pps_time.tv_sec++;
1376         pps_count++;
1377         if (pps_count < (1 << pps_shift))
1378                 return;
1379         pps_count = 0;
1380         pps_calcnt++;
1381         u_usec = usec * SCALE_USEC;
1382         v_usec = pps_usec - u_usec;
1383         if (v_usec >= bigtick >> 1)
1384                 v_usec -= bigtick;
1385         if (v_usec < -(bigtick >> 1))
1386                 v_usec += bigtick;
1387         if (v_usec < 0)
1388                 v_usec = -(-v_usec >> pps_shift);
1389         else
1390                 v_usec = v_usec >> pps_shift;
1391         pps_usec = u_usec;
1392         cal_sec = tvp->tv_sec;
1393         cal_usec = tvp->tv_usec;
1394         cal_sec -= pps_time.tv_sec;
1395         cal_usec -= pps_time.tv_usec;
1396         if (cal_usec < 0) {
1397                 cal_usec += MICROSEC;
1398                 cal_sec--;
1399         }
1400         pps_time = *tvp;
1401 
1402         /*
1403          * Check for lost interrupts, noise, excessive jitter and
1404          * excessive frequency error. The number of timer ticks during
1405          * the interval may vary +-1 tick. Add to this a margin of one
1406          * tick for the PPS signal jitter and maximum frequency
1407          * deviation. If the limits are exceeded, the calibration
1408          * interval is reset to the minimum and we start over.
1409          */
1410         u_usec = (int)usec_per_tick << 1;
1411         if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
1412             (cal_sec == 0 && cal_usec < u_usec)) ||
1413             v_usec > time_tolerance || v_usec < -time_tolerance) {
1414                 pps_errcnt++;
1415                 pps_shift = PPS_SHIFT;
1416                 pps_intcnt = 0;
1417                 time_status |= STA_PPSERROR;
1418                 return;
1419         }
1420 
1421         /*
1422          * A three-stage median filter is used to help deglitch the pps
1423          * frequency. The median sample becomes the frequency offset
1424          * estimate; the difference between the other two samples
1425          * becomes the frequency dispersion (stability) estimate.
1426          */
1427         pps_ff[2] = pps_ff[1];
1428         pps_ff[1] = pps_ff[0];
1429         pps_ff[0] = v_usec;
1430         if (pps_ff[0] > pps_ff[1]) {
1431                 if (pps_ff[1] > pps_ff[2]) {
1432                         u_usec = pps_ff[1];             /* 0 1 2 */
1433                         v_usec = pps_ff[0] - pps_ff[2];
1434                 } else if (pps_ff[2] > pps_ff[0]) {
1435                         u_usec = pps_ff[0];             /* 2 0 1 */
1436                         v_usec = pps_ff[2] - pps_ff[1];
1437                 } else {
1438                         u_usec = pps_ff[2];             /* 0 2 1 */
1439                         v_usec = pps_ff[0] - pps_ff[1];
1440                 }
1441         } else {
1442                 if (pps_ff[1] < pps_ff[2]) {
1443                         u_usec = pps_ff[1];             /* 2 1 0 */
1444                         v_usec = pps_ff[2] - pps_ff[0];
1445                 } else  if (pps_ff[2] < pps_ff[0]) {
1446                         u_usec = pps_ff[0];             /* 1 0 2 */
1447                         v_usec = pps_ff[1] - pps_ff[2];
1448                 } else {
1449                         u_usec = pps_ff[2];             /* 1 2 0 */
1450                         v_usec = pps_ff[1] - pps_ff[0];
1451                 }
1452         }
1453 
1454         /*
1455          * Here the frequency dispersion (stability) is updated. If it
1456          * is less than one-fourth the maximum (MAXFREQ), the frequency
1457          * offset is updated as well, but clamped to the tolerance. It
1458          * will be processed later by the clock() routine.
1459          */
1460         v_usec = (v_usec >> 1) - pps_stabil;
1461         if (v_usec < 0)
1462                 pps_stabil -= -v_usec >> PPS_AVG;
1463         else
1464                 pps_stabil += v_usec >> PPS_AVG;
1465         if (pps_stabil > MAXFREQ >> 2) {
1466                 pps_stbcnt++;
1467                 time_status |= STA_PPSWANDER;
1468                 return;
1469         }
1470         if (time_status & STA_PPSFREQ) {
1471                 if (u_usec < 0) {
1472                         pps_freq -= -u_usec >> PPS_AVG;
1473                         if (pps_freq < -time_tolerance)
1474                                 pps_freq = -time_tolerance;
1475                         u_usec = -u_usec;
1476                 } else {
1477                         pps_freq += u_usec >> PPS_AVG;
1478                         if (pps_freq > time_tolerance)
1479                                 pps_freq = time_tolerance;
1480                 }
1481         }
1482 
1483         /*
1484          * Here the calibration interval is adjusted. If the maximum
1485          * time difference is greater than tick / 4, reduce the interval
1486          * by half. If this is not the case for four consecutive
1487          * intervals, double the interval.
1488          */
1489         if (u_usec << pps_shift > bigtick >> 2) {
1490                 pps_intcnt = 0;
1491                 if (pps_shift > PPS_SHIFT)
1492                         pps_shift--;
1493         } else if (pps_intcnt >= 4) {
1494                 pps_intcnt = 0;
1495                 if (pps_shift < PPS_SHIFTMAX)
1496                         pps_shift++;
1497         } else
1498                 pps_intcnt++;
1499 
1500         /*
1501          * If recovering from kmdb, then make sure the tod chip gets resynced.
1502          * If we took an early exit above, then we don't yet have a stable
1503          * calibration signal to lock onto, so don't mark the tod for sync
1504          * until we get all the way here.
1505          */
1506         {
1507                 int s = hr_clock_lock();
1508 
1509                 tod_needsync = 1;
1510                 hr_clock_unlock(s);
1511         }
1512 }
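     /*
      * An illustrative sketch of a caller of ddi_hardpps() above (the driver
      * code here is not from this file): a serial driver that captures the
      * on-time PPS edge with the kernel's highres clock can pass that
      * timestamp directly and zero in usec, e.g.
      *
      *         timestruc_t ts;
      *         struct timeval tv;
      *
      *         gethrestime(&ts);
      *         tv.tv_sec = ts.tv_sec;
      *         tv.tv_usec = ts.tv_nsec / 1000;
      *         ddi_hardpps(&tv, 0);
      *
      * as described in the block comment preceding the function.
      */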
1513 
1514 /*
1515  * Handle clock tick processing for a thread.
1516  * Check for timer action, enforce CPU rlimit, do profiling etc.
1517  */
1518 void
1519 clock_tick(kthread_t *t, int pending)
1520 {
1521         struct proc *pp;
1522         klwp_id_t    lwp;
1523         struct as *as;
1524         clock_t ticks;
1525         int     poke = 0;               /* notify another CPU */
1526         int     user_mode;
1527         size_t   rss;
1528         int i, total_usec, usec;
1529         rctl_qty_t secs;
1530 
1531         ASSERT(pending > 0);
1532 
1533         /* Must be operating on a lwp/thread */
1534         if ((lwp = ttolwp(t)) == NULL) {
1535                 panic("clock_tick: no lwp");
1536                 /*NOTREACHED*/
1537         }
1538 
1539         for (i = 0; i < pending; i++) {
1540                 CL_TICK(t);     /* Class specific tick processing */
1541                 DTRACE_SCHED1(tick, kthread_t *, t);
1542         }
1543 
1544         pp = ttoproc(t);
1545 
1546         /* pp->p_lock makes sure that the thread does not exit */
1547         ASSERT(MUTEX_HELD(&pp->p_lock));
1548 
1549         user_mode = (lwp->lwp_state == LWP_USER);
1550 
1551         ticks = (pp->p_utime + pp->p_stime) % hz;
1552         /*
1553          * Update process times. Should use high res clock and state
1554          * changes instead of statistical sampling method. XXX
1555          */
1556         if (user_mode) {
1557                 pp->p_utime += pending;
1558         } else {
1559                 pp->p_stime += pending;
1560         }
1561 
1562         pp->p_ttime += pending;
1563         as = pp->p_as;
1564 
1565         /*
1566          * Update user profiling statistics. Get the pc from the
1567          * lwp when the AST happens.
1568          */
1569         if (pp->p_prof.pr_scale) {
1570                 atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
1571                 if (user_mode) {
1572                         poke = 1;
1573                         aston(t);
1574                 }
1575         }
1576 
1577         /*
1578          * If CPU was in user state, process lwp-virtual time
1579          * interval timer. The value passed to itimerdecr() has to be
1580          * in microseconds and has to be less than one second. Hence
1581          * this loop.
1582          */
1583         total_usec = usec_per_tick * pending;
1584         while (total_usec > 0) {
1585                 usec = MIN(total_usec, (MICROSEC - 1));
1586                 if (user_mode &&
1587                     timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
1588                     itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
1589                         poke = 1;
1590                         sigtoproc(pp, t, SIGVTALRM);
1591                 }
1592                 total_usec -= usec;
1593         }
1594 
1595         /*
1596          * Process the lwp-profile interval timer.  Unlike the lwp-virtual
1597          * timer above, this one runs in both user and system time.
1598          */
1599         total_usec = usec_per_tick * pending;
1600         while (total_usec > 0) {
1601                 usec = MIN(total_usec, (MICROSEC - 1));
1602                 if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
1603                     itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
1604                         poke = 1;
1605                         sigtoproc(pp, t, SIGPROF);
1606                 }
1607                 total_usec -= usec;
1608         }
1609 
1610         /*
1611          * Enforce CPU resource controls:
1612          *   (a) process.max-cpu-time resource control
1613          *
1614          * Perform the check only if we have accumulated more than a second.
1615          */
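              /*
               * For example (illustrative values): with hz == 100, a prior
               * remainder of 98 ticks and pending == 3, the sum crosses the
               * one-second boundary and the resource control is re-tested
               * below.
               */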
1616         if ((ticks + pending) >= hz) {
1617                 (void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
1618                     (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
1619         }
1620 
1621         /*
1622          *   (b) task.max-cpu-time resource control
1623          *
1624          * If we have accumulated enough ticks, increment the task CPU
1625          * time usage and test for the resource limit. This minimizes the
1626          * number of calls to rctl_test(). The task CPU time mutex
1627          * is highly contended as many processes can be sharing a task.
1628          */
1629         if (pp->p_ttime >= clock_tick_proc_max) {
1630                 secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
1631                 pp->p_ttime = 0;
1632                 if (secs) {
1633                         (void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
1634                             pp, secs, RCA_UNSAFE_SIGINFO);
1635                 }
1636         }
1637 
1638         /*
1639          * Update memory usage for the currently running process.
1640          */
1641         rss = rm_asrss(as);
1642         PTOU(pp)->u_mem += rss;
1643         if (rss > PTOU(pp)->u_mem_max)
1644                 PTOU(pp)->u_mem_max = rss;
1645 
1646         /*
1647          * Notify the CPU the thread is running on.
1648          */
1649         if (poke && t->t_cpu != CPU)
1650                 poke_cpu(t->t_cpu->cpu_id);
1651 }
1652 
1653 void
1654 profil_tick(uintptr_t upc)
1655 {
1656         int ticks;
1657         proc_t *p = ttoproc(curthread);
1658         klwp_t *lwp = ttolwp(curthread);
1659         struct prof *pr = &p->p_prof;
1660 
1661         do {
1662                 ticks = lwp->lwp_oweupc;
1663         } while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
1664 
1665         mutex_enter(&p->p_pflock);
1666         if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
1667                 /*
1668                  * Old-style profiling
1669                  */
1670                 uint16_t *slot = pr->pr_base;
1671                 uint16_t old, new;
1672                 if (pr->pr_scale != 2) {
1673                         uintptr_t delta = upc - pr->pr_off;
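                             /*
                              * The split multiply below computes
                              * (delta * pr->pr_scale) >> 16 without forming
                              * the full product, so the intermediate values
                              * cannot overflow.
                              */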
1674                         uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
1675                             (((delta & 0xffff) * pr->pr_scale) >> 16);
1676                         if (byteoff >= (uintptr_t)pr->pr_size) {
1677                                 mutex_exit(&p->p_pflock);
1678                                 return;
1679                         }
1680                         slot += byteoff / sizeof (uint16_t);
1681                 }
1682                 if (fuword16(slot, &old) < 0 ||
1683                     (new = old + ticks) > SHRT_MAX ||
1684                     suword16(slot, new) < 0) {
1685                         pr->pr_scale = 0;
1686                 }
1687         } else if (pr->pr_scale == 1) {
1688                 /*
1689                  * PC Sampling
1690                  */
1691                 model_t model = lwp_getdatamodel(lwp);
1692                 int result;
1693 #ifdef __lint
1694                 model = model;
1695 #endif
1696                 while (ticks-- > 0) {
1697                         if (pr->pr_samples == pr->pr_size) {
1698                                 /* buffer full, turn off sampling */
1699                                 pr->pr_scale = 0;
1700                                 break;
1701                         }
1702                         switch (SIZEOF_PTR(model)) {
1703                         case sizeof (uint32_t):
1704                                 result = suword32(pr->pr_base, (uint32_t)upc);
1705                                 break;
1706 #ifdef _LP64
1707                         case sizeof (uint64_t):
1708                                 result = suword64(pr->pr_base, (uint64_t)upc);
1709                                 break;
1710 #endif
1711                         default:
1712                                 cmn_err(CE_WARN, "profil_tick: unexpected "
1713                                     "data model");
1714                                 result = -1;
1715                                 break;
1716                         }
1717                         if (result != 0) {
1718                                 pr->pr_scale = 0;
1719                                 break;
1720                         }
1721                         pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
1722                         pr->pr_samples++;
1723                 }
1724         }
1725         mutex_exit(&p->p_pflock);
1726 }
1727 
1728 static void
1729 delay_wakeup(void *arg)
1730 {
1731         kthread_t       *t = arg;
1732 
1733         mutex_enter(&t->t_delay_lock);
1734         cv_signal(&t->t_delay_cv);
1735         mutex_exit(&t->t_delay_lock);
1736 }
1737 
1738 /*
1739  * The delay(9F) man page indicates that it can only be called from user or
1740  * kernel context - detect and diagnose bad calls. The following macro will
1741  * produce a limited number of messages identifying bad callers.  This is done
1742  * in a macro so that caller() is meaningful. When a bad caller is identified,
1743  * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
1744  */
1745 #define DELAY_CONTEXT_CHECK()   {                                       \
1746         uint32_t        m;                                              \
1747         char            *f;                                             \
1748         ulong_t         off;                                            \
1749                                                                         \
1750         m = delay_from_interrupt_msg;                                   \
1751         if (delay_from_interrupt_diagnose && servicing_interrupt() &&   \
1752             !panicstr && !devinfo_freeze &&                             \
1753             atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) {     \
1754                 f = modgetsymname((uintptr_t)caller(), &off);               \
1755                 cmn_err(CE_WARN, "delay(9F) called from "               \
1756                     "interrupt context: %s`%s",                         \
1757                     mod_containing_pc(caller()), f ? f : "...");        \
1758         }                                                               \
1759 }
1760 
1761 /*
1762  * delay_common: common delay code.
1763  */
1764 static void
1765 delay_common(clock_t ticks)
1766 {
1767         kthread_t       *t = curthread;
1768         clock_t         deadline;
1769         clock_t         timeleft;
1770         callout_id_t    id;
1771 
1772         /* If timeouts aren't running all we can do is spin. */
1773         if (panicstr || devinfo_freeze) {
1774                 /* Convert delay(9F) call into drv_usecwait(9F) call. */
1775                 if (ticks > 0)
1776                         drv_usecwait(TICK_TO_USEC(ticks));
1777                 return;
1778         }
1779 
1780         deadline = ddi_get_lbolt() + ticks;
1781         while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
1782                 mutex_enter(&t->t_delay_lock);
1783                 id = timeout_default(delay_wakeup, t, timeleft);
1784                 cv_wait(&t->t_delay_cv, &t->t_delay_lock);
1785                 mutex_exit(&t->t_delay_lock);
1786                 (void) untimeout_default(id, 0);
1787         }
1788 }
1789 
1790 /*
1791  * Delay specified number of clock ticks.
1792  */
1793 void
1794 delay(clock_t ticks)
1795 {
1796         DELAY_CONTEXT_CHECK();
1797 
1798         delay_common(ticks);
1799 }
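     /*
      * An illustrative usage sketch (the caller is not from this file): a
      * driver that needs to pause for roughly 100ms in user or kernel
      * context would typically convert from microseconds with
      * drv_usectohz(9F), e.g.
      *
      *         delay(drv_usectohz(100000));
      *
      * From interrupt context, the busy-wait form noted above,
      * drv_usecwait(TICK_TO_USEC(ticks)), is the appropriate substitute.
      */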
1800 
1801 /*
1802  * Delay a random number of clock ticks between 1 and ticks.
1803  */
1804 void
1805 delay_random(clock_t ticks)
1806 {
1807         int     r;
1808 
1809         DELAY_CONTEXT_CHECK();
1810 
1811         (void) random_get_pseudo_bytes((void *)&r, sizeof (r));
1812         if (ticks == 0)
1813                 ticks = 1;
1814         ticks = (r % ticks) + 1;
1815         delay_common(ticks);
1816 }
1817 
1818 /*
1819  * Like delay, but interruptible by a signal.
1820  */
1821 int
1822 delay_sig(clock_t ticks)
1823 {
1824         kthread_t       *t = curthread;
1825         clock_t         deadline;
1826         clock_t         rc;
1827 
1828         /* If timeouts aren't running all we can do is spin. */
1829         if (panicstr || devinfo_freeze) {
1830                 if (ticks > 0)
1831                         drv_usecwait(TICK_TO_USEC(ticks));
1832                 return (0);
1833         }
1834 
1835         deadline = ddi_get_lbolt() + ticks;
1836         mutex_enter(&t->t_delay_lock);
1837         do {
1838                 rc = cv_timedwait_sig(&t->t_delay_cv,
1839                     &t->t_delay_lock, deadline);
1840                 /* loop until past deadline or signaled */
1841         } while (rc > 0);
1842         mutex_exit(&t->t_delay_lock);
1843         if (rc == 0)
1844                 return (EINTR);
1845         return (0);
1846 }
1847 
1848 
1849 #define SECONDS_PER_DAY 86400
1850 
1851 /*
1852  * Initialize the system time based on the TOD chip.  approx is used as
1853  * an approximation of time (e.g. from the filesystem) in the event that
1854  * the TOD chip has been cleared or is unresponsive.  An approx of -1
1855  * means the filesystem doesn't keep time.
1856  */
1857 void
1858 clkset(time_t approx)
1859 {
1860         timestruc_t ts;
1861         int spl;
1862         int set_clock = 0;
1863 
1864         mutex_enter(&tod_lock);
1865         ts = tod_get();
1866 
1867         if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
1868                 /*
1869                  * If the TOD chip is reporting some time after 1971,
1870                  * then it probably didn't lose power or become otherwise
1871                  * cleared in the recent past;  check to assure that
1872                  * the time coming from the filesystem isn't in the future
1873                  * according to the TOD chip.
1874                  */
1875                 if (approx != -1 && approx > ts.tv_sec) {
1876                         cmn_err(CE_WARN, "Last shutdown is later "
1877                             "than time on time-of-day chip; check date.");
1878                 }
1879         } else {
1880                 /*
1881                  * If the TOD chip isn't giving correct time, set it to the
1882                  * greater of i) approx and ii) 1987. That way if approx
1883                  * is negative or is earlier than 1987, we set the clock
1884                  * back to a time when Oliver North, ALF and Dire Straits
1885                  * were all on the collective brain:  1987.
1886                  */
1887                 timestruc_t tmp;
1888                 time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
1889                 ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
1890                 ts.tv_nsec = 0;
1891 
1892                 /*
1893                  * Attempt to write the new time to the TOD chip.  Set spl high
1894                  * to avoid getting preempted between the tod_set and tod_get.
1895                  */
1896                 spl = splhi();
1897                 tod_set(ts);
1898                 tmp = tod_get();
1899                 splx(spl);
1900 
1901                 if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
1902                         tod_broken = 1;
1903                         dosynctodr = 0;
1904                         cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
1905                 } else {
1906                         cmn_err(CE_WARN, "Time-of-day chip had "
1907                             "incorrect date; check and reset.");
1908                 }
1909                 set_clock = 1;
1910         }
1911 
1912         if (!boot_time) {
1913                 boot_time = ts.tv_sec;
1914                 set_clock = 1;
1915         }
1916 
1917         if (set_clock)
1918                 set_hrestime(&ts);
1919 
1920         mutex_exit(&tod_lock);
1921 }
1922 
1923 int     timechanged;    /* for testing if the system time has been reset */
1924 
1925 void
1926 set_hrestime(timestruc_t *ts)
1927 {
1928         int spl = hr_clock_lock();
1929         hrestime = *ts;
1930         membar_enter(); /* hrestime must be visible before timechanged++ */
1931         timedelta = 0;
1932         timechanged++;
1933         hr_clock_unlock(spl);
1934         callout_hrestime();
1935 }
1936 
1937 static uint_t deadman_seconds;
1938 static uint32_t deadman_panics;
1939 static int deadman_enabled = 0;
1940 static int deadman_panic_timers = 1;
1941 
1942 static void
1943 deadman(void)
1944 {
1945         if (panicstr) {
1946                 /*
1947                  * During panic, other CPUs besides the panic
1948                  * master continue to handle cyclics and some other
1949                  * interrupts.  The code below is intended to be
1950                  * single threaded, so any CPU other than the master
1951                  * must keep out.
1952                  */
1953                 if (CPU->cpu_id != panic_cpu.cpu_id)
1954                         return;
1955 
1956                 if (!deadman_panic_timers)
1957                         return; /* allow all timers to be manually disabled */
1958 
1959                 /*
1960                  * If we are generating a crash dump or syncing filesystems and
1961                  * the corresponding timer is set, decrement it and re-enter
1962                  * the panic code to abort it and advance to the next state.
1963                  * The panic states and triggers are explained in panic.c.
1964                  */
1965                 if (panic_dump) {
1966                         if (dump_timeleft && (--dump_timeleft == 0)) {
1967                                 panic("panic dump timeout");
1968                                 /*NOTREACHED*/
1969                         }
1970                 } else if (panic_sync) {
1971                         if (sync_timeleft && (--sync_timeleft == 0)) {
1972                                 panic("panic sync timeout");
1973                                 /*NOTREACHED*/
1974                         }
1975                 }
1976 
1977                 return;
1978         }
1979 
1980         if (deadman_counter != CPU->cpu_deadman_counter) {
1981                 CPU->cpu_deadman_counter = deadman_counter;
1982                 CPU->cpu_deadman_countdown = deadman_seconds;
1983                 return;
1984         }
1985 
1986         if (--CPU->cpu_deadman_countdown > 0)
1987                 return;
1988 
1989         /*
1990          * Regardless of whether or not we actually bring the system down,
1991          * bump the deadman_panics variable.
1992          *
1993          * N.B. deadman_panics is incremented once for each CPU that
1994          * passes through here.  It's expected that all the CPUs will
1995          * detect this condition within one second of each other, so
1996          * when deadman_enabled is off, deadman_panics will
1997          * typically be a multiple of the total number of CPUs in
1998          * the system.
1999          */
2000         atomic_add_32(&deadman_panics, 1);
2001 
2002         if (!deadman_enabled) {
2003                 CPU->cpu_deadman_countdown = deadman_seconds;
2004                 return;
2005         }
2006 
2007         /*
2008          * If we're here, we want to bring the system down.
2009          */
2010         panic("deadman: timed out after %d seconds of clock "
2011             "inactivity", deadman_seconds);
2012         /*NOTREACHED*/
2013 }
2014 
2015 /*ARGSUSED*/
2016 static void
2017 deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
2018 {
2019         cpu->cpu_deadman_counter = 0;
2020         cpu->cpu_deadman_countdown = deadman_seconds;
2021 
2022         hdlr->cyh_func = (cyc_func_t)deadman;
2023         hdlr->cyh_level = CY_HIGH_LEVEL;
2024         hdlr->cyh_arg = NULL;
2025 
2026         /*
2027          * Stagger the CPUs so that they don't all run deadman() at
2028          * the same time.  Simplest reason to do this is to make it
2029          * more likely that only one CPU will panic in case of a
2030          * timeout.  This is (strictly speaking) an aesthetic, not a
2031          * technical consideration.
2032          */
2033         when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
2034         when->cyt_interval = NANOSEC;
2035 }
2036 
2037 
2038 void
2039 deadman_init(void)
2040 {
2041         cyc_omni_handler_t hdlr;
2042 
2043         if (deadman_seconds == 0)
2044                 deadman_seconds = snoop_interval / MICROSEC;
2045 
2046         if (snooping)
2047                 deadman_enabled = 1;
2048 
2049         hdlr.cyo_online = deadman_online;
2050         hdlr.cyo_offline = NULL;
2051         hdlr.cyo_arg = NULL;
2052 
2053         mutex_enter(&cpu_lock);
2054         deadman_cyclic = cyclic_add_omni(&hdlr);
2055         mutex_exit(&cpu_lock);
2056 }
2057 
2058 /*
2059  * tod_fault() is for updating tod validate mechanism state:
2060  * (1) TOD_NOFAULT: for resetting the state to 'normal'.
2061  *     currently used for debugging only
2062  * (2) The following four cases are detected by the tod validate mechanism:
2063  *       TOD_REVERSED: current tod value is less than previous value.
2064  *       TOD_STALLED: current tod value hasn't advanced.
2065  *       TOD_JUMPED: current tod value advanced too far from previous value.
2066  *       TOD_RATECHANGED: the ratio between average tod delta and
2067  *       average tick delta has changed.
2068  * (3) TOD_RDONLY: when the TOD clock is not writeable e.g. because it is
2069  *     a virtual TOD provided by a hypervisor.
2070  */
2071 enum tod_fault_type
2072 tod_fault(enum tod_fault_type ftype, int off)
2073 {
2074         ASSERT(MUTEX_HELD(&tod_lock));
2075 
2076         if (tod_faulted != ftype) {
2077                 switch (ftype) {
2078                 case TOD_NOFAULT:
2079                         plat_tod_fault(TOD_NOFAULT);
2080                         cmn_err(CE_NOTE, "Restarted tracking "
2081                             "Time of Day clock.");
2082                         tod_faulted = ftype;
2083                         break;
2084                 case TOD_REVERSED:
2085                 case TOD_JUMPED:
2086                         if (tod_faulted == TOD_NOFAULT) {
2087                                 plat_tod_fault(ftype);
2088                                 cmn_err(CE_WARN, "Time of Day clock error: "
2089                                     "reason [%s by 0x%x]. -- "
2090                                     " Stopped tracking Time Of Day clock.",
2091                                     tod_fault_table[ftype], off);
2092                                 tod_faulted = ftype;
2093                         }
2094                         break;
2095                 case TOD_STALLED:
2096                 case TOD_RATECHANGED:
2097                         if (tod_faulted == TOD_NOFAULT) {
2098                                 plat_tod_fault(ftype);
2099                                 cmn_err(CE_WARN, "Time of Day clock error: "
2100                                     "reason [%s]. -- "
2101                                     " Stopped tracking Time Of Day clock.",
2102                                     tod_fault_table[ftype]);
2103                                 tod_faulted = ftype;
2104                         }
2105                         break;
2106                 case TOD_RDONLY:
2107                         if (tod_faulted == TOD_NOFAULT) {
2108                                 plat_tod_fault(ftype);
2109                                 cmn_err(CE_NOTE, "!Time of Day clock is "
2110                                     "Read-Only; set of Date/Time will not "
2111                                     "persist across reboot.");
2112                                 tod_faulted = ftype;
2113                         }
2114                         break;
2115                 default:
2116                         break;
2117                 }
2118         }
2119         return (tod_faulted);
2120 }
2121 
2122 /*
2123  * Two functions that allow tod_status_flag to be manipulated by functions
2124  * external to this file.
2125  */
2126 
2127 void
2128 tod_status_set(int tod_flag)
2129 {
2130         tod_status_flag |= tod_flag;
2131 }
2132 
2133 void
2134 tod_status_clear(int tod_flag)
2135 {
2136         tod_status_flag &= ~tod_flag;
2137 }
2138 
2139 /*
2140  * Record a timestamp and the value passed to tod_set().  The next call to
2141  * tod_validate() can use these values, prev_set_tick and prev_set_tod,
2142  * when checking the timestruc_t returned by tod_get().  Ordinarily,
2143  * tod_validate() will use prev_tick and prev_tod for this task but these
2144  * become obsolete, and will be re-assigned with the prev_set_* values,
2145  * in the case when the TOD is re-written.
2146  */
2147 void
2148 tod_set_prev(timestruc_t ts)
2149 {
2150         if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2151             tod_validate_deferred) {
2152                 return;
2153         }
2154         prev_set_tick = gethrtime();
2155         /*
2156          * A negative value will be set to zero in utc_to_tod() so we fake
2157          * a zero here in such a case.  This would need to change if the
2158          * behavior of utc_to_tod() changes.
2159          */
2160         prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
2161 }
2162 
2163 /*
2164  * tod_validate() is used for checking values returned by tod_get().
2165  * Four error cases can be detected by this routine:
2166  *   TOD_REVERSED: current tod value is less than previous.
2167  *   TOD_STALLED: current tod value hasn't advanced.
2168  *   TOD_JUMPED: current tod value advanced too far from previous value.
2169  *   TOD_RATECHANGED: the ratio between average tod delta and
2170  *   average tick delta has changed.
2171  */
2172 time_t
2173 tod_validate(time_t tod)
2174 {
2175         time_t diff_tod;
2176         hrtime_t diff_tick;
2177 
2178         long dtick;
2179         int dtick_delta;
2180 
2181         int off = 0;
2182         enum tod_fault_type tod_bad = TOD_NOFAULT;
2183 
2184         static int firsttime = 1;
2185 
2186         static time_t prev_tod = 0;
2187         static hrtime_t prev_tick = 0;
2188         static long dtick_avg = TOD_REF_FREQ;
2189 
2190         int cpr_resume_done = 0;
2191         int dr_resume_done = 0;
2192 
2193         hrtime_t tick = gethrtime();
2194 
2195         ASSERT(MUTEX_HELD(&tod_lock));
2196 
2197         /*
2198          * tod_validate_enable is patchable via /etc/system.
2199          * If TOD is already faulted, or if TOD validation is deferred,
2200          * there is nothing to do.
2201          */
2202         if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2203             tod_validate_deferred) {
2204                 return (tod);
2205         }
2206 
2207         /*
2208          * If this is the first time through, we just need to save the tod
2209          * we were called with and hrtime so we can use them next time to
2210          * validate tod_get().
2211          */
2212         if (firsttime) {
2213                 firsttime = 0;
2214                 prev_tod = tod;
2215                 prev_tick = tick;
2216                 return (tod);
2217         }
2218 
2219         /*
2220          * Handle any flags that have been turned on by tod_status_set().
2221          * In the case where a tod_set() is done and then a subsequent
2222  * tod_get() fails (i.e., both TOD_SET_DONE and TOD_GET_FAILED are
2223          * true), we treat the TOD_GET_FAILED with precedence by switching
2224          * off the flag, returning tod and leaving TOD_SET_DONE asserted
2225          * until such time as tod_get() completes successfully.
2226          */
2227         if (tod_status_flag & TOD_GET_FAILED) {
2228                 /*
2229                  * tod_get() has encountered an issue, possibly transitory,
2230                  * when reading TOD.  We'll just return the incoming tod
2231                  * value (which is actually hrestime.tv_sec in this case)
2232                  * and when we get a genuine tod, following a successful
2233                  * tod_get(), we can validate using prev_tod and prev_tick.
2234                  */
2235                 tod_status_flag &= ~TOD_GET_FAILED;
2236                 return (tod);
2237         } else if (tod_status_flag & TOD_SET_DONE) {
2238                 /*
2239                  * TOD has been modified.  Just before the TOD was written,
2240                  * tod_set_prev() saved tod and hrtime; we can now use
2241                  * those values, prev_set_tod and prev_set_tick, to validate
2242                  * the incoming tod that's just been read.
2243                  */
2244                 prev_tod = prev_set_tod;
2245                 prev_tick = prev_set_tick;
2246                 dtick_avg = TOD_REF_FREQ;
2247                 tod_status_flag &= ~TOD_SET_DONE;
2248                 /*
2249                  * If a tod_set() preceded a cpr_suspend() without an
2250                  * intervening tod_validate(), we need to ensure that a
2251                  * TOD_JUMPED condition is ignored.
2252                  * Note this isn't a concern in the case of DR as we've
2253                  * just reassigned dtick_avg, above.
2254                  */
2255                 if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2256                         cpr_resume_done = 1;
2257                         tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2258                 }
2259         } else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2260                 /*
2261                  * The system's coming back from a checkpoint resume.
2262                  */
2263                 cpr_resume_done = 1;
2264                 tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2265                 /*
2266                  * We need to handle the possibility of a CPR suspend
2267                  * operation having been initiated whilst a DR event was
2268                  * in-flight.
2269                  */
2270                 if (tod_status_flag & TOD_DR_RESUME_DONE) {
2271                         dr_resume_done = 1;
2272                         tod_status_flag &= ~TOD_DR_RESUME_DONE;
2273                 }
2274         } else if (tod_status_flag & TOD_DR_RESUME_DONE) {
2275                 /*
2276                  * A Dynamic Reconfiguration event has taken place.
2277                  */
2278                 dr_resume_done = 1;
2279                 tod_status_flag &= ~TOD_DR_RESUME_DONE;
2280         }
2281 
2282         /* test hook */
2283         switch (tod_unit_test) {
2284         case 1: /* for testing jumping tod */
2285                 tod += tod_test_injector;
2286                 tod_unit_test = 0;
2287                 break;
2288         case 2: /* for testing stuck tod bit */
2289                 tod |= 1 << tod_test_injector;
2290                 tod_unit_test = 0;
2291                 break;
2292         case 3: /* for testing stalled tod */
2293                 tod = prev_tod;
2294                 tod_unit_test = 0;
2295                 break;
2296         case 4: /* reset tod fault status */
2297                 (void) tod_fault(TOD_NOFAULT, 0);
2298                 tod_unit_test = 0;
2299                 break;
2300         default:
2301                 break;
2302         }
2303 
2304         diff_tod = tod - prev_tod;
2305         diff_tick = tick - prev_tick;
2306 
2307         ASSERT(diff_tick >= 0);
2308 
2309         if (diff_tod < 0) {
2310                 /* ERROR - tod reversed */
2311                 tod_bad = TOD_REVERSED;
2312                 off = (int)(prev_tod - tod);
2313         } else if (diff_tod == 0) {
2314                 /* tod did not advance */
2315                 if (diff_tick > TOD_STALL_THRESHOLD) {
2316                         /* ERROR - tod stalled */
2317                         tod_bad = TOD_STALLED;
2318                 } else {
2319                         /*
2320                          * Make sure we don't update prev_tick
2321                          * so that diff_tick is calculated since
2322                          * the first diff_tod == 0
2323                          */
2324                         return (tod);
2325                 }
2326         } else {
2327                 /* calculate dtick */
2328                 dtick = diff_tick / diff_tod;
2329 
2330                 /* update dtick averages */
2331                 dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
2332 
2333                 /*
2334                  * Calculate dtick_delta as
2335                  * variation from reference freq in quartiles
2336                  */
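                     /*
                      * That is, dtick_delta only becomes nonzero once
                      * dtick_avg has drifted at least a quarter of
                      * TOD_REF_FREQ away from the reference frequency.
                      */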
2337                 dtick_delta = (dtick_avg - TOD_REF_FREQ) /
2338                     (TOD_REF_FREQ >> 2);
2339 
2340                 /*
2341                  * Even with a perfectly functioning TOD device,
2342                  * when the number of elapsed seconds is low the
2343                  * algorithm can calculate a rate that is beyond
2344                  * tolerance, causing an error.  The algorithm is
2345                  * inaccurate when elapsed time is low (less than
2346                  * 5 seconds).
2347                  */
2348                 if (diff_tod > 4) {
2349                         if (dtick < TOD_JUMP_THRESHOLD) {
2350                                 /*
2351                                  * If we've just done a CPR resume, we detect
2352                                  * a jump in the TOD but, actually, what's
2353                                  * happened is that the TOD has been increasing
2354                                  * whilst the system was suspended and the tick
2355                                  * count hasn't kept up.  We consider the first
2356                                  * occurrence of this after a resume as normal
2357                                  * and ignore it; otherwise, in a non-resume
2358                                  * case, we regard it as a TOD problem.
2359                                  */
2360                                 if (!cpr_resume_done) {
2361                                         /* ERROR - tod jumped */
2362                                         tod_bad = TOD_JUMPED;
2363                                         off = (int)diff_tod;
2364                                 }
2365                         }
2366                         if (dtick_delta) {
2367                                 /*
2368                                  * If we've just done a DR resume, dtick_avg
2369                                  * can go a bit askew so we reset it and carry
2370                                  * on; otherwise, the TOD is in error.
2371                                  */
2372                                 if (dr_resume_done) {
2373                                         dtick_avg = TOD_REF_FREQ;
2374                                 } else {
2375                                         /* ERROR - change in clock rate */
2376                                         tod_bad = TOD_RATECHANGED;
2377                                 }
2378                         }
2379                 }
2380         }
2381 
2382         if (tod_bad != TOD_NOFAULT) {
2383                 (void) tod_fault(tod_bad, off);
2384 
2385                 /*
2386                  * Disable dosynctodr since we are going to fault
2387                  * the TOD chip anyway here
2388                  */
2389                 dosynctodr = 0;
2390 
2391                 /*
2392                  * Set tod to the correct value from hrestime
2393                  */
2394                 tod = hrestime.tv_sec;
2395         }
2396 
2397         prev_tod = tod;
2398         prev_tick = tick;
2399         return (tod);
2400 }
2401 
2402 static void
2403 calcloadavg(int nrun, uint64_t *hp_ave)
2404 {
2405         static int64_t f[3] = { 135, 27, 9 };
2406         uint_t i;
2407         int64_t q, r;
2408 
2409         /*
2410          * Compute load average over the last 1, 5, and 15 minutes
2411          * (60, 300, and 900 seconds).  The constants in f[3] are for
2412          * exponential decay:
2413          * (1 - exp(-1/60)) << 13 = 135,
2414          * (1 - exp(-1/300)) << 13 = 27,
2415          * (1 - exp(-1/900)) << 13 = 9.
2416          */
2417 
2418         /*
2419          * a little hoop-jumping to avoid integer overflow
2420          */
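              /*
               * In effect, each pass of the loop below applies the fixed
               * point recurrence (ignoring truncation)
               *
               *      hp_ave[i] += (1 - exp(-1/T)) * ((nrun << 9) - hp_ave[i])
               *
               * where T is 60, 300 or 900 seconds; q and r split hp_ave[i]
               * into integer and fractional parts so the intermediate
               * products stay within 64 bits.
               */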
2421         for (i = 0; i < 3; i++) {
2422                 q = (hp_ave[i]  >> 16) << 7;
2423                 r = (hp_ave[i]  & 0xffff) << 7;
2424                 hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
2425         }
2426 }
2427 
2428 /*
2429  * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
2430  * calculate the value of lbolt according to the current mode. In the event
2431  * driven mode (the default), lbolt is calculated by dividing the current hires
2432  * time by the number of nanoseconds per clock tick. In the cyclic driven mode
2433  * an internal variable is incremented at each firing of the lbolt cyclic
2434  * and returned by lbolt_cyclic_driven().
2435  *
2436  * The system will transition from event to cyclic driven mode when the number
2437  * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
2438  * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
2439  * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
2440  * causing enough activity to cross the thresholds.
2441  */
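     /*
      * For reference, the DDI consumers mentioned above reduce to calling
      * whichever routine lbolt_hybrid currently points at; roughly (a
      * sketch, the actual definition lives outside this file):
      *
      *         int64_t
      *         ddi_get_lbolt64(void)
      *         {
      *                 return (lbolt_hybrid());
      *         }
      *
      * so each call costs one lbolt_event_driven() or lbolt_cyclic_driven()
      * invocation, depending on the current mode.
      */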
2442 int64_t
2443 lbolt_bootstrap(void)
2444 {
2445         return (0);
2446 }
2447 
2448 /* ARGSUSED */
2449 uint_t
2450 lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
2451 {
2452         hrtime_t ts, exp;
2453         int ret;
2454 
2455         ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
2456 
2457         kpreempt_disable();
2458 
2459         ts = gethrtime();
2460         lb_info->lbi_internal = (ts/nsec_per_tick);
2461 
2462         /*
2463          * Align the next expiration to a clock tick boundary.
2464          */
2465         exp = ts + nsec_per_tick - 1;
2466         exp = (exp/nsec_per_tick) * nsec_per_tick;
2467 
2468         ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
2469         ASSERT(ret);
2470 
2471         lbolt_hybrid = lbolt_cyclic_driven;
2472         lb_info->lbi_cyc_deactivate = B_FALSE;
2473         lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2474 
2475         kpreempt_enable();
2476 
2477         ret = atomic_dec_32_nv(&lb_info->lbi_token);
2478         ASSERT(ret == 0);
2479 
2480         return (1);
2481 }
2482 
2483 int64_t
2484 lbolt_event_driven(void)
2485 {
2486         hrtime_t ts;
2487         int64_t lb;
2488         int ret, cpu = CPU->cpu_seqid;
2489 
2490         ts = gethrtime();
2491         ASSERT(ts > 0);
2492 
2493         ASSERT(nsec_per_tick > 0);
2494         lb = (ts/nsec_per_tick);
2495 
2496         /*
2497          * Switch to cyclic mode if the number of calls to this routine
2498          * has reached the threshold within the interval.
2499          */
2500         if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
2501 
2502                 if (--lb_cpu[cpu].lbc_counter == 0) {
2503                         /*
2504                          * Reached the threshold within the interval, reset
2505                          * the usage statistics.
2506                          */
2507                         lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2508                         lb_cpu[cpu].lbc_cnt_start = lb;
2509 
2510                         /*
2511                          * Make sure only one thread reprograms the
2512                          * lbolt cyclic and changes the mode.
2513                          */
2514                         if (panicstr == NULL &&
2515                             atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2516 
2517                                 if (lbolt_hybrid == lbolt_cyclic_driven) {
2518                                         ret = atomic_dec_32_nv(
2519                                             &lb_info->lbi_token);
2520                                         ASSERT(ret == 0);
2521                                 } else {
2522                                         lbolt_softint_post();
2523                                 }
2524                         }
2525                 }
2526         } else {
2527                 /*
2528                  * Exceeded the interval, reset the usage statistics.
2529                  */
2530                 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2531                 lb_cpu[cpu].lbc_cnt_start = lb;
2532         }
2533 
2534         ASSERT(lb >= lb_info->lbi_debug_time);
2535 
2536         return (lb - lb_info->lbi_debug_time);
2537 }
2538 
2539 int64_t
2540 lbolt_cyclic_driven(void)
2541 {
2542         int64_t lb = lb_info->lbi_internal;
2543         int cpu;
2544 
2545         /*
2546          * If a CPU has already prevented the lbolt cyclic from deactivating
2547          * itself, don't bother tracking the usage. Otherwise check if we're
2548          * within the interval and how the per CPU counter is doing.
2549          */
2550         if (lb_info->lbi_cyc_deactivate) {
2551                 cpu = CPU->cpu_seqid;
2552                 if ((lb - lb_cpu[cpu].lbc_cnt_start) <
2553                     lb_info->lbi_thresh_interval) {
2554 
2555                         if (lb_cpu[cpu].lbc_counter == 0)
2556                                 /*
2557                                  * Reached the threshold within the interval,
2558                                  * prevent the lbolt cyclic from turning itself
2559                                  * off.
2560                                  */
2561                                 lb_info->lbi_cyc_deactivate = B_FALSE;
2562                         else
2563                                 lb_cpu[cpu].lbc_counter--;
2564                 } else {
2565                         /*
2566                          * Only reset the usage statistics when we have
2567                          * exceeded the interval.
2568                          */
2569                         lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2570                         lb_cpu[cpu].lbc_cnt_start = lb;
2571                 }
2572         }
2573 
2574         ASSERT(lb >= lb_info->lbi_debug_time);
2575 
2576         return (lb - lb_info->lbi_debug_time);
2577 }
2578 
2579 /*
2580  * The lbolt_cyclic() routine will fire at a nsec_per_tick interval to satisfy
2581  * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
2582  * It is inactive by default, and will be activated when switching from event
2583  * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
2584  * by lbolt_cyclic_driven().
2585  */
2586 static void
2587 lbolt_cyclic(void)
2588 {
2589         int ret;
2590 
2591         lb_info->lbi_internal++;
2592 
2593         if (!lbolt_cyc_only) {
2594 
2595                 if (lb_info->lbi_cyc_deactivate) {
2596                         /*
2597                          * Switching from cyclic to event driven mode.
2598                          */
2599                         if (panicstr == NULL &&
2600                             atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2601 
2602                                 if (lbolt_hybrid == lbolt_event_driven) {
2603                                         ret = atomic_dec_32_nv(
2604                                             &lb_info->lbi_token);
2605                                         ASSERT(ret == 0);
2606                                         return;
2607                                 }
2608 
2609                                 kpreempt_disable();
2610 
2611                                 lbolt_hybrid = lbolt_event_driven;
2612                                 ret = cyclic_reprogram(
2613                                     lb_info->id.lbi_cyclic_id,
2614                                     CY_INFINITY);
2615                                 ASSERT(ret);
2616 
2617                                 kpreempt_enable();
2618 
2619                                 ret = atomic_dec_32_nv(&lb_info->lbi_token);
2620                                 ASSERT(ret == 0);
2621                         }
2622                 }
2623 
2624                 /*
2625                  * The lbolt cyclic should not try to deactivate itself before
2626                  * the sampling period has elapsed.
2627                  */
2628                 if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
2629                     lb_info->lbi_thresh_interval) {
2630                         lb_info->lbi_cyc_deactivate = B_TRUE;
2631                         lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2632                 }
2633         }
2634 }
2635 
2636 /*
2637  * Since the lbolt service was historically cyclic driven, it must be 'stopped'
2638  * when the system drops into the kernel debugger. lbolt_debug_entry() is
2639  * called by the KDI system claim callbacks to record a hires timestamp at
2640  * debug enter time. lbolt_debug_return() is called by the system release
2641  * callbacks to account for the time spent in the debugger. The value is then
2642  * accumulated in the lb_info structure and used by lbolt_event_driven() and
2643  * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
2644  */
2645 void
2646 lbolt_debug_entry(void)
2647 {
2648         if (lbolt_hybrid != lbolt_bootstrap) {
2649                 ASSERT(lb_info != NULL);
2650                 lb_info->lbi_debug_ts = gethrtime();
2651         }
2652 }
2653 
2654 /*
2655  * Calculate the time spent in the debugger and add it to the lbolt info
2656  * structure. We also update the internal lbolt value in case we were in
2657  * cyclic driven mode going in.
2658  */
2659 void
2660 lbolt_debug_return(void)
2661 {
2662         hrtime_t ts;
2663 
2664         if (lbolt_hybrid != lbolt_bootstrap) {
2665                 ASSERT(lb_info != NULL);
2666                 ASSERT(nsec_per_tick > 0);
2667 
2668                 ts = gethrtime();
2669                 lb_info->lbi_internal = (ts/nsec_per_tick);
2670                 lb_info->lbi_debug_time +=
2671                     ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);
2672 
2673                 lb_info->lbi_debug_ts = 0;
2674         }
2675 }