1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
22 /* All Rights Reserved */
23
24 /*
25 * Copyright (c) 1988, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 */
28
29 #include <sys/param.h>
30 #include <sys/t_lock.h>
31 #include <sys/types.h>
32 #include <sys/tuneable.h>
33 #include <sys/sysmacros.h>
34 #include <sys/systm.h>
35 #include <sys/cpuvar.h>
36 #include <sys/lgrp.h>
37 #include <sys/user.h>
38 #include <sys/proc.h>
39 #include <sys/callo.h>
40 #include <sys/kmem.h>
41 #include <sys/var.h>
42 #include <sys/cmn_err.h>
43 #include <sys/swap.h>
44 #include <sys/vmsystm.h>
45 #include <sys/class.h>
46 #include <sys/time.h>
47 #include <sys/debug.h>
48 #include <sys/vtrace.h>
49 #include <sys/spl.h>
50 #include <sys/atomic.h>
51 #include <sys/dumphdr.h>
52 #include <sys/archsystm.h>
53 #include <sys/fs/swapnode.h>
54 #include <sys/panic.h>
55 #include <sys/disp.h>
56 #include <sys/msacct.h>
57 #include <sys/mem_cage.h>
58
59 #include <vm/page.h>
60 #include <vm/anon.h>
61 #include <vm/rm.h>
62 #include <sys/cyclic.h>
63 #include <sys/cpupart.h>
64 #include <sys/rctl.h>
65 #include <sys/task.h>
66 #include <sys/sdt.h>
67 #include <sys/ddi_timer.h>
68 #include <sys/random.h>
69 #include <sys/modctl.h>
70 #include <sys/zone.h>
71
72 /*
73 * for NTP support
74 */
75 #include <sys/timex.h>
76 #include <sys/inttypes.h>
77
78 #include <sys/sunddi.h>
79 #include <sys/clock_impl.h>
80
81 /*
82 * clock() is called straight from the clock cyclic; see clock_init().
83 *
84 * Functions:
85 * reprime clock
86 * maintain date
87 * jab the scheduler
88 */
89
90 extern kcondvar_t fsflush_cv;
91 extern sysinfo_t sysinfo;
92 extern vminfo_t vminfo;
93 extern int idleswtch; /* flag set while idle in pswtch() */
94 extern hrtime_t volatile devinfo_freeze;
95
96 /*
97 * high-precision avenrun values. These are needed to make the
98 * regular avenrun values accurate.
99 */
100 static uint64_t hp_avenrun[3];
101 int avenrun[3]; /* FSCALED average run queue lengths */
102 time_t time; /* time in seconds since 1970 - for compatibility only */
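
/*
 * Illustrative note (not used by the code): avenrun[] entries are
 * fixed-point values scaled by FSCALE (1 << FSHIFT), while hp_avenrun[]
 * keeps 16 fractional bits. Assuming FSHIFT is 8, a steady load of
 * exactly 1.0 would be stored as:
 *
 *	hp_avenrun[0] = 1 << 16;	(high precision, 16 bit fraction)
 *	avenrun[0] = 1 << FSHIFT;	(256; consumers divide by FSCALE)
 */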
103
104 static struct loadavg_s loadavg;
105 /*
106 * Phase/frequency-lock loop (PLL/FLL) definitions
107 *
108 * The following variables are read and set by the ntp_adjtime() system
109 * call.
110 *
111 * time_state shows the state of the system clock, with values defined
112 * in the timex.h header file.
113 *
114 * time_status shows the status of the system clock, with bits defined
115 * in the timex.h header file.
116 *
117 * time_offset is used by the PLL/FLL to adjust the system time in small
118 * increments.
119 *
120 * time_constant determines the bandwidth or "stiffness" of the PLL.
121 *
122 * time_tolerance determines maximum frequency error or tolerance of the
123 * CPU clock oscillator and is a property of the architecture; however,
124 * in principle it could change as result of the presence of external
125 * discipline signals, for instance.
126 *
127 * time_precision is usually equal to the kernel tick variable; however,
128 * in cases where a precision clock counter or external clock is
129 * available, the resolution can be much less than this and depend on
130 * whether the external clock is working or not.
131 *
132 * time_maxerror is initialized by a ntp_adjtime() call and increased by
133 * the kernel once each second to reflect the maximum error bound
134 * growth.
135 *
136 * time_esterror is set and read by the ntp_adjtime() call, but
137 * otherwise not used by the kernel.
138 */
139 int32_t time_state = TIME_OK; /* clock state */
140 int32_t time_status = STA_UNSYNC; /* clock status bits */
141 int32_t time_offset = 0; /* time offset (us) */
142 int32_t time_constant = 0; /* pll time constant */
143 int32_t time_tolerance = MAXFREQ; /* frequency tolerance (scaled ppm) */
144 int32_t time_precision = 1; /* clock precision (us) */
145 int32_t time_maxerror = MAXPHASE; /* maximum error (us) */
146 int32_t time_esterror = MAXPHASE; /* estimated error (us) */
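
/*
 * Illustrative sketch of the userland view (hypothetical values): a
 * synchronization daemon such as ntpd reads and sets the variables
 * above through the ntp_adjtime() system call, e.g.:
 *
 *	struct timex tx;
 *
 *	tx.modes = MOD_OFFSET | MOD_TIMECONST;
 *	tx.offset = 100;	(slew the clock by 100 usec)
 *	tx.constant = 2;	(PLL time constant)
 *	(void) ntp_adjtime(&tx);
 */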
147
148 /*
149 * The following variables establish the state of the PLL/FLL and the
150 * residual time and frequency offset of the local clock. The scale
151 * factors are defined in the timex.h header file.
152 *
153 * time_phase and time_freq are the phase increment and the frequency
154 * increment, respectively, of the kernel time variable.
155 *
156 * time_freq is set via ntp_adjtime() from a value stored in a file when
157 * the synchronization daemon is first started. Its value is retrieved
158 * via ntp_adjtime() and written to the file about once per hour by the
159 * daemon.
160 *
161 * time_adj is the adjustment added to the value of tick at each timer
162 * interrupt and is recomputed from time_phase and time_freq at each
163 * seconds rollover.
164 *
165 * time_reftime is the second's portion of the system time at the last
166 * call to ntp_adjtime(). It is used to adjust the time_freq variable
167 * and to increase the time_maxerror as the time since last update
168 * increases.
169 */
170 int32_t time_phase = 0; /* phase offset (scaled us) */
171 int32_t time_freq = 0; /* frequency offset (scaled ppm) */
172 int32_t time_adj = 0; /* tick adjust (scaled 1 / hz) */
173 int32_t time_reftime = 0; /* time at last adjustment (s) */
174
175 /*
176 * The scale factors of the following variables are defined in the
177 * timex.h header file.
178 *
179 * pps_time contains the time at each calibration interval, as read by
180 * microtime(). pps_count counts the seconds of the calibration
181 * interval, the duration of which is nominally pps_shift in powers of
182 * two.
183 *
184 * pps_offset is the time offset produced by the time median filter
185 * pps_tf[], while pps_jitter is the dispersion (jitter) measured by
186 * this filter.
187 *
188 * pps_freq is the frequency offset produced by the frequency median
189 * filter pps_ff[], while pps_stabil is the dispersion (wander) measured
190 * by this filter.
191 *
192 * pps_usec is latched from a high resolution counter or external clock
193 * at pps_time. Here we want the hardware counter contents only, not the
194 * contents plus the time_tv.usec as usual.
195 *
196 * pps_valid counts the number of seconds since the last PPS update. It
197 * is used as a watchdog timer to disable the PPS discipline should the
198 * PPS signal be lost.
199 *
200 * pps_glitch counts the number of seconds since the beginning of an
201 * offset burst more than tick/2 from current nominal offset. It is used
202 * mainly to suppress error bursts due to priority conflicts between the
203 * PPS interrupt and timer interrupt.
204 *
205 * pps_intcnt counts the calibration intervals for use in the interval-
206 * adaptation algorithm. It's just too complicated for words.
207 */
208 struct timeval pps_time; /* kernel time at last interval */
209 int32_t pps_tf[] = {0, 0, 0}; /* pps time offset median filter (us) */
210 int32_t pps_offset = 0; /* pps time offset (us) */
211 int32_t pps_jitter = MAXTIME; /* time dispersion (jitter) (us) */
212 int32_t pps_ff[] = {0, 0, 0}; /* pps frequency offset median filter */
213 int32_t pps_freq = 0; /* frequency offset (scaled ppm) */
214 int32_t pps_stabil = MAXFREQ; /* frequency dispersion (scaled ppm) */
215 int32_t pps_usec = 0; /* microsec counter at last interval */
216 int32_t pps_valid = PPS_VALID; /* pps signal watchdog counter */
217 int32_t pps_glitch = 0; /* pps signal glitch counter */
218 int32_t pps_count = 0; /* calibration interval counter (s) */
219 int32_t pps_shift = PPS_SHIFT; /* interval duration (s) (shift) */
220 int32_t pps_intcnt = 0; /* intervals at current duration */
221
222 /*
223 * PPS signal quality monitors
224 *
225 * pps_jitcnt counts the seconds that have been discarded because the
226 * jitter measured by the time median filter exceeds the limit MAXTIME
227 * (100 us).
228 *
229 * pps_calcnt counts the frequency calibration intervals, which are
230 * variable from 4 s to 256 s.
231 *
232 * pps_errcnt counts the calibration intervals which have been discarded
233 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
234 * calibration interval jitter exceeds two ticks.
235 *
236 * pps_stbcnt counts the calibration intervals that have been discarded
237 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 us).
238 */
239 int32_t pps_jitcnt = 0; /* jitter limit exceeded */
240 int32_t pps_calcnt = 0; /* calibration intervals */
241 int32_t pps_errcnt = 0; /* calibration errors */
242 int32_t pps_stbcnt = 0; /* stability limit exceeded */
243
244 kcondvar_t lbolt_cv;
245
246 /*
247 * Hybrid lbolt implementation:
248 *
 * The service historically provided by the lbolt and lbolt64 variables has
 * been replaced by the ddi_get_lbolt() and ddi_get_lbolt64() routines, and
 * the original symbols have been removed from the system. The once clock
 * driven variables are now implemented in an event driven fashion, backed by
 * gethrtime() coarsened to the appropriate clock resolution. The default
 * event driven implementation is complemented by a cyclic driven one, active
 * only during periods of intense activity around the DDI lbolt routines,
 * when an lbolt specific cyclic is reprogrammed to fire at a clock tick
 * interval to serve consumers of lbolt who rely on the original low cost
 * of consulting a memory position.
258 *
 * The implementation uses the number of calls to these routines and their
 * frequency to determine when to transition from event to cyclic driven
 * mode and vice-versa. These values are kept on a per CPU basis for
262 * scalability reasons and to prevent CPUs from constantly invalidating a single
263 * cache line when modifying a global variable. The transition from event to
264 * cyclic mode happens once the thresholds are crossed, and activity on any CPU
265 * can cause such transition.
266 *
267 * The lbolt_hybrid function pointer is called by ddi_get_lbolt() and
268 * ddi_get_lbolt64(), and will point to lbolt_event_driven() or
269 * lbolt_cyclic_driven() according to the current mode. When the thresholds
270 * are exceeded, lbolt_event_driven() will reprogram the lbolt cyclic to
271 * fire at a nsec_per_tick interval and increment an internal variable at
272 * each firing. lbolt_hybrid will then point to lbolt_cyclic_driven(), which
 * will simply return the value of that variable. lbolt_cyclic() will attempt
274 * to shut itself off at each threshold interval (sampling period for calls
275 * to the DDI lbolt routines), and return to the event driven mode, but will
276 * be prevented from doing so if lbolt_cyclic_driven() is being heavily used.
277 *
 * lbolt_bootstrap is used during boot to serve lbolt consumers who don't
 * wait for the cyclic subsystem to be initialized.
281 */
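
/*
 * Illustrative sketch (not part of the implementation): a DDI consumer
 * is oblivious to the mode switching described above. It simply reads
 * the current tick count, and under heavy use it will transparently be
 * served by the cyclic driven variable:
 *
 *	clock_t now, deadline;
 *
 *	now = ddi_get_lbolt();
 *	deadline = now + drv_usectohz(10000);	(roughly 10ms from now)
 */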
282 int64_t lbolt_bootstrap(void);
283 int64_t lbolt_event_driven(void);
284 int64_t lbolt_cyclic_driven(void);
285 int64_t (*lbolt_hybrid)(void) = lbolt_bootstrap;
286 uint_t lbolt_ev_to_cyclic(caddr_t, caddr_t);
287
288 /*
289 * lbolt's cyclic, installed by clock_init().
290 */
291 static void lbolt_cyclic(void);
292
293 /*
 * Tunable to keep lbolt in cyclic driven mode. This will prevent the system
 * from switching back to event driven mode once it reaches cyclic mode.
296 */
297 static boolean_t lbolt_cyc_only = B_FALSE;
298
299 /*
300 * Cache aligned, per CPU structure with lbolt usage statistics.
301 */
302 static lbolt_cpu_t *lb_cpu;
303
304 /*
305 * Single, cache aligned, structure with all the information required by
306 * the lbolt implementation.
307 */
lbolt_info_t *lb_info;

311 int one_sec = 1; /* turned on once every second */
312 static int fsflushcnt; /* counter for t_fsflushr */
313 int dosynctodr = 1; /* patchable; enable/disable sync to TOD chip */
314 int tod_needsync = 0; /* need to sync tod chip with software time */
315 static int tod_broken = 0; /* clock chip doesn't work */
316 time_t boot_time = 0; /* Boot time in seconds since 1970 */
317 cyclic_id_t clock_cyclic; /* clock()'s cyclic_id */
318 cyclic_id_t deadman_cyclic; /* deadman()'s cyclic_id */
319 cyclic_id_t ddi_timer_cyclic; /* cyclic_timer()'s cyclic_id */
320
321 extern void clock_tick_schedule(int);
322
323 static int lgrp_ticks; /* counter to schedule lgrp load calcs */
324
325 /*
326 * for tod fault detection
327 */
328 #define TOD_REF_FREQ ((longlong_t)(NANOSEC))
329 #define TOD_STALL_THRESHOLD (TOD_REF_FREQ * 3 / 2)
330 #define TOD_JUMP_THRESHOLD (TOD_REF_FREQ / 2)
331 #define TOD_FILTER_N 4
332 #define TOD_FILTER_SETTLE (4 * TOD_FILTER_N)
333 static int tod_faulted = TOD_NOFAULT;
334
335 static int tod_status_flag = 0; /* used by tod_validate() */
336
337 static hrtime_t prev_set_tick = 0; /* gethrtime() prior to tod_set() */
338 static time_t prev_set_tod = 0; /* tv_sec value passed to tod_set() */
339
340 /* patchable via /etc/system */
341 int tod_validate_enable = 1;
342
343 /* Diagnose/Limit messages about delay(9F) called from interrupt context */
344 int delay_from_interrupt_diagnose = 0;
345 volatile uint32_t delay_from_interrupt_msg = 20;
346
347 /*
348 * On non-SPARC systems, TOD validation must be deferred until gethrtime
349 * returns non-zero values (after mach_clkinit's execution).
350 * On SPARC systems, it must be deferred until after hrtime_base
351 * and hres_last_tick are set (in the first invocation of hres_tick).
352 * Since in both cases the prerequisites occur before the invocation of
353 * tod_get() in clock(), the deferment is lifted there.
354 */
355 static boolean_t tod_validate_deferred = B_TRUE;
356
357 /*
358 * tod_fault_table[] must be aligned with
359 * enum tod_fault_type in systm.h
360 */
361 static char *tod_fault_table[] = {
362 "Reversed", /* TOD_REVERSED */
363 "Stalled", /* TOD_STALLED */
364 "Jumped", /* TOD_JUMPED */
365 "Changed in Clock Rate", /* TOD_RATECHANGED */
366 "Is Read-Only" /* TOD_RDONLY */
367 /*
368 * no strings needed for TOD_NOFAULT
369 */
370 };
371
372 /*
373 * test hook for tod broken detection in tod_validate
374 */
375 int tod_unit_test = 0;
376 time_t tod_test_injector;
377
378 #define CLOCK_ADJ_HIST_SIZE 4
379
380 static int adj_hist_entry;
381
382 int64_t clock_adj_hist[CLOCK_ADJ_HIST_SIZE];
383
384 static void calcloadavg(int, uint64_t *);
385 static int genloadavg(struct loadavg_s *);
static void loadavg_update(void);
387
388 void (*cmm_clock_callout)() = NULL;
389 void (*cpucaps_clock_callout)() = NULL;
390
391 extern clock_t clock_tick_proc_max;
392
393 static int64_t deadman_counter = 0;
394
395 static void
396 clock(void)
397 {
398 kthread_t *t;
399 uint_t nrunnable;
400 uint_t w_io;
401 cpu_t *cp;
402 cpupart_t *cpupart;
403 extern void set_freemem();
404 void (*funcp)();
405 int32_t ltemp;
406 int64_t lltemp;
407 int s;
408 int do_lgrp_load;
409 int i;
410 clock_t now = LBOLT_NO_ACCOUNT; /* current tick */
411
412 if (panicstr)
413 return;
414
415 /*
	 * Make sure that 'freemem' does not drift too far from the truth.
417 */
	set_freemem();

	/*
	 * Before the repeated section below is executed, we do the
	 * time delta processing which occurs on every clock tick.
	 *
	 * There is additional processing which happens once per second,
	 * when the nanosecond counter rolls over; it is described below,
	 * in the section which begins with: if (one_sec)
	 *
	 * This section marks the beginning of the precision-kernel
	 * code fragment.
	 *
	 * First, compute the phase adjustment. If the low-order bits
	 * (time_phase) of the update overflow, propagate the carry
	 * into timedelta.
	 */
436 time_phase += time_adj;
437 if (time_phase <= -FINEUSEC) {
438 ltemp = -time_phase / SCALE_PHASE;
439 time_phase += ltemp * SCALE_PHASE;
440 s = hr_clock_lock();
441 timedelta -= ltemp * (NANOSEC/MICROSEC);
442 hr_clock_unlock(s);
443 } else if (time_phase >= FINEUSEC) {
444 ltemp = time_phase / SCALE_PHASE;
445 time_phase -= ltemp * SCALE_PHASE;
446 s = hr_clock_lock();
447 timedelta += ltemp * (NANOSEC/MICROSEC);
448 hr_clock_unlock(s);
449 }
450
451 /*
452 * End of precision-kernel code fragment which is processed
453 * every timer interrupt.
454 *
455 * Continue with the interrupt processing as scheduled.
456 */
	/*
	 * Count the number of runnable threads and the number waiting
	 * for some form of I/O to complete -- gets added to
	 * sysinfo.waiting. To know the state of the system, we must add
	 * the wait counts from all CPUs. Also add up the per-partition
	 * statistics.
	 */
463 */
464 w_io = 0;
465 nrunnable = 0;
466
467 /*
468 * keep track of when to update lgrp/part loads
469 */
470
471 do_lgrp_load = 0;
472 if (lgrp_ticks++ >= hz / 10) {
473 lgrp_ticks = 0;
474 do_lgrp_load = 1;
475 }
476
477 if (one_sec) {
478 loadavg_update();
479 deadman_counter++;
480 }
481
482 /*
483 * First count the threads waiting on kpreempt queues in each
484 * CPU partition.
485 */
486
487 cpupart = cp_list_head;
488 do {
489 uint_t cpupart_nrunnable = cpupart->cp_kp_queue.disp_nrunnable;
490
491 cpupart->cp_updates++;
492 nrunnable += cpupart_nrunnable;
493 cpupart->cp_nrunnable_cum += cpupart_nrunnable;
494 if (one_sec) {
495 cpupart->cp_nrunning = 0;
496 cpupart->cp_nrunnable = cpupart_nrunnable;
497 }
498 } while ((cpupart = cpupart->cp_next) != cp_list_head);
499
500
501 /* Now count the per-CPU statistics. */
502 cp = cpu_list;
503 do {
504 uint_t cpu_nrunnable = cp->cpu_disp->disp_nrunnable;
505
506 nrunnable += cpu_nrunnable;
507 cpupart = cp->cpu_part;
508 cpupart->cp_nrunnable_cum += cpu_nrunnable;
509 if (one_sec) {
510 cpupart->cp_nrunnable += cpu_nrunnable;
			/*
			 * Count this CPU as running for its partition's
			 * one-second statistics.
			 */
			cpupart->cp_nrunning++;
			/*
			 * w_io is used to update sysinfo.waiting during
			 * one_second processing below. Only gather w_io
			 * information while walking the list of cpus, and
			 * only if we're going to perform one_second
			 * processing.
			 */
521 w_io += CPU_STATS(cp, sys.iowait);
522 }
523
524 if (one_sec && (cp->cpu_flags & CPU_EXISTS)) {
525 int i, load, change;
526 hrtime_t intracct, intrused;
527 const hrtime_t maxnsec = 1000000000;
528 const int precision = 100;
529
530 /*
531 * Estimate interrupt load on this cpu each second.
532 * Computes cpu_intrload as %utilization (0-99).
533 */
534
535 /* add up interrupt time from all micro states */
536 for (intracct = 0, i = 0; i < NCMSTATES; i++)
537 intracct += cp->cpu_intracct[i];
538 scalehrtime(&intracct);
539
540 /* compute nsec used in the past second */
541 intrused = intracct - cp->cpu_intrlast;
542 cp->cpu_intrlast = intracct;
543
544 /* limit the value for safety (and the first pass) */
545 if (intrused >= maxnsec)
546 intrused = maxnsec - 1;
547
548 /* calculate %time in interrupt */
549 load = (precision * intrused) / maxnsec;
550 ASSERT(load >= 0 && load < precision);
551 change = cp->cpu_intrload - load;
552
553 /* jump to new max, or decay the old max */
554 if (change < 0)
555 cp->cpu_intrload = load;
556 else if (change > 0)
557 cp->cpu_intrload -= (change + 3) / 4;
558
559 DTRACE_PROBE3(cpu_intrload,
560 cpu_t *, cp,
561 hrtime_t, intracct,
562 hrtime_t, intrused);
563 }
564
565 if (do_lgrp_load &&
566 (cp->cpu_flags & CPU_EXISTS)) {
567 /*
568 * When updating the lgroup's load average,
569 * account for the thread running on the CPU.
570 * If the CPU is the current one, then we need
571 * to account for the underlying thread which
			 * got the clock interrupt, not the thread that is
			 * handling the interrupt and calculating the load
			 * average.
575 */
576 t = cp->cpu_thread;
577 if (CPU == cp)
578 t = t->t_intr;
579
580 /*
			 * Account for the load average of this thread if
			 * it isn't the idle thread, or if it is on the
			 * interrupt stack of a CPU other than the one
			 * handling the clock interrupt.
585 */
586 if ((t && t != cp->cpu_idle_thread) || (CPU != cp &&
587 CPU_ON_INTR(cp))) {
588 if (t->t_lpl == cp->cpu_lpl) {
589 /* local thread */
590 cpu_nrunnable++;
591 } else {
592 /*
593 * This is a remote thread, charge it
594 * against its home lgroup. Note that
595 * we notice that a thread is remote
596 * only if it's currently executing.
597 * This is a reasonable approximation,
598 * since queued remote threads are rare.
599 * Note also that if we didn't charge
600 * it to its home lgroup, remote
601 * execution would often make a system
602 * appear balanced even though it was
603 * not, and thread placement/migration
604 * would often not be done correctly.
605 */
606 lgrp_loadavg(t->t_lpl,
607 LGRP_LOADAVG_IN_THREAD_MAX, 0);
608 }
609 }
610 lgrp_loadavg(cp->cpu_lpl,
611 cpu_nrunnable * LGRP_LOADAVG_IN_THREAD_MAX, 1);
612 }
613 } while ((cp = cp->cpu_next) != cpu_list);
614
615 clock_tick_schedule(one_sec);
616
617 /*
618 * Check for a callout that needs be called from the clock
619 * thread to support the membership protocol in a clustered
620 * system. Copy the function pointer so that we can reset
621 * this to NULL if needed.
622 */
623 if ((funcp = cmm_clock_callout) != NULL)
624 (*funcp)();
625
626 if ((funcp = cpucaps_clock_callout) != NULL)
627 (*funcp)();
628
629 /*
	 * Wake up the cageout thread waiters once per second.
631 */
632 if (one_sec)
633 kcage_tick();
634
635 if (one_sec) {
636
637 int drift, absdrift;
638 timestruc_t tod;
639 int s;
640
641 /*
642 * Beginning of precision-kernel code fragment executed
643 * every second.
644 *
645 * On rollover of the second the phase adjustment to be
646 * used for the next second is calculated. Also, the
647 * maximum error is increased by the tolerance. If the
648 * PPS frequency discipline code is present, the phase is
649 * increased to compensate for the CPU clock oscillator
650 * frequency error.
651 *
652 * On a 32-bit machine and given parameters in the timex.h
653 * header file, the maximum phase adjustment is +-512 ms
654 * and maximum frequency offset is (a tad less than)
655 * +-512 ppm. On a 64-bit machine, you shouldn't need to ask.
656 */
657 time_maxerror += time_tolerance / SCALE_USEC;
658
659 /*
660 * Leap second processing. If in leap-insert state at
661 * the end of the day, the system clock is set back one
662 * second; if in leap-delete state, the system clock is
663 * set ahead one second. The microtime() routine or
		 * external clock driver will ensure that reported time
665 * is always monotonic. The ugly divides should be
666 * replaced.
667 */
668 switch (time_state) {
669
670 case TIME_OK:
671 if (time_status & STA_INS)
672 time_state = TIME_INS;
673 else if (time_status & STA_DEL)
674 time_state = TIME_DEL;
675 break;
676
677 case TIME_INS:
678 if (hrestime.tv_sec % 86400 == 0) {
679 s = hr_clock_lock();
680 hrestime.tv_sec--;
681 hr_clock_unlock(s);
682 time_state = TIME_OOP;
683 }
684 break;
685
686 case TIME_DEL:
687 if ((hrestime.tv_sec + 1) % 86400 == 0) {
688 s = hr_clock_lock();
689 hrestime.tv_sec++;
690 hr_clock_unlock(s);
691 time_state = TIME_WAIT;
692 }
693 break;
694
695 case TIME_OOP:
696 time_state = TIME_WAIT;
697 break;
698
699 case TIME_WAIT:
			if (!(time_status & (STA_INS | STA_DEL)))
				time_state = TIME_OK;
			break;
702 default:
703 break;
704 }
705
706 /*
707 * Compute the phase adjustment for the next second. In
708 * PLL mode, the offset is reduced by a fixed factor
709 * times the time constant. In FLL mode the offset is
710 * used directly. In either mode, the maximum phase
711 * adjustment for each second is clamped so as to spread
712 * the adjustment over not more than the number of
713 * seconds between updates.
714 */
715 if (time_offset == 0)
716 time_adj = 0;
717 else if (time_offset < 0) {
718 lltemp = -time_offset;
719 if (!(time_status & STA_FLL)) {
720 if ((1 << time_constant) >= SCALE_KG)
721 lltemp *= (1 << time_constant) /
722 SCALE_KG;
723 else
724 lltemp = (lltemp / SCALE_KG) >>
725 time_constant;
726 }
727 if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
728 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
729 time_offset += lltemp;
730 time_adj = -(lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
731 } else {
732 lltemp = time_offset;
733 if (!(time_status & STA_FLL)) {
734 if ((1 << time_constant) >= SCALE_KG)
735 lltemp *= (1 << time_constant) /
736 SCALE_KG;
737 else
738 lltemp = (lltemp / SCALE_KG) >>
739 time_constant;
740 }
741 if (lltemp > (MAXPHASE / MINSEC) * SCALE_UPDATE)
742 lltemp = (MAXPHASE / MINSEC) * SCALE_UPDATE;
743 time_offset -= lltemp;
744 time_adj = (lltemp * SCALE_PHASE) / hz / SCALE_UPDATE;
745 }
746
747 /*
748 * Compute the frequency estimate and additional phase
749 * adjustment due to frequency error for the next
750 * second. When the PPS signal is engaged, gnaw on the
751 * watchdog counter and update the frequency computed by
752 * the pll and the PPS signal.
753 */
754 pps_valid++;
755 if (pps_valid == PPS_VALID) {
756 pps_jitter = MAXTIME;
757 pps_stabil = MAXFREQ;
758 time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
759 STA_PPSWANDER | STA_PPSERROR);
760 }
761 lltemp = time_freq + pps_freq;
762
763 if (lltemp)
764 time_adj += (lltemp * SCALE_PHASE) / (SCALE_USEC * hz);
765
766 /*
		 * End of precision-kernel code fragment.
768 *
769 * The section below should be modified if we are planning
770 * to use NTP for synchronization.
771 *
772 * Note: the clock synchronization code now assumes
773 * the following:
774 * - if dosynctodr is 1, then compute the drift between
775 * the tod chip and software time and adjust one or
776 * the other depending on the circumstances
777 *
778 * - if dosynctodr is 0, then the tod chip is independent
		 *   of the software clock and should not be adjusted,
		 *   but allowed to free run. This allows NTP to sync
		 *   hrestime without any interference from the tod chip.
782 */
783
784 tod_validate_deferred = B_FALSE;
785 mutex_enter(&tod_lock);
786 tod = tod_get();
787 drift = tod.tv_sec - hrestime.tv_sec;
788 absdrift = (drift >= 0) ? drift : -drift;
789 if (tod_needsync || absdrift > 1) {
790 int s;
791 if (absdrift > 2) {
792 if (!tod_broken && tod_faulted == TOD_NOFAULT) {
793 s = hr_clock_lock();
794 hrestime = tod;
795 membar_enter(); /* hrestime visible */
796 timedelta = 0;
797 timechanged++;
798 tod_needsync = 0;
799 hr_clock_unlock(s);
					callout_hrestime();
				}
803 } else {
804 if (tod_needsync || !dosynctodr) {
805 gethrestime(&tod);
806 tod_set(tod);
807 s = hr_clock_lock();
808 if (timedelta == 0)
809 tod_needsync = 0;
810 hr_clock_unlock(s);
811 } else {
812 /*
813 * If the drift is 2 seconds on the
814 * money, then the TOD is adjusting
815 * the clock; record that.
816 */
817 clock_adj_hist[adj_hist_entry++ %
818 CLOCK_ADJ_HIST_SIZE] = now;
819 s = hr_clock_lock();
					timedelta = (int64_t)drift * NANOSEC;
821 hr_clock_unlock(s);
822 }
823 }
824 }
825 one_sec = 0;
826 time = gethrestime_sec(); /* for crusty old kmem readers */
827 mutex_exit(&tod_lock);
828
829 /*
830 * Some drivers still depend on this... XXX
831 */
832 cv_broadcast(&lbolt_cv);
833
834 vminfo.freemem += freemem;
835 {
836 pgcnt_t maxswap, resv, free;
837 pgcnt_t avail =
838 MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
839
			maxswap = k_anoninfo.ani_mem_resv +
			    k_anoninfo.ani_max + avail;
842 /* Update ani_free */
843 set_anoninfo();
844 free = k_anoninfo.ani_free + avail;
845 resv = k_anoninfo.ani_phys_resv +
846 k_anoninfo.ani_mem_resv;
847
848 vminfo.swap_resv += resv;
849 /* number of reserved and allocated pages */
850 #ifdef DEBUG
851 if (maxswap < free)
852 cmn_err(CE_WARN, "clock: maxswap < free");
853 if (maxswap < resv)
854 cmn_err(CE_WARN, "clock: maxswap < resv");
855 #endif
856 vminfo.swap_alloc += maxswap - free;
857 vminfo.swap_avail += maxswap - resv;
858 vminfo.swap_free += free;
859 }
860 vminfo.updates++;
861 if (nrunnable) {
862 sysinfo.runque += nrunnable;
863 sysinfo.runocc++;
864 }
865 if (nswapped) {
866 sysinfo.swpque += nswapped;
867 sysinfo.swpocc++;
868 }
869 sysinfo.waiting += w_io;
870 sysinfo.updates++;
871
872 /*
873 * Wake up fsflush to write out DELWRI
874 * buffers, dirty pages and other cached
875 * administrative data, e.g. inodes.
876 */
877 if (--fsflushcnt <= 0) {
878 fsflushcnt = tune.t_fsflushr;
879 cv_signal(&fsflush_cv);
880 }
881
882 vmmeter();
883 calcloadavg(genloadavg(&loadavg), hp_avenrun);
884 for (i = 0; i < 3; i++)
885 /*
886 * At the moment avenrun[] can only hold 31
887 * bits of load average as it is a signed
888 * int in the API. We need to ensure that
889 * hp_avenrun[i] >> (16 - FSHIFT) will not be
890 * too large. If it is, we put the largest value
891 * that we can use into avenrun[i]. This is
			 * kludgey, but about all we can do until
			 * avenrun[] is declared as an array of uint64_t.
894 */
895 if (hp_avenrun[i] < ((uint64_t)1<<(31+16-FSHIFT)))
896 avenrun[i] = (int32_t)(hp_avenrun[i] >>
897 (16 - FSHIFT));
898 else
899 avenrun[i] = 0x7fffffff;
900
901 cpupart = cp_list_head;
902 do {
903 calcloadavg(genloadavg(&cpupart->cp_loadavg),
904 cpupart->cp_hp_avenrun);
905 } while ((cpupart = cpupart->cp_next) != cp_list_head);
906
907 /*
908 * Wake up the swapper thread if necessary.
909 */
910 if (runin ||
911 (runout && (avefree < desfree || wake_sched_sec))) {
912 t = &t0;
913 thread_lock(t);
914 if (t->t_state == TS_STOPPED) {
915 runin = runout = 0;
916 wake_sched_sec = 0;
917 t->t_whystop = 0;
918 t->t_whatstop = 0;
919 t->t_schedflag &= ~TS_ALLSTART;
920 THREAD_TRANSITION(t);
921 setfrontdq(t);
922 }
923 thread_unlock(t);
924 }
925 }
926
927 /*
928 * Wake up the swapper if any high priority swapped-out threads
	 * became runnable during the last tick.
930 */
931 if (wake_sched) {
932 t = &t0;
933 thread_lock(t);
934 if (t->t_state == TS_STOPPED) {
935 runin = runout = 0;
936 wake_sched = 0;
937 t->t_whystop = 0;
938 t->t_whatstop = 0;
939 t->t_schedflag &= ~TS_ALLSTART;
940 THREAD_TRANSITION(t);
941 setfrontdq(t);
942 }
943 thread_unlock(t);
944 }
945 }
946
947 void
948 clock_init(void)
949 {
950 cyc_handler_t clk_hdlr, lbolt_hdlr;
951 cyc_time_t clk_when, lbolt_when;
952 int i, sz;
953 intptr_t buf;
954
955 /*
956 * Setup handler and timer for the clock cyclic.
957 */
958 clk_hdlr.cyh_func = (cyc_func_t)clock;
959 clk_hdlr.cyh_level = CY_LOCK_LEVEL;
960 clk_hdlr.cyh_arg = NULL;
961
962 clk_when.cyt_when = 0;
963 clk_when.cyt_interval = nsec_per_tick;
964
965 /*
	 * The lbolt cyclic will be reprogrammed to fire at a nsec_per_tick
967 * interval to satisfy performance needs of the DDI lbolt consumers.
968 * It is off by default.
969 */
970 lbolt_hdlr.cyh_func = (cyc_func_t)lbolt_cyclic;
971 lbolt_hdlr.cyh_level = CY_LOCK_LEVEL;
972 lbolt_hdlr.cyh_arg = NULL;
973
974 lbolt_when.cyt_interval = nsec_per_tick;
975
976 /*
977 * Allocate cache line aligned space for the per CPU lbolt data and
978 * lbolt info structures, and initialize them with their default
979 * values. Note that these structures are also cache line sized.
980 */
981 sz = sizeof (lbolt_info_t) + CPU_CACHE_COHERENCE_SIZE;
982 buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
983 lb_info = (lbolt_info_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
984
985 if (hz != HZ_DEFAULT)
		lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL *
		    hz / HZ_DEFAULT;
988 else
989 lb_info->lbi_thresh_interval = LBOLT_THRESH_INTERVAL;
990
991 lb_info->lbi_thresh_calls = LBOLT_THRESH_CALLS;
992
993 sz = (sizeof (lbolt_cpu_t) * max_ncpus) + CPU_CACHE_COHERENCE_SIZE;
994 buf = (intptr_t)kmem_zalloc(sz, KM_SLEEP);
995 lb_cpu = (lbolt_cpu_t *)P2ROUNDUP(buf, CPU_CACHE_COHERENCE_SIZE);
996
997 for (i = 0; i < max_ncpus; i++)
998 lb_cpu[i].lbc_counter = lb_info->lbi_thresh_calls;
999
1000 /*
1001 * Install the softint used to switch between event and cyclic driven
1002 * lbolt. We use a soft interrupt to make sure the context of the
1003 * cyclic reprogram call is safe.
1004 */
1005 lbolt_softint_add();
1006
	/*
	 * The hybrid lbolt implementation is based on a hardware counter
	 * that is reset at every hardware reboot, and we'd like the lbolt
	 * value to start at zero after both a hardware and a fast reboot.
	 * We therefore calculate the number of clock ticks the system has
	 * been up and store it in the lbi_debug_time field of the lbolt
	 * info structure. The value of this field will be subtracted from
	 * lbolt before returning it.
	 */
1016 lb_info->lbi_internal = lb_info->lbi_debug_time =
1017 (gethrtime()/nsec_per_tick);
1018
1019 /*
1020 * lbolt_hybrid points at lbolt_bootstrap until now. The LBOLT_* macros
1021 * and lbolt_debug_{enter,return} use this value as an indication that
	 * the initialization above hasn't been completed. Setting lbolt_hybrid
1023 * to either lbolt_{cyclic,event}_driven here signals those code paths
1024 * that the lbolt related structures can be used.
1025 */
1026 if (lbolt_cyc_only) {
1027 lbolt_when.cyt_when = 0;
1028 lbolt_hybrid = lbolt_cyclic_driven;
1029 } else {
1030 lbolt_when.cyt_when = CY_INFINITY;
1031 lbolt_hybrid = lbolt_event_driven;
1032 }
1033
1034 /*
	 * Grab cpu_lock and install both cyclics.
1036 */
1037 mutex_enter(&cpu_lock);
1038
1039 clock_cyclic = cyclic_add(&clk_hdlr, &clk_when);
1040 lb_info->id.lbi_cyclic_id = cyclic_add(&lbolt_hdlr, &lbolt_when);
1041
1042 mutex_exit(&cpu_lock);
1043 }
1044
1045 /*
 * Called before calcloadavg to compute the 10-second moving loadavg.
1047 */
1048
1049 static int
1050 genloadavg(struct loadavg_s *avgs)
1051 {
1052 int avg;
1053 int spos; /* starting position */
1054 int cpos; /* moving current position */
1055 int i;
1056 int slen;
1057 hrtime_t hr_avg;
1058
	/* 10-second snapshot, calculate first position */
1060 if (avgs->lg_len == 0) {
1061 return (0);
1062 }
1063 slen = avgs->lg_len < S_MOVAVG_SZ ? avgs->lg_len : S_MOVAVG_SZ;
1064
1065 spos = (avgs->lg_cur - 1) >= 0 ? avgs->lg_cur - 1 :
1066 S_LOADAVG_SZ + (avgs->lg_cur - 1);
1067 for (i = hr_avg = 0; i < slen; i++) {
1068 cpos = (spos - i) >= 0 ? spos - i : S_LOADAVG_SZ + (spos - i);
1069 hr_avg += avgs->lg_loads[cpos];
1070 }
1071
1072 hr_avg = hr_avg / slen;
1073 avg = hr_avg / (NANOSEC / LGRP_LOADAVG_IN_THREAD_MAX);
1074
1075 return (avg);
1076 }
1077
1078 /*
 * Run every second from clock() to update the loadavg count available to the
 * system and cpu-partitions.
 *
 * This works by sampling the total usr, sys and wait time elapsed on each
 * CPU, computing the delta against the previous sample, and adding that
 * delta to the running load totals.
1085 */
1086
1087 static void
loadavg_update(void)
1089 {
1090 cpu_t *cp;
1091 cpupart_t *cpupart;
1092 hrtime_t cpu_total;
1093 int prev;
1094
1095 cp = cpu_list;
1096 loadavg.lg_total = 0;
1097
1098 /*
1099 * first pass totals up per-cpu statistics for system and cpu
1100 * partitions
1101 */
1102
1103 do {
1104 struct loadavg_s *lavg;
1105
1106 lavg = &cp->cpu_loadavg;
1107
1108 cpu_total = cp->cpu_acct[CMS_USER] +
1109 cp->cpu_acct[CMS_SYSTEM] + cp->cpu_waitrq;
1110 /* compute delta against last total */
1111 scalehrtime(&cpu_total);
1112 prev = (lavg->lg_cur - 1) >= 0 ? lavg->lg_cur - 1 :
1113 S_LOADAVG_SZ + (lavg->lg_cur - 1);
1114 if (lavg->lg_loads[prev] <= 0) {
1115 lavg->lg_loads[lavg->lg_cur] = cpu_total;
1116 cpu_total = 0;
1117 } else {
1118 lavg->lg_loads[lavg->lg_cur] = cpu_total;
1119 cpu_total = cpu_total - lavg->lg_loads[prev];
1120 if (cpu_total < 0)
1121 cpu_total = 0;
1122 }
1123
1124 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1125 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1126 lavg->lg_len + 1 : S_LOADAVG_SZ;
1127
1128 loadavg.lg_total += cpu_total;
1129 cp->cpu_part->cp_loadavg.lg_total += cpu_total;
1130
1131 } while ((cp = cp->cpu_next) != cpu_list);
1132
1133 loadavg.lg_loads[loadavg.lg_cur] = loadavg.lg_total;
1134 loadavg.lg_cur = (loadavg.lg_cur + 1) % S_LOADAVG_SZ;
1135 loadavg.lg_len = (loadavg.lg_len + 1) < S_LOADAVG_SZ ?
1136 loadavg.lg_len + 1 : S_LOADAVG_SZ;
1137 /*
1138 * Second pass updates counts
1139 */
1140 cpupart = cp_list_head;
1141
1142 do {
1143 struct loadavg_s *lavg;
1144
1145 lavg = &cpupart->cp_loadavg;
1146 lavg->lg_loads[lavg->lg_cur] = lavg->lg_total;
1147 lavg->lg_total = 0;
1148 lavg->lg_cur = (lavg->lg_cur + 1) % S_LOADAVG_SZ;
1149 lavg->lg_len = (lavg->lg_len + 1) < S_LOADAVG_SZ ?
1150 lavg->lg_len + 1 : S_LOADAVG_SZ;
1151
1152 } while ((cpupart = cpupart->cp_next) != cp_list_head);
1153
1154 /*
1155 * Third pass totals up per-zone statistics.
1156 */
1157 zone_loadavg_update();
1158 }
1159
1160 /*
1161 * clock_update() - local clock update
1162 *
1163 * This routine is called by ntp_adjtime() to update the local clock
1164 * phase and frequency. The implementation is of an
1165 * adaptive-parameter, hybrid phase/frequency-lock loop (PLL/FLL). The
1166 * routine computes new time and frequency offset estimates for each
1167 * call. The PPS signal itself determines the new time offset,
1168 * instead of the calling argument. Presumably, calls to
1169 * ntp_adjtime() occur only when the caller believes the local clock
1170 * is valid within some bound (+-128 ms with NTP). If the caller's
1171 * time is far different than the PPS time, an argument will ensue,
1172 * and it's not clear who will lose.
1173 *
 * For uncompensated quartz crystal oscillators and nominal update
1175 * intervals less than 1024 s, operation should be in phase-lock mode
1176 * (STA_FLL = 0), where the loop is disciplined to phase. For update
1177 * intervals greater than this, operation should be in frequency-lock
1178 * mode (STA_FLL = 1), where the loop is disciplined to frequency.
1179 *
1180 * Note: mutex(&tod_lock) is in effect.
1181 */
1182 void
1183 clock_update(int offset)
1184 {
1185 int ltemp, mtemp, s;
1186
1187 ASSERT(MUTEX_HELD(&tod_lock));
1188
1189 if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
1190 return;
1191 ltemp = offset;
1192 if ((time_status & STA_PPSTIME) && (time_status & STA_PPSSIGNAL))
1193 ltemp = pps_offset;
1194
1195 /*
1196 * Scale the phase adjustment and clamp to the operating range.
1197 */
1198 if (ltemp > MAXPHASE)
1199 time_offset = MAXPHASE * SCALE_UPDATE;
1200 else if (ltemp < -MAXPHASE)
1201 time_offset = -(MAXPHASE * SCALE_UPDATE);
1202 else
1203 time_offset = ltemp * SCALE_UPDATE;
1204
1205 /*
1206 * Select whether the frequency is to be controlled and in which
1207 * mode (PLL or FLL). Clamp to the operating range. Ugly
1208 * multiply/divide should be replaced someday.
1209 */
1210 if (time_status & STA_FREQHOLD || time_reftime == 0)
1211 time_reftime = hrestime.tv_sec;
1212
1213 mtemp = hrestime.tv_sec - time_reftime;
1214 time_reftime = hrestime.tv_sec;
1215
1216 if (time_status & STA_FLL) {
1217 if (mtemp >= MINSEC) {
1218 ltemp = ((time_offset / mtemp) * (SCALE_USEC /
1219 SCALE_UPDATE));
1220 if (ltemp)
1221 time_freq += ltemp / SCALE_KH;
1222 }
1223 } else {
1224 if (mtemp < MAXSEC) {
1225 ltemp *= mtemp;
1226 if (ltemp)
1227 time_freq += (int)(((int64_t)ltemp *
1228 SCALE_USEC) / SCALE_KF)
1229 / (1 << (time_constant * 2));
1230 }
1231 }
1232 if (time_freq > time_tolerance)
1233 time_freq = time_tolerance;
1234 else if (time_freq < -time_tolerance)
1235 time_freq = -time_tolerance;
1236
1237 s = hr_clock_lock();
1238 tod_needsync = 1;
1239 hr_clock_unlock(s);
1240 }
1241
1242 /*
1243 * ddi_hardpps() - discipline CPU clock oscillator to external PPS signal
1244 *
1245 * This routine is called at each PPS interrupt in order to discipline
1246 * the CPU clock oscillator to the PPS signal. It measures the PPS phase
1247 * and leaves it in a handy spot for the clock() routine. It
1248 * integrates successive PPS phase differences and calculates the
1249 * frequency offset. This is used in clock() to discipline the CPU
1250 * clock oscillator so that intrinsic frequency error is cancelled out.
1251 * The code requires the caller to capture the time and hardware counter
1252 * value at the on-time PPS signal transition.
1253 *
1254 * Note that, on some Unix systems, this routine runs at an interrupt
1255 * priority level higher than the timer interrupt routine clock().
1256 * Therefore, the variables used are distinct from the clock()
1257 * variables, except for certain exceptions: The PPS frequency pps_freq
1258 * and phase pps_offset variables are determined by this routine and
1259 * updated atomically. The time_tolerance variable can be considered a
1260 * constant, since it is infrequently changed, and then only when the
1261 * PPS signal is disabled. The watchdog counter pps_valid is updated
1262 * once per second by clock() and is atomically cleared in this
1263 * routine.
1264 *
1265 * tvp is the time of the last tick; usec is a microsecond count since the
1266 * last tick.
1267 *
1268 * Note: In Solaris systems, the tick value is actually given by
1269 * usec_per_tick. This is called from the serial driver cdintr(),
 * or equivalent, at a high PIL. Because the kernel keeps
 * high-resolution time, the following code can accept either
 * the traditional argument pair, or the current high-resolution
 * timestamp in tvp and zero in usec.
1274 */
1275 void
1276 ddi_hardpps(struct timeval *tvp, int usec)
1277 {
1278 int u_usec, v_usec, bigtick;
1279 time_t cal_sec;
1280 int cal_usec;
1281
1282 /*
1283 * An occasional glitch can be produced when the PPS interrupt
1284 * occurs in the clock() routine before the time variable is
1285 * updated. Here the offset is discarded when the difference
1286 * between it and the last one is greater than tick/2, but not
1287 * if the interval since the first discard exceeds 30 s.
1288 */
1289 time_status |= STA_PPSSIGNAL;
1290 time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
1291 pps_valid = 0;
1292 u_usec = -tvp->tv_usec;
1293 if (u_usec < -(MICROSEC/2))
1294 u_usec += MICROSEC;
1295 v_usec = pps_offset - u_usec;
1296 if (v_usec < 0)
1297 v_usec = -v_usec;
1298 if (v_usec > (usec_per_tick >> 1)) {
1299 if (pps_glitch > MAXGLITCH) {
1300 pps_glitch = 0;
1301 pps_tf[2] = u_usec;
1302 pps_tf[1] = u_usec;
1303 } else {
1304 pps_glitch++;
1305 u_usec = pps_offset;
1306 }
1307 } else
1308 pps_glitch = 0;
1309
1310 /*
1311 * A three-stage median filter is used to help deglitch the pps
1312 * time. The median sample becomes the time offset estimate; the
1313 * difference between the other two samples becomes the time
1314 * dispersion (jitter) estimate.
1315 */
1316 pps_tf[2] = pps_tf[1];
1317 pps_tf[1] = pps_tf[0];
1318 pps_tf[0] = u_usec;
1319 if (pps_tf[0] > pps_tf[1]) {
1320 if (pps_tf[1] > pps_tf[2]) {
1321 pps_offset = pps_tf[1]; /* 0 1 2 */
1322 v_usec = pps_tf[0] - pps_tf[2];
1323 } else if (pps_tf[2] > pps_tf[0]) {
1324 pps_offset = pps_tf[0]; /* 2 0 1 */
1325 v_usec = pps_tf[2] - pps_tf[1];
1326 } else {
1327 pps_offset = pps_tf[2]; /* 0 2 1 */
1328 v_usec = pps_tf[0] - pps_tf[1];
1329 }
1330 } else {
1331 if (pps_tf[1] < pps_tf[2]) {
1332 pps_offset = pps_tf[1]; /* 2 1 0 */
1333 v_usec = pps_tf[2] - pps_tf[0];
1334 } else if (pps_tf[2] < pps_tf[0]) {
1335 pps_offset = pps_tf[0]; /* 1 0 2 */
1336 v_usec = pps_tf[1] - pps_tf[2];
1337 } else {
1338 pps_offset = pps_tf[2]; /* 1 2 0 */
1339 v_usec = pps_tf[1] - pps_tf[0];
1340 }
1341 }
1342 if (v_usec > MAXTIME)
1343 pps_jitcnt++;
1344 v_usec = (v_usec << PPS_AVG) - pps_jitter;
1345 pps_jitter += v_usec / (1 << PPS_AVG);
1346 if (pps_jitter > (MAXTIME >> 1))
1347 time_status |= STA_PPSJITTER;
1348
1349 /*
1350 * During the calibration interval adjust the starting time when
1351 * the tick overflows. At the end of the interval compute the
1352 * duration of the interval and the difference of the hardware
1353 * counters at the beginning and end of the interval. This code
 * is deliciously complicated by the fact that valid differences may
1355 * exceed the value of tick when using long calibration
1356 * intervals and small ticks. Note that the counter can be
1357 * greater than tick if caught at just the wrong instant, but
1358 * the values returned and used here are correct.
1359 */
1360 bigtick = (int)usec_per_tick * SCALE_USEC;
1361 pps_usec -= pps_freq;
1362 if (pps_usec >= bigtick)
1363 pps_usec -= bigtick;
1364 if (pps_usec < 0)
1365 pps_usec += bigtick;
1366 pps_time.tv_sec++;
1367 pps_count++;
1368 if (pps_count < (1 << pps_shift))
1369 return;
1370 pps_count = 0;
1371 pps_calcnt++;
1372 u_usec = usec * SCALE_USEC;
1373 v_usec = pps_usec - u_usec;
1374 if (v_usec >= bigtick >> 1)
1375 v_usec -= bigtick;
1376 if (v_usec < -(bigtick >> 1))
1377 v_usec += bigtick;
1378 if (v_usec < 0)
1379 v_usec = -(-v_usec >> pps_shift);
1380 else
1381 v_usec = v_usec >> pps_shift;
1382 pps_usec = u_usec;
1383 cal_sec = tvp->tv_sec;
1384 cal_usec = tvp->tv_usec;
1385 cal_sec -= pps_time.tv_sec;
1386 cal_usec -= pps_time.tv_usec;
1387 if (cal_usec < 0) {
1388 cal_usec += MICROSEC;
1389 cal_sec--;
1390 }
1391 pps_time = *tvp;
1392
1393 /*
1394 * Check for lost interrupts, noise, excessive jitter and
1395 * excessive frequency error. The number of timer ticks during
1396 * the interval may vary +-1 tick. Add to this a margin of one
1397 * tick for the PPS signal jitter and maximum frequency
1398 * deviation. If the limits are exceeded, the calibration
1399 * interval is reset to the minimum and we start over.
1400 */
1401 u_usec = (int)usec_per_tick << 1;
1402 if (!((cal_sec == -1 && cal_usec > (MICROSEC - u_usec)) ||
1403 (cal_sec == 0 && cal_usec < u_usec)) ||
1404 v_usec > time_tolerance || v_usec < -time_tolerance) {
1405 pps_errcnt++;
1406 pps_shift = PPS_SHIFT;
1407 pps_intcnt = 0;
1408 time_status |= STA_PPSERROR;
1409 return;
1410 }
1411
1412 /*
1413 * A three-stage median filter is used to help deglitch the pps
1414 * frequency. The median sample becomes the frequency offset
1415 * estimate; the difference between the other two samples
1416 * becomes the frequency dispersion (stability) estimate.
1417 */
1418 pps_ff[2] = pps_ff[1];
1419 pps_ff[1] = pps_ff[0];
1420 pps_ff[0] = v_usec;
1421 if (pps_ff[0] > pps_ff[1]) {
1422 if (pps_ff[1] > pps_ff[2]) {
1423 u_usec = pps_ff[1]; /* 0 1 2 */
1424 v_usec = pps_ff[0] - pps_ff[2];
1425 } else if (pps_ff[2] > pps_ff[0]) {
1426 u_usec = pps_ff[0]; /* 2 0 1 */
1427 v_usec = pps_ff[2] - pps_ff[1];
1428 } else {
1429 u_usec = pps_ff[2]; /* 0 2 1 */
1430 v_usec = pps_ff[0] - pps_ff[1];
1431 }
1432 } else {
1433 if (pps_ff[1] < pps_ff[2]) {
1434 u_usec = pps_ff[1]; /* 2 1 0 */
1435 v_usec = pps_ff[2] - pps_ff[0];
1436 } else if (pps_ff[2] < pps_ff[0]) {
1437 u_usec = pps_ff[0]; /* 1 0 2 */
1438 v_usec = pps_ff[1] - pps_ff[2];
1439 } else {
1440 u_usec = pps_ff[2]; /* 1 2 0 */
1441 v_usec = pps_ff[1] - pps_ff[0];
1442 }
1443 }
1444
1445 /*
1446 * Here the frequency dispersion (stability) is updated. If it
1447 * is less than one-fourth the maximum (MAXFREQ), the frequency
1448 * offset is updated as well, but clamped to the tolerance. It
1449 * will be processed later by the clock() routine.
1450 */
1451 v_usec = (v_usec >> 1) - pps_stabil;
1452 if (v_usec < 0)
1453 pps_stabil -= -v_usec >> PPS_AVG;
1454 else
1455 pps_stabil += v_usec >> PPS_AVG;
1456 if (pps_stabil > MAXFREQ >> 2) {
1457 pps_stbcnt++;
1458 time_status |= STA_PPSWANDER;
1459 return;
1460 }
1461 if (time_status & STA_PPSFREQ) {
1462 if (u_usec < 0) {
1463 pps_freq -= -u_usec >> PPS_AVG;
1464 if (pps_freq < -time_tolerance)
1465 pps_freq = -time_tolerance;
1466 u_usec = -u_usec;
1467 } else {
1468 pps_freq += u_usec >> PPS_AVG;
1469 if (pps_freq > time_tolerance)
1470 pps_freq = time_tolerance;
1471 }
1472 }
1473
1474 /*
1475 * Here the calibration interval is adjusted. If the maximum
1476 * time difference is greater than tick / 4, reduce the interval
1477 * by half. If this is not the case for four consecutive
1478 * intervals, double the interval.
1479 */
1480 if (u_usec << pps_shift > bigtick >> 2) {
1481 pps_intcnt = 0;
1482 if (pps_shift > PPS_SHIFT)
1483 pps_shift--;
1484 } else if (pps_intcnt >= 4) {
1485 pps_intcnt = 0;
1486 if (pps_shift < PPS_SHIFTMAX)
1487 pps_shift++;
1488 } else
1489 pps_intcnt++;
1490
1491 /*
1492 * If recovering from kmdb, then make sure the tod chip gets resynced.
1493 * If we took an early exit above, then we don't yet have a stable
1494 * calibration signal to lock onto, so don't mark the tod for sync
1495 * until we get all the way here.
1496 */
1497 {
1498 int s = hr_clock_lock();
1499
1500 tod_needsync = 1;
1501 hr_clock_unlock(s);
1502 }
1503 }
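
/*
 * Illustrative caller sketch (hypothetical driver code, per the note
 * above): capture the current high-resolution time at the on-time PPS
 * transition and pass it in with a zero microsecond count:
 *
 *	timestruc_t ts;
 *	struct timeval tv;
 *
 *	gethrestime(&ts);
 *	tv.tv_sec = ts.tv_sec;
 *	tv.tv_usec = ts.tv_nsec / (NANOSEC / MICROSEC);
 *	ddi_hardpps(&tv, 0);
 */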
1504
1505 /*
1506 * Handle clock tick processing for a thread.
 * Check for timer action, enforce CPU rlimit, do profiling, etc.
1508 */
1509 void
1510 clock_tick(kthread_t *t, int pending)
1511 {
1512 struct proc *pp;
1513 klwp_id_t lwp;
1514 struct as *as;
1515 clock_t ticks;
1516 int poke = 0; /* notify another CPU */
1517 int user_mode;
1518 size_t rss;
1519 int i, total_usec, usec;
1520 rctl_qty_t secs;
1521
1522 ASSERT(pending > 0);
1523
	/* Must be operating on an lwp/thread */
1525 if ((lwp = ttolwp(t)) == NULL) {
1526 panic("clock_tick: no lwp");
1527 /*NOTREACHED*/
1528 }
1529
1530 for (i = 0; i < pending; i++) {
1531 CL_TICK(t); /* Class specific tick processing */
1532 DTRACE_SCHED1(tick, kthread_t *, t);
1533 }
1534
1535 pp = ttoproc(t);
1536
1537 /* pp->p_lock makes sure that the thread does not exit */
1538 ASSERT(MUTEX_HELD(&pp->p_lock));
1539
1540 user_mode = (lwp->lwp_state == LWP_USER);
1541
1542 ticks = (pp->p_utime + pp->p_stime) % hz;
1543 /*
1544 * Update process times. Should use high res clock and state
1545 * changes instead of statistical sampling method. XXX
1546 */
1547 if (user_mode) {
1548 pp->p_utime += pending;
1549 } else {
1550 pp->p_stime += pending;
1551 }
1552
1553 pp->p_ttime += pending;
1554 as = pp->p_as;
1555
1556 /*
1557 * Update user profiling statistics. Get the pc from the
1558 * lwp when the AST happens.
1559 */
1560 if (pp->p_prof.pr_scale) {
1561 atomic_add_32(&lwp->lwp_oweupc, (int32_t)pending);
1562 if (user_mode) {
1563 poke = 1;
1564 aston(t);
1565 }
1566 }
1567
1568 /*
1569 * If CPU was in user state, process lwp-virtual time
1570 * interval timer. The value passed to itimerdecr() has to be
1571 * in microseconds and has to be less than one second. Hence
1572 * this loop.
1573 */
1574 total_usec = usec_per_tick * pending;
1575 while (total_usec > 0) {
1576 usec = MIN(total_usec, (MICROSEC - 1));
1577 if (user_mode &&
1578 timerisset(&lwp->lwp_timer[ITIMER_VIRTUAL].it_value) &&
1579 itimerdecr(&lwp->lwp_timer[ITIMER_VIRTUAL], usec) == 0) {
1580 poke = 1;
1581 sigtoproc(pp, t, SIGVTALRM);
1582 }
1583 total_usec -= usec;
1584 }
1585
1586 /*
1587 * If CPU was in user state, process lwp-profile
1588 * interval timer.
1589 */
1590 total_usec = usec_per_tick * pending;
1591 while (total_usec > 0) {
1592 usec = MIN(total_usec, (MICROSEC - 1));
1593 if (timerisset(&lwp->lwp_timer[ITIMER_PROF].it_value) &&
1594 itimerdecr(&lwp->lwp_timer[ITIMER_PROF], usec) == 0) {
1595 poke = 1;
1596 sigtoproc(pp, t, SIGPROF);
1597 }
1598 total_usec -= usec;
1599 }
1600
1601 /*
1602 * Enforce CPU resource controls:
1603 * (a) process.max-cpu-time resource control
1604 *
	 * Perform the check only if we have accumulated more than a second.
1606 */
1607 if ((ticks + pending) >= hz) {
1608 (void) rctl_test(rctlproc_legacy[RLIMIT_CPU], pp->p_rctls, pp,
1609 (pp->p_utime + pp->p_stime)/hz, RCA_UNSAFE_SIGINFO);
1610 }
1611
1612 /*
1613 * (b) task.max-cpu-time resource control
1614 *
1615 * If we have accumulated enough ticks, increment the task CPU
1616 * time usage and test for the resource limit. This minimizes the
	 * number of calls to rctl_test(). The task CPU time mutex
1618 * is highly contentious as many processes can be sharing a task.
1619 */
1620 if (pp->p_ttime >= clock_tick_proc_max) {
1621 secs = task_cpu_time_incr(pp->p_task, pp->p_ttime);
1622 pp->p_ttime = 0;
1623 if (secs) {
1624 (void) rctl_test(rc_task_cpu_time, pp->p_task->tk_rctls,
1625 pp, secs, RCA_UNSAFE_SIGINFO);
1626 }
1627 }
1628
1629 /*
1630 * Update memory usage for the currently running process.
1631 */
1632 rss = rm_asrss(as);
1633 PTOU(pp)->u_mem += rss;
1634 if (rss > PTOU(pp)->u_mem_max)
1635 PTOU(pp)->u_mem_max = rss;
1636
1637 /*
1638 * Notify the CPU the thread is running on.
1639 */
1640 if (poke && t->t_cpu != CPU)
1641 poke_cpu(t->t_cpu->cpu_id);
1642 }
1643
1644 void
1645 profil_tick(uintptr_t upc)
1646 {
1647 int ticks;
1648 proc_t *p = ttoproc(curthread);
1649 klwp_t *lwp = ttolwp(curthread);
1650 struct prof *pr = &p->p_prof;
1651
1652 do {
1653 ticks = lwp->lwp_oweupc;
1654 } while (cas32(&lwp->lwp_oweupc, ticks, 0) != ticks);
1655
1656 mutex_enter(&p->p_pflock);
1657 if (pr->pr_scale >= 2 && upc >= pr->pr_off) {
1658 /*
1659 * Old-style profiling
1660 */
1661 uint16_t *slot = pr->pr_base;
1662 uint16_t old, new;
1663 if (pr->pr_scale != 2) {
1664 uintptr_t delta = upc - pr->pr_off;
1665 uintptr_t byteoff = ((delta >> 16) * pr->pr_scale) +
1666 (((delta & 0xffff) * pr->pr_scale) >> 16);
1667 if (byteoff >= (uintptr_t)pr->pr_size) {
1668 mutex_exit(&p->p_pflock);
1669 return;
1670 }
1671 slot += byteoff / sizeof (uint16_t);
1672 }
1673 if (fuword16(slot, &old) < 0 ||
1674 (new = old + ticks) > SHRT_MAX ||
1675 suword16(slot, new) < 0) {
1676 pr->pr_scale = 0;
1677 }
1678 } else if (pr->pr_scale == 1) {
1679 /*
1680 * PC Sampling
1681 */
1682 model_t model = lwp_getdatamodel(lwp);
1683 int result;
1684 #ifdef __lint
1685 model = model;
1686 #endif
1687 while (ticks-- > 0) {
1688 if (pr->pr_samples == pr->pr_size) {
1689 /* buffer full, turn off sampling */
1690 pr->pr_scale = 0;
1691 break;
1692 }
1693 switch (SIZEOF_PTR(model)) {
1694 case sizeof (uint32_t):
1695 result = suword32(pr->pr_base, (uint32_t)upc);
1696 break;
1697 #ifdef _LP64
1698 case sizeof (uint64_t):
1699 result = suword64(pr->pr_base, (uint64_t)upc);
1700 break;
1701 #endif
1702 default:
1703 cmn_err(CE_WARN, "profil_tick: unexpected "
1704 "data model");
1705 result = -1;
1706 break;
1707 }
1708 if (result != 0) {
1709 pr->pr_scale = 0;
1710 break;
1711 }
1712 pr->pr_base = (caddr_t)pr->pr_base + SIZEOF_PTR(model);
1713 pr->pr_samples++;
1714 }
1715 }
1716 mutex_exit(&p->p_pflock);
1717 }
1718
1719 static void
1720 delay_wakeup(void *arg)
1721 {
1722 kthread_t *t = arg;
1723
1724 mutex_enter(&t->t_delay_lock);
1725 cv_signal(&t->t_delay_cv);
1726 mutex_exit(&t->t_delay_lock);
1727 }
1728
1729 /*
1730 * The delay(9F) man page indicates that it can only be called from user or
1731 * kernel context - detect and diagnose bad calls. The following macro will
1732 * produce a limited number of messages identifying bad callers. This is done
1733 * in a macro so that caller() is meaningful. When a bad caller is identified,
1734 * switching to 'drv_usecwait(TICK_TO_USEC(ticks));' may be appropriate.
1735 */
1736 #define DELAY_CONTEXT_CHECK() { \
1737 uint32_t m; \
1738 char *f; \
1739 ulong_t off; \
1740 \
1741 m = delay_from_interrupt_msg; \
1742 if (delay_from_interrupt_diagnose && servicing_interrupt() && \
1743 !panicstr && !devinfo_freeze && \
1744 atomic_cas_32(&delay_from_interrupt_msg, m ? m : 1, m-1)) { \
1745 f = modgetsymname((uintptr_t)caller(), &off); \
1746 cmn_err(CE_WARN, "delay(9F) called from " \
1747 "interrupt context: %s`%s", \
1748 mod_containing_pc(caller()), f ? f : "..."); \
1749 } \
1750 }
1751
1752 /*
1753 * delay_common: common delay code.
1754 */
1755 static void
1756 delay_common(clock_t ticks)
1757 {
1758 kthread_t *t = curthread;
1759 clock_t deadline;
1760 clock_t timeleft;
1761 callout_id_t id;
1762
1763 /* If timeouts aren't running all we can do is spin. */
1764 if (panicstr || devinfo_freeze) {
1765 /* Convert delay(9F) call into drv_usecwait(9F) call. */
1766 if (ticks > 0)
1767 drv_usecwait(TICK_TO_USEC(ticks));
1768 return;
1769 }
1770
1771 deadline = ddi_get_lbolt() + ticks;
1772 while ((timeleft = deadline - ddi_get_lbolt()) > 0) {
1773 mutex_enter(&t->t_delay_lock);
1774 id = timeout_default(delay_wakeup, t, timeleft);
1775 cv_wait(&t->t_delay_cv, &t->t_delay_lock);
1776 mutex_exit(&t->t_delay_lock);
1777 (void) untimeout_default(id, 0);
1778 }
1779 }
1780
1781 /*
1782 * Delay specified number of clock ticks.
1783 */
1784 void
1785 delay(clock_t ticks)
1786 {
1787 DELAY_CONTEXT_CHECK();
1788
1789 delay_common(ticks);
1790 }
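
/*
 * For reference (sketch only): a typical driver caller converts
 * microseconds to ticks first, e.g. to block for roughly 10ms:
 *
 *	delay(drv_usectohz(10000));
 */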
1791
1792 /*
1793 * Delay a random number of clock ticks between 1 and ticks.
1794 */
1795 void
1796 delay_random(clock_t ticks)
1797 {
	uint_t r;	/* unsigned, so the modulus below stays in [0, ticks) */
1799
1800 DELAY_CONTEXT_CHECK();
1801
1802 (void) random_get_pseudo_bytes((void *)&r, sizeof (r));
1803 if (ticks == 0)
1804 ticks = 1;
1805 ticks = (r % ticks) + 1;
1806 delay_common(ticks);
1807 }
1808
1809 /*
1810 * Like delay, but interruptible by a signal.
1811 */
1812 int
1813 delay_sig(clock_t ticks)
1814 {
1815 kthread_t *t = curthread;
1816 clock_t deadline;
1817 clock_t rc;
1818
1819 /* If timeouts aren't running all we can do is spin. */
1820 if (panicstr || devinfo_freeze) {
1821 if (ticks > 0)
1822 drv_usecwait(TICK_TO_USEC(ticks));
1823 return (0);
1824 }
1825
1826 deadline = ddi_get_lbolt() + ticks;
1827 mutex_enter(&t->t_delay_lock);
1828 do {
1829 rc = cv_timedwait_sig(&t->t_delay_cv,
1830 &t->t_delay_lock, deadline);
1831 /* loop until past deadline or signaled */
1832 } while (rc > 0);
1833 mutex_exit(&t->t_delay_lock);
1834 if (rc == 0)
1835 return (EINTR);
1836 return (0);
1837 }
1838
1839
1840 #define SECONDS_PER_DAY 86400
1841
1842 /*
1843 * Initialize the system time based on the TOD chip. approx is used as
1844 * an approximation of time (e.g. from the filesystem) in the event that
1845 * the TOD chip has been cleared or is unresponsive. An approx of -1
1846 * means the filesystem doesn't keep time.
1847 */
1848 void
1849 clkset(time_t approx)
1850 {
1851 timestruc_t ts;
1852 int spl;
1853 int set_clock = 0;
1854
1855 mutex_enter(&tod_lock);
1856 ts = tod_get();
1857
1858 if (ts.tv_sec > 365 * SECONDS_PER_DAY) {
1859 /*
1860 * If the TOD chip is reporting some time after 1971,
1861 * then it probably didn't lose power or become otherwise
1862 * cleared in the recent past; check to assure that
1863 * the time coming from the filesystem isn't in the future
1864 * according to the TOD chip.
1865 */
1866 if (approx != -1 && approx > ts.tv_sec) {
1867 cmn_err(CE_WARN, "Last shutdown is later "
1868 "than time on time-of-day chip; check date.");
1869 }
1870 } else {
1871 /*
1872 * If the TOD chip isn't giving correct time, set it to the
1873 * greater of i) approx and ii) 1987. That way if approx
1874 * is negative or is earlier than 1987, we set the clock
1875 * back to a time when Oliver North, ALF and Dire Straits
1876 * were all on the collective brain: 1987.
1877 */
1878 timestruc_t tmp;
1879 time_t diagnose_date = (1987 - 1970) * 365 * SECONDS_PER_DAY;
1880 ts.tv_sec = (approx > diagnose_date ? approx : diagnose_date);
1881 ts.tv_nsec = 0;
1882
1883 /*
1884 * Attempt to write the new time to the TOD chip. Set spl high
1885 * to avoid getting preempted between the tod_set and tod_get.
1886 */
1887 spl = splhi();
1888 tod_set(ts);
1889 tmp = tod_get();
1890 splx(spl);
1891
1892 if (tmp.tv_sec != ts.tv_sec && tmp.tv_sec != ts.tv_sec + 1) {
1893 tod_broken = 1;
1894 dosynctodr = 0;
1895 cmn_err(CE_WARN, "Time-of-day chip unresponsive.");
1896 } else {
1897 cmn_err(CE_WARN, "Time-of-day chip had "
1898 "incorrect date; check and reset.");
1899 }
1900 set_clock = 1;
1901 }
1902
1903 if (!boot_time) {
1904 boot_time = ts.tv_sec;
1905 set_clock = 1;
1906 }
1907
1908 if (set_clock)
1909 set_hrestime(&ts);
1910
1911 mutex_exit(&tod_lock);
1912 }
1913
1914 int timechanged; /* for testing if the system time has been reset */
1915
1916 void
1917 set_hrestime(timestruc_t *ts)
1918 {
1919 int spl = hr_clock_lock();
1920 hrestime = *ts;
1921 membar_enter(); /* hrestime must be visible before timechanged++ */
1922 timedelta = 0;
1923 timechanged++;
1924 hr_clock_unlock(spl);
1925 callout_hrestime();
1926 }
1927
1928 static uint_t deadman_seconds;
1929 static uint32_t deadman_panics;
1930 static int deadman_enabled = 0;
1931 static int deadman_panic_timers = 1;
1932
1933 static void
1934 deadman(void)
1935 {
1936 if (panicstr) {
1937 /*
1938 * During panic, other CPUs besides the panic
1939 * master continue to handle cyclics and some other
1940 * interrupts. The code below is intended to be
1941 * single threaded, so any CPU other than the master
1942 * must keep out.
1943 */
1944 if (CPU->cpu_id != panic_cpu.cpu_id)
1945 return;
1946
1947 if (!deadman_panic_timers)
1948 return; /* allow all timers to be manually disabled */
1949
1950 /*
1951 * If we are generating a crash dump or syncing filesystems and
1952 * the corresponding timer is set, decrement it and re-enter
1953 * the panic code to abort it and advance to the next state.
1954 * The panic states and triggers are explained in panic.c.
1955 */
1956 if (panic_dump) {
1957 if (dump_timeleft && (--dump_timeleft == 0)) {
1958 panic("panic dump timeout");
1959 /*NOTREACHED*/
1960 }
1961 } else if (panic_sync) {
1962 if (sync_timeleft && (--sync_timeleft == 0)) {
1963 panic("panic sync timeout");
1964 /*NOTREACHED*/
1965 }
1966 }
1967
1968 return;
1969 }
1970
1971 if (deadman_counter != CPU->cpu_deadman_counter) {
1972 CPU->cpu_deadman_counter = deadman_counter;
1973 CPU->cpu_deadman_countdown = deadman_seconds;
1974 return;
1975 }
1976
1977 if (--CPU->cpu_deadman_countdown > 0)
1978 return;
1979
1980 /*
1981 * Regardless of whether or not we actually bring the system down,
1982 * bump the deadman_panics variable.
1983 *
1984 * N.B. deadman_panics is incremented once for each CPU that
1985 * passes through here. It's expected that all the CPUs will
1986 * detect this condition within one second of each other, so
1987 * when deadman_enabled is off, deadman_panics will
1988 * typically be a multiple of the total number of CPUs in
1989 * the system.
1990 */
1991 atomic_add_32(&deadman_panics, 1);
1992
1993 if (!deadman_enabled) {
1994 CPU->cpu_deadman_countdown = deadman_seconds;
1995 return;
1996 }
1997
1998 /*
1999 * If we're here, we want to bring the system down.
2000 */
2001 panic("deadman: timed out after %d seconds of clock "
2002 "inactivity", deadman_seconds);
2003 /*NOTREACHED*/
2004 }
2005
2006 /*ARGSUSED*/
2007 static void
2008 deadman_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
2009 {
2010 cpu->cpu_deadman_counter = 0;
2011 cpu->cpu_deadman_countdown = deadman_seconds;
2012
2013 hdlr->cyh_func = (cyc_func_t)deadman;
2014 hdlr->cyh_level = CY_HIGH_LEVEL;
2015 hdlr->cyh_arg = NULL;
2016
2017 /*
2018 * Stagger the CPUs so that they don't all run deadman() at
	 * the same time. The simplest reason to do this is to make it
2020 * more likely that only one CPU will panic in case of a
2021 * timeout. This is (strictly speaking) an aesthetic, not a
2022 * technical consideration.
2023 */
2024 when->cyt_when = cpu->cpu_id * (NANOSEC / NCPU);
2025 when->cyt_interval = NANOSEC;
2026 }
2027
2028
2029 void
2030 deadman_init(void)
2031 {
2032 cyc_omni_handler_t hdlr;
2033
2034 if (deadman_seconds == 0)
2035 deadman_seconds = snoop_interval / MICROSEC;
2036
2037 if (snooping)
2038 deadman_enabled = 1;
2039
2040 hdlr.cyo_online = deadman_online;
2041 hdlr.cyo_offline = NULL;
2042 hdlr.cyo_arg = NULL;
2043
2044 mutex_enter(&cpu_lock);
2045 deadman_cyclic = cyclic_add_omni(&hdlr);
2046 mutex_exit(&cpu_lock);
2047 }
2048
2049 /*
 * tod_fault() is for updating the tod validate mechanism state:
 * (1) TOD_NOFAULT: for resetting the state to 'normal'.
 *     currently used for debugging only
 * (2) The following four cases detected by the tod validate mechanism:
 *       TOD_REVERSED: current tod value is less than previous value.
 *       TOD_STALLED: current tod value hasn't advanced.
 *       TOD_JUMPED: current tod value advanced too far from previous value.
 *       TOD_RATECHANGED: the ratio between average tod delta and
 *       average tick delta has changed.
 * (3) TOD_RDONLY: when the TOD clock is not writeable, e.g. because it is
 *     a virtual TOD provided by a hypervisor.
2061 */
2062 enum tod_fault_type
2063 tod_fault(enum tod_fault_type ftype, int off)
2064 {
2065 ASSERT(MUTEX_HELD(&tod_lock));
2066
2067 if (tod_faulted != ftype) {
2068 switch (ftype) {
2069 case TOD_NOFAULT:
2070 plat_tod_fault(TOD_NOFAULT);
2071 cmn_err(CE_NOTE, "Restarted tracking "
2072 "Time of Day clock.");
2073 tod_faulted = ftype;
2074 break;
2075 case TOD_REVERSED:
2076 case TOD_JUMPED:
2077 if (tod_faulted == TOD_NOFAULT) {
2078 plat_tod_fault(ftype);
2079 cmn_err(CE_WARN, "Time of Day clock error: "
2080 "reason [%s by 0x%x]. -- "
2081 " Stopped tracking Time Of Day clock.",
2082 tod_fault_table[ftype], off);
2083 tod_faulted = ftype;
2084 }
2085 break;
2086 case TOD_STALLED:
2087 case TOD_RATECHANGED:
2088 if (tod_faulted == TOD_NOFAULT) {
2089 plat_tod_fault(ftype);
2090 cmn_err(CE_WARN, "Time of Day clock error: "
2091 "reason [%s]. -- "
2092 " Stopped tracking Time Of Day clock.",
2093 tod_fault_table[ftype]);
2094 tod_faulted = ftype;
2095 }
2096 break;
2097 case TOD_RDONLY:
2098 if (tod_faulted == TOD_NOFAULT) {
2099 plat_tod_fault(ftype);
2100 cmn_err(CE_NOTE, "!Time of Day clock is "
2101 "Read-Only; set of Date/Time will not "
2102 "persist across reboot.");
2103 tod_faulted = ftype;
2104 }
2105 break;
2106 default:
2107 break;
2108 }
2109 }
2110 return (tod_faulted);
2111 }
2112
2113 /*
2114 * Two functions that allow tod_status_flag to be manipulated by functions
2115 * external to this file.
2116 */
2117
2118 void
2119 tod_status_set(int tod_flag)
2120 {
2121 tod_status_flag |= tod_flag;
2122 }
2123
2124 void
2125 tod_status_clear(int tod_flag)
2126 {
2127 tod_status_flag &= ~tod_flag;
2128 }
2129
2130 /*
2131 * Record a timestamp and the value passed to tod_set(). The next call to
2132 * tod_validate() can use these values, prev_set_tick and prev_set_tod,
2133 * when checking the timestruc_t returned by tod_get(). Ordinarily,
 * tod_validate() will use prev_tick and prev_tod for this task, but these
 * become obsolete and are replaced by the prev_set_* values whenever the
 * TOD is re-written.
2137 */
2138 void
2139 tod_set_prev(timestruc_t ts)
2140 {
2141 if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2142 tod_validate_deferred) {
2143 return;
2144 }
2145 prev_set_tick = gethrtime();
2146 /*
2147 * A negative value will be set to zero in utc_to_tod() so we fake
2148 * a zero here in such a case. This would need to change if the
2149 * behavior of utc_to_tod() changes.
2150 */
2151 prev_set_tod = ts.tv_sec < 0 ? 0 : ts.tv_sec;
2152 }
2153
2154 /*
2155 * tod_validate() is used for checking values returned by tod_get().
2156 * Four error cases can be detected by this routine:
2157 * TOD_REVERSED: current tod value is less than previous.
2158 * TOD_STALLED: current tod value hasn't advanced.
2159 * TOD_JUMPED: current tod value advanced too far from previous value.
2160 * TOD_RATECHANGED: the ratio between average tod delta and
2161 * average tick delta has changed.
2162 */
2163 time_t
2164 tod_validate(time_t tod)
2165 {
2166 time_t diff_tod;
2167 hrtime_t diff_tick;
2168
2169 long dtick;
2170 int dtick_delta;
2171
2172 int off = 0;
2173 enum tod_fault_type tod_bad = TOD_NOFAULT;
2174
2175 static int firsttime = 1;
2176
2177 static time_t prev_tod = 0;
2178 static hrtime_t prev_tick = 0;
2179 static long dtick_avg = TOD_REF_FREQ;
2180
2181 int cpr_resume_done = 0;
2182 int dr_resume_done = 0;
2183
2184 hrtime_t tick = gethrtime();
2185
2186 ASSERT(MUTEX_HELD(&tod_lock));
2187
2188 /*
2189 * tod_validate_enable is patchable via /etc/system.
2190 * If TOD is already faulted, or if TOD validation is deferred,
2191 * there is nothing to do.
2192 */
2193 if ((tod_validate_enable == 0) || (tod_faulted != TOD_NOFAULT) ||
2194 tod_validate_deferred) {
2195 return (tod);
2196 }
2197
2198 /*
2199 * If this is the first time through, we just need to save the tod
 * we were called with, and the current hrtime, so we can use them next
 * time to validate tod_get().
2202 */
2203 if (firsttime) {
2204 firsttime = 0;
2205 prev_tod = tod;
2206 prev_tick = tick;
2207 return (tod);
2208 }
2209
2210 /*
2211 * Handle any flags that have been turned on by tod_status_set().
2212 * In the case where a tod_set() is done and then a subsequent
 * tod_get() fails (i.e., both TOD_SET_DONE and TOD_GET_FAILED are
 * true), we give TOD_GET_FAILED precedence by switching
2215 * off the flag, returning tod and leaving TOD_SET_DONE asserted
2216 * until such time as tod_get() completes successfully.
2217 */
2218 if (tod_status_flag & TOD_GET_FAILED) {
2219 /*
2220 * tod_get() has encountered an issue, possibly transitory,
2221 * when reading TOD. We'll just return the incoming tod
2222 * value (which is actually hrestime.tv_sec in this case)
2223 * and when we get a genuine tod, following a successful
2224 * tod_get(), we can validate using prev_tod and prev_tick.
2225 */
2226 tod_status_flag &= ~TOD_GET_FAILED;
2227 return (tod);
2228 } else if (tod_status_flag & TOD_SET_DONE) {
2229 /*
2230 * TOD has been modified. Just before the TOD was written,
2231 * tod_set_prev() saved tod and hrtime; we can now use
2232 * those values, prev_set_tod and prev_set_tick, to validate
2233 * the incoming tod that's just been read.
2234 */
2235 prev_tod = prev_set_tod;
2236 prev_tick = prev_set_tick;
2237 dtick_avg = TOD_REF_FREQ;
2238 tod_status_flag &= ~TOD_SET_DONE;
2239 /*
2240 * If a tod_set() preceded a cpr_suspend() without an
2241 * intervening tod_validate(), we need to ensure that a
2242 * TOD_JUMPED condition is ignored.
2243 * Note this isn't a concern in the case of DR as we've
2244 * just reassigned dtick_avg, above.
2245 */
2246 if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2247 cpr_resume_done = 1;
2248 tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2249 }
2250 } else if (tod_status_flag & TOD_CPR_RESUME_DONE) {
2251 /*
2252 * The system's coming back from a checkpoint resume.
2253 */
2254 cpr_resume_done = 1;
2255 tod_status_flag &= ~TOD_CPR_RESUME_DONE;
2256 /*
2257 * We need to handle the possibility of a CPR suspend
2258 * operation having been initiated whilst a DR event was
2259 * in-flight.
2260 */
2261 if (tod_status_flag & TOD_DR_RESUME_DONE) {
2262 dr_resume_done = 1;
2263 tod_status_flag &= ~TOD_DR_RESUME_DONE;
2264 }
2265 } else if (tod_status_flag & TOD_DR_RESUME_DONE) {
2266 /*
2267 * A Dynamic Reconfiguration event has taken place.
2268 */
2269 dr_resume_done = 1;
2270 tod_status_flag &= ~TOD_DR_RESUME_DONE;
2271 }
2272
2273 /* test hook */
2274 switch (tod_unit_test) {
2275 case 1: /* for testing jumping tod */
2276 tod += tod_test_injector;
2277 tod_unit_test = 0;
2278 break;
2279 case 2: /* for testing stuck tod bit */
2280 tod |= 1 << tod_test_injector;
2281 tod_unit_test = 0;
2282 break;
2283 case 3: /* for testing stalled tod */
2284 tod = prev_tod;
2285 tod_unit_test = 0;
2286 break;
2287 case 4: /* reset tod fault status */
2288 (void) tod_fault(TOD_NOFAULT, 0);
2289 tod_unit_test = 0;
2290 break;
2291 default:
2292 break;
2293 }
2294
2295 diff_tod = tod - prev_tod;
2296 diff_tick = tick - prev_tick;
2297
2298 ASSERT(diff_tick >= 0);
2299
2300 if (diff_tod < 0) {
2301 /* ERROR - tod reversed */
2302 tod_bad = TOD_REVERSED;
2303 off = (int)(prev_tod - tod);
2304 } else if (diff_tod == 0) {
2305 /* tod did not advance */
2306 if (diff_tick > TOD_STALL_THRESHOLD) {
2307 /* ERROR - tod stalled */
2308 tod_bad = TOD_STALLED;
2309 } else {
2310 /*
			 * Don't update prev_tick, so that diff_tick keeps
			 * accumulating from the first time diff_tod == 0
			 * was seen.
2314 */
2315 return (tod);
2316 }
2317 } else {
2318 /* calculate dtick */
2319 dtick = diff_tick / diff_tod;
2320
2321 /* update dtick averages */
2322 dtick_avg += ((dtick - dtick_avg) / TOD_FILTER_N);
2323
2324 /*
2325 * Calculate dtick_delta as
2326 * variation from reference freq in quartiles
2327 */
2328 dtick_delta = (dtick_avg - TOD_REF_FREQ) /
2329 (TOD_REF_FREQ >> 2);
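		/*
		 * Worked example (assuming TOD_REF_FREQ is one second of
		 * hrtime, i.e. NANOSEC): a dtick_avg of 1.3 seconds of
		 * hrtime per TOD second gives
		 * (0.3 * NANOSEC) / (0.25 * NANOSEC) == 1, so a sustained
		 * drift of 25% or more from the reference frequency makes
		 * dtick_delta non-zero and flags TOD_RATECHANGED below.
		 */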
2330
2331 /*
2332 * Even with a perfectly functioning TOD device,
2333 * when the number of elapsed seconds is low the
2334 * algorithm can calculate a rate that is beyond
2335 * tolerance, causing an error. The algorithm is
2336 * inaccurate when elapsed time is low (less than
2337 * 5 seconds).
2338 */
2339 if (diff_tod > 4) {
2340 if (dtick < TOD_JUMP_THRESHOLD) {
2341 /*
2342 * If we've just done a CPR resume, we detect
2343 * a jump in the TOD but, actually, what's
2344 * happened is that the TOD has been increasing
2345 * whilst the system was suspended and the tick
2346 * count hasn't kept up. We consider the first
2347 * occurrence of this after a resume as normal
2348 * and ignore it; otherwise, in a non-resume
2349 * case, we regard it as a TOD problem.
2350 */
2351 if (!cpr_resume_done) {
2352 /* ERROR - tod jumped */
2353 tod_bad = TOD_JUMPED;
2354 off = (int)diff_tod;
2355 }
2356 }
2357 if (dtick_delta) {
2358 /*
2359 * If we've just done a DR resume, dtick_avg
2360 * can go a bit askew so we reset it and carry
2361 * on; otherwise, the TOD is in error.
2362 */
2363 if (dr_resume_done) {
2364 dtick_avg = TOD_REF_FREQ;
2365 } else {
2366 /* ERROR - change in clock rate */
2367 tod_bad = TOD_RATECHANGED;
2368 }
2369 }
2370 }
2371 }
2372
2373 if (tod_bad != TOD_NOFAULT) {
2374 (void) tod_fault(tod_bad, off);
2375
2376 /*
2377 * Disable dosynctodr since we are going to fault
2378 * the TOD chip anyway here
2379 */
2380 dosynctodr = 0;
2381
2382 /*
2383 * Set tod to the correct value from hrestime
2384 */
2385 tod = hrestime.tv_sec;
2386 }
2387
2388 prev_tod = tod;
2389 prev_tick = tick;
2390 return (tod);
2391 }
2392
2393 static void
2394 calcloadavg(int nrun, uint64_t *hp_ave)
2395 {
2396 static int64_t f[3] = { 135, 27, 9 };
2397 uint_t i;
2398 int64_t q, r;
2399
2400 /*
2401 * Compute load average over the last 1, 5, and 15 minutes
2402 * (60, 300, and 900 seconds). The constants in f[3] are for
2403 * exponential decay:
2404 * (1 - exp(-1/60)) << 13 = 135,
2405 * (1 - exp(-1/300)) << 13 = 27,
2406 * (1 - exp(-1/900)) << 13 = 9.
2407 */
2408
2409 /*
2410 * a little hoop-jumping to avoid integer overflow
2411 */
2412 for (i = 0; i < 3; i++) {
2413 q = (hp_ave[i] >> 16) << 7;
2414 r = (hp_ave[i] & 0xffff) << 7;
2415 hp_ave[i] += ((nrun - q) * f[i] - ((r * f[i]) >> 16)) >> 4;
2416 }
2417 }
2418
2419 /*
2420 * lbolt_hybrid() is used by ddi_get_lbolt() and ddi_get_lbolt64() to
2421 * calculate the value of lbolt according to the current mode. In the event
2422 * driven mode (the default), lbolt is calculated by dividing the current hires
 * time by the number of nanoseconds per clock tick. In the cyclic driven mode,
 * an internal variable is incremented at each firing of the lbolt cyclic
 * and returned by lbolt_cyclic_driven().
 *
 * The system will transition from event to cyclic driven mode when the number
 * of calls to lbolt_event_driven() exceeds the (per CPU) threshold within a
 * window of time. It does so by reprogramming lbolt_cyclic from CY_INFINITY to
2430 * nsec_per_tick. The lbolt cyclic will remain ON while at least one CPU is
2431 * causing enough activity to cross the thresholds.
2432 */
2433 int64_t
2434 lbolt_bootstrap(void)
2435 {
2436 return (0);
2437 }
2438
2439 /* ARGSUSED */
2440 uint_t
2441 lbolt_ev_to_cyclic(caddr_t arg1, caddr_t arg2)
2442 {
2443 hrtime_t ts, exp;
2444 int ret;
2445
2446 ASSERT(lbolt_hybrid != lbolt_cyclic_driven);
2447
2448 kpreempt_disable();
2449
2450 ts = gethrtime();
2451 lb_info->lbi_internal = (ts/nsec_per_tick);
2452
2453 /*
2454 * Align the next expiration to a clock tick boundary.
2455 */
2456 exp = ts + nsec_per_tick - 1;
2457 exp = (exp/nsec_per_tick) * nsec_per_tick;
2458
2459 ret = cyclic_reprogram(lb_info->id.lbi_cyclic_id, exp);
2460 ASSERT(ret);
2461
2462 lbolt_hybrid = lbolt_cyclic_driven;
2463 lb_info->lbi_cyc_deactivate = B_FALSE;
2464 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2465
2466 kpreempt_enable();
2467
2468 ret = atomic_dec_32_nv(&lb_info->lbi_token);
2469 ASSERT(ret == 0);
2470
2471 return (1);
2472 }
2473
2474 int64_t
2475 lbolt_event_driven(void)
2476 {
2477 hrtime_t ts;
2478 int64_t lb;
2479 int ret, cpu = CPU->cpu_seqid;
2480
2481 ts = gethrtime();
2482 ASSERT(ts > 0);
2483
2484 ASSERT(nsec_per_tick > 0);
2485 lb = (ts/nsec_per_tick);
2486
2487 /*
2488 * Switch to cyclic mode if the number of calls to this routine
2489 * has reached the threshold within the interval.
2490 */
2491 if ((lb - lb_cpu[cpu].lbc_cnt_start) < lb_info->lbi_thresh_interval) {
2492
2493 if (--lb_cpu[cpu].lbc_counter == 0) {
2494 /*
2495 * Reached the threshold within the interval, reset
2496 * the usage statistics.
2497 */
2498 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2499 lb_cpu[cpu].lbc_cnt_start = lb;
2500
2501 /*
2502 * Make sure only one thread reprograms the
2503 * lbolt cyclic and changes the mode.
2504 */
2505 if (panicstr == NULL &&
2506 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2507
2508 if (lbolt_hybrid == lbolt_cyclic_driven) {
2509 ret = atomic_dec_32_nv(
2510 &lb_info->lbi_token);
2511 ASSERT(ret == 0);
2512 } else {
2513 lbolt_softint_post();
2514 }
2515 }
2516 }
2517 } else {
2518 /*
2519 * Exceeded the interval, reset the usage statistics.
2520 */
2521 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2522 lb_cpu[cpu].lbc_cnt_start = lb;
2523 }
2524
2525 ASSERT(lb >= lb_info->lbi_debug_time);
2526
2527 return (lb - lb_info->lbi_debug_time);
2528 }
2529
2530 int64_t
2531 lbolt_cyclic_driven(void)
2532 {
2533 int64_t lb = lb_info->lbi_internal;
2534 int cpu;
2535
2536 /*
2537 * If a CPU has already prevented the lbolt cyclic from deactivating
2538 * itself, don't bother tracking the usage. Otherwise check if we're
2539 * within the interval and how the per CPU counter is doing.
2540 */
2541 if (lb_info->lbi_cyc_deactivate) {
2542 cpu = CPU->cpu_seqid;
2543 if ((lb - lb_cpu[cpu].lbc_cnt_start) <
2544 lb_info->lbi_thresh_interval) {
2545
2546 if (lb_cpu[cpu].lbc_counter == 0)
2547 /*
2548 * Reached the threshold within the interval,
2549 * prevent the lbolt cyclic from turning itself
2550 * off.
2551 */
2552 lb_info->lbi_cyc_deactivate = B_FALSE;
2553 else
2554 lb_cpu[cpu].lbc_counter--;
2555 } else {
2556 /*
2557 * Only reset the usage statistics when we have
2558 * exceeded the interval.
2559 */
2560 lb_cpu[cpu].lbc_counter = lb_info->lbi_thresh_calls;
2561 lb_cpu[cpu].lbc_cnt_start = lb;
2562 }
2563 }
2564
2565 ASSERT(lb >= lb_info->lbi_debug_time);
2566
2567 return (lb - lb_info->lbi_debug_time);
2568 }
2569
2570 /*
 * The lbolt_cyclic() routine fires every nsec_per_tick to satisfy the
2572 * performance needs of ddi_get_lbolt() and ddi_get_lbolt64() consumers.
2573 * It is inactive by default, and will be activated when switching from event
2574 * to cyclic driven lbolt. The cyclic will turn itself off unless signaled
2575 * by lbolt_cyclic_driven().
2576 */
2577 static void
2578 lbolt_cyclic(void)
2579 {
2580 int ret;
2581
2582 lb_info->lbi_internal++;
2583
2584 if (!lbolt_cyc_only) {
2585
2586 if (lb_info->lbi_cyc_deactivate) {
2587 /*
2588 * Switching from cyclic to event driven mode.
2589 */
2590 if (panicstr == NULL &&
2591 atomic_cas_32(&lb_info->lbi_token, 0, 1) == 0) {
2592
2593 if (lbolt_hybrid == lbolt_event_driven) {
2594 ret = atomic_dec_32_nv(
2595 &lb_info->lbi_token);
2596 ASSERT(ret == 0);
2597 return;
2598 }
2599
2600 kpreempt_disable();
2601
2602 lbolt_hybrid = lbolt_event_driven;
2603 ret = cyclic_reprogram(
2604 lb_info->id.lbi_cyclic_id,
2605 CY_INFINITY);
2606 ASSERT(ret);
2607
2608 kpreempt_enable();
2609
2610 ret = atomic_dec_32_nv(&lb_info->lbi_token);
2611 ASSERT(ret == 0);
2612 }
2613 }
2614
2615 /*
2616 * The lbolt cyclic should not try to deactivate itself before
2617 * the sampling period has elapsed.
2618 */
2619 if (lb_info->lbi_internal - lb_info->lbi_cyc_deac_start >=
2620 lb_info->lbi_thresh_interval) {
2621 lb_info->lbi_cyc_deactivate = B_TRUE;
2622 lb_info->lbi_cyc_deac_start = lb_info->lbi_internal;
2623 }
2624 }
2625 }
2626
2627 /*
2628 * Since the lbolt service was historically cyclic driven, it must be 'stopped'
2629 * when the system drops into the kernel debugger. lbolt_debug_entry() is
2630 * called by the KDI system claim callbacks to record a hires timestamp at
 * debug enter time. lbolt_debug_return() is called by the system release
2632 * callbacks to account for the time spent in the debugger. The value is then
2633 * accumulated in the lb_info structure and used by lbolt_event_driven() and
2634 * lbolt_cyclic_driven(), as well as the mdb_get_lbolt() routine.
2635 */
2636 void
2637 lbolt_debug_entry(void)
2638 {
2639 if (lbolt_hybrid != lbolt_bootstrap) {
2640 ASSERT(lb_info != NULL);
2641 lb_info->lbi_debug_ts = gethrtime();
2642 }
2643 }
2644
2645 /*
2646 * Calculate the time spent in the debugger and add it to the lbolt info
2647 * structure. We also update the internal lbolt value in case we were in
2648 * cyclic driven mode going in.
2649 */
2650 void
2651 lbolt_debug_return(void)
2652 {
2653 hrtime_t ts;
2654
2655 if (lbolt_hybrid != lbolt_bootstrap) {
2656 ASSERT(lb_info != NULL);
2657 ASSERT(nsec_per_tick > 0);
2658
2659 ts = gethrtime();
2660 lb_info->lbi_internal = (ts/nsec_per_tick);
2661 lb_info->lbi_debug_time +=
2662 ((ts - lb_info->lbi_debug_ts)/nsec_per_tick);
2663
2664 lb_info->lbi_debug_ts = 0;
2665 }
2666 }