/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * x86 cyclic backend (CBE): the glue between the platform-independent
 * cyclic subsystem and the x86 PSM timer/interrupt machinery.  cbe_init()
 * hands a cyc_backend_t of the functions in this file to cyclic_init();
 * the hardware clock interrupt and the two soft levels are wired up via
 * add_avintr()/add_avsoftintr().
 */

#include <sys/systm.h>
#include <sys/cyclic.h>
#include <sys/cyclic_impl.h>
#include <sys/spl.h>
#include <sys/x_call.h>
#include <sys/kmem.h>
#include <sys/machsystm.h>
#include <sys/smp_impldefs.h>
#include <sys/psm_types.h>
#include <sys/psm.h>
#include <sys/atomic.h>
#include <sys/clock.h>
#include <sys/x86_archext.h>
#include <sys/ddi_impldefs.h>
#include <sys/ddi_intr.h>
#include <sys/avintr.h>
#include <sys/note.h>

/* Interrupt vector for the CBE_HIGH_PIL clock source; set in cbe_init_pre() */
static int cbe_vector;
/* Count of nsec_per_tick periods; advanced by cbe_hres_tick() */
static int cbe_ticks = 0;

/*
 * cbe_xcall_lock is used to protect the xcall globals since the cyclic
 * reprogramming API does not use cpu_lock.
 *
 * The three cbe_xcall_* variables below form a single-slot handshake
 * between cbe_xcall() (producer) and cbe_fire() on the target CPU
 * (consumer): func/cpu are declared volatile because the producer
 * spin-waits on them being cleared from interrupt context.
 */
static kmutex_t cbe_xcall_lock;
static cyc_func_t volatile cbe_xcall_func;	/* pending cross-call handler */
static cpu_t *volatile cbe_xcall_cpu;		/* CPU the handler is aimed at */
static void *cbe_xcall_farg;			/* argument for cbe_xcall_func */
static cpuset_t cbe_enabled;			/* CPUs with the backend enabled */

/* Soft interrupt handles for the low and lock (clock) soft levels */
static ddi_softint_hdl_impl_t cbe_low_hdl =
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL};
static ddi_softint_hdl_impl_t cbe_clock_hdl =
	{NULL, 0, {{NULL}}, NULL, NULL, NULL, NULL, NULL};

cyclic_id_t cbe_hres_cyclic;			/* per-tick cyclic (cbe_hres_tick) */
int cbe_psm_timer_mode = TIMER_ONESHOT;		/* TIMER_ONESHOT or TIMER_PERIODIC */
static hrtime_t cbe_timer_resolution;		/* resolution reported by clkinitf */

extern int tsc_gethrtime_enable;

void cbe_hres_tick(void);

/*
 * Soft interrupt handler at CBE_LOCK_PIL: drives lock-level cyclic
 * soft interrupts for the current CPU.  Returns 1 (interrupt claimed --
 * presumably per avintr handler convention; confirm against add_avsoftintr()
 * callers).
 */
int
cbe_softclock(void)
{
	cyclic_softint(CPU, CY_LOCK_LEVEL);
	return (1);
}

/*
 * Soft interrupt handler at CBE_LOW_PIL: drives low-level cyclic
 * soft interrupts for the current CPU.
 */
int
cbe_low_level(void)
{
	cpu_t *cpu = CPU;

	cyclic_softint(cpu, CY_LOW_LEVEL);
	return (1);
}

/*
 * We can be in cbe_fire() either due to a cyclic-induced cross call, or due
 * to the timer firing at level-14.  Because cyclic_fire() can tolerate
 * spurious calls, it would not matter if we called cyclic_fire() in both
 * cases.
 */
int
cbe_fire(void)
{
	cpu_t *cpu = CPU;
	processorid_t me = cpu->cpu_id, i;
	/*
	 * Snapshot whether this invocation was induced by cbe_xcall()
	 * targeting this CPU (see the handshake globals above).
	 */
	int cross_call = (cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);

	cyclic_fire(cpu);

	/*
	 * In periodic mode only CPU 0 takes the hardware timer interrupt;
	 * it fans the tick out to every other enabled CPU via an IPI at
	 * CBE_HIGH_PIL.  Skip the fan-out when we are here for a cross
	 * call rather than a timer tick.
	 */
	if (cbe_psm_timer_mode != TIMER_ONESHOT && me == 0 && !cross_call) {
		for (i = 1; i < NCPU; i++) {
			if (CPU_IN_SET(cbe_enabled, i)) {
				send_dirint(i, CBE_HIGH_PIL);
			}
		}
	}

	if (cross_call) {
		ASSERT(cbe_xcall_func != NULL && cbe_xcall_cpu == cpu);
		(*cbe_xcall_func)(cbe_xcall_farg);
		/*
		 * Clearing func and cpu releases the producer spinning in
		 * cbe_xcall(); it waits for both to go NULL.
		 */
		cbe_xcall_func = NULL;
		cbe_xcall_cpu = NULL;
	}

	return (1);
}

/*
 * cyb_softint entry point: post a soft interrupt at the PIL corresponding
 * to the requested cyclic soft level.
 */
/*ARGSUSED*/
void
cbe_softint(void *arg, cyc_level_t level)
{
	switch (level) {
	case CY_LOW_LEVEL:
		(*setsoftint)(CBE_LOW_PIL, cbe_low_hdl.ih_pending);
		break;
	case CY_LOCK_LEVEL:
		(*setsoftint)(CBE_LOCK_PIL, cbe_clock_hdl.ih_pending);
		break;
	default:
		panic("cbe_softint: unexpected soft level %d", level);
	}
}

/*
 * cyb_reprogram entry point: reprogram the one-shot timer for the next
 * expiration.  A no-op in periodic mode, where the timer fires at a fixed
 * rate.
 */
/*ARGSUSED*/
void
cbe_reprogram(void *arg, hrtime_t time)
{
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_reprogram)(time);
}

/*
 * cyb_set_level entry point: raise the PIL to the one backing the given
 * cyclic level and return the previous level as a cookie for
 * cbe_restore_level().
 */
/*ARGSUSED*/
cyc_cookie_t
cbe_set_level(void *arg, cyc_level_t level)
{
	int ipl;

	switch (level) {
	case CY_LOW_LEVEL:
		ipl = CBE_LOW_PIL;
		break;
	case CY_LOCK_LEVEL:
		ipl = CBE_LOCK_PIL;
		break;
	case CY_HIGH_LEVEL:
		ipl = CBE_HIGH_PIL;
		break;
	default:
		panic("cbe_set_level: unexpected level %d", level);
	}

	return (splr(ipltospl(ipl)));
}

/*
 * cyb_restore_level entry point: drop back to the PIL saved by
 * cbe_set_level().
 */
/*ARGSUSED*/
void
cbe_restore_level(void *arg, cyc_cookie_t cookie)
{
	splx(cookie);
}

/*
 * cyb_xcall entry point: run func(farg) on CPU dest at CBE_HIGH_PIL and
 * wait for it to complete.  If dest is the current CPU the function is
 * simply called directly.  Otherwise the single-slot handshake is used:
 * cbe_xcall_lock serializes producers (the cyclic reprogramming path does
 * not hold cpu_lock -- see the comment at the globals), farg is published
 * before cpu/func with membar_producer() so the consumer in cbe_fire()
 * cannot observe func non-NULL with a stale farg, and the producer then
 * spins until the consumer clears both func and cpu.
 */
/*ARGSUSED*/
void
cbe_xcall(void *arg, cpu_t *dest, cyc_func_t func, void *farg)
{
	kpreempt_disable();

	if (dest == CPU) {
		(*func)(farg);
		kpreempt_enable();
		return;
	}

	mutex_enter(&cbe_xcall_lock);

	ASSERT(cbe_xcall_func == NULL);

	cbe_xcall_farg = farg;
	membar_producer();
	cbe_xcall_cpu = dest;
	cbe_xcall_func = func;

	send_dirint(dest->cpu_id, CBE_HIGH_PIL);

	/* Wait for cbe_fire() on dest to consume the request. */
	while (cbe_xcall_func != NULL || cbe_xcall_cpu != NULL)
		continue;

	mutex_exit(&cbe_xcall_lock);

	kpreempt_enable();
}

/*
 * cyb_configure entry point: the per-CPU backend argument is simply the
 * cpu_t itself.
 */
void *
cbe_configure(cpu_t *cpu)
{
	return (cpu);
}

/*
 * cyb_unconfigure entry point: nothing to tear down; just assert the CPU
 * was disabled first.
 */
void
cbe_unconfigure(void *arg)
{
	_NOTE(ARGUNUSED(arg));
	ASSERT(!CPU_IN_SET(cbe_enabled, ((cpu_t *)arg)->cpu_id));
}

#ifndef __xpv
/*
 * declarations needed for time adjustment
 */
extern void tsc_suspend(void);
extern void tsc_resume(void);
/*
 * Call the resume function in the cyclic, instead of inline in the
 * resume path.
 */
extern int tsc_resume_in_cyclic;
#endif

/*
 * cyb_suspend entry point, called around system suspend.
 */
/*ARGSUSED*/
static void
cbe_suspend(cyb_arg_t arg)
{
#ifndef __xpv
	/*
	 * This is an x86 backend, so let the tsc_suspend
	 * that is specific to x86 platforms do the work.
	 */
	tsc_suspend();
#endif
}

/*
 * cyb_resume entry point, called around system resume.  tsc_resume() is
 * only invoked here when the tsc_resume_in_cyclic knob asks for it (see
 * the extern declaration above); otherwise resume is handled inline in
 * the resume path.
 */
/*ARGSUSED*/
static void
cbe_resume(cyb_arg_t arg)
{
#ifndef __xpv
	if (tsc_resume_in_cyclic) {
		tsc_resume();
	}
#endif
}

/*
 * cyb_enable entry point: mark the CPU enabled and, in one-shot mode,
 * turn its PSM timer on.  In periodic mode CPU 0 is never toggled: its
 * timer stays running and it fans ticks out to the others (see
 * cbe_fire()).
 */
void
cbe_enable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	/*
	 * Added (me == 0) to the ASSERT because the timer isn't
	 * disabled on CPU 0, and cbe_enable is called when we resume.
	 */
	ASSERT((me == 0) || !CPU_IN_SET(cbe_enabled, me));
	CPUSET_ADD(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_enable)();
}

/*
 * cyb_disable entry point: inverse of cbe_enable(), with the same CPU 0
 * special case in periodic mode.
 */
void
cbe_disable(void *arg)
{
	processorid_t me = ((cpu_t *)arg)->cpu_id;

	/* neither enable nor disable cpu0 if TIMER_PERIODIC is set */
	if ((cbe_psm_timer_mode != TIMER_ONESHOT) && (me == 0))
		return;

	ASSERT(CPU_IN_SET(cbe_enabled, me));
	CPUSET_DEL(cbe_enabled, me);
	if (cbe_psm_timer_mode == TIMER_ONESHOT)
		(*psm_timer_disable)();
}

/*
 * Unbound cyclic, called once per tick (every nsec_per_tick ns).
 * Advances the hires clock and, once every hz ticks (i.e. roughly once
 * per second), calls the hrtime_tick hook.
 */
void
cbe_hres_tick(void)
{
	int s;

	dtrace_hres_tick();

	/*
	 * Because hres_tick effectively locks hres_lock, we must be at the
	 * same PIL as that used for CLOCK_LOCK.
	 */
	s = splr(ipltospl(XC_HI_PIL));
	hres_tick();
	splx(s);

	if ((cbe_ticks % hz) == 0)
		(*hrtime_tick)();

	cbe_ticks++;

}

/*
 * Early backend setup: discover the clock interrupt vector, clear the
 * enabled-CPU set, and initialize the PSM timer (preferring one-shot;
 * the mode actually granted comes back in cbe_psm_timer_mode).
 */
void
cbe_init_pre(void)
{
	cbe_vector = (*psm_get_clockirq)(CBE_HIGH_PIL);

	CPUSET_ZERO(cbe_enabled);

	cbe_timer_resolution = (*clkinitf)(TIMER_ONESHOT, &cbe_psm_timer_mode);
}

/*
 * Main backend setup: register the backend ops with the cyclic subsystem,
 * wire up the hard and soft interrupt handlers, and install the per-tick
 * hires cyclic.  Must run after cbe_init_pre().
 */
void
cbe_init(void)
{
	cyc_backend_t cbe = {
		cbe_configure,		/* cyb_configure */
		cbe_unconfigure,	/* cyb_unconfigure */
		cbe_enable,		/* cyb_enable */
		cbe_disable,		/* cyb_disable */
		cbe_reprogram,		/* cyb_reprogram */
		cbe_softint,		/* cyb_softint */
		cbe_set_level,		/* cyb_set_level */
		cbe_restore_level,	/* cyb_restore_level */
		cbe_xcall,		/* cyb_xcall */
		cbe_suspend,		/* cyb_suspend */
		cbe_resume		/* cyb_resume */
	};
	cyc_handler_t hdlr;
	cyc_time_t when;

	mutex_init(&cbe_xcall_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_enter(&cpu_lock);
	cyclic_init(&cbe, cbe_timer_resolution);
	mutex_exit(&cpu_lock);

	/* Hardware clock interrupt on the vector found in cbe_init_pre(). */
	(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
	    "cbe_fire_master", cbe_vector, 0, NULL, NULL, NULL);

	/* IPI vector so other CPUs also enter cbe_fire() (fan-out/xcall). */
	if (psm_get_ipivect != NULL) {
		(void) add_avintr(NULL, CBE_HIGH_PIL, (avfunc)cbe_fire,
		    "cbe_fire_slave",
		    (*psm_get_ipivect)(CBE_HIGH_PIL, PSM_INTR_IPI_HI),
		    0, NULL, NULL, NULL);
	}

	(void) add_avsoftintr((void *)&cbe_clock_hdl, CBE_LOCK_PIL,
	    (avfunc)cbe_softclock, "softclock", NULL, NULL);

	(void) add_avsoftintr((void *)&cbe_low_hdl, CBE_LOW_PIL,
	    (avfunc)cbe_low_level, "low level", NULL, NULL);

	mutex_enter(&cpu_lock);

	hdlr.cyh_level = CY_HIGH_LEVEL;
	hdlr.cyh_func = (cyc_func_t)cbe_hres_tick;
	hdlr.cyh_arg = NULL;

	when.cyt_when = 0;
	when.cyt_interval = nsec_per_tick;

	cbe_hres_cyclic = cyclic_add(&hdlr, &when);

	if (psm_post_cyclic_setup != NULL)
		(*psm_post_cyclic_setup)(NULL);

	mutex_exit(&cpu_lock);
}