Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c
+++ new/usr/src/uts/i86pc/cpu/generic_cpu/gcpu_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26 /*
27 27 * Copyright (c) 2010, Intel Corporation.
28 28 * All rights reserved.
29 29 */
30 30
31 31 /*
32 32 * Generic x86 CPU Module
33 33 *
34 34 * This CPU module is used for generic x86 CPUs when Solaris has no other
35 35 * CPU-specific support module available. Code in this module should be the
36 36 * absolute bare-bones support and must be cognizant of both Intel and AMD etc.
37 37 */
38 38
39 39 #include <sys/types.h>
40 40 #include <sys/cpu_module_impl.h>
41 41 #include <sys/cpuvar.h>
42 42 #include <sys/kmem.h>
43 43 #include <sys/modctl.h>
44 44 #include <sys/pghw.h>
45 45
46 46 #include "gcpu.h"
47 47
/*
 * Prevent generic cpu support from loading: set non-zero (e.g. from
 * /etc/system) to make gcpu_init() fail with ENOTSUP on every cpu.
 */
int gcpu_disable = 0;

/* Upper bound on chipids we track; init fails for chipid >= this. */
#define	GCPU_MAX_CHIPID		32

/*
 * Per-chip shared state, indexed by chipid.  Entries are published
 * lock-free in gcpu_init() and intentionally never freed (see
 * gcpu_fini(), which keeps them cached for cpus coming back online).
 */
static struct gcpu_chipshared *gcpu_shared[GCPU_MAX_CHIPID];
55 55
/*
 * Our cmi_init entry point, called during startup of each cpu instance.
 *
 * Allocates the per-cpu gcpu_data_t (hung off the handle via *datap) and
 * ensures the chip-wide gcpu_chipshared structure exists, publishing it
 * lock-free so sibling cores starting in parallel race safely.
 *
 * Returns 0 on success, or ENOTSUP if generic cpu support is disabled
 * or the chipid exceeds GCPU_MAX_CHIPID.
 */
int
gcpu_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct gcpu_chipshared *sp, *osp;
	gcpu_data_t *gcpu;

	if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
		return (ENOTSUP);

	/*
	 * Allocate the state structure for this cpu.  We will only
	 * allocate the bank logout areas in gcpu_mca_init once we
	 * know how many banks there are.
	 */
	gcpu = *datap = kmem_zalloc(sizeof (gcpu_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in gcpu_fini */
	gcpu->gcpu_hdl = hdl;

	/*
	 * Allocate a chipshared structure if no sibling cpu has already
	 * allocated it, but allow for the fact that a sibling core may
	 * be starting up in parallel.
	 */
	if ((sp = gcpu_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct gcpu_chipshared), KM_SLEEP);
		mutex_init(&sp->gcpus_poll_lock, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&sp->gcpus_cfglock, NULL, MUTEX_DRIVER, NULL);
		/*
		 * Attempt to publish our structure; if a sibling won the
		 * race, tear ours down and adopt the winner's copy.
		 */
		osp = atomic_cas_ptr(&gcpu_shared[chipid], NULL, sp);
		if (osp != NULL) {
			mutex_destroy(&sp->gcpus_cfglock);
			mutex_destroy(&sp->gcpus_poll_lock);
			kmem_free(sp, sizeof (struct gcpu_chipshared));
			sp = osp;
		}
	}

	/* Count this cpu as an active user of the chip-shared state. */
	atomic_inc_32(&sp->gcpus_actv_cnt);
	gcpu->gcpu_shared = sp;

	return (0);
}
101 101
102 102 /*
103 103 * deconfigure gcpu_init()
104 104 */
105 105 void
106 106 gcpu_fini(cmi_hdl_t hdl)
107 107 {
108 108 uint_t chipid = cmi_hdl_chipid(hdl);
109 109 gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
110 110 struct gcpu_chipshared *sp;
111 111
112 112 if (gcpu_disable || chipid >= GCPU_MAX_CHIPID)
113 113 return;
114 114
115 115 gcpu_mca_fini(hdl);
116 116
117 117 /*
118 118 * Keep shared data in cache for reuse.
119 119 */
120 120 sp = gcpu_shared[chipid];
121 121 ASSERT(sp != NULL);
122 122 atomic_dec_32(&sp->gcpus_actv_cnt);
123 123
124 124 if (gcpu != NULL)
125 125 kmem_free(gcpu, sizeof (gcpu_data_t));
126 126
127 127 /* Release reference count held in gcpu_init(). */
128 128 cmi_hdl_rele(hdl);
129 129 }
130 130
131 131 void
132 132 gcpu_post_startup(cmi_hdl_t hdl)
133 133 {
134 134 gcpu_data_t *gcpu = cmi_hdl_getcmidata(hdl);
135 135
136 136 if (gcpu_disable)
137 137 return;
138 138
139 139 if (gcpu != NULL)
140 140 cms_post_startup(hdl);
141 141 #ifdef __xpv
142 142 /*
143 143 * All cpu handles are initialized so we can begin polling now.
144 144 * Furthermore, our virq mechanism requires that everything
145 145 * be run on cpu 0 so we can assure that by starting from here.
146 146 */
147 147 gcpu_mca_poll_start(hdl);
148 148 #endif
149 149 }
150 150
151 151 void
152 152 gcpu_post_mpstartup(cmi_hdl_t hdl)
153 153 {
154 154 if (gcpu_disable)
155 155 return;
156 156
157 157 cms_post_mpstartup(hdl);
158 158
159 159 #ifndef __xpv
160 160 /*
161 161 * All cpu handles are initialized only once all cpus
162 162 * are started, so we can begin polling post mp startup.
163 163 */
164 164 gcpu_mca_poll_start(hdl);
165 165 #endif
166 166 }
167 167
/*
 * GCPU_OP selects between the native and xVM (__xpv) implementation of
 * an operation at compile time; ops passed NULL for the xpv side are
 * simply not provided under the hypervisor.
 */
#ifdef __xpv
#define	GCPU_OP(ntvop, xpvop)	xpvop
#else
#define	GCPU_OP(ntvop, xpvop)	ntvop
#endif

/* Version of the CMI API this module was built against. */
cmi_api_ver_t _cmi_api_version = CMI_API_VERSION_3;

/*
 * Operations vector exported to the cpu-module interface framework.
 */
const cmi_ops_t _cmi_ops = {
	gcpu_init,				/* cmi_init */
	gcpu_post_startup,			/* cmi_post_startup */
	gcpu_post_mpstartup,			/* cmi_post_mpstartup */
	gcpu_faulted_enter,			/* cmi_faulted_enter */
	gcpu_faulted_exit,			/* cmi_faulted_exit */
	gcpu_mca_init,				/* cmi_mca_init */
	GCPU_OP(gcpu_mca_trap, NULL),		/* cmi_mca_trap */
	GCPU_OP(gcpu_cmci_trap, NULL),		/* cmi_cmci_trap */
	gcpu_msrinject,				/* cmi_msrinject */
	GCPU_OP(gcpu_hdl_poke, NULL),		/* cmi_hdl_poke */
	gcpu_fini,				/* cmi_fini */
	GCPU_OP(NULL, gcpu_xpv_panic_callback),	/* cmi_panic_callback */
};
190 190
/* Module-type-specific linkage: identifies this as a cpu module. */
static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic x86 CPU Module"
};

/*
 * Module linkage.  The ml_linkage member is an array, so its
 * initializer gets its own braces — required for a clean build without
 * -Wno-missing-braces (the subject of this change).
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	{ (void *)&modlcpu, NULL }
};
201 200
/* Loadable-module entry point: register this module with the system. */
int
_init(void)
{
	return (mod_install(&modlinkage));
}
207 206
/* Loadable-module entry point: report module information. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
213 212
/* Loadable-module entry point: unregister this module from the system. */
int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX