/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "radeon_io32.h"

/*
 * Very simple allocator for GART memory, working on a static range
 * already mapped into each client's address space.
 */
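
/*
 * Each heap is a circular, doubly linked list of mem_block entries kept
 * in offset order.  The list head allocated by init_heap() acts as a
 * sentinel: its filp is set to (drm_file_t *)-1 so it is never treated
 * as free and never merged away.  A block with a NULL filp is free; any
 * other filp identifies the client that owns the block.
 */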

/*
 * Carve [start, start + size) out of free block 'p' and mark it as owned
 * by 'filp'; any leftover space before and after the allocation remains
 * free.
 */
static struct mem_block *
split_block(struct mem_block *p, int start, int size, drm_file_t *filp)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock =
                    drm_alloc(sizeof (*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock =
                    drm_alloc(sizeof (*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

out:
        /* Our block is in the middle */
        p->filp = filp;
        return (p);
}

/*
 * First-fit search for a free block large enough to hold 'size' bytes
 * at a (1 << align2)-byte alignment.
 */
static struct mem_block *
alloc_block(struct mem_block *heap, int size, int align2, drm_file_t *filp)
{
        struct mem_block *p;
        int mask = (1 << align2) - 1;

        for (p = heap->next; p != heap; p = p->next) {
                int start = (p->start + mask) & ~mask;
                if (p->filp == 0 && start + size <= p->start + p->size)
                        return (split_block(p, start, size, filp));
        }

        return (NULL);
}

/*
 * Find the block that starts at offset 'start'.
 */
static struct mem_block *
find_block(struct mem_block *heap, int start)
{
        struct mem_block *p;

        for (p = heap->next; p != heap; p = p->next)
                if (p->start == start)
                        return (p);

        return (NULL);
}

/*
 * Release a block and coalesce it with any free neighbours.
 */
static void
free_block(struct mem_block *p)
{
        p->filp = NULL;

        /*
         * Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->filp == 0) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof (*q), DRM_MEM_BUFS);
        }

        if (p->prev->filp == 0) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof (*q), DRM_MEM_BUFS);
        }
}

/*
 * Initialize.  How to check for an uninitialized heap?
 */
static int
init_heap(struct mem_block **heap, int start, int size)
{
        struct mem_block *blocks = drm_alloc(sizeof (*blocks), DRM_MEM_BUFS);

        if (!blocks)
                return (ENOMEM);

        *heap = drm_alloc(sizeof (**heap), DRM_MEM_BUFS);
        if (!*heap) {
                drm_free(blocks, sizeof (*blocks), DRM_MEM_BUFS);
                return (ENOMEM);
        }

        blocks->start = start;
        blocks->size = size;
        blocks->filp = NULL;
        blocks->next = blocks->prev = *heap;

        (void) memset(*heap, 0, sizeof (**heap));
        (*heap)->filp = (drm_file_t *)-1;
        (*heap)->next = (*heap)->prev = blocks;
        return (0);
}

/*
 * Free all blocks associated with the releasing file.
 */
void
radeon_mem_release(drm_file_t *filp, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        for (p = heap->next; p != heap; p = p->next) {
                if (p->filp == filp)
                        p->filp = NULL;
        }

        /*
         * Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        for (p = heap->next; p != heap; p = p->next) {
                while (p->filp == 0 && p->next->filp == 0) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof (*q), DRM_MEM_DRIVER);
                }
        }
}

/*
 * Shutdown.
 */
void
radeon_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap; ) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof (*q), DRM_MEM_DRIVER);
        }

        drm_free(*heap, sizeof (**heap), DRM_MEM_DRIVER);
        *heap = NULL;
}

/* IOCTL HANDLERS */

/*
 * Map an ioctl region id onto the corresponding heap.
 */
static struct mem_block **
get_heap(drm_radeon_private_t *dev_priv, int region)
{
        switch (region) {
        case RADEON_MEM_REGION_GART:
                return (&dev_priv->gart_heap);
        case RADEON_MEM_REGION_FB:
                return (&dev_priv->fb_heap);
        default:
                return (NULL);
        }
}

/*ARGSUSED*/
int
radeon_mem_alloc(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_alloc_t alloc;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return (EINVAL);
        }

#ifdef _MULTI_DATAMODEL
        if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
                drm_radeon_mem_alloc_32_t alloc32;

                DRM_COPYFROM_WITH_RETURN(&alloc32, (void *) data,
                    sizeof (alloc32));
                alloc.region = alloc32.region;
                alloc.alignment = alloc32.alignment;
                alloc.size = alloc32.size;
                alloc.region_offset = (void *)(uintptr_t)alloc32.region_offset;
        } else {
#endif
                DRM_COPYFROM_WITH_RETURN(&alloc, (void *) data, sizeof (alloc));
#ifdef _MULTI_DATAMODEL
        }
#endif

        heap = get_heap(dev_priv, alloc.region);
        if (!heap || !*heap)
                return (EFAULT);

        /*
         * Make things easier on ourselves: all allocations at least
         * 4k aligned.
         */
        if (alloc.alignment < 12)
                alloc.alignment = 12;

        block = alloc_block(*heap, alloc.size, alloc.alignment, fpriv);

        if (!block)
                return (ENOMEM);

        if (DRM_COPY_TO_USER(alloc.region_offset, &block->start,
            sizeof (int))) {
                DRM_ERROR("copy_to_user\n");
                return (EFAULT);
        }

        return (0);
}

/*ARGSUSED*/
int
radeon_mem_free(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_free_t memfree;
        struct mem_block *block, **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return (EINVAL);
        }

        DRM_COPYFROM_WITH_RETURN(&memfree, (void *) data, sizeof (memfree));

        heap = get_heap(dev_priv, memfree.region);
        if (!heap || !*heap)
                return (EFAULT);

        block = find_block(*heap, memfree.region_offset);
        if (!block)
                return (EFAULT);

        if (block->filp != fpriv)
                return (EPERM);

        free_block(block);
        return (0);
}

/*ARGSUSED*/
int
radeon_mem_init_heap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_init_heap_t initheap;
        struct mem_block **heap;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return (EINVAL);
        }

        DRM_COPYFROM_WITH_RETURN(&initheap, (void *) data, sizeof (initheap));

        heap = get_heap(dev_priv, initheap.region);
        if (!heap)
                return (EFAULT);

        if (*heap) {
                DRM_ERROR("heap already initialized?");
                return (EFAULT);
        }

        return (init_heap(heap, initheap.start, initheap.size));
}