/*
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <vm/anon.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <vm/seg_map.h>
#include <sys/fcntl.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/bitmap.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/gfx_private.h>
#include "drmP.h"
#include "drm.h"

/*
 * @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
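
/*
 * Orientation sketch of the bookkeeping implemented below (descriptive only;
 * the routines themselves are authoritative):
 *
 *	object_idr (per struct drm_file)	handle -> object
 *	object_name_idr (per struct drm_device)	flink name -> object
 *
 * In this port both the per-file handle and the global flink name are taken
 * from the object's obj->name, which is allocated from the shfile_name
 * counter when the object is created.
 */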

void
idr_list_init(struct idr_list  *head)
{
        struct idr_list  *entry;
        /*
         * Hash buckets to speed up lookups by name/handle; see the usage
         * sketch after idr_list_empty() below.
         */
        entry = kmem_zalloc(DRM_GEM_OBJIDR_HASHNODE
            * sizeof (struct idr_list), KM_SLEEP);
        head->next = entry;
        for (int i = 0; i < DRM_GEM_OBJIDR_HASHNODE; i++) {
                INIT_LIST_HEAD(&entry[i]);
        }
}

/*
 * Insert an object into a table.  In this port the returned handle is
 * simply the object's global name.
 */
int
idr_list_get_new_above(struct idr_list  *head,
                        struct drm_gem_object *obj,
                        int *handlep)
{
        struct idr_list  *entry;
        int key;
        entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
        key = obj->name % DRM_GEM_OBJIDR_HASHNODE;
        list_add(entry, &head->next[key], NULL);
        entry->obj = obj;
        entry->handle = obj->name;
        *handlep = obj->name;
        return (0);
}

/* Look up an object by name/handle.  Returns NULL if it is not present. */
struct drm_gem_object *
idr_list_find(struct idr_list  *head,
                uint32_t        name)
{
        struct idr_list  *entry;
        int key;
        key = name % DRM_GEM_OBJIDR_HASHNODE;

        list_for_each(entry, &head->next[key]) {
                if (entry->handle == name)
                        return (entry->obj);
        }
        return (NULL);
}

/* Remove an object from a table.  Returns 0 on success, -1 if not found. */
int
idr_list_remove(struct idr_list  *head,
                uint32_t        name)
{
        struct idr_list  *entry, *temp;
        int key;
        key = name % DRM_GEM_OBJIDR_HASHNODE;
        list_for_each_safe(entry, temp, &head->next[key]) {
                if (entry->handle == name) {
                        list_del(entry);
                        kmem_free(entry, sizeof (*entry));
                        return (0);
                }
        }
        DRM_ERROR("Failed to remove the object %d", name);
        return (-1);
}

/* Free all remaining entries and the hash bucket array itself. */
void
idr_list_free(struct idr_list  *head)
{
        struct idr_list  *entry, *temp;
        for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
                list_for_each_safe(entry, temp, &head->next[key]) {
                        list_del(entry);
                        kmem_free(entry, sizeof (*entry));
                }
        }
        kmem_free(head->next,
            DRM_GEM_OBJIDR_HASHNODE * sizeof (struct idr_list));
        head->next = NULL;
}

/* Return 1 if every hash bucket is empty, 0 otherwise. */
int
idr_list_empty(struct idr_list  *head)
{
        int empty;
        for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) {
                empty = list_empty(&(head)->next[key]);
                if (!empty)
                        return (empty);
        }
        return (1);
}
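
/*
 * A minimal usage sketch of the idr_list helpers above (illustrative only;
 * the real callers are the handle and flink paths later in this file).
 * Entries hash into one of DRM_GEM_OBJIDR_HASHNODE buckets by
 * "name % DRM_GEM_OBJIDR_HASHNODE", so a lookup only walks one short list:
 *
 *	struct idr_list table;
 *	struct drm_gem_object *obj;
 *	int handle;
 *
 *	idr_list_init(&table);
 *	(void) idr_list_get_new_above(&table, obj, &handle);
 *	obj = idr_list_find(&table, handle);	// same object back
 *	(void) idr_list_remove(&table, handle);
 *	idr_list_free(&table);
 */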

static  uint32_t        shfile_name = 0;
#define	SHFILE_NAME_MAX	0xffffffff

/*
 * Will be set to 1 for 32-bit x86 systems only, in startup.c.
 */
extern int      segkp_fromheap;
extern ulong_t *segkp_bitmap;

void
drm_gem_object_reference(struct drm_gem_object *obj)
{
        atomic_inc(&obj->refcount);
}

void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
        if (obj == NULL)
                return;

        atomic_sub(1, &obj->refcount);
        if (obj->refcount == 0)
                drm_gem_object_free(obj);
}

void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
        drm_gem_object_reference(obj);
        atomic_inc(&obj->handlecount);
}

void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
        if (obj == NULL)
                return;

        /*
         * Must drop the handle count (and free any name) before dropping
         * the reference, as the reference may be the last one, in which
         * case the object would disappear before we checked for a name.
         */
        atomic_sub(1, &obj->handlecount);
        if (obj->handlecount == 0)
                drm_gem_object_handle_free(obj);
        drm_gem_object_unreference(obj);
}

/*
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
        mutex_init(&dev->object_name_lock, NULL, MUTEX_DRIVER, NULL);
        idr_list_init(&dev->object_name_idr);

        atomic_set(&dev->object_count, 0);
        atomic_set(&dev->object_memory, 0);
        atomic_set(&dev->pin_count, 0);
        atomic_set(&dev->pin_memory, 0);
        atomic_set(&dev->gtt_count, 0);
        atomic_set(&dev->gtt_memory, 0);
        return (0);
}

/*
 * Allocate a GEM object of the specified size.  In this port the backing
 * store is DMA memory obtained through the DDI DMA interfaces (there is no
 * shmfs backing as on Linux).
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
        static ddi_dma_attr_t dma_attr = {
                DMA_ATTR_V0,
                0U,                             /* dma_attr_addr_lo */
                0xffffffffU,                    /* dma_attr_addr_hi */
                0xffffffffU,                    /* dma_attr_count_max */
                4096,                           /* dma_attr_align */
                0x1fffU,                        /* dma_attr_burstsizes */
                1,                              /* dma_attr_minxfer */
                0xffffffffU,                    /* dma_attr_maxxfer */
                0xffffffffU,                    /* dma_attr_seg */
                1,                              /* dma_attr_sgllen, variable */
                4,                              /* dma_attr_granular */
                0                               /* dma_attr_flags */
        };
        static ddi_device_acc_attr_t acc_attr = {
                DDI_DEVICE_ATTR_V0,
                DDI_NEVERSWAP_ACC,
                DDI_MERGING_OK_ACC
        };
        struct drm_gem_object *obj;
        ddi_dma_cookie_t cookie;
        uint_t cookie_cnt;
        drm_local_map_t *map;

        pgcnt_t real_pgcnt, pgcnt = btopr(size);
        uint32_t paddr, cookie_end;
        int i, n;

        obj = kmem_zalloc(sizeof (struct drm_gem_object), KM_NOSLEEP);
        if (obj == NULL)
                return (NULL);

        obj->dev = dev;
        obj->flink = 0;
        obj->size = size;

        if (shfile_name == SHFILE_NAME_MAX) {
                DRM_ERROR("No name space for object");
                goto err1;
        } else {
                obj->name = ++shfile_name;
        }

        dma_attr.dma_attr_sgllen = (int)pgcnt;

        if (ddi_dma_alloc_handle(dev->dip, &dma_attr,
            DDI_DMA_DONTWAIT, NULL, &obj->dma_hdl)) {
                DRM_ERROR("drm_gem_object_alloc: "
                    "ddi_dma_alloc_handle failed");
                goto err1;
        }
        if (ddi_dma_mem_alloc(obj->dma_hdl, ptob(pgcnt), &acc_attr,
            IOMEM_DATA_UC_WR_COMBINE, DDI_DMA_DONTWAIT, NULL,
            &obj->kaddr, &obj->real_size, &obj->acc_hdl)) {
                DRM_ERROR("drm_gem_object_alloc: "
                    "ddi_dma_mem_alloc failed");
                goto err2;
        }
        if (ddi_dma_addr_bind_handle(obj->dma_hdl, NULL,
            obj->kaddr, obj->real_size, DDI_DMA_RDWR,
            DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_cnt)
            != DDI_DMA_MAPPED) {
                DRM_ERROR("drm_gem_object_alloc: "
                    "ddi_dma_addr_bind_handle failed");
                goto err3;
        }

        real_pgcnt = btopr(obj->real_size);

        obj->pfnarray = kmem_zalloc(real_pgcnt * sizeof (pfn_t), KM_NOSLEEP);
        if (obj->pfnarray == NULL) {
                goto err4;
        }
        /*
         * Walk every DMA cookie and record the page frame number of each
         * page backing the object.
         */
        for (n = 0, i = 1; ; i++) {
                for (paddr = cookie.dmac_address,
                    cookie_end = cookie.dmac_address + cookie.dmac_size;
                    paddr < cookie_end;
                    paddr += PAGESIZE) {
                        obj->pfnarray[n++] = btop(paddr);
                        if (n >= real_pgcnt)
                                goto addmap;
                }
                if (i >= cookie_cnt)
                        break;
                ddi_dma_nextcookie(obj->dma_hdl, &cookie);
        }

addmap:
        map = drm_alloc(sizeof (struct drm_local_map), DRM_MEM_MAPS);
        if (map == NULL) {
                goto err5;
        }

        map->handle = obj;
        map->offset = (uintptr_t)map->handle;
        map->offset &= 0xffffffffUL;
        map->dev_addr = map->handle;
        map->size = obj->real_size;
        map->type = _DRM_TTM;
        map->flags = _DRM_WRITE_COMBINING | _DRM_REMOVABLE;
        map->drm_umem_cookie =
            gfxp_umem_cookie_init(obj->kaddr, obj->real_size);
        if (map->drm_umem_cookie == NULL) {
                goto err6;
        }

        obj->map = map;

        atomic_set(&obj->refcount, 1);
        atomic_set(&obj->handlecount, 1);
        if (dev->driver->gem_init_object != NULL &&
            dev->driver->gem_init_object(obj) != 0) {
                goto err7;
        }
        atomic_inc(&dev->object_count);
        atomic_add(obj->size, &dev->object_memory);

        return (obj);

err7:
        gfxp_umem_cookie_destroy(map->drm_umem_cookie);
err6:
        drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);
err5:
        kmem_free(obj->pfnarray, real_pgcnt * sizeof (pfn_t));
err4:
        (void) ddi_dma_unbind_handle(obj->dma_hdl);
err3:
        ddi_dma_mem_free(&obj->acc_hdl);
err2:
        ddi_dma_free_handle(&obj->dma_hdl);
err1:
        kmem_free(obj, sizeof (struct drm_gem_object));

        return (NULL);
}
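
/*
 * A plausible driver-side usage sketch (illustrative only; the surrounding
 * ioctl context and variable names here are hypothetical, not defined in
 * this file): a driver "create" ioctl pairs drm_gem_object_alloc() with
 * drm_gem_handle_create() and then drops the allocation-time handle
 * reference, leaving the file's handle as the sole owner, mirroring the
 * pattern GEM drivers of this era typically used.
 *
 *	struct drm_gem_object *obj;
 *	int handle, ret;
 *
 *	obj = drm_gem_object_alloc(dev, size);
 *	if (obj == NULL)
 *		return (ENOMEM);
 *
 *	ret = drm_gem_handle_create(fpriv, obj, &handle);
 *	spin_lock(&dev->struct_mutex);
 *	drm_gem_object_handle_unreference(obj);
 *	spin_unlock(&dev->struct_mutex);
 *	if (ret != 0)
 *		return (ret);
 *
 *	// return "handle" to userland; drm_gem_handle_delete() releases it
 */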

/*
 * Removes the mapping from handle to filp for this object.
 */
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;
        int err;
        /*
         * This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_list_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                DRM_ERROR("obj %d is not in the list, failed to close", handle);
                return (EINVAL);
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        err = idr_list_remove(&filp->object_idr, handle);
        if (err == -1)
                DRM_ERROR("%s", __func__);

        spin_unlock(&filp->table_lock);

        spin_lock(&dev->struct_mutex);
        drm_gem_object_handle_unreference(obj);
        spin_unlock(&dev->struct_mutex);
        return (0);
}

/*
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                    struct drm_gem_object *obj,
                    int *handlep)
{
        int     ret;

        /*
         * Get the user-visible handle using idr.
         */
again:
        /* ensure there is space available to allocate a handle */

        /* do the allocation under our spinlock */
        spin_lock(&file_priv->table_lock);
        ret = idr_list_get_new_above(&file_priv->object_idr, obj, handlep);
        spin_unlock(&file_priv->table_lock);
        if (ret == -EAGAIN)
                goto again;

        if (ret != 0) {
                DRM_ERROR("Failed to create handle");
                return (ret);
        }

        drm_gem_object_handle_reference(obj);
        return (0);
}

/* Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp,
                            int handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_list_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                DRM_ERROR("object_lookup failed, handle %d", handle);
                return (NULL);
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return (obj);
}
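
/*
 * A minimal caller-side sketch (illustrative only; "do_something_with" is a
 * hypothetical placeholder): an ioctl that operates on a handle looks the
 * object up, works on it, then drops the lookup reference under
 * struct_mutex, as the ioctls later in this file do.
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(fpriv, handle);
 *	if (obj == NULL)
 *		return (EINVAL);
 *	do_something_with(obj);
 *	spin_lock(&dev->struct_mutex);
 *	drm_gem_object_unreference(obj);
 *	spin_unlock(&dev->struct_mutex);
 */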

/*
 * Releases the handle to an mm object.
 */
/*ARGSUSED*/
int
drm_gem_close_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        struct drm_gem_close args;
        int ret;

        if (!(dev->driver->use_gem == 1))
                return (ENODEV);

        DRM_COPYFROM_WITH_RETURN(&args,
            (void *)data, sizeof (args));

        ret = drm_gem_handle_delete(fpriv, args.handle);

        return (ret);
}

/*
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
/*ARGSUSED*/
int
drm_gem_flink_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        struct drm_gem_flink args;
        struct drm_gem_object *obj;
        int ret, handle;

        if (!(dev->driver->use_gem == 1))
                return (ENODEV);

        DRM_COPYFROM_WITH_RETURN(&args,
            (void *)data, sizeof (args));
        obj = drm_gem_object_lookup(fpriv, args.handle);
        if (obj == NULL)
                return (EINVAL);
        handle = args.handle;
        spin_lock(&dev->object_name_lock);
        if (!obj->flink) {
                /*
                 * Only create a node in object_name_idr; nothing else is
                 * updated here.
                 */
                ret = idr_list_get_new_above(&dev->object_name_idr,
                    obj, &handle);
                obj->flink = obj->name;
                /* Allocate a reference for the name table.  */
                drm_gem_object_reference(obj);
        }
        /*
         * Leave the reference from the lookup around as the
         * name table now holds one
         */
        args.name = obj->name;

        spin_unlock(&dev->object_name_lock);
        ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
        if (ret != 0)
                DRM_ERROR(" gem flink error! %d", ret);

        spin_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        spin_unlock(&dev->struct_mutex);

        return (ret);
}

/*
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
/*ARGSUSED*/
int
drm_gem_open_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        struct drm_gem_open args;
        struct drm_gem_object *obj;
        int ret;
        int handle;

        if (!(dev->driver->use_gem == 1)) {
                DRM_ERROR("GEM is not supported");
                return (ENODEV);
        }
        DRM_COPYFROM_WITH_RETURN(&args,
            (void *) data, sizeof (args));

        spin_lock(&dev->object_name_lock);

        obj = idr_list_find(&dev->object_name_idr, args.name);

        if (obj)
                drm_gem_object_reference(obj);
        spin_unlock(&dev->object_name_lock);
        if (!obj) {
                DRM_ERROR("Can't find the obj %d", args.name);
                return (ENOENT);
        }

        ret = drm_gem_handle_create(fpriv, obj, &handle);
        spin_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        spin_unlock(&dev->struct_mutex);
        if (ret != 0)
                return (ret);

        args.handle = handle;
        args.size = obj->size;

        ret = DRM_COPY_TO_USER((void *) data, &args, sizeof (args));
        if (ret != 0)
                DRM_ERROR(" gem open error! %d", ret);
        return (ret);
}
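
/*
 * A minimal sharing sketch from the client side (illustrative only, assuming
 * the standard libdrm-style DRM_IOCTL_GEM_FLINK/DRM_IOCTL_GEM_OPEN ioctls
 * that reach the two handlers above; "fd_a"/"fd_b" are hypothetical open
 * device file descriptors):
 *
 *	// client A publishes a buffer it owns by handle
 *	struct drm_gem_flink flink;
 *	flink.handle = handle_a;
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);	// flink.name is global
 *
 *	// client B imports it by name, getting its own handle plus the size
 *	struct drm_gem_open op;
 *	op.name = flink.name;
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &op);		// op.handle, op.size set
 */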

/*
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_file *file_private)
{
        idr_list_init(&file_private->object_idr);
        mutex_init(&file_private->table_lock, NULL, MUTEX_DRIVER, NULL);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static void
drm_gem_object_release_handle(struct drm_gem_object *obj)
{
        drm_gem_object_handle_unreference(obj);
}

/*
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        struct idr_list  *entry;
        spin_lock(&dev->struct_mutex);

        idr_list_for_each(entry, &file_private->object_idr)
            drm_gem_object_release_handle(entry->obj);

        idr_list_free(&file_private->object_idr);
        spin_unlock(&dev->struct_mutex);
}

/*
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_local_map *map = obj->map;

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);

        gfxp_umem_cookie_destroy(map->drm_umem_cookie);
        drm_free(map, sizeof (struct drm_local_map), DRM_MEM_MAPS);

        kmem_free(obj->pfnarray, btopr(obj->real_size) * sizeof (pfn_t));

        (void) ddi_dma_unbind_handle(obj->dma_hdl);
        ddi_dma_mem_free(&obj->acc_hdl);
        ddi_dma_free_handle(&obj->dma_hdl);

        atomic_dec(&dev->object_count);
        atomic_sub(obj->size, &dev->object_memory);
        kmem_free(obj, sizeof (struct drm_gem_object));
}

/*
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void
drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        int err;
        struct drm_device *dev = obj->dev;
        /* Remove any name for this object */
        spin_lock(&dev->object_name_lock);
        if (obj->flink) {
                err = idr_list_remove(&dev->object_name_idr, obj->name);
                if (err == -1)
                        DRM_ERROR("%s", __func__);
                obj->flink = 0;
                spin_unlock(&dev->object_name_lock);
                /*
                 * The object name held a reference to this object, drop
                 * that now.
                 */
                drm_gem_object_unreference(obj);
        } else {
                spin_unlock(&dev->object_name_lock);
        }
}