4045 zfs write throttle & i/o scheduler performance work
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>

          --- old/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
          +++ new/usr/src/uts/common/fs/zfs/sys/vdev_impl.h
[ 91 lines elided ]
  92   92          uint16_t        ve_missed_update;
  93   93          zio_t           *ve_fill_io;
  94   94  };
  95   95  
  96   96  struct vdev_cache {
  97   97          avl_tree_t      vc_offset_tree;
  98   98          avl_tree_t      vc_lastused_tree;
  99   99          kmutex_t        vc_lock;
 100  100  };
 101  101  
      102 +typedef struct vdev_queue_class {
      103 +        uint32_t        vqc_active;
      104 +
      105 +        /*
      106 +         * Sorted by offset or timestamp, depending on whether the
      107 +         * queue is LBA-ordered or FIFO.
      108 +         */
      109 +        avl_tree_t      vqc_queued_tree;
      110 +} vdev_queue_class_t;
      111 +
 102  112  struct vdev_queue {
 103      -        avl_tree_t      vq_deadline_tree;
 104      -        avl_tree_t      vq_read_tree;
 105      -        avl_tree_t      vq_write_tree;
 106      -        avl_tree_t      vq_pending_tree;
 107      -        hrtime_t        vq_io_complete_ts;
      113 +        vdev_t          *vq_vdev;
      114 +        vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
      115 +        avl_tree_t      vq_active_tree;
      116 +        uint64_t        vq_last_offset;
      117 +        hrtime_t        vq_io_complete_ts; /* time last i/o completed */
 108  118          kmutex_t        vq_lock;
 109  119  };
 110  120  
 111  121  /*
 112  122   * Virtual device descriptor
 113  123   */
 114  124  struct vdev {
 115  125          /*
 116  126           * Common to all vdev types.
 117  127           */
[ 221 lines elided ]
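For context on the new layout: struct vdev_queue now keeps one vdev_queue_class_t per queueable priority, and each class holds its pending I/Os in a single AVL tree whose ordering depends on the class. The sketch below is illustrative only, showing the two comparator styles such a split implies; the simplified io_t type and the function names are hypothetical stand-ins, not the zio_t/vdev_queue code from this change.

/*
 * Illustrative sketch only: the real trees are keyed on zio_t fields.
 */
#include <stdint.h>

typedef struct io {
	uint64_t	io_offset;	/* starting LBA of the request */
	int64_t		io_timestamp;	/* when the request was queued */
} io_t;

/*
 * LBA-ordered classes sort by offset, so queued I/Os can be issued
 * in roughly sequential disk order.
 */
static int
io_offset_compare(const void *x1, const void *x2)
{
	const io_t *i1 = x1;
	const io_t *i2 = x2;

	if (i1->io_offset < i2->io_offset)
		return (-1);
	if (i1->io_offset > i2->io_offset)
		return (1);
	return (0);	/* a real AVL comparator must also break ties */
}

/*
 * FIFO classes sort by queue-insertion time, preserving arrival
 * order for latency-sensitive I/Os.
 */
static int
io_timestamp_compare(const void *x1, const void *x2)
{
	const io_t *i1 = x1;
	const io_t *i2 = x2;

	if (i1->io_timestamp < i2->io_timestamp)
		return (-1);
	if (i1->io_timestamp > i2->io_timestamp)
		return (1);
	return (0);	/* likewise needs a tie-breaker in practice */
}

Read together with the diff, vqc_active suggests a per-class cap on in-flight I/Os, while vq_active_tree and vq_last_offset track the issued I/Os and last issue offset for the whole vdev; the old per-type deadline/read/write/pending trees are no longer needed.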