/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * IEEE 1284 Parallel Port Device Driver
 *
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/conf.h>           /* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>         /* for modldrv */
#include <sys/stat.h>           /* ddi_create_minor_node S_IFCHR */
#include <sys/open.h>
#include <sys/ddi_impldefs.h>
#include <sys/kstat.h>

#include <sys/prnio.h>
#include <sys/ecppreg.h>        /* hw description */
#include <sys/ecppio.h>         /* ioctl description */
#include <sys/ecppvar.h>        /* driver description */
#include <sys/dma_engine.h>
#include <sys/dma_i8237A.h>
/*
 * Background
 * ==========
 * The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
 * fully interlocked, bidirectional parallel communications between hosts
 * and printers or other peripherals." (1.1) The standard defines 5 modes
 * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
 * in direction, bandwidth, pin assignment, DMA capability, etc.
 *
 * Negotiation is the mechanism for moving between modes. Compatibility mode
 * is the default mode, from which negotiations to other modes occur and
 * to which both host and peripheral break in case of interface errors.
 * Compatibility mode provides a unidirectional (forward) channel for
 * communicating with old pre-1284 peripherals.
 *
 * Each mode has a number of phases. The [mode, phase] pair represents the
 * interface state. The host initiates all transfers, though the peripheral
 * can request a backchannel transfer by asserting the nErr pin.
 *
 * The ecpp driver implements an IEEE 1284-compliant host using a combination
 * of hardware and software. The hardware part is represented by a controller,
 * which is part of the SuperIO chip. Ecpp supports the following SuperIOs:
 * PC87332 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
 * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
 *
 * Negotiation is performed in software. A transfer may be performed either
 * in software by driving output pins for each byte (PIO method), or with
 * hardware assistance - the SuperIO has a 16-byte FIFO, which is filled by
 * the driver (normally using DMA), while the chip performs the actual xfer.
 * PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
 *
 * The driver currently supports the following modes:
 *
 * - Compatibility mode: byte-wide forward channel ~50KB/sec;
 *   pp->io_mode defines PIO or DMA method of transfer;
 * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
 * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
 *
 * Theory of operation
 * ===================
 * The manner in which ecpp drives the 1284 interface is that of a state
 * machine. State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase
 * {ECPP_PHASE_*} and transfer method {PIO, DMA}. State is a function of
 * application actions {write(2), ioctl(2)} and peripheral reaction.
 *
 * 1284 interface state is described by the following variables:
 *   pp->current_mode  -- 1284 mode used for forward transfers;
 *   pp->backchannel   -- 1284 mode used for backward transfers;
 *   pp->current_phase -- 1284 phase;
 *
 * Bidirectional operation in Compatibility mode is provided by a combination:
 * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
 * ECPP_CENTRONICS means no backchannel.
 *
 * Driver internal state is defined by pp->e_busy as follows:
 *   ECPP_IDLE  -- idle, no active transfers;
 *   ECPP_BUSY  -- transfer is in progress;
 *   ECPP_ERR   -- have data to transfer, but peripheral can't receive data;
 *   ECPP_FLUSH -- flushing the queues;
 *
 * When opened, the driver is in the ECPP_IDLE state and the current mode is
 * ECPP_CENTRONICS. Default negotiation tries to negotiate to the best mode
 * supported by the printer and sets pp->current_mode and pp->backchannel
 * accordingly.
 *
 * When output data arrives in M_DATA mblks, ecpp_wput() puts them on the
 * queue to let ecpp_wsrv() concatenate small blocks into one big transfer
 * by copying them into pp->ioblock. If the first mblk is bigger than
 * pp->ioblock, it is used instead of the i/o block (and pointed to by
 * pp->msg).
 *
 * Before starting the transfer the driver checks if the peripheral is ready
 * by calling ecpp_check_status(); if it is not, the driver goes to the
 * ECPP_ERR state and schedules ecpp_wsrv_timer(), which qenable()s the wq,
 * effectively rechecking the peripheral readiness and restarting itself
 * until it is ready. The transfer is then started by calling ecpp_start(),
 * and the driver goes ECPP_BUSY.
 *
 * While a transfer is in progress all arriving messages will be queued up.
 * A transfer can end in either of two ways:
 * - an interrupt occurs, ecpp_isr() checks if all the data was transferred;
 *   if so, clean up and go ECPP_IDLE, otherwise putback untransferred data
 *   and qenable();
 * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred
 *   data;
 *
 * The PIO transfer method is very CPU intensive: for each sent byte the
 * peripheral state is checked, then the byte is transferred and the driver
 * waits for an nAck interrupt; ecpp_isr() will then look if there is more
 * data and if so triggers the soft interrupt, which transfers the next byte.
 * The PIO method is needed only for legacy printers which are sensitive to
 * the strobe problem (Bugid 4192788).
 *
 * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
 * going idle (ecpp_idle_phase()). Many routines qenable() the write queue,
 * meaning "check if there are pending requests, process them and go idle".
 *
 * In its idle state the driver will always try to listen to the backchannel
 * (as advised by 1284).
 *
 * The mechanism for handling backchannel requests is as follows:
 * - when the peripheral has data to send it asserts the nErr pin
 *   (and also nAck in Nibble Mode), which results in an interrupt on the
 *   host;
 * - the ISR creates an M_CTL message containing an ECPP_BACKCHANNEL byte
 *   and puts it back on the write queue;
 * - ecpp_wsrv() gets the M_CTL and calls ecpp_peripheral2host(), which
 *   kicks off the transfer;
 *
 * This is how the Nibble and ECP mode backchannels are implemented.
 * If the read queue gets full, the backchannel request is rejected.
 * As the application reads data and the queue size falls below the low
 * watermark, ecpp_rsrv() gets called and enables the backchannel again.
 *
 * Future enhancements
 * ===================
 *
 * Support new modes: Byte and EPP.
 */
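
/*
 * For illustration only (not part of the driver): a minimal sketch of a
 * user-level consumer of this interface. The device path is hypothetical
 * and error handling is omitted; write(2) feeds the forward channel
 * described above, read(2) drains the backchannel if one was negotiated:
 *
 *	int fd = open("/dev/ecpp0", O_RDWR);
 *	struct ecpp_transfer_parms p;
 *
 *	(void) ioctl(fd, ECPPIOC_GETPARMS, &p);		negotiated mode/timeout
 *	(void) write(fd, buf, len);			forward transfer
 *	n = read(fd, buf, sizeof (buf));		reverse transfer, if any
 *	(void) close(fd);				back to Compatibility
 */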

#ifndef ECPP_DEBUG
#define ECPP_DEBUG 0
#endif  /* ECPP_DEBUG */
int ecpp_debug = ECPP_DEBUG;

int noecp = 0;  /* flag not to use ECP mode */

/* driver entry point fn definitions */
static int      ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
static int      ecpp_close(queue_t *, int, cred_t *);
static uint_t   ecpp_isr(caddr_t);
static uint_t   ecpp_softintr(caddr_t);

/* configuration entry point fn definitions */
static int      ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int      ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
static int      ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);

/* isr support routines */
static uint_t   ecpp_nErr_ihdlr(struct ecppunit *);
static uint_t   ecpp_pio_ihdlr(struct ecppunit *);
static uint_t   ecpp_dma_ihdlr(struct ecppunit *);
static uint_t   ecpp_M1553_intr(struct ecppunit *);

/* configuration support routines */
static void     ecpp_get_props(struct ecppunit *);

/* Streams Routines */
static int      ecpp_wput(queue_t *, mblk_t *);
static int      ecpp_wsrv(queue_t *);
static int      ecpp_rsrv(queue_t *);
static void     ecpp_flush(struct ecppunit *, int);
static void     ecpp_start(struct ecppunit *, caddr_t, size_t);

/* ioctl handling */
static void     ecpp_putioc(queue_t *, mblk_t *);
static void     ecpp_srvioc(queue_t *, mblk_t *);
static void     ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
static void     ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
static void     ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
static void     ecpp_srvioc_devid(queue_t *, mblk_t *,
                                struct ecpp_device_id *, int *);
static void     ecpp_srvioc_prnif(queue_t *, mblk_t *);
static void     ecpp_ack_ioctl(queue_t *, mblk_t *);
static void     ecpp_nack_ioctl(queue_t *, mblk_t *, int);

/* kstat routines */
static void     ecpp_kstat_init(struct ecppunit *);
static int      ecpp_kstat_update(kstat_t *, int);
static int      ecpp_kstatintr_update(kstat_t *, int);

/* dma routines */
static void     ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
static uint8_t  ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
static uint8_t  ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);

/* pio routines */
static void     ecpp_pio_writeb(struct ecppunit *);
static void     ecpp_xfer_cleanup(struct ecppunit *);
static uint8_t  ecpp_prep_pio_xfer(struct ecppunit *, caddr_t, size_t);

/* misc */
static uchar_t  ecpp_reset_port_regs(struct ecppunit *);
static void     ecpp_xfer_timeout(void *);
static void     ecpp_fifo_timer(void *);
static void     ecpp_wsrv_timer(void *);
static uchar_t  dcr_write(struct ecppunit *, uint8_t);
static uchar_t  ecr_write(struct ecppunit *, uint8_t);
static uchar_t  ecpp_check_status(struct ecppunit *);
static int      ecpp_backchan_req(struct ecppunit *);
static void     ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *);
static uint_t   ecpp_get_prn_ifcap(struct ecppunit *);

/* stubs */
static void     empty_config_mode(struct ecppunit *);
static void     empty_mask_intr(struct ecppunit *);

/* PC87332 support */
static int      pc87332_map_regs(struct ecppunit *);
static void     pc87332_unmap_regs(struct ecppunit *);
static int      pc87332_config_chip(struct ecppunit *);
static void     pc87332_config_mode(struct ecppunit *);
static uint8_t  pc87332_read_config_reg(struct ecppunit *, uint8_t);
static void     pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t);
static void     cheerio_mask_intr(struct ecppunit *);
static void     cheerio_unmask_intr(struct ecppunit *);
static int      cheerio_dma_start(struct ecppunit *);
static int      cheerio_dma_stop(struct ecppunit *, size_t *);
static size_t   cheerio_getcnt(struct ecppunit *);
static void     cheerio_reset_dcsr(struct ecppunit *);

/* PC97317 support */
static int      pc97317_map_regs(struct ecppunit *);
static void     pc97317_unmap_regs(struct ecppunit *);
static int      pc97317_config_chip(struct ecppunit *);
static void     pc97317_config_mode(struct ecppunit *);

/* M1553 Southbridge support */
static int      m1553_map_regs(struct ecppunit *pp);
static void     m1553_unmap_regs(struct ecppunit *pp);
static int      m1553_config_chip(struct ecppunit *);
static uint8_t  m1553_read_config_reg(struct ecppunit *, uint8_t);
static void     m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t);

/* M1553 Southbridge DMAC 8237 support routines */
static int      dma8237_dma_start(struct ecppunit *);
static int      dma8237_dma_stop(struct ecppunit *, size_t *);
static size_t   dma8237_getcnt(struct ecppunit *);
static void     dma8237_write_addr(struct ecppunit *, uint32_t);
static void     dma8237_write_count(struct ecppunit *, uint32_t);
static uint32_t dma8237_read_count(struct ecppunit *);
static void     dma8237_write(struct ecppunit *, int, uint8_t);
static uint8_t  dma8237_read(struct ecppunit *, int);
#ifdef INCLUDE_DMA8237_READ_ADDR
static uint32_t dma8237_read_addr(struct ecppunit *);
#endif

/* i86 PC support routines */

#if defined(__x86)
static int      x86_dma_start(struct ecppunit *);
static int      x86_dma_stop(struct ecppunit *, size_t *);
static int      x86_map_regs(struct ecppunit *);
static void     x86_unmap_regs(struct ecppunit *);
static int      x86_config_chip(struct ecppunit *);
static size_t   x86_getcnt(struct ecppunit *);
#endif

/* IEEE 1284 phase transitions */
static void     ecpp_1284_init_interface(struct ecppunit *);
static int      ecpp_1284_termination(struct ecppunit *);
static uchar_t  ecpp_idle_phase(struct ecppunit *);
static int      ecp_forward2reverse(struct ecppunit *);
static int      ecp_reverse2forward(struct ecppunit *);
static int      read_nibble_backchan(struct ecppunit *);

/* reverse transfers */
static uint_t   ecpp_peripheral2host(struct ecppunit *);
static uchar_t  ecp_peripheral2host(struct ecppunit *);
static uchar_t  nibble_peripheral2host(struct ecppunit *pp, uint8_t *);
static int      ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int);
static void     ecpp_ecp_read_timeout(void *);
static void     ecpp_ecp_read_completion(struct ecppunit *);

/* IEEE 1284 mode transitions */
static void     ecpp_default_negotiation(struct ecppunit *);
static int      ecpp_mode_negotiation(struct ecppunit *, uchar_t);
static int      ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *);
static int      ecp_negotiation(struct ecppunit *);
static int      nibble_negotiation(struct ecppunit *);
static int      devidnib_negotiation(struct ecppunit *);

/* IEEE 1284 utility routines */
static int      wait_dsr(struct ecppunit *, uint8_t, uint8_t, int);

/* debugging functions */
static void     ecpp_error(dev_info_t *, char *, ...);
static uchar_t  ecpp_get_error_status(uchar_t);

/*
 * Chip-dependent structures
 */
static ddi_dma_attr_t cheerio_dma_attr = {
        DMA_ATTR_VERSION,       /* version */
        0x00000000ull,          /* dlim_addr_lo */
        0xfffffffeull,          /* dlim_addr_hi */
        0xffffff,               /* DMA counter register */
        1,                      /* DMA address alignment */
        0x74,                   /* burst sizes */
        0x0001,                 /* min effective DMA size */
        0xffff,                 /* maximum transfer size */
        0xffff,                 /* segment boundary */
        1,                      /* s/g list length */
        1,                      /* granularity of device */
        0                       /* DMA flags */
};

static struct ecpp_hw pc87332 = {
        pc87332_map_regs,
        pc87332_unmap_regs,
        pc87332_config_chip,
        pc87332_config_mode,
        cheerio_mask_intr,
        cheerio_unmask_intr,
        cheerio_dma_start,
        cheerio_dma_stop,
        cheerio_getcnt,
        &cheerio_dma_attr
};

static struct ecpp_hw pc97317 = {
        pc97317_map_regs,
        pc97317_unmap_regs,
        pc97317_config_chip,
        pc97317_config_mode,
        cheerio_mask_intr,
        cheerio_unmask_intr,
        cheerio_dma_start,
        cheerio_dma_stop,
        cheerio_getcnt,
        &cheerio_dma_attr
};

static ddi_dma_attr_t i8237_dma_attr = {
        DMA_ATTR_VERSION,       /* version */
        0x00000000ull,          /* dlim_addr_lo */
        0xfffffffeull,          /* dlim_addr_hi */
        0xffff,                 /* DMA counter register */
        1,                      /* DMA address alignment */
        0x01,                   /* burst sizes */
        0x0001,                 /* min effective DMA size */
        0xffff,                 /* maximum transfer size */
        0x7fff,                 /* segment boundary */
        1,                      /* s/g list length */
        1,                      /* granularity of device */
        0                       /* DMA flags */
};

static struct ecpp_hw m1553 = {
        m1553_map_regs,
        m1553_unmap_regs,
        m1553_config_chip,
        empty_config_mode,      /* no config_mode */
        empty_mask_intr,        /* no mask_intr */
        empty_mask_intr,        /* no unmask_intr */
        dma8237_dma_start,
        dma8237_dma_stop,
        dma8237_getcnt,
        &i8237_dma_attr
};

#if defined(__x86)
static ddi_dma_attr_t sb_dma_attr = {
        DMA_ATTR_VERSION,       /* version */
        0x00000000ull,          /* dlim_addr_lo */
        0xffffff,               /* dlim_addr_hi */
        0xffff,                 /* DMA counter register */
        1,                      /* DMA address alignment */
        0x01,                   /* burst sizes */
        0x0001,                 /* min effective DMA size */
        0xffffffff,             /* maximum transfer size */
        0xffff,                 /* segment boundary */
        1,                      /* s/g list length */
        1,                      /* granularity of device */
        0                       /* DMA flags */
};

static struct ecpp_hw x86 = {
        x86_map_regs,
        x86_unmap_regs,
        x86_config_chip,
        empty_config_mode,      /* no config_mode */
        empty_mask_intr,        /* no mask_intr */
        empty_mask_intr,        /* no unmask_intr */
        x86_dma_start,
        x86_dma_stop,
        x86_getcnt,
        &sb_dma_attr
};
#endif

/*
 * list of supported devices
 */
struct ecpp_hw_bind ecpp_hw_bind[] = {
        { "ns87317-ecpp",       &pc97317,       "PC97317" },
        { "pnpALI,1533,3",      &m1553,         "M1553" },
        { "ecpp",               &pc87332,       "PC87332" },
#if defined(__x86)
        { "lp",                 &x86,           "i86pc" },
#endif
};
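
/*
 * ecpp_determine_sio_type() (declared above) matches the binding name of
 * the device node against the name field of this table; a sketch of the
 * presumed lookup, for reference only:
 *
 *	char *name = ddi_binding_name(pp->dip);
 *
 *	for (i = 0; i < sizeof (ecpp_hw_bind) / sizeof (ecpp_hw_bind[0]); i++)
 *		if (strcmp(name, ecpp_hw_bind[i].name) == 0)
 *			return (&ecpp_hw_bind[i]);
 *	return (NULL);
 */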

static ddi_device_acc_attr_t acc_attr = {
        DDI_DEVICE_ATTR_V0,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC
};

static struct ecpp_transfer_parms default_xfer_parms = {
        FWD_TIMEOUT_DEFAULT,    /* write timeout in seconds */
        ECPP_CENTRONICS         /* supported mode */
};

/* prnio interface info string */
static const char prn_ifinfo[] = PRN_PARALLEL;

/* prnio timeouts */
static const struct prn_timeouts prn_timeouts_default = {
        FWD_TIMEOUT_DEFAULT,    /* forward timeout */
        REV_TIMEOUT_DEFAULT     /* reverse timeout */
};
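
/*
 * For reference, an application would override these defaults through the
 * prnio(7I) interface; a hypothetical sketch (fd is an open port, values
 * are examples, error handling omitted):
 *
 *	struct prn_timeouts tmo;
 *
 *	tmo.tmo_forward = 60;		give up forward transfer after 60 sec
 *	tmo.tmo_reverse = 60;
 *	(void) ioctl(fd, PRNIOC_SET_TIMEOUTS, &tmo);
 */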

static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY;
static int ecpp_def_timeout = 90;  /* left in for 2.7 compatibility */

static void    *ecppsoft_statep;

/*
 * STREAMS framework manages locks for these structures
 */
_NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", datab))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", queue))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq))
_NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions))

struct module_info ecppinfo = {
        /* id, name, min pkt siz, max pkt siz, hi water, low water */
        42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT
};

static struct qinit ecpp_rinit = {
        putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};

static struct qinit ecpp_wint = {
        ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL
};

struct streamtab ecpp_str_info = {
        &ecpp_rinit, &ecpp_wint, NULL, NULL
};

static struct cb_ops ecpp_cb_ops = {
        nodev,                  /* cb_open */
        nodev,                  /* cb_close */
        nodev,                  /* cb_strategy */
        nodev,                  /* cb_print */
        nodev,                  /* cb_dump */
        nodev,                  /* cb_read */
        nodev,                  /* cb_write */
        nodev,                  /* cb_ioctl */
        nodev,                  /* cb_devmap */
        nodev,                  /* cb_mmap */
        nodev,                  /* cb_segmap */
        nochpoll,               /* cb_chpoll */
        ddi_prop_op,            /* cb_prop_op */
        &ecpp_str_info,         /* cb_stream */
        (D_NEW | D_MP | D_MTPERQ)       /* cb_flag */
};

/*
 * Declare ops vectors for auto configuration.
 */
struct dev_ops  ecpp_ops = {
        DEVO_REV,               /* devo_rev */
        0,                      /* devo_refcnt */
        ecpp_getinfo,           /* devo_getinfo */
        nulldev,                /* devo_identify */
        nulldev,                /* devo_probe */
        ecpp_attach,            /* devo_attach */
        ecpp_detach,            /* devo_detach */
        nodev,                  /* devo_reset */
        &ecpp_cb_ops,           /* devo_cb_ops */
        (struct bus_ops *)NULL, /* devo_bus_ops */
        nulldev,                /* devo_power */
        ddi_quiesce_not_needed, /* devo_quiesce */
};

extern struct mod_ops mod_driverops;

static struct modldrv ecppmodldrv = {
        &mod_driverops,         /* type of module - driver */
        "parallel port driver",
        &ecpp_ops,
};

static struct modlinkage ecppmodlinkage = {
        MODREV_1,
        &ecppmodldrv,
        0
};


/*
 *
 * DDI/DKI entry points and supplementary routines
 *
 */

int
_init(void)
{
        int    error;

        if ((error = ddi_soft_state_init(&ecppsoft_statep,
            sizeof (struct ecppunit), 1)) != 0) {
                return (error);
        }

        if ((error = mod_install(&ecppmodlinkage)) != 0) {
                ddi_soft_state_fini(&ecppsoft_statep);
        }

        return (error);
}

int
_fini(void)
{
        int    error;

        if ((error = mod_remove(&ecppmodlinkage)) == 0) {
                ddi_soft_state_fini(&ecppsoft_statep);
        }

        return (error);
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&ecppmodlinkage, modinfop));
}

static int
ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        int                     instance;
        char                    name[16];
        struct ecppunit         *pp;
        struct ecpp_hw_bind     *hw_bind;

        instance = ddi_get_instance(dip);

        switch (cmd) {
        case DDI_ATTACH:
                break;

        case DDI_RESUME:
                if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
                        return (DDI_FAILURE);
                }

                mutex_enter(&pp->umutex);

                pp->suspended = FALSE;

                /*
                 * Initialize the chip and restore current mode if needed
                 */
                (void) ECPP_CONFIG_CHIP(pp);
                (void) ecpp_reset_port_regs(pp);

                if (pp->oflag == TRUE) {
                        int current_mode = pp->current_mode;

                        (void) ecpp_1284_termination(pp);
                        (void) ecpp_mode_negotiation(pp, current_mode);
                }

                mutex_exit(&pp->umutex);

                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

        if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) {
                ecpp_error(dip, "ddi_soft_state_zalloc failed\n");
                goto fail;
        }

        pp = ddi_get_soft_state(ecppsoft_statep, instance);

        pp->dip = dip;
        pp->suspended = FALSE;

        /*
         * Determine SuperIO type and set chip-dependent variables
         */
        hw_bind = ecpp_determine_sio_type(pp);

        if (hw_bind == NULL) {
                cmn_err(CE_NOTE, "parallel port controller not supported");
                goto fail_sio;
        } else {
                pp->hw = hw_bind->hw;
                ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info);
        }

        /*
         * Map registers
         */
        if (ECPP_MAP_REGS(pp) != SUCCESS) {
                goto fail_map;
        }

        if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT,
            NULL, &pp->dma_handle) != DDI_SUCCESS) {
                ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n");
                goto fail_dma;
        }

        if (ddi_get_iblock_cookie(dip, 0,
            &pp->ecpp_trap_cookie) != DDI_SUCCESS) {
                ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n");
                goto fail_ibc;
        }

        mutex_init(&pp->umutex, NULL, MUTEX_DRIVER,
            (void *)pp->ecpp_trap_cookie);

        cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL);

        if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, ecpp_isr,
            (caddr_t)pp) != DDI_SUCCESS) {
                ecpp_error(dip, "ecpp_attach: failed to add hard intr\n");
                goto fail_intr;
        }

        if (ddi_add_softintr(dip, DDI_SOFTINT_LOW,
            &pp->softintr_id, 0, 0, ecpp_softintr,
            (caddr_t)pp) != DDI_SUCCESS) {
                ecpp_error(dip, "ecpp_attach: failed to add soft intr\n");
                goto fail_softintr;
        }

        (void) sprintf(name, "ecpp%d", instance);

        if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
            DDI_NT_PRINTER, NULL) == DDI_FAILURE) {
                ecpp_error(dip, "ecpp_attach: create_minor_node failed\n");
                goto fail_minor;
        }

        pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP);
        if (pp->ioblock == NULL) {
                ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n");
                goto fail_iob;
        } else {
                ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%p\n",
                    (void *)pp->ioblock);
        }

        ecpp_get_props(pp);
#if defined(__x86)
        if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) {
                if (ddi_dmae_alloc(dip, pp->uh.x86.chn,
                    DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS)
                        ecpp_error(pp->dip, "dmae_alloc success!\n");
        }
#endif
        if (ECPP_CONFIG_CHIP(pp) == FAILURE) {
                ecpp_error(pp->dip, "config_chip failed.\n");
                goto fail_config;
        }

        ecpp_kstat_init(pp);

        ddi_report_dev(dip);

        return (DDI_SUCCESS);

fail_config:
        ddi_prop_remove_all(dip);
        kmem_free(pp->ioblock, IO_BLOCK_SZ);
fail_iob:
        ddi_remove_minor_node(dip, NULL);
fail_minor:
        ddi_remove_softintr(pp->softintr_id);
fail_softintr:
        ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);
fail_intr:
        mutex_destroy(&pp->umutex);
        cv_destroy(&pp->pport_cv);
fail_ibc:
        ddi_dma_free_handle(&pp->dma_handle);
fail_dma:
        ECPP_UNMAP_REGS(pp);
fail_map:
fail_sio:
        ddi_soft_state_free(ecppsoft_statep, instance);
fail:
        ecpp_error(dip, "ecpp_attach: failed.\n");

        return (DDI_FAILURE);
}

static int
ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int             instance;
        struct ecppunit *pp;

        instance = ddi_get_instance(dip);

        switch (cmd) {
        case DDI_DETACH:
                break;

        case DDI_SUSPEND:
                if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) {
                        return (DDI_FAILURE);
                }

                mutex_enter(&pp->umutex);
                ASSERT(pp->suspended == FALSE);

                pp->suspended = TRUE;   /* prevent new transfers */

                /*
                 * Wait if there's any activity on the port
                 */
                if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) {
                        (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex,
                            SUSPEND_TOUT * drv_usectohz(1000000),
                            TR_CLOCK_TICK);
                        if ((pp->e_busy == ECPP_BUSY) ||
                            (pp->e_busy == ECPP_FLUSH)) {
                                pp->suspended = FALSE;
                                mutex_exit(&pp->umutex);
                                ecpp_error(pp->dip,
                                    "ecpp_detach: suspend timeout\n");
                                return (DDI_FAILURE);
                        }
                }

                mutex_exit(&pp->umutex);
                return (DDI_SUCCESS);

        default:
                return (DDI_FAILURE);
        }

        pp = ddi_get_soft_state(ecppsoft_statep, instance);
#if defined(__x86)
        if (pp->hw == &x86 && pp->uh.x86.chn != 0xff)
                (void) ddi_dmae_release(pp->dip, pp->uh.x86.chn);
#endif
        if (pp->dma_handle != NULL)
                ddi_dma_free_handle(&pp->dma_handle);

        ddi_remove_minor_node(dip, NULL);

        ddi_remove_softintr(pp->softintr_id);

        ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie);

        if (pp->ksp) {
                kstat_delete(pp->ksp);
        }
        if (pp->intrstats) {
                kstat_delete(pp->intrstats);
        }

        cv_destroy(&pp->pport_cv);

        mutex_destroy(&pp->umutex);

        ECPP_UNMAP_REGS(pp);

        kmem_free(pp->ioblock, IO_BLOCK_SZ);

        ddi_prop_remove_all(dip);

        ddi_soft_state_free(ecppsoft_statep, instance);

        return (DDI_SUCCESS);
}

/*
 * ecpp_get_props() reads ecpp.conf for user-definable tunables.
 * If the file or a particular variable is not there, a default value
 * is assigned.
 */
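
/*
 * A hypothetical ecpp.conf fragment naming the tunables read below;
 * the values shown are illustrative, not the built-in defaults:
 *
 *	fast-centronics="false";
 *	fast-1284-compatible="true";
 *	centronics-init-seq="false";
 *	centronics-retry=750;
 *	centronics-wait-for-busy=1000;
 *	centronics-data-setup-time=2;
 *	centronics-strobe-pulse-width=2;
 *	ecpp-transfer-timeout=90;
 */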

static void
ecpp_get_props(struct ecppunit *pp)
{
        char    *prop;
#if defined(__x86)
        int     len;
        int     value;
#endif
        /*
         * If fast_centronics is TRUE, non-compliant IEEE 1284
         * peripherals (Centronics peripherals) will operate in DMA mode.
         * Transfers between main memory and the device will be via DMA;
         * peripheral handshaking will be conducted by superio logic.
         * If ecpp cannot read the variable correctly, fast_centronics will
         * be set to FALSE.  In this case, transfers and handshaking
         * will be conducted by PIO for Centronics devices.
         */
        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
            "fast-centronics", &prop) == DDI_PROP_SUCCESS) {
                pp->fast_centronics =
                    (strcmp(prop, "true") == 0) ? TRUE : FALSE;
                ddi_prop_free(prop);
        } else {
                pp->fast_centronics = FALSE;
        }

        /*
         * If fast-1284-compatible is set to TRUE, when ecpp communicates
         * with IEEE 1284 compliant peripherals, data transfers between
         * main memory and the parallel port will be conducted by DMA.
         * Handshaking between the port and peripheral will be conducted
         * by superio logic.  This is the default characteristic.  If
         * fast-1284-compatible is set to FALSE, transfers and handshaking
         * will be conducted by PIO.
         */

        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
            "fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
                pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
                ddi_prop_free(prop);
        } else {
                pp->fast_compat = TRUE;
        }

        /*
         * Some centronics peripherals require the nInit signal to be
         * toggled to reset the device.  If centronics_init_seq is set
         * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
         * Applications have the opportunity to toggle the nInit signal
         * with ioctl(2) calls as well.  The default is to set it to FALSE.
         */
        if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
            "centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
                pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
                ddi_prop_free(prop);
        } else {
                pp->init_seq = FALSE;
        }

        /*
         * If one of the centronics status signals is in an erroneous
         * state, ecpp_wsrv() will be reinvoked every centronics-retry ms
         * to check if the status is ok to transfer.  If the property is
         * not found, wsrv_retry will be set to CENTRONICS_RETRY ms.
         */
        pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "centronics-retry", CENTRONICS_RETRY);

        /*
         * In PIO mode, ecpp_isr() will loop waiting for the busy signal
         * to be deasserted before transferring the next byte. wait_for_busy
         * is specified in microseconds.  If the property is not found,
         * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
         */
        pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "centronics-wait-for-busy", WAIT_FOR_BUSY);

        /*
         * In PIO mode, centronics transfers must hold the data signals
         * for data_setup_time milliseconds before the strobe is asserted.
         */
        pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "centronics-data-setup-time", DATA_SETUP_TIME);

        /*
         * In PIO mode, centronics transfers assert the strobe signal
         * for a period of strobe_pulse_width milliseconds.
         */
        pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);

        /*
         * Upon a transfer to the peripheral, ecpp waits write_timeout
         * seconds for the transmission to complete.
         */
        default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
            pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);

        pp->xfer_parms = default_xfer_parms;

        /*
         * Get dma channel for M1553
         */
        if (pp->hw == &m1553) {
                pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
                    pp->dip, 0, "dma-channel", 0x1);
                ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
        }
#if defined(__x86)
        len = sizeof (value);
        /* Get dma channel for i86 pc */
        if (pp->hw == &x86) {
                if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
                    DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
                    != DDI_PROP_SUCCESS) {
                        ecpp_error(pp->dip, "No dma channel found\n");
                        pp->uh.x86.chn = 0xff;
                        pp->fast_compat = FALSE;
                        pp->noecpregs = TRUE;
                } else
                        pp->uh.x86.chn = (uint8_t)value;
        }
#endif
        /*
         * these properties are not yet public
         */
        pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "ecp-rev-speed", ECP_REV_SPEED);

        pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
            "rev-watchdog", REV_WATCHDOG);

        ecpp_error(pp->dip,
            "ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
            "ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
            "ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
            "ecpp_get_prop: transfer-timeout=%d\n",
            pp->fast_centronics, pp->fast_compat,
            pp->wsrv_retry, pp->wait_for_busy,
            pp->data_setup_time, pp->strobe_pulse_width,
            pp->xfer_parms.write_timeout);
}

/*ARGSUSED*/
int
ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
        dev_t   dev = (dev_t)arg;
        struct ecppunit *pp;
        int     instance, ret;

        instance = getminor(dev);

        switch (infocmd) {
        case DDI_INFO_DEVT2DEVINFO:
                pp = ddi_get_soft_state(ecppsoft_statep, instance);
                if (pp != NULL) {
                        *result = pp->dip;
                        ret = DDI_SUCCESS;
                } else {
                        ret = DDI_FAILURE;
                }
                break;

        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)(uintptr_t)instance;
                ret = DDI_SUCCESS;
                break;

        default:
                ret = DDI_FAILURE;
                break;
        }

        return (ret);
}

/*ARGSUSED2*/
static int
ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
{
        struct ecppunit *pp;
        int             instance;
        struct stroptions *sop;
        mblk_t          *mop;

        instance = getminor(*dev);

        if (instance < 0) {
                return (ENXIO);
        }

        pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);

        if (pp == NULL) {
                return (ENXIO);
        }

        mutex_enter(&pp->umutex);

        /*
         * Parallel port is an exclusive-use device
         * thus providing print job integrity
         */
        if (pp->oflag == TRUE) {
                ecpp_error(pp->dip, "ecpp open failed");
                mutex_exit(&pp->umutex);
                return (EBUSY);
        }

        pp->oflag = TRUE;

        /* initialize state variables */
        pp->prn_timeouts = prn_timeouts_default;
        pp->xfer_parms = default_xfer_parms;
        pp->current_mode = ECPP_CENTRONICS;
        pp->backchannel = ECPP_CENTRONICS;
        pp->current_phase = ECPP_PHASE_PO;
        pp->port = ECPP_PORT_DMA;
        pp->instance = instance;
        pp->timeout_error = 0;
        pp->saved_dsr = DSR_READ(pp);
        pp->ecpp_drain_counter = 0;
        pp->dma_cancelled = FALSE;
        pp->io_mode = ECPP_DMA;
        pp->joblen = 0;
        pp->tfifo_intr = 0;
        pp->softintr_pending = 0;
        pp->nread = 0;

        /* clear the state flag */
        pp->e_busy = ECPP_IDLE;

        pp->readq = RD(q);
        pp->writeq = WR(q);
        pp->msg = NULL;

        RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp;

        /*
         * Get ready: check host/peripheral, negotiate into default mode
         */
        if (ecpp_reset_port_regs(pp) == FAILURE) {
                mutex_exit(&pp->umutex);
                return (EIO);
        }

        mutex_exit(&pp->umutex);

        /*
         * Configure the Stream head and enable the Stream
         */
        if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) {
                return (EAGAIN);
        }

        mop->b_datap->db_type = M_SETOPTS;
        mop->b_wptr += sizeof (struct stroptions);

        /*
         * if device is open with O_NONBLOCK flag set, let read(2) return 0
         * if no data waiting to be read.  Writes will block on flow control.
         */
        sop = (struct stroptions *)mop->b_rptr;
        sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON;
        sop->so_hiwat = ECPPHIWAT;
        sop->so_lowat = ECPPLOWAT;

        /* enable the stream */
        qprocson(q);

        putnext(q, mop);

        mutex_enter(&pp->umutex);

        ecpp_default_negotiation(pp);

        /* go revidle */
        (void) ecpp_idle_phase(pp);

        ecpp_error(pp->dip,
            "ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n",
            pp->current_mode, pp->current_phase,
            ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));

        mutex_exit(&pp->umutex);

        return (0);
}

/*ARGSUSED1*/
static int
ecpp_close(queue_t *q, int flag, cred_t *cred_p)
{
        struct ecppunit *pp;
        timeout_id_t    timeout_id, fifo_timer_id, wsrv_timer_id;

        pp = (struct ecppunit *)q->q_ptr;

        ecpp_error(pp->dip, "ecpp_close: entering ...\n");

        mutex_enter(&pp->umutex);

        /*
         * ecpp_close() will continue to loop until the
         * queue has been drained or the thread
         * receives a SIG.  Typically, when the queue
         * has data, the port will be ECPP_BUSY.  However,
         * after a dma completes and before the wsrv
         * starts the next transfer, the port may be IDLE.
         * In this case, ecpp_close() will loop within this
         * while (qsize) segment.  Since ecpp_wsrv() runs
         * at software interrupt level, this shouldn't loop
         * very long.
         */
        while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) {
                if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) {
                        ecpp_error(pp->dip, "ecpp_close:B: received SIG\n");
                        /*
                         * Returning from a signal such as
                         * SIGTERM or SIGKILL
                         */
                        ecpp_flush(pp, FWRITE);
                        break;
                } else {
                        ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n");
                }
        }

        ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, "
            "qsize(WR(q))=%d, qsize(RD(q))=%d\n",
            pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q));

        /*
         * Cancel all timeouts, disable interrupts
         *
         * Note that we can't call untimeout(9F) with the mutex held:
         * the callout may be blocked on the same mutex, and untimeout()
         * will cv_wait() while the callout is executing, thus creating a
         * deadlock. So we zero the timeout ids inside the mutex and call
         * untimeout later
         */
        timeout_id = pp->timeout_id;
        fifo_timer_id = pp->fifo_timer_id;
        wsrv_timer_id = pp->wsrv_timer_id;

        pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0;

        pp->softintr_pending = 0;
        pp->dma_cancelled = TRUE;
        ECPP_MASK_INTR(pp);

        mutex_exit(&pp->umutex);

        qprocsoff(q);

        if (timeout_id) {
                (void) untimeout(timeout_id);
        }
        if (fifo_timer_id) {
                (void) untimeout(fifo_timer_id);
        }
        if (wsrv_timer_id) {
                (void) untimeout(wsrv_timer_id);
        }

        mutex_enter(&pp->umutex);

        /* set link to Compatible mode */
        if ((pp->current_mode == ECPP_ECP_MODE) &&
            (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
                (void) ecp_reverse2forward(pp);
        }

        (void) ecpp_1284_termination(pp);

        pp->oflag = FALSE;
        q->q_ptr = WR(q)->q_ptr = NULL;
        pp->readq = pp->writeq = NULL;
        pp->msg = NULL;

        ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n",
            ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));

        mutex_exit(&pp->umutex);

        return (0);
}

/*
 * standard put procedure for ecpp
 */
static int
ecpp_wput(queue_t *q, mblk_t *mp)
{
        struct msgb *nmp;
        struct ecppunit *pp;

        pp = (struct ecppunit *)q->q_ptr;

        if (!mp) {
                return (0);
        }

        if ((mp->b_wptr - mp->b_rptr) <= 0) {
                ecpp_error(pp->dip,
                    "ecpp_wput:bogus packet received mp=%p\n", (void *)mp);
                freemsg(mp);
                return (0);
        }

        switch (DB_TYPE(mp)) {
        case M_DATA:
                /*
                 * This is a quick fix for the multiple message block problem,
                 * it will be changed later with better performance code.
                 */
                if (mp->b_cont) {
                        /*
                         * mblk has scattered data ... do msgpullup
                         * if it fails, continue with the current mblk
                         */
                        if ((nmp = msgpullup(mp, -1)) != NULL) {
                                freemsg(mp);
                                mp = nmp;
                                ecpp_error(pp->dip,
                                    "ecpp_wput:msgpullup: mp=%p len=%d\n",
                                    mp, mp->b_wptr - mp->b_rptr);
                        }
                }

                /* let ecpp_wsrv() concatenate small blocks */
                (void) putq(q, mp);

                break;

        case M_CTL:
                (void) putq(q, mp);

                break;

        case M_IOCTL: {
                struct iocblk *iocbp;

                iocbp = (struct iocblk *)mp->b_rptr;

                ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd);

                mutex_enter(&pp->umutex);

                /* TESTIO and GET_STATUS can be used during transfer */
                if ((pp->e_busy == ECPP_BUSY) &&
                    (iocbp->ioc_cmd != BPPIOC_TESTIO) &&
                    (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) {
                        mutex_exit(&pp->umutex);
                        (void) putq(q, mp);
                } else {
                        mutex_exit(&pp->umutex);
                        ecpp_putioc(q, mp);
                }

                break;
        }

        case M_IOCDATA: {
                struct copyresp *csp;

                ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n");

                csp = (struct copyresp *)mp->b_rptr;

                /*
                 * If copy request failed, quit now
                 */
                if (csp->cp_rval != 0) {
                        freemsg(mp);
                        return (0);
                }

                switch (csp->cp_cmd) {
                case ECPPIOC_SETPARMS:
                case ECPPIOC_SETREGS:
                case ECPPIOC_SETPORT:
                case ECPPIOC_SETDATA:
                case PRNIOC_SET_IFCAP:
                case PRNIOC_SET_TIMEOUTS:
                        /*
                         * need to retrieve and use the data, but if the
                         * device is busy, wait.
                         */
                        (void) putq(q, mp);
                        break;

                case ECPPIOC_GETPARMS:
                case ECPPIOC_GETREGS:
                case ECPPIOC_GETPORT:
                case ECPPIOC_GETDATA:
                case BPPIOC_GETERR:
                case BPPIOC_TESTIO:
                case PRNIOC_GET_IFCAP:
                case PRNIOC_GET_STATUS:
                case PRNIOC_GET_1284_STATUS:
                case PRNIOC_GET_TIMEOUTS:
                        /* data transferred to user space okay */
                        ecpp_ack_ioctl(q, mp);
                        break;

                case ECPPIOC_GETDEVID:
                        ecpp_wput_iocdata_devid(q, mp,
                            offsetof(struct ecpp_device_id, rlen));
                        break;

                case PRNIOC_GET_1284_DEVID:
                        ecpp_wput_iocdata_devid(q, mp,
                            offsetof(struct prn_1284_device_id, id_rlen));
                        break;

                case PRNIOC_GET_IFINFO:
                        ecpp_wput_iocdata_devid(q, mp,
                            offsetof(struct prn_interface_info, if_rlen));
                        break;

                default:
                        ecpp_nack_ioctl(q, mp, EINVAL);
                        break;
                }

                break;
        }

        case M_FLUSH:
                ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n");

                if (*mp->b_rptr & FLUSHW) {
                        mutex_enter(&pp->umutex);
                        ecpp_flush(pp, FWRITE);
                        mutex_exit(&pp->umutex);
                }

                if (*mp->b_rptr & FLUSHR) {
                        mutex_enter(&pp->umutex);
                        ecpp_flush(pp, FREAD);
                        mutex_exit(&pp->umutex);
                        qreply(q, mp);
                } else {
                        freemsg(mp);
                }

                break;

        case M_READ:
                /*
                 * When the user calls read(2), an M_READ message is sent to
                 * us, whose data is the number of requested bytes.
                 * We add up user requests and use the resulting number
                 * to calculate the reverse transfer block size.
                 */
                mutex_enter(&pp->umutex);
                if (pp->e_busy == ECPP_IDLE) {
                        pp->nread += *(size_t *)mp->b_rptr;
                        ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread);
                        freemsg(mp);
                } else {
                        ecpp_error(pp->dip, "ecpp_wput: M_READ queueing");
                        (void) putq(q, mp);
                }
                mutex_exit(&pp->umutex);
                break;

        default:
                ecpp_error(pp->dip, "ecpp_wput: bad messagetype 0x%x\n",
                    DB_TYPE(mp));
                freemsg(mp);
                break;
        }

        return (0);
}

/*
 * Process ECPPIOC_GETDEVID-like ioctls
 */
static void
ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset)
{
        struct copyresp         *csp;
        struct ecpp_copystate   *stp;
        mblk_t                  *datamp;

        csp = (struct copyresp *)mp->b_rptr;
        stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;

        /* determine the state of copyin/copyout process */
        switch (stp->state) {
        case ECPP_STRUCTIN:
                /* user structure has arrived */
                (void) putq(q, mp);
                break;

        case ECPP_ADDROUT:
                /*
                 * data transferred to user space okay
                 * now update user structure
                 */
1443                 datamp = allocb(sizeof (int), BPRI_MED);
1444                 if (datamp == NULL) {
1445                         ecpp_nack_ioctl(q, mp, ENOSR);
1446                         break;
1447                 }
1448 
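		/* fetch the return-length value saved in the copystate union */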
1449                 *(int *)datamp->b_rptr =
1450                     *(int *)((char *)&stp->un + rlen_offset);
1451                 stp->state = ECPP_STRUCTOUT;
1452 
1453                 mcopyout(mp, csp->cp_private, sizeof (int),
1454                     (char *)stp->uaddr + rlen_offset, datamp);
1455                 qreply(q, mp);
1456                 break;
1457 
1458         case ECPP_STRUCTOUT:
1459                 /* user structure was updated okay */
1460                 freemsg(csp->cp_private);
1461                 ecpp_ack_ioctl(q, mp);
1462                 break;
1463 
1464         default:
1465                 ecpp_nack_ioctl(q, mp, EINVAL);
1466                 break;
1467         }
1468 }
1469 
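/*
 * Map a DSR snapshot onto the BPP-style pin-status flags returned by
 * BPPIOC_GETERR.  nERR, SLCT and nBUSY are active-low in the DSR, so
 * a cleared bit raises the corresponding error flag; PE is active-high.
 */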
1470 static uchar_t
1471 ecpp_get_error_status(uchar_t status)
1472 {
1473         uchar_t pin_status = 0;
1474 
1475         if (!(status & ECPP_nERR)) {
1476                 pin_status |= BPP_ERR_ERR;
1477         }
1478 
1479         if (status & ECPP_PE) {
1480                 pin_status |= BPP_PE_ERR;
1481         }
1482 
1483         if (!(status & ECPP_SLCT)) {
1484                 pin_status |= BPP_SLCT_ERR;
1485         }
1486 
1487         if (!(status & ECPP_nBUSY)) {
		pin_status |= BPP_BUSY_ERR;	/* busy pin active */
1489         }
1490 
1491         return (pin_status);
1492 }
1493 
1494 /*
1495  * ioctl handler for output PUT procedure.
1496  */
1497 static void
1498 ecpp_putioc(queue_t *q, mblk_t *mp)
1499 {
1500         struct iocblk   *iocbp;
1501         struct ecppunit *pp;
1502 
1503         pp = (struct ecppunit *)q->q_ptr;
1504 
1505         iocbp = (struct iocblk *)mp->b_rptr;
1506 
1507         /* I_STR ioctls are invalid */
1508         if (iocbp->ioc_count != TRANSPARENT) {
1509                 ecpp_nack_ioctl(q, mp, EINVAL);
1510                 return;
1511         }
1512 
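	/*
	 * All commands below are transparent ioctls: their arguments are
	 * exchanged via M_COPYIN/M_COPYOUT messages built by mcopyin()/
	 * mcopyout(), and the M_IOCDATA responses are completed in
	 * ecpp_wput()/ecpp_srvioc().  A minimal userland sketch
	 * (hypothetical application code, not part of this driver):
	 *
	 *	struct ecpp_transfer_parms p;
	 *
	 *	if (ioctl(fd, ECPPIOC_GETPARMS, &p) == -1)
	 *		err(1, "ECPPIOC_GETPARMS");
	 *	p.write_timeout = 60;	// seconds
	 *	if (ioctl(fd, ECPPIOC_SETPARMS, &p) == -1)
	 *		err(1, "ECPPIOC_SETPARMS");
	 */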
1513         switch (iocbp->ioc_cmd) {
1514         case ECPPIOC_SETPARMS: {
1515                 mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL);
1516                 qreply(q, mp);
1517                 break;
1518         }
1519 
1520         case ECPPIOC_GETPARMS: {
1521                 struct ecpp_transfer_parms xfer_parms;
1522 
1523                 mutex_enter(&pp->umutex);
1524 
1525                 pp->xfer_parms.mode = pp->current_mode;
1526                 xfer_parms = pp->xfer_parms;
1527 
1528                 mutex_exit(&pp->umutex);
1529 
1530                 ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms));
1531                 break;
1532         }
1533 
1534         case ECPPIOC_SETREGS: {
1535                 mutex_enter(&pp->umutex);
1536                 if (pp->current_mode != ECPP_DIAG_MODE) {
1537                         mutex_exit(&pp->umutex);
1538                         ecpp_nack_ioctl(q, mp, EINVAL);
1539                         break;
1540                 }
1541                 mutex_exit(&pp->umutex);
1542 
1543                 mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL);
1544                 qreply(q, mp);
1545                 break;
1546         }
1547 
1548         case ECPPIOC_GETREGS: {
1549                 struct ecpp_regs rg;
1550 
1551                 mutex_enter(&pp->umutex);
1552 
1553                 if (pp->current_mode != ECPP_DIAG_MODE) {
1554                         mutex_exit(&pp->umutex);
1555                         ecpp_nack_ioctl(q, mp, EINVAL);
1556                         break;
1557                 }
1558 
1559                 rg.dsr = DSR_READ(pp);
1560                 rg.dcr = DCR_READ(pp);
1561 
1562                 mutex_exit(&pp->umutex);
1563 
1564                 ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n",
1565                     rg.dsr, rg.dcr);
1566 
1567                 /* these bits must be 1 */
1568                 rg.dsr |= ECPP_SETREGS_DSR_MASK;
1569                 rg.dcr |= ECPP_SETREGS_DCR_MASK;
1570 
1571                 ecpp_putioc_copyout(q, mp, &rg, sizeof (rg));
1572                 break;
1573         }
1574 
1575         case ECPPIOC_SETPORT:
1576         case ECPPIOC_SETDATA: {
1577                 mutex_enter(&pp->umutex);
1578                 if (pp->current_mode != ECPP_DIAG_MODE) {
1579                         mutex_exit(&pp->umutex);
1580                         ecpp_nack_ioctl(q, mp, EINVAL);
1581                         break;
1582                 }
1583                 mutex_exit(&pp->umutex);
1584 
1585                 /*
1586                  * each of the commands fetches a byte quantity.
1587                  */
1588                 mcopyin(mp, NULL, sizeof (uchar_t), NULL);
1589                 qreply(q, mp);
1590                 break;
1591         }
1592 
1593         case ECPPIOC_GETDATA:
1594         case ECPPIOC_GETPORT: {
1595                 uchar_t byte;
1596 
1597                 mutex_enter(&pp->umutex);
1598 
1599                 /* must be in diagnostic mode for these commands to work */
1600                 if (pp->current_mode != ECPP_DIAG_MODE) {
1601                         mutex_exit(&pp->umutex);
1602                         ecpp_nack_ioctl(q, mp, EINVAL);
1603                         break;
1604                 }
1605 
1606                 if (iocbp->ioc_cmd == ECPPIOC_GETPORT) {
1607                         byte = pp->port;
1608                 } else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) {
1609                         switch (pp->port) {
1610                         case ECPP_PORT_PIO:
1611                                 byte = DATAR_READ(pp);
1612                                 break;
1613                         case ECPP_PORT_TDMA:
1614                                 byte = TFIFO_READ(pp);
1615                                 ecpp_error(pp->dip, "GETDATA=0x%x\n", byte);
1616                                 break;
			default:
				/* invalid port: don't fall through to copyout */
				mutex_exit(&pp->umutex);
				ecpp_nack_ioctl(q, mp, EINVAL);
				return;
1620                         }
1621                 } else {
1622                         mutex_exit(&pp->umutex);
1623                         ecpp_error(pp->dip, "weird command");
1624                         ecpp_nack_ioctl(q, mp, EINVAL);
1625                         break;
1626                 }
1627 
1628                 mutex_exit(&pp->umutex);
1629 
1630                 ecpp_putioc_copyout(q, mp, &byte, sizeof (byte));
1631 
1632                 break;
1633         }
1634 
1635         case BPPIOC_GETERR: {
1636                 struct bpp_error_status bpp_status;
1637 
1638                 mutex_enter(&pp->umutex);
1639 
1640                 bpp_status.timeout_occurred = pp->timeout_error;
1641                 bpp_status.bus_error = 0;       /* not used */
1642                 bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr);
1643 
1644                 mutex_exit(&pp->umutex);
1645 
1646                 ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status));
1647 
1648                 break;
1649         }
1650 
1651         case BPPIOC_TESTIO: {
1652                 mutex_enter(&pp->umutex);
1653 
1654                 if (!((pp->current_mode == ECPP_CENTRONICS) ||
1655                     (pp->current_mode == ECPP_COMPAT_MODE))) {
1656                         ecpp_nack_ioctl(q, mp, EINVAL);
1657                 } else {
1658                         pp->saved_dsr = DSR_READ(pp);
1659 
1660                         if ((pp->saved_dsr & ECPP_PE) ||
1661                             !(pp->saved_dsr & ECPP_SLCT) ||
1662                             !(pp->saved_dsr & ECPP_nERR)) {
1663                                 ecpp_nack_ioctl(q, mp, EIO);
1664                         } else {
1665                                 ecpp_ack_ioctl(q, mp);
1666                         }
1667                 }
1668 
1669                 mutex_exit(&pp->umutex);
1670 
1671                 break;
1672         }
1673 
1674         case PRNIOC_RESET:
1675                 /*
1676                  * Initialize interface only if no transfer is in progress
1677                  */
1678                 mutex_enter(&pp->umutex);
1679                 if (pp->e_busy == ECPP_BUSY) {
1680                         mutex_exit(&pp->umutex);
1681                         ecpp_nack_ioctl(q, mp, EIO);
1682                 } else {
1683                         (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
1684 
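			/* pulse nINIT low for ~2 us to reset the peripheral */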
1685                         DCR_WRITE(pp, ECPP_SLCTIN);
1686                         drv_usecwait(2);
1687                         DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
1688 
1689                         ecpp_default_negotiation(pp);
1690 
1691                         mutex_exit(&pp->umutex);
1692                         ecpp_ack_ioctl(q, mp);
1693                 }
1694                 break;
1695 
1696         case PRNIOC_GET_IFCAP: {
1697                 uint_t          ifcap;
1698 
1699                 mutex_enter(&pp->umutex);
1700 
1701                 ifcap = ecpp_get_prn_ifcap(pp);
1702 
1703                 mutex_exit(&pp->umutex);
1704 
1705                 ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap));
1706                 break;
1707         }
1708 
1709         case PRNIOC_SET_IFCAP: {
1710                 mcopyin(mp, NULL, sizeof (uint_t), NULL);
1711                 qreply(q, mp);
1712                 break;
1713         }
1714 
1715         case PRNIOC_GET_TIMEOUTS: {
1716                 struct prn_timeouts timeouts;
1717 
1718                 mutex_enter(&pp->umutex);
1719                 timeouts = pp->prn_timeouts;
1720                 mutex_exit(&pp->umutex);
1721 
1722                 ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts));
1723 
1724                 break;
1725         }
1726 
1727         case PRNIOC_SET_TIMEOUTS:
1728                 mcopyin(mp, NULL, sizeof (struct prn_timeouts),
1729                     *(caddr_t *)(void *)mp->b_cont->b_rptr);
1730                 qreply(q, mp);
1731                 break;
1732 
1733         case PRNIOC_GET_STATUS: {
1734                 uint8_t dsr;
1735                 uint_t  status;
1736 
1737                 mutex_enter(&pp->umutex);
1738 
1739                 /* DSR only makes sense in Centronics & Compat mode */
1740                 if (pp->current_mode == ECPP_CENTRONICS ||
1741                     pp->current_mode == ECPP_COMPAT_MODE) {
1742                         dsr = DSR_READ(pp);
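			/* paper out, offline or error => online but not ready */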
1743                         if ((dsr & ECPP_PE) ||
1744                             !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) {
1745                                 status = PRN_ONLINE;
1746                         } else {
1747                                 status = PRN_ONLINE | PRN_READY;
1748                         }
1749                 } else {
1750                         status = PRN_ONLINE | PRN_READY;
1751                 }
1752 
1753                 mutex_exit(&pp->umutex);
1754 
1755                 ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1756                 break;
1757         }
1758 
1759         case PRNIOC_GET_1284_STATUS: {
1760                 uint8_t dsr;
1761                 uchar_t status;
1762 
1763                 mutex_enter(&pp->umutex);
1764 
1765                 /* status only makes sense in Centronics & Compat mode */
1766                 if (pp->current_mode != ECPP_COMPAT_MODE &&
1767                     pp->current_mode != ECPP_CENTRONICS) {
1768                         mutex_exit(&pp->umutex);
1769                         ecpp_nack_ioctl(q, mp, EINVAL);
1770                         break;
1771                 }
1772 
1773                 dsr = DSR_READ(pp);             /* read status */
1774 
1775                 mutex_exit(&pp->umutex);
1776 
1777                 ecpp_error(pp->dip, "PRNIOC_GET_STATUS: %x\n", dsr);
1778 
1779                 status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) |
1780                     (~dsr & ECPP_nBUSY);
1781 
1782                 ecpp_putioc_copyout(q, mp, &status, sizeof (status));
1783                 break;
1784         }
1785 
1786         case ECPPIOC_GETDEVID:
1787                 ecpp_putioc_stateful_copyin(q, mp,
1788                     sizeof (struct ecpp_device_id));
1789                 break;
1790 
1791         case PRNIOC_GET_1284_DEVID:
1792                 ecpp_putioc_stateful_copyin(q, mp,
1793                     sizeof (struct prn_1284_device_id));
1794                 break;
1795 
1796         case PRNIOC_GET_IFINFO:
1797                 ecpp_putioc_stateful_copyin(q, mp,
1798                     sizeof (struct prn_interface_info));
1799                 break;
1800 
1801         default:
1802                 ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n",
1803                     iocbp->ioc_cmd);
1804                 ecpp_nack_ioctl(q, mp, EINVAL);
1805                 break;
1806         }
1807 }
1808 
1809 /*
1810  * allocate mblk and copyout the requested number of bytes
1811  */
1812 static void
1813 ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len)
1814 {
1815         mblk_t  *tmp;
1816 
1817         if ((tmp = allocb(len, BPRI_MED)) == NULL) {
1818                 ecpp_nack_ioctl(q, mp, ENOSR);
1819                 return;
1820         }
1821 
1822         bcopy(buf, tmp->b_wptr, len);
1823 
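	/* mcopyout() links tmp as b_cont and advances its b_wptr by len */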
1824         mcopyout(mp, NULL, len, NULL, tmp);
1825         qreply(q, mp);
1826 }
1827 
1828 /*
1829  * copyin the structure using struct ecpp_copystate
1830  */
1831 static void
1832 ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size)
1833 {
1834         mblk_t *tmp;
1835         struct ecpp_copystate *stp;
1836 
1837         if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) {
1838                 ecpp_nack_ioctl(q, mp, EAGAIN);
1839                 return;
1840         }
1841 
1842         stp = (struct ecpp_copystate *)tmp->b_rptr;
1843         stp->state = ECPP_STRUCTIN;
1844         stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr;
1845 
1846         tmp->b_wptr += sizeof (struct ecpp_copystate);
1847 
1848         mcopyin(mp, tmp, size, stp->uaddr);
1849         qreply(q, mp);
1850 }
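
/*
 * A minimal userland sketch of a device-ID query (hypothetical
 * application code, not part of this driver):
 *
 *	struct ecpp_device_id id;
 *	char buf[1024];
 *
 *	id.mode = ECPP_NIBBLE_MODE;
 *	id.len = sizeof (buf);
 *	id.addr = buf;
 *	if (ioctl(fd, ECPPIOC_GETDEVID, &id) == -1)
 *		err(1, "ECPPIOC_GETDEVID");
 *	printf("%.*s\n", id.rlen, buf);
 */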
1851 
1852 /*
 * the read queue is only used when the peripheral sends data faster
 * than the application consumes it;
1855  * once the low water mark is reached, this routine will be scheduled
1856  */
1857 static int
1858 ecpp_rsrv(queue_t *q)
1859 {
1860         struct msgb     *mp;
1861 
1862         /*
1863          * send data upstream until next queue is full or the queue is empty
1864          */
1865         while (canputnext(q) && (mp = getq(q))) {
1866                 putnext(q, mp);
1867         }
1868 
1869         /*
1870          * if there is still space on the queue, enable backchannel
1871          */
1872         if (canputnext(q)) {
1873                 struct ecppunit *pp = (struct ecppunit *)q->q_ptr;
1874 
1875                 mutex_enter(&pp->umutex);
1876 
1877                 if (pp->e_busy == ECPP_IDLE) {
1878                         (void) ecpp_idle_phase(pp);
1879                         cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
1880                 }
1881 
1882                 mutex_exit(&pp->umutex);
1883         }
1884 
1885         return (0);
1886 }
1887 
1888 static int
1889 ecpp_wsrv(queue_t *q)
1890 {
1891         struct ecppunit *pp = (struct ecppunit *)q->q_ptr;
1892         struct msgb     *mp;
1893         size_t          len, total_len;
1894         size_t          my_ioblock_sz;
1895         caddr_t         my_ioblock;
1896         caddr_t         start_addr;
1897 
1898         mutex_enter(&pp->umutex);
1899 
1900         ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy);
1901 
	/* if the channel is actively doing work, defer until it completes */
1903         if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) {
1904                 mutex_exit(&pp->umutex);
1905                 return (0);
1906         } else if (pp->suspended == TRUE) {
1907                 /*
1908                  * if the system is about to suspend and ecpp_detach()
1909                  * is blocked due to active transfers, wake it up and exit
1910                  */
1911                 cv_signal(&pp->pport_cv);
1912                 mutex_exit(&pp->umutex);
1913                 return (0);
1914         }
1915 
1916         /* peripheral status should be okay before starting transfer */
1917         if (pp->e_busy == ECPP_ERR) {
1918                 if (ecpp_check_status(pp) == FAILURE) {
1919                         if (pp->wsrv_timer_id == 0) {
				ecpp_error(pp->dip, "wsrv: start wsrv_timer\n");
1921                                 pp->wsrv_timer_id = timeout(ecpp_wsrv_timer,
1922                                     (caddr_t)pp,
1923                                     drv_usectohz(pp->wsrv_retry * 1000));
1924                         } else {
1925                                 ecpp_error(pp->dip,
				    "ecpp_wsrv: wsrv_timer is active\n");
1927                         }
1928 
1929                         mutex_exit(&pp->umutex);
1930                         return (0);
1931                 } else {
1932                         pp->e_busy = ECPP_IDLE;
1933                 }
1934         }
1935 
1936         my_ioblock = pp->ioblock;
1937         my_ioblock_sz = IO_BLOCK_SZ;
1938 
1939         /*
	 * it's important to null pp->msg here,
1941          * cleaning up from the previous transfer attempts
1942          */
1943         pp->msg = NULL;
1944 
1945         start_addr = NULL;
1946         len = total_len = 0;
	/*
	 * The following loop gathers the many small writes that the lp
	 * subsystem makes and compiles them into one large DMA transfer.
	 * The len and total_len variables are a running count of the
	 * number of bytes that have been gathered; the data is bcopied
	 * into the ioblock buffer.  pp->e_busy is set to ECPP_BUSY as
	 * soon as we start gathering packets, to indicate that a
	 * transfer is about to follow.
	 */
1956         while (mp = getq(q)) {
1957                 switch (DB_TYPE(mp)) {
1958                 case M_DATA:
1959                         pp->e_busy = ECPP_BUSY;
1960                         len = mp->b_wptr - mp->b_rptr;
1961 
1962                         if ((total_len == 0) && (len >= my_ioblock_sz)) {
1963                                 /*
1964                                  * if the first M_DATA is bigger than ioblock,
1965                                  * just use this mblk and start the transfer
1966                                  */
1967                                 total_len = len;
1968                                 start_addr = (caddr_t)mp->b_rptr;
1969                                 pp->msg = mp;
1970                                 goto breakout;
1971                         } else if (total_len + len > my_ioblock_sz) {
1972                                 /*
1973                                  * current M_DATA does not fit in ioblock,
1974                                  * put it back and start the transfer
1975                                  */
1976                                 (void) putbq(q, mp);
1977                                 goto breakout;
1978                         } else {
1979                                 /*
1980                                  * otherwise add data to ioblock and free mblk
1981                                  */
1982                                 bcopy(mp->b_rptr, my_ioblock, len);
1983                                 my_ioblock += len;
1984                                 total_len += len;
1985                                 start_addr = (caddr_t)pp->ioblock;
1986                                 freemsg(mp);
1987                         }
1988                         break;
1989 
1990                 case M_IOCTL:
1991                         /*
1992                          * Assume a simple loopback test: an application
1993                          * writes data into the TFIFO, reads it using
1994                          * ECPPIOC_GETDATA and compares. If the transfer
1995                          * times out (which is only possible on Grover),
1996                          * the ioctl might be processed before the data
1997                          * got to the TFIFO, which leads to miscompare.
			 * So if we meet an ioctl, postpone it until after the transfer.
1999                          */
2000                         if (total_len > 0) {
2001                                 (void) putbq(q, mp);
2002                                 goto breakout;
2003                         }
2004 
2005                         ecpp_error(pp->dip, "M_IOCTL.\n");
2006 
2007                         mutex_exit(&pp->umutex);
2008 
2009                         ecpp_putioc(q, mp);
2010 
2011                         mutex_enter(&pp->umutex);
2012 
2013                         break;
2014 
2015                 case M_IOCDATA: {
2016                         struct copyresp *csp = (struct copyresp *)mp->b_rptr;
2017 
2018                         ecpp_error(pp->dip, "M_IOCDATA\n");
2019 
2020                         /*
2021                          * If copy request failed, quit now
2022                          */
2023                         if (csp->cp_rval != 0) {
2024                                 freemsg(mp);
2025                                 break;
2026                         }
2027 
2028                         switch (csp->cp_cmd) {
2029                         case ECPPIOC_SETPARMS:
2030                         case ECPPIOC_SETREGS:
2031                         case ECPPIOC_SETPORT:
2032                         case ECPPIOC_SETDATA:
2033                         case ECPPIOC_GETDEVID:
2034                         case PRNIOC_SET_IFCAP:
2035                         case PRNIOC_GET_1284_DEVID:
2036                         case PRNIOC_SET_TIMEOUTS:
2037                         case PRNIOC_GET_IFINFO:
2038                                 ecpp_srvioc(q, mp);
2039                                 break;
2040 
2041                         default:
2042                                 ecpp_nack_ioctl(q, mp, EINVAL);
2043                                 break;
2044                         }
2045 
2046                         break;
2047                 }
2048 
2049                 case M_CTL:
2050                         if (pp->e_busy != ECPP_IDLE) {
2051                                 ecpp_error(pp->dip, "wsrv: M_CTL postponed\n");
2052                                 (void) putbq(q, mp);
2053                                 goto breakout;
2054                         } else {
2055                                 ecpp_error(pp->dip, "wsrv: M_CTL\n");
2056                         }
2057 
2058                         /* sanity check */
2059                         if ((mp->b_wptr - mp->b_rptr != sizeof (int)) ||
2060                             (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) {
2061                                 ecpp_error(pp->dip, "wsrv: bogus M_CTL");
2062                                 freemsg(mp);
2063                                 break;
2064                         } else {
2065                                 freemsg(mp);
2066                         }
2067 
2068                         /* This was a backchannel request */
2069                         (void) ecpp_peripheral2host(pp);
2070 
			/* exit if a transfer has been initiated */
2072                         if (pp->e_busy == ECPP_BUSY) {
2073                                 goto breakout;
2074                         }
2075                         break;
2076 
2077                 case M_READ:
2078                         pp->nread += *(size_t *)mp->b_rptr;
2079                         freemsg(mp);
2080                         ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread);
2081                         break;
2082 
2083                 default:
2084                         ecpp_error(pp->dip, "wsrv: should never get here\n");
2085                         freemsg(mp);
2086                         break;
2087                 }
2088         }
2089 breakout:
2090         /*
2091          * If total_len > 0 then start the transfer, otherwise goto idle state
2092          */
2093         if (total_len > 0) {
2094                 ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len);
2095                 pp->e_busy = ECPP_BUSY;
2096                 ecpp_start(pp, start_addr, total_len);
2097         } else {
2098                 ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy);
2099 
2100                 /* IDLE if xfer_timeout, or FIFO_EMPTY */
2101                 if (pp->e_busy == ECPP_IDLE) {
2102                         (void) ecpp_idle_phase(pp);
2103                         cv_signal(&pp->pport_cv);  /* signal ecpp_close() */
2104                 }
2105         }
2106 
2107         mutex_exit(&pp->umutex);
2108         return (1);
2109 }
2110 
2111 /*
2112  * Ioctl processor for queued ioctl data transfer messages.
2113  */
2114 static void
2115 ecpp_srvioc(queue_t *q, mblk_t *mp)
2116 {
2117         struct iocblk   *iocbp;
2118         struct ecppunit *pp;
2119 
2120         iocbp = (struct iocblk *)mp->b_rptr;
2121         pp = (struct ecppunit *)q->q_ptr;
2122 
2123         switch (iocbp->ioc_cmd) {
2124         case ECPPIOC_SETPARMS: {
2125                 struct ecpp_transfer_parms *xferp;
2126 
2127                 xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr;
2128 
2129                 if (xferp->write_timeout <= 0 ||
2130                     xferp->write_timeout >= ECPP_MAX_TIMEOUT) {
2131                         ecpp_nack_ioctl(q, mp, EINVAL);
2132                         break;
2133                 }
2134 
2135                 if (!((xferp->mode == ECPP_CENTRONICS) ||
2136                     (xferp->mode == ECPP_COMPAT_MODE) ||
2137                     (xferp->mode == ECPP_NIBBLE_MODE) ||
2138                     (xferp->mode == ECPP_ECP_MODE) ||
2139                     (xferp->mode == ECPP_DIAG_MODE))) {
2140                         ecpp_nack_ioctl(q, mp, EINVAL);
2141                         break;
2142                 }
2143 
2144                 pp->xfer_parms = *xferp;
2145                 pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout;
2146 
2147                 ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n",
2148                     pp->current_mode, pp->xfer_parms.mode);
2149 
2150                 if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) {
2151                         ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2152                 } else {
2153                         /*
			 * Mode negotiation was a success.  If Nibble mode,
			 * check the backchannel and set it into REVIDLE.
2156                          */
2157                         if ((pp->current_mode == ECPP_NIBBLE_MODE) &&
2158                             (read_nibble_backchan(pp) == FAILURE)) {
2159                                 /*
				 * problems reading the backchannel;
				 * we have returned to Centronics,
				 * so the ioctl fails.
2163                                  */
2164                                 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2165                                 break;
2166                         }
2167 
2168                         ecpp_ack_ioctl(q, mp);
2169                 }
2170                 if (pp->current_mode != ECPP_DIAG_MODE) {
2171                         pp->port = ECPP_PORT_DMA;
2172                 } else {
2173                         pp->port = ECPP_PORT_PIO;
2174                 }
2175 
2176                 pp->xfer_parms.mode = pp->current_mode;
2177 
2178                 break;
2179         }
2180 
2181         case ECPPIOC_SETREGS: {
2182                 struct ecpp_regs *rg;
2183                 uint8_t dcr;
2184 
2185                 rg = (struct ecpp_regs *)mp->b_cont->b_rptr;
2186 
2187                 /* must be in diagnostic mode for these commands to work */
2188                 if (pp->current_mode != ECPP_DIAG_MODE) {
2189                         ecpp_nack_ioctl(q, mp, EINVAL);
2190                         break;
2191                 }
2192 
2193                 /* bits 4-7 must be 1 or return EINVAL */
2194                 if ((rg->dcr & ECPP_SETREGS_DCR_MASK) !=
2195                     ECPP_SETREGS_DCR_MASK) {
2196                         ecpp_nack_ioctl(q, mp, EINVAL);
2197                         break;
2198                 }
2199 
		/* read the old dcr, forcing forward direction */
		dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
		/* keep the must-be-1 bits, take the rest from the user */
		dcr = (dcr & ECPP_SETREGS_DCR_MASK) |
		    (rg->dcr & ~ECPP_SETREGS_DCR_MASK);
2205                 DCR_WRITE(pp, dcr);
2206                 ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr);
2207                 ecpp_ack_ioctl(q, mp);
2208                 break;
2209         }
2210 
2211         case ECPPIOC_SETPORT: {
2212                 uchar_t *port;
2213 
2214                 port = (uchar_t *)mp->b_cont->b_rptr;
2215 
2216                 /* must be in diagnostic mode for these commands to work */
2217                 if (pp->current_mode != ECPP_DIAG_MODE) {
2218                         ecpp_nack_ioctl(q, mp, EINVAL);
2219                         break;
2220                 }
2221 
2222                 switch (*port) {
2223                 case ECPP_PORT_PIO:
2224                         /* put superio into PIO mode */
2225                         ECR_WRITE(pp,
2226                             ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2227                         pp->port = *port;
2228                         ecpp_ack_ioctl(q, mp);
2229                         break;
2230 
2231                 case ECPP_PORT_TDMA:
2232                         ecpp_error(pp->dip, "SETPORT: to TDMA\n");
2233                         pp->tfifo_intr = 1;
2234                         /* change to mode 110 */
2235                         ECR_WRITE(pp,
2236                             ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV);
2237                         pp->port = *port;
2238                         ecpp_ack_ioctl(q, mp);
2239                         break;
2240 
2241                 default:
2242                         ecpp_nack_ioctl(q, mp, EINVAL);
2243                 }
2244 
2245                 break;
2246         }
2247 
2248         case ECPPIOC_SETDATA: {
2249                 uchar_t *data;
2250 
2251                 data = (uchar_t *)mp->b_cont->b_rptr;
2252 
2253                 /* must be in diagnostic mode for these commands to work */
2254                 if (pp->current_mode != ECPP_DIAG_MODE) {
2255                         ecpp_nack_ioctl(q, mp, EINVAL);
2256                         break;
2257                 }
2258 
2259                 switch (pp->port) {
2260                 case ECPP_PORT_PIO:
2261                         DATAR_WRITE(pp, *data);
2262                         ecpp_ack_ioctl(q, mp);
2263                         break;
2264 
2265                 case ECPP_PORT_TDMA:
2266                         TFIFO_WRITE(pp, *data);
2267                         ecpp_ack_ioctl(q, mp);
2268                         break;
2269 
2270                 default:
2271                         ecpp_nack_ioctl(q, mp, EINVAL);
2272                 }
2273 
2274                 break;
2275         }
2276 
2277         case ECPPIOC_GETDEVID: {
2278                 struct copyresp         *csp;
2279                 struct ecpp_copystate   *stp;
2280                 struct ecpp_device_id   *dp;
2281                 struct ecpp_device_id   id;
2282 
2283                 csp = (struct copyresp *)mp->b_rptr;
2284                 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2285                 dp = (struct ecpp_device_id *)mp->b_cont->b_rptr;
2286 
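		/*
		 * For a 32-bit caller, convert ecpp_device_id32 into the
		 * native form; otherwise take the structure as is.
		 */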
2287 #ifdef _MULTI_DATAMODEL
2288                 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2289                         struct ecpp_device_id32 *dp32;
2290 
2291                         dp32 = (struct ecpp_device_id32 *)dp;
2292                         id.mode = dp32->mode;
2293                         id.len = dp32->len;
2294                         id.addr = (char *)(uintptr_t)dp32->addr;
2295                 } else {
2296 #endif /* _MULTI_DATAMODEL */
2297                         id = *dp;
2298 #ifdef _MULTI_DATAMODEL
2299                 }
2300 #endif /* _MULTI_DATAMODEL */
2301 
2302                 ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen);
2303                 break;
2304         }
2305 
2306         case PRNIOC_GET_1284_DEVID: {
2307                 struct copyresp                 *csp;
2308                 struct ecpp_copystate           *stp;
2309                 struct prn_1284_device_id       *dp;
2310                 struct ecpp_device_id           id;
2311 
2312                 csp = (struct copyresp *)mp->b_rptr;
2313                 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2314                 dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr;
2315 
2316                 /* imitate struct ecpp_device_id */
2317                 id.mode = ECPP_NIBBLE_MODE;
2318 
2319 #ifdef _MULTI_DATAMODEL
2320                 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2321                         struct prn_1284_device_id32 *dp32;
2322 
2323                         dp32 = (struct prn_1284_device_id32 *)dp;
2324                         id.len = dp32->id_len;
2325                         id.addr = (char *)(uintptr_t)dp32->id_data;
2326                 } else {
2327 #endif /* _MULTI_DATAMODEL */
2328                         id.len = dp->id_len;
2329                         id.addr = (char *)dp->id_data;
2330 #ifdef _MULTI_DATAMODEL
2331                 }
2332 #endif /* _MULTI_DATAMODEL */
2333 
2334                 ecpp_srvioc_devid(q, mp, &id,
2335                     (int *)&stp->un.prn_devid.id_rlen);
2336                 break;
2337         }
2338 
2339         case PRNIOC_SET_IFCAP: {
2340                 uint_t  ifcap, new_ifcap;
2341 
2342                 ifcap = ecpp_get_prn_ifcap(pp);
2343                 new_ifcap = *(uint_t *)mp->b_cont->b_rptr;
2344 
2345                 if (ifcap == new_ifcap) {
2346                         ecpp_ack_ioctl(q, mp);
2347                         break;
2348                 }
2349 
2350                 /* only changing PRN_BIDI is supported */
2351                 if ((ifcap ^ new_ifcap) & ~PRN_BIDI) {
2352                         ecpp_nack_ioctl(q, mp, EINVAL);
2353                         break;
2354                 }
2355 
2356                 if (new_ifcap & PRN_BIDI) {         /* go bidirectional */
2357                         ecpp_default_negotiation(pp);
2358                 } else {                        /* go unidirectional */
2359                         (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
2360                 }
2361 
2362                 ecpp_ack_ioctl(q, mp);
2363                 break;
2364         }
2365 
2366         case PRNIOC_SET_TIMEOUTS: {
2367                 struct prn_timeouts     *prn_timeouts;
2368 
2369                 prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr;
2370 
2371                 if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) {
2372                         ecpp_nack_ioctl(q, mp, EINVAL);
2373                         break;
2374                 }
2375 
2376                 pp->prn_timeouts = *prn_timeouts;
2377                 pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward;
2378 
2379                 ecpp_ack_ioctl(q, mp);
2380                 break;
2381         }
2382 
2383         case PRNIOC_GET_IFINFO:
2384                 ecpp_srvioc_prnif(q, mp);
2385                 break;
2386 
2387         default:                /* unexpected ioctl type */
2388                 ecpp_nack_ioctl(q, mp, EINVAL);
2389                 break;
2390         }
2391 }
2392 
2393 static void
2394 ecpp_srvioc_devid(queue_t *q, mblk_t *mp, struct ecpp_device_id *id, int *rlen)
2395 {
2396         struct ecppunit         *pp;
2397         struct copyresp         *csp;
2398         struct ecpp_copystate   *stp;
2399         int                     error;
2400         int                     len;
2401         int                     mode;
2402         mblk_t                  *datamp;
2403 
2404         pp = (struct ecppunit *)q->q_ptr;
2405         csp = (struct copyresp *)mp->b_rptr;
2406         stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2407         mode = id->mode;
2408 
2409         /* check arguments */
2410         if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) {
2411                 ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n",
2412                     mode, id->len);
2413                 ecpp_nack_ioctl(q, mp, EINVAL);
2414                 return;
2415         }
2416 
2417         /* Currently only Nibble mode is supported */
2418         if (mode != ECPP_NIBBLE_MODE) {
2419                 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT);
2420                 return;
2421         }
2422 
2423         if ((id->addr == NULL) && (id->len != 0)) {
2424                 ecpp_nack_ioctl(q, mp, EFAULT);
2425                 return;
2426         }
2427 
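	/*
	 * A 1284 device ID string is prefixed with a two-byte length
	 * field (which includes the two bytes themselves): the first
	 * pass reads just the length, the second reads the data.
	 */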
2428         /* read device ID length */
2429         if (error = ecpp_getdevid(pp, NULL, &len, mode)) {
2430                 ecpp_nack_ioctl(q, mp, error);
2431                 goto breakout;
2432         }
2433 
	/* don't count the two length bytes prefixing the ID string */
2435         len -= 2;
2436         *rlen = len;
2437 
2438         /* limit transfer to user buffer length */
2439         if (id->len < len) {
2440                 len = id->len;
2441         }
2442 
2443         if (len == 0) {
2444                 /* just return rlen */
2445                 stp->state = ECPP_ADDROUT;
2446                 ecpp_wput_iocdata_devid(q, mp,
2447                     (uintptr_t)rlen - (uintptr_t)&stp->un);
2448                 goto breakout;
2449         }
2450 
2451         if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2452                 ecpp_nack_ioctl(q, mp, ENOSR);
2453                 goto breakout;
2454         }
2455 
2456         /* read ID string */
2457         error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode);
2458         if (error) {
2459                 freemsg(datamp);
2460                 ecpp_nack_ioctl(q, mp, error);
2461                 goto breakout;
2462         } else {
2463                 datamp->b_wptr += len;
2464 
2465                 stp->state = ECPP_ADDROUT;
2466                 mcopyout(mp, csp->cp_private, len, id->addr, datamp);
2467                 qreply(q, mp);
2468         }
2469 
2470         return;
2471 
2472 breakout:
2473         (void) ecpp_1284_termination(pp);
2474 }
2475 
2476 /*
2477  * PRNIOC_GET_IFINFO: return prnio interface info string
2478  */
2479 static void
2480 ecpp_srvioc_prnif(queue_t *q, mblk_t *mp)
2481 {
2482         struct copyresp                 *csp;
2483         struct ecpp_copystate           *stp;
2484         uint_t                          len;
2485         struct prn_interface_info       *ip;
2486         struct prn_interface_info       info;
2487         mblk_t                          *datamp;
2488 #ifdef _MULTI_DATAMODEL
2489         struct iocblk           *iocbp = (struct iocblk *)mp->b_rptr;
2490 #endif
2491 
2492         csp = (struct copyresp *)mp->b_rptr;
2493         stp = (struct ecpp_copystate *)csp->cp_private->b_rptr;
2494         ip = (struct prn_interface_info *)mp->b_cont->b_rptr;
2495 
2496 #ifdef _MULTI_DATAMODEL
2497         if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) {
2498                 struct prn_interface_info32 *ip32;
2499 
2500                 ip32 = (struct prn_interface_info32 *)ip;
2501                 info.if_len = ip32->if_len;
2502                 info.if_data = (char *)(uintptr_t)ip32->if_data;
2503         } else {
2504 #endif /* _MULTI_DATAMODEL */
2505                 info = *ip;
2506 #ifdef _MULTI_DATAMODEL
2507         }
2508 #endif /* _MULTI_DATAMODEL */
2509 
2510         len = strlen(prn_ifinfo);
2511         stp->un.prn_if.if_rlen = len;
2512         stp->state = ECPP_ADDROUT;
2513 
2514         /* check arguments */
2515         if ((info.if_data == NULL) && (info.if_len != 0)) {
2516                 ecpp_nack_ioctl(q, mp, EFAULT);
2517                 return;
2518         }
2519 
2520         if (info.if_len == 0) {
2521                 /* just copyout rlen */
2522                 ecpp_wput_iocdata_devid(q, mp,
2523                     offsetof(struct prn_interface_info, if_rlen));
2524                 return;
2525         }
2526 
2527         /* if needed, trim to the buffer size */
2528         if (len > info.if_len) {
2529                 len = info.if_len;
2530         }
2531 
2532         if ((datamp = allocb(len, BPRI_MED)) == NULL) {
2533                 ecpp_nack_ioctl(q, mp, ENOSR);
2534                 return;
2535         }
2536 
2537         bcopy(&prn_ifinfo[0], datamp->b_wptr, len);
2538         datamp->b_wptr += len;
2539 
2540         mcopyout(mp, csp->cp_private, len, info.if_data, datamp);
2541         qreply(q, mp);
2542 }
2543 
2544 static void
2545 ecpp_flush(struct ecppunit *pp, int cmd)
2546 {
2547         queue_t         *q;
2548         uint8_t         ecr, dcr;
2549         timeout_id_t    timeout_id, fifo_timer_id, wsrv_timer_id;
2550 
2551         ASSERT(mutex_owned(&pp->umutex));
2552 
2553         if (!(cmd & FWRITE)) {
2554                 return;
2555         }
2556 
2557         q = pp->writeq;
2558         timeout_id = fifo_timer_id = wsrv_timer_id = 0;
2559 
2560         ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy);
2561 
2562         /* if there is an ongoing DMA, it needs to be turned off. */
2563         switch (pp->e_busy) {
2564         case ECPP_BUSY:
2565                 /*
2566                  * Change the port status to ECPP_FLUSH to
2567                  * indicate to ecpp_wsrv that the wq is being flushed.
2568                  */
2569                 pp->e_busy = ECPP_FLUSH;
2570 
2571                 /*
		 * dma_cancelled indicates to ecpp_isr() that we have
		 * turned off the DMA.  Since the mutex is held, ecpp_isr()
		 * may be blocked.  Once ecpp_flush() finishes and ecpp_isr()
		 * gains the mutex, it will find a DMAC that was reset
		 * _after_ the interrupt was raised.  The "dma_cancelled"
		 * flag signifies that this condition has occurred: if
		 * ecpp_isr() notes dma_cancelled, it will ignore the DMAC
		 * csr and simply claim the interrupt.
2581                  */
2582 
2583                 pp->dma_cancelled = TRUE;
2584 
2585                 /* either DMA or PIO transfer */
2586                 if (COMPAT_DMA(pp) ||
2587                     (pp->current_mode == ECPP_ECP_MODE) ||
2588                     (pp->current_mode == ECPP_DIAG_MODE)) {
2589                         /*
2590                          * if the bcr is zero, then DMA is complete and
2591                          * we are waiting for the fifo to drain.  Therefore,
2592                          * turn off dma.
2593                          */
2594                         if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
2595                                 ecpp_error(pp->dip,
2596                                     "ecpp_flush: dma_stop failed.\n");
2597                         }
2598 
2599                         /*
2600                          * If the status of the port is ECPP_BUSY,
			 * the DMA was stopped either explicitly above or by
			 * ecpp_isr(), but the FIFO hasn't drained yet.  In either
2603                          * case, we need to unbind the dma mappings.
2604                          */
2605                         if (ddi_dma_unbind_handle(
2606                             pp->dma_handle) != DDI_SUCCESS)
2607                                 ecpp_error(pp->dip,
2608                                     "ecpp_flush: unbind failed.\n");
2609 
2610                         if (pp->msg != NULL) {
2611                                 freemsg(pp->msg);
2612                                 pp->msg = NULL;
2613                         }
2614                 } else {
2615                         /*
			 * PIO transfer: disable nAck interrupts
2617                          */
2618                         dcr = DCR_READ(pp);
2619                         dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN);
2620                         DCR_WRITE(pp, dcr);
2621                         ECPP_MASK_INTR(pp);
2622                 }
2623 
2624                 /*
2625                  * The transfer is cleaned up.  There may or may not be data
		 * in the fifo; we don't care at this point, i.e. the SuperIO
		 * may or may not transfer the remaining fifo bytes.  All
		 * that is important at this stage is that no more
2629                  * fifo timers are started.
2630                  */
2631 
2632                 timeout_id = pp->timeout_id;
2633                 fifo_timer_id = pp->fifo_timer_id;
2634                 pp->timeout_id = pp->fifo_timer_id = 0;
2635                 pp->softintr_pending = 0;
2636 
2637                 break;
2638 
2639         case ECPP_ERR:
2640                 /*
2641                  * Change the port status to ECPP_FLUSH to
2642                  * indicate to ecpp_wsrv that the wq is being flushed.
2643                  */
2644                 pp->e_busy = ECPP_FLUSH;
2645 
2646                 /*
2647                  *  Most likely there are mblks in the queue,
		 *  but the driver cannot transmit because
2649                  *  of the bad port status.  In this case,
2650                  *  ecpp_flush() should make sure ecpp_wsrv_timer()
2651                  *  is turned off.
2652                  */
2653                 wsrv_timer_id = pp->wsrv_timer_id;
2654                 pp->wsrv_timer_id = 0;
2655 
2656                 break;
2657 
2658         case ECPP_IDLE:
2659                 /* No work to do. Ready to flush */
2660                 break;
2661 
2662         default:
2663                 ecpp_error(pp->dip,
2664                     "ecpp_flush: illegal state %x\n", pp->e_busy);
2665         }
2666 
2667         /* in DIAG mode clear TFIFO if needed */
2668         if (pp->current_mode == ECPP_DIAG_MODE) {
2669                 ecr = ECR_READ(pp);
2670                 if (!(ecr & ECPP_FIFO_EMPTY)) {
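			/* a pass through mode 001 resets the FIFO */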
2671                         ECR_WRITE(pp,
2672                             ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2673                         ECR_WRITE(pp, ecr);
2674                 }
2675         }
2676 
2677         /* Discard all messages on the output queue. */
2678         flushq(q, FLUSHDATA);
2679 
2680         /* The port is no longer flushing or dma'ing for that matter. */
2681         pp->e_busy = ECPP_IDLE;
2682 
2683         /* Set the right phase */
2684         if (pp->current_mode == ECPP_ECP_MODE) {
2685                 if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
2686                         pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
2687                 } else {
2688                         pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
2689                 }
2690         }
2691 
	/*
	 * Cancel pending timeouts, if any.  Drop the mutex first:
	 * untimeout() waits for the timeout handler to complete,
	 * which could deadlock if the handler blocks on this mutex.
	 */
2693         mutex_exit(&pp->umutex);
2694 
2695         if (timeout_id) {
2696                 (void) untimeout(timeout_id);
2697         }
2698         if (fifo_timer_id) {
2699                 (void) untimeout(fifo_timer_id);
2700         }
2701         if (wsrv_timer_id) {
2702                 (void) untimeout(wsrv_timer_id);
2703         }
2704 
2705         mutex_enter(&pp->umutex);
2706 
2707         cv_signal(&pp->pport_cv);        /* wake up ecpp_close() */
2708 }
2709 
2710 static void
2711 ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len)
2712 {
2713         ASSERT(mutex_owned(&pp->umutex));
2714         ASSERT(pp->e_busy == ECPP_BUSY);
2715 
2716         ecpp_error(pp->dip,
2717             "ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n",
2718             pp->current_mode, pp->current_phase, ECR_READ(pp), len);
2719 
2720         pp->dma_dir = DDI_DMA_WRITE; /* this is a forward transfer */
2721 
2722         switch (pp->current_mode) {
2723         case ECPP_NIBBLE_MODE:
2724                 (void) ecpp_1284_termination(pp);
2725 
2726                 /* After termination we are either Compatible or Centronics */
2727 
2728                 /* FALLTHRU */
2729 
2730         case ECPP_CENTRONICS:
2731         case ECPP_COMPAT_MODE:
2732                 if (pp->io_mode == ECPP_DMA) {
2733                         if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2734                                 return;
2735                         }
2736                 } else {
2737                         /* PIO mode */
2738                         if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) {
2739                                 return;
2740                         }
2741                         (void) ecpp_pio_writeb(pp);
2742                 }
2743                 break;
2744 
2745         case ECPP_DIAG_MODE: {
2746                 int     oldlen;
2747 
2748                 /* put superio into TFIFO mode, if not already */
2749                 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2750                 /*
		 * DMA would block if the TFIFO is not empty;
		 * if by this moment nobody has read these bytes, they're gone
2753                  */
2754                 drv_usecwait(1);
2755                 if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
2756                         ecpp_error(pp->dip,
2757                             "ecpp_start: TFIFO not empty, clearing\n");
2758                         ECR_WRITE(pp,
2759                             ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
2760                         ECR_WRITE(pp,
2761                             ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110);
2762                 }
2763 
2764                 /* we can DMA at most 16 bytes into TFIFO */
2765                 oldlen = len;
2766                 if (len > ECPP_FIFO_SZ) {
2767                         len = ECPP_FIFO_SZ;
2768                 }
2769 
2770                 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2771                         return;
2772                 }
2773 
2774                 /* put the rest of data back on the queue */
2775                 if (oldlen > len) {
2776                         ecpp_putback_untransfered(pp, addr + len, oldlen - len);
2777                 }
2778 
2779                 break;
2780         }
2781 
2782         case ECPP_ECP_MODE:
2783                 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
2784                     pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
2785 
2786                 /* if in Reverse Phase negotiate to Forward */
2787                 if (pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) {
2788                         if (ecp_reverse2forward(pp) == FAILURE) {
2789                                 if (pp->msg) {
2790                                         (void) putbq(pp->writeq, pp->msg);
2791                                 } else {
2792                                         ecpp_putback_untransfered(pp,
2793                                             addr, len);
2794                                 }
2795                         }
2796                 }
2797 
2798                 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) {
2799                         return;
2800                 }
2801 
2802                 break;
2803         }
2804 
	/* schedule transfer timeout (write_timeout is in seconds) */
2806         pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp,
2807             pp->xfer_parms.write_timeout * drv_usectohz(1000000));
2808 }
2809 
2810 /*
2811  * Transfer a PIO "block" a byte at a time.
 * The block starts at addr and ends at pp->last_byte
2813  */
2814 static uint8_t
2815 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2816 {
2817         pp->next_byte = addr;
2818         pp->last_byte = (caddr_t)((ulong_t)addr + len);
2819 
2820         if (ecpp_check_status(pp) == FAILURE) {
2821                 /*
2822                  * if status signals are bad, do not start PIO,
2823                  * put everything back on the queue.
2824                  */
2825                 ecpp_error(pp->dip,
2826                     "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len);
2827 
2828                 if (pp->msg != NULL) {
2829                         /*
			 * In this circumstance we want to copy the
			 * untransferred section of msg to a new mblk,
			 * then free the original one.
2833                          */
2834                         ecpp_putback_untransfered(pp,
2835                             (void *)pp->msg->b_rptr, len);
2836                         ecpp_error(pp->dip,
2837                             "ecpp_prep_pio_xfer: len1=%d\n", len);
2838 
2839                         freemsg(pp->msg);
2840                         pp->msg = NULL;
2841                 } else {
2842                         ecpp_putback_untransfered(pp, pp->ioblock, len);
2843                         ecpp_error(pp->dip,
2844                             "ecpp_prep_pio_xfer: len2=%d\n", len);
2845                 }
2846                 qenable(pp->writeq);
2847 
2848                 return (FAILURE);
2849         }
2850 
2851         pp->dma_cancelled = FALSE;
2852 
2853         /* pport must be in PIO mode */
2854         if (ecr_write(pp, ECR_mode_001 |
2855             ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) {
2856                 ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n");
2857         }
2858 
2859         ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n",
2860             DCR_READ(pp), ECR_READ(pp));
2861 
2862         return (SUCCESS);
2863 }
2864 
2865 static uint8_t
2866 ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len)
2867 {
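	/* ECR mode to program for DMA, indexed by pp->current_mode */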
2868         uint8_t ecr_mode[] = {
2869                 0,
		ECR_mode_010,	/* Centronics */
2871                 ECR_mode_010,   /* Compat */
2872                 0,              /* Byte */
2873                 0,              /* Nibble */
2874                 ECR_mode_011,   /* ECP */
2875                 0,              /* Failure */
2876                 ECR_mode_110,   /* Diag */
2877         };
2878         uint8_t ecr;
2879 
2880         ASSERT((pp->current_mode <= ECPP_DIAG_MODE) &&
2881             (ecr_mode[pp->current_mode] != 0));
2882 
2883         if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) {
2884                 qenable(pp->writeq);
2885                 return (FAILURE);
2886         }
2887 
2888         if (ecpp_check_status(pp) == FAILURE) {
2889                 /*
2890                  * if status signals are bad, do not start DMA, but
2891                  * rather put everything back on the queue.
2892                  */
2893                 ecpp_error(pp->dip,
2894                     "ecpp_init_dma_xfer: suspending DMA len=%d\n",
2895                     pp->dma_cookie.dmac_size);
2896 
2897                 if (pp->msg != NULL) {
2898                         /*
			 * In this circumstance we want to copy the
			 * untransferred section of msg to a new mblk,
			 * then free the original one.
2902                          */
2903                         ecpp_putback_untransfered(pp,
2904                             (void *)pp->msg->b_rptr, len);
2905                         ecpp_error(pp->dip,
2906                             "ecpp_init_dma_xfer:a:len=%d\n", len);
2907 
2908                         freemsg(pp->msg);
2909                         pp->msg = NULL;
2910                 } else {
2911                         ecpp_putback_untransfered(pp, pp->ioblock, len);
2912                         ecpp_error(pp->dip,
2913                             "ecpp_init_dma_xfer:b:len=%d\n", len);
2914                 }
2915 
2916                 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
2917                         ecpp_error(pp->dip,
2918                             "ecpp_init_dma_xfer: unbind FAILURE.\n");
2919                 }
2920                 qenable(pp->writeq);
2921                 return (FAILURE);
2922         }
2923 
2924         pp->xfercnt = pp->resid = len;
2925         pp->dma_cancelled = FALSE;
2926         pp->tfifo_intr = 0;
2927 
2928         /* set the right ECR mode and disable DMA */
2929         ecr = ecr_mode[pp->current_mode];
2930         (void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK);
2931 
2932         /* prepare DMAC for a transfer */
2933         if (ECPP_DMA_START(pp) == FAILURE) {
2934                 ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n");
2935                 return (FAILURE);
2936         }
2937 
2938         /* GO! */
2939         (void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK);
2940 
2941         return (SUCCESS);
2942 }
2943 
2944 static uint8_t
2945 ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len)
2946 {
2947         int     err;
2948         off_t   woff;
2949         size_t  wlen;
2950 
2951         ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE);
2952 
2953         err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL,
2954             addr, len, pp->dma_dir | DDI_DMA_PARTIAL,
2955             DDI_DMA_DONTWAIT, NULL,
2956             &pp->dma_cookie, &pp->dma_cookie_count);
2957 
2958         switch (err) {
2959         case DDI_DMA_MAPPED:
2960                 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n");
2961 
2962                 pp->dma_nwin = 1;
2963                 pp->dma_curwin = 1;
2964                 break;
2965 
2966         case DDI_DMA_PARTIAL_MAP: {
2967                 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n");
2968 
2969                 if (ddi_dma_numwin(pp->dma_handle,
2970                     &pp->dma_nwin) != DDI_SUCCESS) {
2971                         (void) ddi_dma_unbind_handle(pp->dma_handle);
2972                         return (FAILURE);
2973                 }
2974                 pp->dma_curwin = 1;
2975 
2976                 /*
2977                  * The very first window is returned by bind_handle,
2978                  * but we must fetch it explicitly here, otherwise
2979                  * the next getwin would return a wrong cookie dmac_size
2980                  */
2981                 if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen,
2982                     &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) {
2983                         ecpp_error(pp->dip,
2984                             "ecpp_setup_dma: ddi_dma_getwin failed!");
2985                         (void) ddi_dma_unbind_handle(pp->dma_handle);
2986                         return (FAILURE);
2987                 }
2988 
2989                 ecpp_error(pp->dip,
2990                     "ecpp_setup_dma: cookies=%d, windows=%d"
2991                     " addr=%lx len=%d\n",
2992                     pp->dma_cookie_count, pp->dma_nwin,
2993                     pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size);
2994 
2995                 break;
2996         }
2997 
2998         default:
2999                 ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err);
3000                 return (FAILURE);
3001         }
3002 
3003         return (SUCCESS);
3004 }
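
     /*
      * The resources bound above are consumed cookie-by-cookie and
      * window-by-window: the first cookie programs the DMAC, and
      * ecpp_fifo_timer() advances to the next cookie/window as each
      * transfer completes.  A minimal sketch of that generic DDI
      * pattern (illustrative only; program_dma() is a hypothetical
      * stand-in for ECPP_DMA_START and the ECR writes):
      *
      *      for (win = 0; win < nwin; win++) {
      *              if (win > 0 && ddi_dma_getwin(h, win, &off, &len,
      *                  &cookie, &ccount) != DDI_SUCCESS)
      *                      break;
      *              for (i = 0; i < ccount; i++) {
      *                      program_dma(cookie.dmac_address,
      *                          cookie.dmac_size);
      *                      if (i + 1 < ccount)
      *                              ddi_dma_nextcookie(h, &cookie);
      *              }
      *      }
      *      (void) ddi_dma_unbind_handle(h);
      */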
3005 
3006 static void
3007 ecpp_ack_ioctl(queue_t *q, mblk_t *mp)
3008 {
3009         struct iocblk  *iocbp;
3010 
3011         mp->b_datap->db_type = M_IOCACK;
3012         mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3013 
3014         if (mp->b_cont) {
3015                 freemsg(mp->b_cont);
3016                 mp->b_cont = NULL;
3017         }
3018 
3019         iocbp = (struct iocblk *)mp->b_rptr;
3020         iocbp->ioc_error = 0;
3021         iocbp->ioc_count = 0;
3022         iocbp->ioc_rval = 0;
3023 
3024         qreply(q, mp);
3025 }
3026 
3027 static void
3028 ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err)
3029 {
3030         struct iocblk  *iocbp;
3031 
3032         mp->b_datap->db_type = M_IOCNAK;
3033         mp->b_wptr = mp->b_rptr + sizeof (struct iocblk);
3034         iocbp = (struct iocblk *)mp->b_rptr;
3035         iocbp->ioc_error = err;
3036 
3037         if (mp->b_cont) {
3038                 freemsg(mp->b_cont);
3039                 mp->b_cont = NULL;
3040         }
3041 
3042         qreply(q, mp);
3043 }
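
     /*
      * These two helpers complete an M_IOCTL message in place:
      * ecpp_ack_ioctl() turns it into a dataless M_IOCACK (note that
      * any b_cont payload is freed and ioc_count is zeroed), while
      * ecpp_nack_ioctl() turns it into an M_IOCNAK carrying the error
      * code.  An illustrative sketch of a consumer (the ioctl command
      * and worker function are hypothetical):
      *
      *      case ECPP_SOME_SIMPLE_IOCTL:
      *              if (do_the_work(pp) == SUCCESS)
      *                      ecpp_ack_ioctl(q, mp);
      *              else
      *                      ecpp_nack_ioctl(q, mp, EIO);
      *              break;
      */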
3044 
3045 uint_t
3046 ecpp_isr(caddr_t arg)
3047 {
3048         struct ecppunit *pp = (struct ecppunit *)(void *)arg;
3049         uint32_t        dcsr;
3050         uint8_t         dsr;
3051         int             cheerio_pend_counter;
3052         int             retval = DDI_INTR_UNCLAIMED;
3053         hrtime_t        now;
3054 
3055         mutex_enter(&pp->umutex);
3056         /*
3057          * an interrupt may occur while another thread holds the lock
3058          * and cancels the DMA transfer (e.g. ecpp_flush());
3059          * since that thread cannot cancel the interrupt thread,
3060          * it just sets dma_cancelled to TRUE,
3061          * telling the interrupt handler to exit immediately
3062          */
3063         if (pp->dma_cancelled == TRUE) {
3064                 ecpp_error(pp->dip, "dma-cancel isr\n");
3065 
3066                 pp->intr_hard++;
3067                 pp->dma_cancelled = FALSE;
3068 
3069                 mutex_exit(&pp->umutex);
3070                 return (DDI_INTR_CLAIMED);
3071         }
3072 
3073         /* Southbridge interrupts are handled separately */
3074 #if defined(__x86)
3075         if (pp->hw == &x86)
3076 #else
3077         if (pp->hw == &m1553)
3078 #endif
3079         {
3080                 retval = ecpp_M1553_intr(pp);
3081                 if (retval == DDI_INTR_UNCLAIMED) {
3082                         goto unexpected;
3083                 }
3084                 mutex_exit(&pp->umutex);
3085                 return (DDI_INTR_CLAIMED);
3086         }
3087 
3088         /*
3089          * the intr arrives via the motherboard, which is faster than the
3090          * PCI route; ecpp_isr() may thus run before the cheerio csr updates.
3091          */
3092         cheerio_pend_counter = ecpp_isr_max_delay;
3093         dcsr = GET_DMAC_CSR(pp);
3094 
3095         while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) {
3096                 drv_usecwait(1);
3097                 dcsr = GET_DMAC_CSR(pp);
3098         }
3099 
3100         /*
3101          * This is a workaround for what seems to be a timing problem
3102          * with the delivery of interrupts and CSR updating with the
3103          * ebus2 csr, superio and the n_ERR pin from the peripheral.
3104          *
3105          * the delay is not needed for PIO mode
3106          */
3107         if (!COMPAT_PIO(pp)) {
3108                 drv_usecwait(100);
3109                 dcsr = GET_DMAC_CSR(pp);
3110         }
3111 
3112         /* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */
3113         dsr = DSR_READ(pp);
3114 
3115         /*
3116          * check if interrupt is for this device:
3117          * it should be reflected either in cheerio DCSR register
3118          * or in IRQ_ST bit of DSR on 97317
3119          */
3120         if ((dcsr & DCSR_INT_PEND) == 0) {
3121                 if (pp->hw != &pc97317) {
3122                         goto unclaimed;
3123                 }
3124                 /*
3125                  * on Excalibur, reading DSR deasserts the SuperIO IRQx line;
3126                  * RIO's DCSR_INT_PEND seems to follow IRQx transitions,
3127                  * so if DSR is read after the interrupt occurred, but before
3128                  * we get here, IRQx and hence INT_PEND will be deasserted;
3129                  * as a result, we can miss a service interrupt in PIO mode
3130                  *
3131                  * a malicious DSR reader is BPPIOC_TESTIO, which is called
3132                  * by LP in between data blocks to check printer status;
3133                  * this workaround keeps us from missing such an interrupt
3134                  *
3135                  * also, the nErr interrupt (ECP mode) may not show in DCSR
3136                  */
3137                 if (((dsr & ECPP_IRQ_ST) == 0) ||
3138                     ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) ||
3139                     (((dsr & ECPP_nERR) == 0) &&
3140                     (pp->current_mode == ECPP_ECP_MODE))) {
3141                         dcsr = 0;
3142                 } else {
3143                         goto unclaimed;
3144                 }
3145         }
3146 
3147         pp->intr_hard++;
3148 
3149         /* the intr is for us - check all possible interrupt sources */
3150         if (dcsr & DCSR_ERR_PEND) {
3151                 size_t  bcr;
3152 
3153                 /* we are expecting a data transfer interrupt */
3154                 ASSERT(pp->e_busy == ECPP_BUSY);
3155 
3156                 /*
3157                  * some kind of DMA error
3158                  */
3159                 if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) {
3160                         ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n");
3161                 }
3162 
3163                 ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr);
3164 
3165                 ecpp_xfer_cleanup(pp);
3166 
3167                 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3168                         ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n");
3169                 }
3170 
3171                 mutex_exit(&pp->umutex);
3172                 return (DDI_INTR_CLAIMED);
3173         }
3174 
3175         if (dcsr & DCSR_TC) {
3176                 retval = ecpp_dma_ihdlr(pp);
3177                 mutex_exit(&pp->umutex);
3178                 return (DDI_INTR_CLAIMED);
3179         }
3180 
3181         if (COMPAT_PIO(pp)) {
3182                 retval = ecpp_pio_ihdlr(pp);
3183                 mutex_exit(&pp->umutex);
3184                 return (DDI_INTR_CLAIMED);
3185         }
3186 
3187         /* does peripheral need attention? */
3188         if ((dsr & ECPP_nERR) == 0) {
3189                 retval = ecpp_nErr_ihdlr(pp);
3190                 mutex_exit(&pp->umutex);
3191                 return (DDI_INTR_CLAIMED);
3192         }
3193 
3194         pp->intr_hard--;
3195 
3196 unexpected:
3197 
3198         pp->intr_spurious++;
3199 
3200         /*
3201          * The following procedure tries to prevent soft hangs
3202          * in the event of peripheral/superio misbehaviour:
3203          * if the number of unexpected interrupts in the last SPUR_PERIOD ns
3204          * exceeds SPUR_CRITICAL, then mask interrupts
3205          */
3206         now = gethrtime();
3207         if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) {
3208                 /* last unexpected interrupt was long ago */
3209                 pp->lastspur = now;
3210                 pp->nspur = 1;
3211         } else {
3212                 /* last unexpected interrupt was recently */
3213                 pp->nspur++;
3214         }
3215 
3216         if (pp->nspur >= SPUR_CRITICAL) {
3217                 ECPP_MASK_INTR(pp);
3218                 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV);
3219                 pp->nspur = 0;
3220                 cmn_err(CE_NOTE, "%s%d: too many interrupt requests",
3221                     ddi_get_name(pp->dip), ddi_get_instance(pp->dip));
3222         } else {
3223                 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK);
3224         }
3225 
3226         ecpp_error(pp->dip,
3227             "isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3228             dcsr, ECR_READ(pp), dsr, DCR_READ(pp),
3229             pp->current_mode, pp->current_phase);
3230 
3231         mutex_exit(&pp->umutex);
3232         return (DDI_INTR_CLAIMED);
3233 
3234 unclaimed:
3235 
3236         pp->intr_spurious++;
3237 
3238         ecpp_error(pp->dip,
3239             "isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n",
3240             dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp),
3241             pp->current_mode, pp->current_phase);
3242 
3243         mutex_exit(&pp->umutex);
3244         return (DDI_INTR_UNCLAIMED);
3245 }
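
     /*
      * Worked example of the spurious-interrupt throttle above (the
      * real SPUR_PERIOD and SPUR_CRITICAL values are defined elsewhere
      * in the driver; the numbers here are illustrative only): with
      * SPUR_PERIOD = 10^9 ns and SPUR_CRITICAL = 100, a burst of 100
      * unexpected interrupts within one second masks further
      * interrupts, while 99 interrupts followed by a quiet second
      * merely reset nspur to 1 on the next occurrence.
      */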
3246 
3247 /*
3248  * M1553 (southbridge) intr handler; also used on x86
3249  */
3250 static uint_t
3251 ecpp_M1553_intr(struct ecppunit *pp)
3252 {
3253         int retval = DDI_INTR_UNCLAIMED;
3254 
3255         pp->intr_hard++;
3256 
3257         if (pp->e_busy == ECPP_BUSY) {
3258                 /* Centronics or Compat PIO transfer */
3259                 if (COMPAT_PIO(pp)) {
3260                         return (ecpp_pio_ihdlr(pp));
3261                 }
3262 
3263                 /* Centronics or Compat DMA transfer */
3264                 if (COMPAT_DMA(pp) ||
3265                     (pp->current_mode == ECPP_ECP_MODE) ||
3266                     (pp->current_mode == ECPP_DIAG_MODE)) {
3267                         return (ecpp_dma_ihdlr(pp));
3268                 }
3269         }
3270 
3271         /* Nibble or ECP backchannel request? */
3272         if ((DSR_READ(pp) & ECPP_nERR) == 0) {
3273                 return (ecpp_nErr_ihdlr(pp));
3274         }
3275 
3276         return (retval);
3277 }
3278 
3279 /*
3280  * DMA completion interrupt handler
3281  */
3282 static uint_t
3283 ecpp_dma_ihdlr(struct ecppunit *pp)
3284 {
3285         clock_t tm;
3286 
3287         ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n",
3288             pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
3289 
3290         /* we are expecting a data transfer interrupt */
3291         ASSERT(pp->e_busy == ECPP_BUSY);
3292 
3293         /* Intr generated while invoking TFIFO mode. Exit */
3294         if (pp->tfifo_intr == 1) {
3295                 pp->tfifo_intr = 0;
3296                 ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n");
3297                 return (DDI_INTR_CLAIMED);
3298         }
3299 
3300         if (ECPP_DMA_STOP(pp, NULL) == FAILURE) {
3301                 ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n");
3302         }
3303 
3304         if (pp->current_mode == ECPP_ECP_MODE &&
3305             pp->current_phase == ECPP_PHASE_ECP_REV_XFER) {
3306                 ecpp_ecp_read_completion(pp);
3307         } else {
3308                 /*
3309                  * fifo_timer() will do the cleanup when the FIFO drains
3310                  */
3311                 if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) ||
3312                     (pp->current_mode == ECPP_DIAG_MODE)) {
3313                         tm = 0; /* no use in waiting if FIFO is already empty */
3314                 } else {
3315                         tm = drv_usectohz(FIFO_DRAIN_PERIOD);
3316                 }
3317                 pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm);
3318         }
3319 
3320         /*
3321          * Stop the DMA transfer timeout timer;
3322          * this operation will temporarily give up the mutex,
3323          * so we do it at the end of the handler to avoid races
3324          */
3325         ecpp_untimeout_unblock(pp, &pp->timeout_id);
3326 
3327         return (DDI_INTR_CLAIMED);
3328 }
3329 
3330 /*
3331  * ecpp_pio_ihdlr() is the PIO interrupt processing routine;
3332  * it masks interrupts, updates statistics and starts the next byte transfer
3333  */
3334 static uint_t
3335 ecpp_pio_ihdlr(struct ecppunit *pp)
3336 {
3337         ASSERT(mutex_owned(&pp->umutex));
3338         ASSERT(pp->e_busy == ECPP_BUSY);
3339 
3340         /* update statistics */
3341         pp->joblen++;
3342         pp->ctxpio_obytes++;
3343 
3344         /* disable nAck interrupts */
3345         ECPP_MASK_INTR(pp);
3346         DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3347 
3348         /*
3349          * If it was the last byte of the data block, clean up;
3350          * otherwise trigger a soft interrupt to send the next byte
3351          */
3352         if (pp->next_byte >= pp->last_byte) {
3353                 ecpp_xfer_cleanup(pp);
3354                 ecpp_error(pp->dip,
3355                     "ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n",
3356                     pp->joblen, pp->ctx_cf);
3357         } else {
3358                 if (pp->softintr_pending) {
3359                         ecpp_error(pp->dip,
3360                             "ecpp_pio_ihdlr:E: next byte in progress\n");
3361                 } else {
3362                         pp->softintr_flags = ECPP_SOFTINTR_PIONEXT;
3363                         pp->softintr_pending = 1;
3364                         ddi_trigger_softintr(pp->softintr_id);
3365                 }
3366         }
3367 
3368         return (DDI_INTR_CLAIMED);
3369 }
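
     /*
      * Note that the next byte is sent from a soft interrupt rather
      * than from the hard interrupt handler above, presumably because
      * ecpp_softintr() may poll the peripheral status for up to
      * pp->wait_for_busy microseconds, which is too long to spend at
      * device interrupt level.
      */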
3370 
3371 /*
3372  * ecpp_pio_writeb() sends a byte using Centronics handshake
3373  */
3374 static void
3375 ecpp_pio_writeb(struct ecppunit *pp)
3376 {
3377         uint8_t dcr;
3378 
3379         dcr = DCR_READ(pp) & ~ECPP_REV_DIR;
3380         dcr |= ECPP_INTR_EN;
3381 
3382         /* send the next byte */
3383         DATAR_WRITE(pp, *(pp->next_byte++));
3384 
3385         drv_usecwait(pp->data_setup_time);
3386 
3387         /* Now Assert (neg logic) nStrobe */
3388         if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) {
3389                 ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n");
3390         }
3391 
3392         /* Enable nAck interrupts */
3393         (void) DSR_READ(pp);    /* ensure IRQ_ST is armed */
3394         ECPP_UNMASK_INTR(pp);
3395 
3396         drv_usecwait(pp->strobe_pulse_width);
3397 
3398         if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) {
3399                 ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n");
3400         }
3401 }
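
     /*
      * Approximate timing of the handshake implemented above (nStrobe
      * and nAck are active-low; the two widths are the data_setup_time
      * and strobe_pulse_width tunables):
      *
      *      DATA    ===X------------- valid --------------
      *                  |<- setup ->|
      *      nStrobe ----------------\_______/-------------
      *                              |<-pulse->|
      *      nAck    ---------------------------------\__/-
      *
      * The peripheral latches DATA on the leading edge of nStrobe and
      * acknowledges with an nAck pulse; the resulting interrupt drives
      * ecpp_pio_ihdlr(), which schedules the next byte.
      */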
3402 
3403 /*
3404  * Backchannel request interrupt handler
3405  */
3406 static uint_t
3407 ecpp_nErr_ihdlr(struct ecppunit *pp)
3408 {
3409         ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n",
3410             pp->current_mode, pp->current_phase);
3411 
3412         if (pp->oflag != TRUE) {
3413                 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n");
3414                 return (DDI_INTR_UNCLAIMED);
3415         }
3416 
3417         if (pp->e_busy == ECPP_BUSY) {
3418                 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n");
3419                 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3420                 return (DDI_INTR_CLAIMED);
3421         }
3422 
3423         /* mask nErr & nAck interrupts */
3424         ECPP_MASK_INTR(pp);
3425         DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR));
3426         ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK);
3427 
3428         /* going reverse */
3429         switch (pp->current_mode) {
3430         case ECPP_ECP_MODE:
3431                 /*
3432                  * Peripheral asserts nPeriphRequest (nFault)
3433                  */
3434                 break;
3435         case ECPP_NIBBLE_MODE:
3436                 /*
3437                  * Event 18: Periph asserts nErr to indicate data avail
3438                  * Event 19: After waiting minimum pulse width,
3439                  *   periph sets nAck high to generate an interrupt
3440                  *
3441                  * Interface is in Interrupt Phase
3442                  */
3443                 pp->current_phase = ECPP_PHASE_NIBT_REVINTR;
3444 
3445                 break;
3446         default:
3447                 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n");
3448                 return (DDI_INTR_UNCLAIMED);
3449         }
3450 
3451         (void) ecpp_backchan_req(pp);   /* put backchannel request on the wq */
3452 
3453         return (DDI_INTR_CLAIMED);
3454 }
3455 
3456 /*
3457  * The softintr handler does work according to softintr_flags:
3458  * in case of ECPP_SOFTINTR_PIONEXT it sends the next byte of a PIO transfer
3459  */
3460 static uint_t
3461 ecpp_softintr(caddr_t arg)
3462 {
3463         struct ecppunit *pp = (struct ecppunit *)arg;
3464         uint32_t unx_len, ecpp_reattempts = 0;
3465 
3466         mutex_enter(&pp->umutex);
3467 
3468         pp->intr_soft++;
3469 
3470         if (!pp->softintr_pending) {
3471                 mutex_exit(&pp->umutex);
3472                 return (DDI_INTR_CLAIMED);
3473         } else {
3474                 pp->softintr_pending = 0;
3475         }
3476 
3477         if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) {
3478                 pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT;
3479                 /*
3480                  * Send the next byte in PIO mode
3481                  */
3482                 ecpp_reattempts = 0;
3483                 do {
3484                         if (ecpp_check_status(pp) == SUCCESS) {
3485                                 pp->e_busy = ECPP_BUSY;
3486                                 break;
3487                         }
3488                         drv_usecwait(1);
3489                         if (pp->isr_reattempt_high < ecpp_reattempts) {
3490                                 pp->isr_reattempt_high = ecpp_reattempts;
3491                         }
3492                 } while (++ecpp_reattempts < pp->wait_for_busy);
3493 
3494                 /* if the peripheral has not recovered, suspend the transfer */
3495                 if (pp->e_busy == ECPP_ERR) {
3496                         ++pp->ctx_cf; /* check status fail */
3497                         ecpp_error(pp->dip, "ecpp_softintr:check_status:F: "
3498                             "dsr=%x jl=%d cf_isr=%d\n",
3499                             DSR_READ(pp), pp->joblen, pp->ctx_cf);
3500 
3501                         /*
3502                          * if status signals are bad,
3503                          * put everything back on the wq.
3504                          */
3505                         unx_len = pp->last_byte - pp->next_byte;
3506                         if (pp->msg != NULL) {
3507                                 ecpp_putback_untransfered(pp,
3508                                     (void *)pp->msg->b_rptr, unx_len);
3509                                 ecpp_error(pp->dip,
3510                                     "ecpp_softintr:e1:unx_len=%d\n", unx_len);
3511 
3512                                 freemsg(pp->msg);
3513                                 pp->msg = NULL;
3514                         } else {
3515                                 ecpp_putback_untransfered(pp,
3516                                     pp->next_byte, unx_len);
3517                                 ecpp_error(pp->dip,
3518                                     "ecpp_softintr:e2:unx_len=%d\n", unx_len);
3519                         }
3520 
3521                         ecpp_xfer_cleanup(pp);
3522                         pp->e_busy = ECPP_ERR;
3523                         qenable(pp->writeq);
3524                 } else {
3525                         /* send the next one */
3526                         pp->e_busy = ECPP_BUSY;
3527                         ecpp_pio_writeb(pp);
3528                 }
3529         }
3530 
3531         mutex_exit(&pp->umutex);
3532         return (DDI_INTR_CLAIMED);
3533 }
3534 
3535 
3536 /*
3537  * Transfer clean-up:
3538  *      shut down the DMAC
3539  *      stop the transfer timer
3540  *      enable write queue
3541  */
3542 static void
3543 ecpp_xfer_cleanup(struct ecppunit *pp)
3544 {
3545         ASSERT(mutex_owned(&pp->umutex));
3546 
3547         /*
3548          * if we did not use the ioblock, the mblk that
3549          * was used should be freed.
3550          */
3551         if (pp->msg != NULL) {
3552                 freemsg(pp->msg);
3553                 pp->msg = NULL;
3554         }
3555 
3556         /* The port is no longer active */
3557         pp->e_busy = ECPP_IDLE;
3558 
3559         /* Stop the transfer timeout timer */
3560         ecpp_untimeout_unblock(pp, &pp->timeout_id);
3561 
3562         qenable(pp->writeq);
3563 }
3564 
3565 /*VARARGS*/
3566 static void
3567 ecpp_error(dev_info_t *dip, char *fmt, ...)
3568 {
3569         static  long    last;
3570         static  char    *lastfmt;
3571         char            msg_buffer[255];
3572         va_list ap;
3573         time_t  now;
3574 
3575         if (!ecpp_debug) {
3576                 return;
3577         }
3578 
3579         /*
3580          * This function is supposed to be a quick non-blockable
3581          * wrapper for cmn_err(9F), which provides a sensible degree
3582          * of debug message throttling.  Not using any type of lock
3583          * is a requirement, but this also leaves two static variables
3584          * - last and lastfmt - unprotected. However, this will not do
3585          * any harm to driver functionality, it can only weaken throttling.
3586          * The following directive asks warlock to not worry about these
3587          * variables.
3588          */
3589         _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(last, lastfmt))
3590 
3591         /*
3592          * Don't print the same error message too often:
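               * (now & ~1) quantizes time into 2-second buckets, and a
               * message whose fmt pointer matches the previous one
               * within the same bucket is suppressed.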
3593          */
3594         now = gethrestime_sec();
3595         if ((last == (now & ~1)) && (lastfmt == fmt))
3596                 return;
3597 
3598         last = now & ~1;
3599         lastfmt = fmt;
3600 
3601         va_start(ap, fmt);
3602         (void) vsnprintf(msg_buffer, sizeof (msg_buffer), fmt, ap);
3603         cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip),
3604             ddi_get_instance(dip), msg_buffer);
3605         va_end(ap);
3606 }
3607 
3608 /*
3609  * Forward transfer timeout
3610  */
3611 static void
3612 ecpp_xfer_timeout(void *arg)
3613 {
3614         struct ecppunit *pp = arg;
3615         void            *unx_addr;
3616         size_t          unx_len, xferd;
3617         uint8_t         dcr;
3618         timeout_id_t    fifo_timer_id;
3619 
3620         mutex_enter(&pp->umutex);
3621 
3622         if (pp->timeout_id == 0) {
3623                 mutex_exit(&pp->umutex);
3624                 return;
3625         } else {
3626                 pp->timeout_id = 0;
3627         }
3628 
3629         pp->xfer_tout++;
3630 
3631         pp->dma_cancelled = TRUE;    /* prevent race with isr() */
3632 
3633         if (COMPAT_PIO(pp)) {
3634                 /*
3635                  * PIO mode timeout
3636                  */
3637 
3638                 /* turn off nAck interrupts */
3639                 dcr = DCR_READ(pp);
3640                 (void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN));
3641                 ECPP_MASK_INTR(pp);
3642 
3643                 pp->softintr_pending = 0;
3644                 unx_len = pp->last_byte - pp->next_byte;
3645                 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3646 
3647                 if (unx_len > 0) {
3648                         unx_addr = pp->next_byte;
3649                 } else {
3650                         ecpp_xfer_cleanup(pp);
3651                         qenable(pp->writeq);
3652                         mutex_exit(&pp->umutex);
3653                         return;
3654                 }
3655         } else {
3656                 /*
3657                  * DMA mode timeout
3658                  *
3659                  * If the DMAC fails to shut off, continue anyway and attempt
3660                  * to put the untransferred data back on the queue.
3661                  */
3662                 if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
3663                         ecpp_error(pp->dip,
3664                             "ecpp_xfer_timeout: failed dma_stop\n");
3665                 }
3666 
3667                 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len);
3668 
3669                 if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) {
3670                         ecpp_error(pp->dip,
3671                             "ecpp_xfer_timeout: failed unbind\n");
3672                 }
3673 
3674                 /*
3675                  * if the bcr is zero, then DMA is complete and
3676                  * we are waiting for the fifo to drain.  So let
3677                  * ecpp_fifo_timer() look after the clean up.
3678                  */
3679                 if (unx_len == 0) {
3680                         qenable(pp->writeq);
3681                         mutex_exit(&pp->umutex);
3682                         return;
3683                 } else {
3684                         xferd = pp->dma_cookie.dmac_size - unx_len;
3685                         pp->resid -= xferd;
3686                         unx_len = pp->resid;
3687 
3688                         /* update statistics */
3689                         pp->obytes[pp->current_mode] += xferd;
3690                         pp->joblen += xferd;
3691 
3692                         if (pp->msg != NULL) {
3693                                 unx_addr = (caddr_t)pp->msg->b_wptr - unx_len;
3694                         } else {
3695                                 unx_addr = pp->ioblock +
3696                                     (pp->xfercnt - unx_len);
3697                         }
3698                 }
3699         }
3700 
3701         /* Following code is common for PIO and DMA modes */
3702 
3703         ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len);
3704 
3705         if (pp->msg != NULL) {
3706                 freemsg(pp->msg);
3707                 pp->msg = NULL;
3708         }
3709 
3710         /* mark the error status structure */
3711         pp->timeout_error = 1;
3712         pp->e_busy = ECPP_ERR;
3713         fifo_timer_id = pp->fifo_timer_id;
3714         pp->fifo_timer_id = 0;
3715 
3716         qenable(pp->writeq);
3717 
3718         mutex_exit(&pp->umutex);
3719 
3720         if (fifo_timer_id) {
3721                 (void) untimeout(fifo_timer_id);
3722         }
3723 }
3724 
3725 static void
3726 ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len)
3727 {
3728         mblk_t *new_mp;
3729 
3730         ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len);
3731 
3732         if (len == 0) {
3733                 return;
3734         }
3735 
3736         new_mp = allocb(len, BPRI_MED);
3737         if (new_mp == NULL) {
3738                 ecpp_error(pp->dip,
3739                     "ecpp_putback_untransfered: allocb FAILURE.\n");
3740                 return;
3741         }
3742 
3743         bcopy(startp, new_mp->b_rptr, len);
3744         new_mp->b_wptr = new_mp->b_rptr + len;
3745 
3746         if (!putbq(pp->writeq, new_mp)) {
3747                 freemsg(new_mp);
3748         }
3749 }
3750 
3751 static uchar_t
3752 ecr_write(struct ecppunit *pp, uint8_t ecr_byte)
3753 {
3754         int i, current_ecr;
3755 
3756         for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3757                 ECR_WRITE(pp, ecr_byte);
3758 
3759                 current_ecr = ECR_READ(pp);
3760 
3761                 /* mask off the lower two read-only (FIFO status) bits */
3762                 if ((ecr_byte & 0xFC) == (current_ecr & 0xFC))
3763                         return (SUCCESS);
3764         }
3765         return (FAILURE);
3766 }
3767 
3768 static uchar_t
3769 dcr_write(struct ecppunit *pp, uint8_t dcr_byte)
3770 {
3771         uint8_t current_dcr;
3772         int i;
3773 
3774         for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) {
3775                 DCR_WRITE(pp, dcr_byte);
3776 
3777                 current_dcr = DCR_READ(pp);
3778 
3779                 /* compare only bits 0-4 (the direction bit reads back as 1) */
3780                 if ((dcr_byte & 0x1F) == (current_dcr & 0x1F))
3781                         return (SUCCESS);
3782         }
3783         ecpp_error(pp->dip,
3784             "(%d)dcr_write: dcr written =%x, dcr readback =%x\n",
3785             i, dcr_byte, current_dcr);
3786 
3787         return (FAILURE);
3788 }
3789 
3790 static uchar_t
3791 ecpp_reset_port_regs(struct ecppunit *pp)
3792 {
3793         DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
3794         ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
3795         return (SUCCESS);
3796 }
3797 
3798 /*
3799  * The data transferred by the DMA engine goes through the FIFO,
3800  * so that when the DMA counter reaches zero (and an interrupt occurs)
3801  * the FIFO can still contain data. If this is the case, the ISR will
3802  * schedule this callback to wait until the FIFO drains or a timeout occurs.
3803  */
3804 static void
3805 ecpp_fifo_timer(void *arg)
3806 {
3807         struct ecppunit *pp = arg;
3808         uint8_t ecr;
3809         timeout_id_t    timeout_id;
3810 
3811         mutex_enter(&pp->umutex);
3812 
3813         /*
3814          * If the FIFO timer has been turned off, exit.
3815          */
3816         if (pp->fifo_timer_id == 0) {
3817                 ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n");
3818                 mutex_exit(&pp->umutex);
3819                 return;
3820         } else {
3821                 pp->fifo_timer_id = 0;
3822         }
3823 
3824         /*
3825          * If the FIFO is not empty, restart the timer.  Wait FIFO_DRAIN_PERIOD
3826          * (250 ms) and check FIFO_EMPTY bit again. Repeat until FIFO is
3827          * empty or until 10 * FIFO_DRAIN_PERIOD expires.
3828          */
3829         ecr = ECR_READ(pp);
3830 
3831         if ((pp->current_mode != ECPP_DIAG_MODE) &&
3832             (((ecr & ECPP_FIFO_EMPTY) == 0) &&
3833             (pp->ecpp_drain_counter < 10))) {
3834 
3835                 ecpp_error(pp->dip,
3836                     "ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n",
3837                     pp->ecpp_drain_counter, ecr);
3838 
3839                 pp->fifo_timer_id = timeout(ecpp_fifo_timer,
3840                     (caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD));
3841                 ++pp->ecpp_drain_counter;
3842 
3843                 mutex_exit(&pp->umutex);
3844                 return;
3845         }
3846 
3847         if (pp->current_mode != ECPP_DIAG_MODE) {
3848                 /*
3849                  * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs
3850                  * then don't wait any longer.  Simply clean up the transfer.
3851                  */
3852                 if (pp->ecpp_drain_counter >= 10) {
3853                         ecpp_error(pp->dip, "ecpp_fifo_timer(%d):"
3854                             " clearing FIFO,can't wait:ecr=%x\n",
3855                             pp->ecpp_drain_counter, ecr);
3856                 } else {
3857                         ecpp_error(pp->dip,
3858                             "ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n",
3859                             pp->ecpp_drain_counter, ecr);
3860                 }
3861 
3862                 pp->ecpp_drain_counter = 0;
3863         }
3864 
3865         /*
3866          * Main section of routine:
3867          *  - stop the DMA transfer timer
3868          *  - program DMA with next cookie/window or unbind the DMA mapping
3869          *  - update stats
3870          *  - if last mblk in queue, signal to close() & return to idle state
3871          */
3872 
3873         /* Stop the DMA transfer timeout timer */
3874         timeout_id = pp->timeout_id;
3875         pp->timeout_id = 0;
3876 
3877         /* data has drained from fifo, it is ok to free dma resource */
3878         if (pp->current_mode == ECPP_ECP_MODE ||
3879             pp->current_mode == ECPP_DIAG_MODE ||
3880             COMPAT_DMA(pp)) {
3881                 off_t   off;
3882                 size_t  len;
3883 
3884                 /* update residual */
3885                 pp->resid -= pp->dma_cookie.dmac_size;
3886 
3887                 /* update statistics */
3888                 pp->joblen += pp->dma_cookie.dmac_size;
3889                 if (pp->dma_dir == DDI_DMA_WRITE) {
3890                         pp->obytes[pp->current_mode] +=
3891                             pp->dma_cookie.dmac_size;
3892                 } else {
3893                         pp->ibytes[pp->current_mode] +=
3894                             pp->dma_cookie.dmac_size;
3895                 }
3896 
3897                 /*
3898                  * Look if any cookies/windows left
3899                  */
3900                 if (--pp->dma_cookie_count > 0) {
3901                         /* process the next cookie */
3902                         ddi_dma_nextcookie(pp->dma_handle,
3903                             &pp->dma_cookie);
3904                 } else if (pp->dma_curwin < pp->dma_nwin) {
3905                         /* process the next window */
3906                         if (ddi_dma_getwin(pp->dma_handle,
3907                             pp->dma_curwin, &off, &len,
3908                             &pp->dma_cookie,
3909                             &pp->dma_cookie_count) != DDI_SUCCESS) {
3910                                 ecpp_error(pp->dip,
3911                                     "ecpp_fifo_timer: ddi_dma_getwin failed\n");
3912                                 goto dma_done;
3913                         }
3914 
3915                         pp->dma_curwin++;
3916                 } else {
3917                         goto dma_done;
3918                 }
3919 
3920                 ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n",
3921                     pp->dma_cookie.dmac_address,
3922                     pp->dma_cookie.dmac_size);
3923 
3924                 /* kick off new transfer */
3925                 if (ECPP_DMA_START(pp) != SUCCESS) {
3926                         ecpp_error(pp->dip,
3927                             "ecpp_fifo_timer: dma_start failed\n");
3928                         goto dma_done;
3929                 }
3930 
3931                 (void) ecr_write(pp, (ecr & 0xe0) |
3932                     ECPP_DMA_ENABLE | ECPP_INTR_MASK);
3933 
3934                 mutex_exit(&pp->umutex);
3935 
3936                 if (timeout_id) {
3937                         (void) untimeout(timeout_id);
3938                 }
3939                 return;
3940 
3941         dma_done:
3942                 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
3943                         ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n");
3944                 } else {
3945                         ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n");
3946                 }
3947         }
3948 
3949         /*
3950          * if we did not use the dmablock, the mblk that
3951          * was used should be freed.
3952          */
3953         if (pp->msg != NULL) {
3954                 freemsg(pp->msg);
3955                 pp->msg = NULL;
3956         }
3957 
3958         /* The port is no longer active */
3959         pp->e_busy = ECPP_IDLE;
3960 
3961         qenable(pp->writeq);
3962 
3963         mutex_exit(&pp->umutex);
3964 
3965         if (timeout_id) {
3966                 (void) untimeout(timeout_id);
3967         }
3968 }
3969 
3970 /*
3971  * In Compatibility mode, check if the peripheral is ready to accept data
3972  */
3973 static uint8_t
3974 ecpp_check_status(struct ecppunit *pp)
3975 {
3976         uint8_t dsr;
3977         uint8_t statmask;
3978 
3979         if (pp->current_mode == ECPP_ECP_MODE ||
3980             pp->current_mode == ECPP_DIAG_MODE)
3981                 return (SUCCESS);
3982 
3983         statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK;
3984 
3985         dsr = DSR_READ(pp);
3986         if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) {
3987                 pp->e_busy = ECPP_ERR;
3988                 return (FAILURE);
3989         } else {
3990                 return (SUCCESS);
3991         }
3992 }
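
     /*
      * Example: a ready Centronics peripheral drives nErr, Select,
      * nBusy and nAck high with PE (paper error) low, so the statmask
      * test passes and SUCCESS is returned; a paper-out raises PE and
      * a busy peripheral clears nBUSY, either of which fails the check
      * and marks the port ECPP_ERR.
      */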
3993 
3994 /*
3995  * if the peripheral is not ready to accept data, the write service routine
3996  * periodically reschedules itself to recheck the peripheral status
3997  * and start the data transfer as soon as possible
3998  */
3999 static void
4000 ecpp_wsrv_timer(void *arg)
4001 {
4002         struct ecppunit *pp = arg;
4003 
4004         ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n");
4005 
4006         mutex_enter(&pp->umutex);
4007 
4008         if (pp->wsrv_timer_id == 0) {
4009                 mutex_exit(&pp->umutex);
4010                 return;
4011         } else {
4012                 pp->wsrv_timer_id = 0;
4013         }
4014 
4015         ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n");
4016 
4017         qenable(pp->writeq);
4018 
4019         mutex_exit(&pp->umutex);
4020 }
4021 
4022 /*
4023  * Allocate a message indicating a backchannel request
4024  * and put it on the write queue
4025  */
4026 static int
4027 ecpp_backchan_req(struct ecppunit *pp)
4028 {
4029         mblk_t  *mp;
4030 
4031         if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) {
4032                 ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n");
4033                 return (FAILURE);
4034         } else {
4035                 mp->b_datap->db_type = M_CTL;
4036                 *(int *)mp->b_rptr = ECPP_BACKCHANNEL;
4037                 mp->b_wptr = mp->b_rptr + sizeof (int);
4038                 if (!putbq(pp->writeq, mp)) {
4039                         ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n");
4040                         freemsg(mp);
4041                         return (FAILURE);
4042                 }
4043                 return (SUCCESS);
4044         }
4045 }
4046 
4047 /*
4048  * Cancel the function scheduled with timeout(9F)
4049  * This function is to be called with the mutex held
4050  */
4051 static void
4052 ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id)
4053 {
4054         timeout_id_t    saved_id;
4055 
4056         ASSERT(mutex_owned(&pp->umutex));
4057 
4058         if (*id) {
4059                 saved_id = *id;
4060                 *id = 0;
4061                 mutex_exit(&pp->umutex);
4062                 (void) untimeout(saved_id);
4063                 mutex_enter(&pp->umutex);
4064         }
4065 }
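
     /*
      * Dropping the mutex around untimeout(9F) is essential: the
      * callbacks cancelled here (e.g. ecpp_xfer_timeout(),
      * ecpp_fifo_timer()) begin by acquiring pp->umutex, and
      * untimeout() blocks until a running callback returns.  Holding
      * the mutex across the call could therefore deadlock.  Clearing
      * *id before dropping the lock lets a callback that has already
      * fired detect the cancellation (each one checks its id for 0)
      * and exit harmlessly.
      */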
4066 
4067 /*
4068  * get prnio interface capabilities
4069  */
4070 static uint_t
4071 ecpp_get_prn_ifcap(struct ecppunit *pp)
4072 {
4073         uint_t  ifcap;
4074 
4075         ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS;
4076 
4077         /* status (DSR) only makes sense in Centronics & Compat modes */
4078         if (pp->current_mode == ECPP_CENTRONICS ||
4079             pp->current_mode == ECPP_COMPAT_MODE) {
4080                 ifcap |= PRN_1284_STATUS;
4081         } else if (pp->current_mode == ECPP_NIBBLE_MODE ||
4082             pp->current_mode == ECPP_ECP_MODE) {
4083                 ifcap |= PRN_BIDI;
4084         }
4085 
4086         return (ifcap);
4087 }
4088 
4089 /*
4090  * Determine SuperI/O type
4091  */
4092 static struct ecpp_hw_bind *
4093 ecpp_determine_sio_type(struct ecppunit *pp)
4094 {
4095         struct ecpp_hw_bind     *hw_bind;
4096         char                    *name;
4097         int                     i;
4098 
4099         name = ddi_binding_name(pp->dip);
4100 
4101         for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) {
4102                 if (strcmp(name, ecpp_hw_bind[i].name) == 0) {
4103                         hw_bind = &ecpp_hw_bind[i];
4104                         break;
4105                 }
4106         }
4107 
4108         return (hw_bind);
4109 }
4110 
4111 
4112 /*
4113  *
4114  * IEEE 1284 support routines:
4115  *      negotiation and termination;
4116  *      phase transitions;
4117  *      device ID;
4118  *
4119  */
4120 
4121 /*
4122  * Interface initialization, abnormal termination into Compatibility mode
4123  *
4124  * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS
4125  */
4126 static void
4127 ecpp_1284_init_interface(struct ecppunit *pp)
4128 {
4129         ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4130 
4131         /*
4132          * Toggle the nInit signal if configured in ecpp.conf;
4133          * for most peripherals it is not needed
4134          */
4135         if (pp->init_seq == TRUE) {
4136                 DCR_WRITE(pp, ECPP_SLCTIN);
4137                 drv_usecwait(50);       /* T(ER) = 50us */
4138         }
4139 
4140         DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4141 
4142         pp->current_mode = pp->backchannel = ECPP_CENTRONICS;
4143         pp->current_phase = ECPP_PHASE_C_IDLE;
4144         ECPP_CONFIG_MODE(pp);
4145         pp->to_mode[pp->current_mode]++;
4146 
4147         ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n");
4148 }
4149 
4150 /*
4151  * ECP mode negotiation
4152  */
4153 static int
4154 ecp_negotiation(struct ecppunit *pp)
4155 {
4156         uint8_t dsr;
4157 
4158         /* ECP mode negotiation */
4159 
4160         if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE)
4161                 return (FAILURE);
4162 
4163         /* Event 5: peripheral deasserts PError and Busy, asserts Select */
4164         if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) !=
4165             (ECPP_nBUSY | ECPP_SLCT)) {
4166                 ecpp_error(pp->dip,
4167                     "ecp_negotiation: failed event 5 %x\n", DSR_READ(pp));
4168                 (void) ecpp_1284_termination(pp);
4169                 return (FAILURE);
4170         }
4171 
4172         /* entered Setup Phase */
4173         pp->current_phase = ECPP_PHASE_ECP_SETUP;
4174 
4175         /* Event 30: host asserts nAutoFd */
4176         DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4177 
4178         /* Event 31: peripheral asserts PError */
4179         if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4180                 ecpp_error(pp->dip,
4181                     "ecp_negotiation: failed event 31 %x\n", DSR_READ(pp));
4182                 (void) ecpp_1284_termination(pp);
4183                 return (FAILURE);
4184         }
4185 
4186         /* entered Forward Idle Phase */
4187         pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4188 
4189         /* successful negotiation into ECP mode */
4190         pp->current_mode = ECPP_ECP_MODE;
4191         pp->backchannel = ECPP_ECP_MODE;
4192 
4193         ecpp_error(pp->dip, "ecp_negotiation: ok\n");
4194 
4195         return (SUCCESS);
4196 }
4197 
4198 /*
4199  * Nibble mode negotiation
4200  */
4201 static int
4202 nibble_negotiation(struct ecppunit *pp)
4203 {
4204         uint8_t dsr;
4205 
4206         if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) {
4207                 return (FAILURE);
4208         }
4209 
4210         /*
4211          * If the peripheral has data available, PE and nErr will
4212          * be set low at Events 5 & 6.
4213          */
4214         if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
4215                 pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4216         } else {
4217                 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
4218         }
4219 
4220         /* successful negotiation into Nibble mode */
4221         pp->current_mode = ECPP_NIBBLE_MODE;
4222         pp->backchannel = ECPP_NIBBLE_MODE;
4223 
4224         ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n",
4225             pp->current_phase);
4226 
4227         return (SUCCESS);
4228 
4229 }
4230 
4231 /*
4232  * Wait ptimeout usec for periph to set 'mask' bits to 'val' state
4233  *
4234  * return value < 0 indicates timeout
4235  */
4236 static int
4237 wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout)
4238 {
4239         while (((DSR_READ(pp) & mask) != val) && ptimeout--) {
4240                 drv_usecwait(1);
4241         }
4242 
4243         return (ptimeout);
4244 }
4245 
4246 /*
4247  * 1284 negotiation Events 0..6
4248  * required mode is indicated by extensibility request value
4249  *
4250  * After successful negotiation SUCCESS is returned and
4251  * current mode is set according to xreq,
4252  * otherwise FAILURE is returned and current mode is set to
4253  * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph)
4254  *
4255  * Current phase must be set by the caller (mode-specific negotiation)
4256  *
4257  * If rdsr is not NULL, DSR value after Event 6 is stored here
4258  */
4259 static int
4260 ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr)
4261 {
4262         int xflag;
4263 
4264         ecpp_error(pp->dip, "nego(%x): entering...\n", xreq);
4265 
4266         /* negotiation should start in Compatibility mode */
4267         (void) ecpp_1284_termination(pp);
4268 
4269         /* Set host into Compat mode */
4270         ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4271 
4272         pp->current_phase = ECPP_PHASE_NEGO;
4273 
4274         /* Event 0: host sets extensibility request on data lines */
4275         DATAR_WRITE(pp, xreq);
4276 
4277         /* Event 1: host deasserts nSelectIn and asserts nAutoFd */
4278         DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4279 
4280         drv_usecwait(1);        /* Tp(ecp) == 0.5us */
4281 
4282         /*
4283          * Event 2: peripheral asserts nAck, deasserts nFault,
4284          *                      asserts Select, asserts PError
4285          */
4286         if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK,
4287             ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) {
4288                 /* peripheral is not 1284-compliant */
4289                 ecpp_error(pp->dip,
4290                     "nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp));
4291                 (void) ecpp_1284_termination(pp);
4292                 return (FAILURE);
4293         }
4294 
4295         /*
4296          * Event 3: host asserts nStrobe, latching extensibility value into
4297          * peripheral's input latch.
4298          */
4299         DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB);
4300 
4301         drv_usecwait(2);        /* Tp(ecp) = 0.5us */
4302 
4303         /*
4304          * Event 4: host deasserts nStrobe and nAutoFd to acknowledge that
4305          * it has recognized a 1284-compatible peripheral
4306          */
4307         DCR_WRITE(pp, ECPP_nINIT);
4308 
4309         /*
4310          * Event 5: Peripheral confirms it supports requested extension
4311          * For Nibble mode Xflag must be low, otherwise it must be high
4312          */
4313         xflag = (xreq == ECPP_XREQ_NIBBLE) ? 0 : ECPP_SLCT;
4314 
4315         /*
4316          * Event 6: Peripheral sets nAck high
4317          * indicating that status lines are valid
4318          */
4319         if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4320                 /* Something wrong with peripheral */
4321                 ecpp_error(pp->dip,
4322                     "nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp));
4323                 (void) ecpp_1284_termination(pp);
4324                 return (FAILURE);
4325         }
4326 
4327         if ((DSR_READ(pp) & ECPP_SLCT) != xflag) {
4328                 /* Extensibility value is not supported */
4329                 ecpp_error(pp->dip,
4330                     "nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp));
4331                 (void) ecpp_1284_termination(pp);
4332                 return (FAILURE);
4333         }
4334 
4335         if (rdsr) {
4336                 *rdsr = DSR_READ(pp);
4337         }
4338 
4339         return (SUCCESS);
4340 }
4341 
4342 /*
4343  * 1284 Termination: Events 22..28 - set link to Compatibility mode
4344  *
4345  * This routine is not designed for Immediate termination,
4346  * caller must take care of waiting for a valid state,
4347  * (in particular, in ECP mode current phase must be Forward Idle)
4348  * otherwise interface will be reinitialized
4349  *
4350  * In case of Valid state termination SUCCESS is returned and
4351  * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE
4352  * Otherwise interface is reinitialized, FAILURE is returned and
4353  * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE
4354  */
4355 static int
4356 ecpp_1284_termination(struct ecppunit *pp)
4357 {
4358         int     previous_mode = pp->current_mode;
4359 
4360         if (((pp->current_mode == ECPP_COMPAT_MODE ||
4361             pp->current_mode == ECPP_CENTRONICS) &&
4362             pp->current_phase == ECPP_PHASE_C_IDLE) ||
4363             pp->current_mode == ECPP_DIAG_MODE) {
4364                 ecpp_error(pp->dip, "termination: not needed\n");
4365                 return (SUCCESS);
4366         }
4367 
4368         /* Set host into Compat mode, interrupts disabled */
4369         ECPP_MASK_INTR(pp);
4370         ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001);
4371 
4372         pp->current_mode = ECPP_COMPAT_MODE; /* needed by next function */
4373 
4374         ECPP_CONFIG_MODE(pp);
4375 
4376         /*
4377          * EPP mode uses simple nInit pulse for termination
4378          */
4379         if (previous_mode == ECPP_EPP_MODE) {
4380                 /* Event 68: host sets nInit low */
4381                 DCR_WRITE(pp, 0);
4382 
4383                 drv_usecwait(55);       /* T(ER) = 50us */
4384 
4385                 /* Event 69: host sets nInit high */
4386                 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4387 
4388                 goto endterm;
4389         }
4390 
4391         /* terminate peripheral to Compat mode */
4392         pp->current_phase = ECPP_PHASE_TERM;
4393 
4394         /* Event 22: host sets nSelectIn low and nAutoFd high */
4395         DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4396 
4397         /* Event 23: peripheral deasserts nFault and nBusy */
4398         /* Event 24: peripheral asserts nAck */
4399         if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK,
4400             ECPP_nERR, 35000) < 0) {
4401                 ecpp_error(pp->dip,
4402                     "termination: failed events 23,24 %x\n", DSR_READ(pp));
4403                 ecpp_1284_init_interface(pp);
4404                 return (FAILURE);
4405         }
4406 
4407         drv_usecwait(1);        /* Tp = 0.5us */
4408 
4409         /* Event 25: host sets nAutoFd low */
4410         DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX);
4411 
4412         /* Event 26: the peripheral puts itself in Compatible mode */
4413 
4414         /* Event 27: peripheral deasserts nAck */
4415         if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4416                 ecpp_error(pp->dip,
4417                     "termination: failed event 27 %x\n", DSR_READ(pp));
4418                 ecpp_1284_init_interface(pp);
4419                 return (FAILURE);
4420         }
4421 
4422         drv_usecwait(1);        /* Tp = 0.5us */
4423 
4424         /* Event 28: host deasserts nAutoFd */
4425         DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN);
4426 
4427         drv_usecwait(1);        /* Tp = 0.5us */
4428 
4429 endterm:
4430         /* Compatible mode Idle Phase */
4431         pp->current_phase = ECPP_PHASE_C_IDLE;
4432 
4433         ecpp_error(pp->dip, "termination: completed %x %x\n",
4434             DSR_READ(pp), DCR_READ(pp));
4435 
4436         return (SUCCESS);
4437 }
4438 
4439 /*
4440  * Initiate ECP backchannel DMA transfer
4441  */
4442 static uchar_t
4443 ecp_peripheral2host(struct ecppunit *pp)
4444 {
4445         mblk_t          *mp = NULL;
4446         size_t          len;
4447         uint32_t        xfer_time;
4448 
4449         ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4450             pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4451 
4452         /*
4453          * the hardware generates the cycles to receive data from the
4454          * peripheral; we only need to read from the FIFO
4455          */
4456 
4457         /*
4458          * If the user issued a read(2) of rev_resid bytes, xfer exactly this amount
4459          * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read
4460          * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes
4461          */
4462         if (pp->nread > 0) {
4463                 len = min(pp->nread, ECP_REV_BLKSZ_MAX);
4464         } else {
4465                 len = ECP_REV_BLKSZ_MAX;
4466         }
4467 
4468         pp->nread = 0;       /* clear after use */
4469 
4470         /*
4471          * Allocate an mblk for the data, making at most 2 attempts:
4472          * if a len-byte block fails, try our block size
4473          */
4474         while ((mp = allocb(len, BPRI_MED)) == NULL) {
4475                 ecpp_error(pp->dip,
4476                     "ecp_periph2host: failed allocb(%d)\n", len);
4477                 if (len > ECP_REV_BLKSZ) {
4478                         len = ECP_REV_BLKSZ;
4479                 } else {
4480                         break;
4481                 }
4482         }
4483 
4484         if (mp == NULL) {
4485                 goto fail;
4486         }
4487 
4488         pp->msg = mp;
4489         pp->e_busy = ECPP_BUSY;
4490         pp->dma_dir = DDI_DMA_READ;
4491         pp->current_phase = ECPP_PHASE_ECP_REV_XFER;
4492 
4493         if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) {
4494                 goto fail;
4495         }
4496 
4497         /*
4498          * there are two problems with defining the ECP backchannel xfer timeout
4499          *
4500          * a) IEEE 1284 allows infinite time between backchannel bytes,
4501          *    but we must stop at some point to send the data upstream,
4502          *    check whether any forward transfer requests are pending, etc;
4503          *    all that done, we can continue with backchannel data;
4504          *
4505          * b) we don't know how much data the peripheral has;
4506          *    the DMA counter is set to our buffer size, which can be bigger
4507          *    than needed - in this case a timeout must detect this;
4508          *
4509          * The timeout we schedule here serves as both the transfer timeout
4510          * and a means of detecting backchannel stalls; in fact, there are
4511          * two timeouts in one:
4512          *
4513          * - the transfer timeout is based on the ECP bandwidth of ~1MB/sec
4514          *   and equals the time needed to transfer the whole buffer
4515          *   (but not less than ECP_REV_MINTOUT ms); if it occurs,
4516          *   DMA is stopped and the data is sent upstream;
4517          *
4518          * - the backchannel watchdog looks at the DMA counter
4519          *   every rev_watchdog ms and stops the transfer only
4520          *   if the counter hasn't changed since the last time;
4521          *   otherwise it saves the DMA counter value and restarts itself;
4522          *
4523          * the transfer timeout is a multiple of rev_watchdog
4524          * and implemented as a downward counter
4525          *
4526          * on Grover, we can't access DMAC registers while DMA is in flight,
4527          * so we can't have the watchdog on Grover, only the timeout
4528          */
4529 
4530         /* calculate number of watchdog invocations equal to the xfer timeout */
4531         xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT);
4532 #if defined(__x86)
4533         pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 :
4534             max(xfer_time / pp->rev_watchdog, 1);
4535 #else
4536         pp->rev_timeout_cnt = (pp->hw == &m1553) ? 1 :
4537             max(xfer_time / pp->rev_watchdog, 1);
4538 #endif
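     /*
      * Illustrative arithmetic (the constants here are examples, not the
      * real values): with len = 8192 bytes and ecp_rev_speed = 1000000
      * bytes/sec, xfer_time = max(1000*8192/1000000, ECP_REV_MINTOUT)
      * = max(8, ECP_REV_MINTOUT) ms, and rev_timeout_cnt =
      * max(xfer_time / rev_watchdog, 1) watchdog ticks.
      */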
4539 
4540         pp->last_dmacnt = len;       /* nothing xferred yet */
4541 
4542         pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp,
4543             drv_usectohz(pp->rev_watchdog * 1000));
4544 
4545         ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n"
4546             "xfer_time=%d wdog=%d cnt=%d\n",
4547             len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt);
4548 
4549         return (SUCCESS);
4550 
4551 fail:
4552         if (mp) {
4553                 freemsg(mp);
4554         }
4555         pp->e_busy = ECPP_IDLE;
4556         pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4557 
4558         return (FAILURE);
4559 }
4560 
4561 /*
4562  * ECP backchannel read timeout
4563  * implements both backchannel watchdog and transfer timeout in ECP mode
4564  * if the transfer is still in progress, reschedule itself,
4565  * otherwise call completion routine
4566  */
4567 static void
4568 ecpp_ecp_read_timeout(void *arg)
4569 {
4570         struct ecppunit *pp = arg;
4571         size_t          dmacnt;
4572 
4573         mutex_enter(&pp->umutex);
4574 
4575         if (pp->timeout_id == 0) {
4576                 mutex_exit(&pp->umutex);
4577                 return;
4578         } else {
4579                 pp->timeout_id = 0;
4580         }
4581 
4582         if (--pp->rev_timeout_cnt == 0) {
4583                 /*
4584                  * Transfer timed out
4585                  */
4586                 ecpp_error(pp->dip, "ecp_read_timeout: timeout\n");
4587                 pp->xfer_tout++;
4588                 ecpp_ecp_read_completion(pp);
4589         } else {
4590                 /*
4591                  * Backchannel watchdog:
4592                  * see whether DMA has made any progress since last time
4593                  */
4594                 dmacnt = ECPP_DMA_GETCNT(pp);
4595                 if (dmacnt - pp->last_dmacnt == 0) {
4596                         /*
4597                          * No progress - stop the transfer and send
4598                          * whatever has been read so far up the stream
4599                          */
4600                         ecpp_error(pp->dip, "ecp_read_timeout: no progress\n");
4601                         pp->xfer_tout++;
4602                         ecpp_ecp_read_completion(pp);
4603                 } else {
4604                         /*
4605                          * Something was transferred - restart ourselves
4606                          */
4607                         ecpp_error(pp->dip, "ecp_read_timeout: restarting\n");
4608                         pp->last_dmacnt = dmacnt;
4609                         pp->timeout_id = timeout(ecpp_ecp_read_timeout,
4610                             (caddr_t)pp,
4611                             drv_usectohz(pp->rev_watchdog * 1000));
4612                 }
4613         }
4614 
4615         mutex_exit(&pp->umutex);
4616 }
4617 
4618 /*
4619  * ECP backchannel read completion:
4620  * stop the DMA, free DMA resources and send read data upstream
4621  */
4622 static void
4623 ecpp_ecp_read_completion(struct ecppunit *pp)
4624 {
4625         size_t  xfer_len, unx_len;
4626         mblk_t  *mp;
4627 
4628         ASSERT(mutex_owned(&pp->umutex));
4629         ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4630             pp->current_phase == ECPP_PHASE_ECP_REV_XFER);
4631         ASSERT(pp->msg != NULL);
4632 
4633         /*
4634          * Stop the transfer and unbind DMA handle
4635          */
4636         if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) {
4637                 unx_len = pp->resid;
4638                 ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n");
4639         }
4640 
4641         mp = pp->msg;
4642         xfer_len = pp->resid - unx_len;      /* how much data was transferred */
4643 
4644         if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) {
4645                 ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n");
4646         }
4647 
4648         ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n",
4649             xfer_len, pp->resid);
4650 
4651         /* clean up and update statistics */
4652         pp->msg = NULL;
4653         pp->resid -= xfer_len;
4654         pp->ibytes[pp->current_mode] += xfer_len;
4655         pp->e_busy = ECPP_IDLE;
4656         pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4657 
4658         /*
4659          * Send the read data up the stream
4660          */
4661         mp->b_wptr += xfer_len;
4662         if (canputnext(pp->readq)) {
4663                 mutex_exit(&pp->umutex);
4664                 putnext(pp->readq, mp);
4665                 mutex_enter(&pp->umutex);
4666         } else {
4667                 ecpp_error(pp->dip, "ecp_read_completion: fail canputnext\n");
4668                 if (!putq(pp->readq, mp)) {
4669                         freemsg(mp);
4670                 }
4671         }
4672 
4673         /* if bytes left in the FIFO another transfer is needed */
4674         if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) {
4675                 (void) ecpp_backchan_req(pp);
4676         }
4677 
4678         qenable(pp->writeq);
4679 }
4680 
4681 /*
4682  * Read one byte in the Nibble mode
4683  */
4684 static uchar_t
4685 nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte)
4686 {
4687         uint8_t n[2];   /* two nibbles */
4688         int     i;
4689 
4690         /*
4691          * One byte is made of two nibbles
4692          */
4693         for (i = 0; i < 2; i++) {
4694                 /* Event 7, 12: host asserts nAutoFd to request a nibble */
4695                 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX);
4696 
4697                 /* Event 8: peripheral puts data on the status lines */
4698 
4699                 /* Event 9: peripheral asserts nAck, data available */
4700                 if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) {
4701                         ecpp_error(pp->dip,
4702                             "nibble_periph2host(%d): failed event 9 %x\n",
4703                             i + 1, DSR_READ(pp));
4704                         (void) ecpp_1284_termination(pp);
4705                         return (FAILURE);
4706                 }
4707 
4708                 n[i] = DSR_READ(pp);    /* get a nibble */
4709 
4710                 /* Event 10: host deasserts nAutoFd to say it grabbed data */
4711                 DCR_WRITE(pp, ECPP_nINIT);
4712 
4713                 /* (2) Event 13: peripheral asserts PE - end of data phase */
4714 
4715                 /* Event 11: peripheral deasserts nAck to finish handshake */
4716                 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) {
4717                         ecpp_error(pp->dip,
4718                             "nibble_periph2host(%d): failed event 11 %x\n",
4719                             i + 1, DSR_READ(pp));
4720                         (void) ecpp_1284_termination(pp);
4721                         return (FAILURE);
4722                 }
4723         }
4724 
4725         /* extract data byte from two nibbles - optimized formula */
4726         *byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) |
4727             ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f);
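             /*
              * How the formula unpacks a nibble, assuming the usual DSR
              * layout (nAck = bit 6, Busy = bit 7): data bits 0-2 arrive
              * on DSR bits 3-5, data bit 3 arrives inverted on Busy;
              * n[0] yields the low nibble, n[1] the high nibble, and
              * nAck is masked out of both.
              */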
4728 
4729         pp->ibytes[ECPP_NIBBLE_MODE]++;
4730         return (SUCCESS);
4731 }
4732 
4733 /*
4734  * process data transfers requested by the peripheral
4735  */
4736 static uint_t
4737 ecpp_peripheral2host(struct ecppunit *pp)
4738 {
4739         if (!canputnext(pp->readq)) {
4740                 ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n");
4741                 return (SUCCESS);
4742         }
4743 
4744         switch (pp->backchannel) {
4745         case ECPP_CENTRONICS:
4746                 /* no backchannel */
4747                 return (SUCCESS);
4748 
4749         case ECPP_NIBBLE_MODE:
4750                 ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
4751 
4752                 /*
4753                  * Event 20: Host sets nAutoFd high to ack request
4754                  */
4755                 DCR_WRITE(pp, ECPP_nINIT);
4756 
4757                 /* Event 21: Periph sets PError low to ack host */
4758                 if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) {
4759                         ecpp_error(pp->dip,
4760                             "ecpp_periph2host: failed event 21 %x\n",
4761                             DSR_READ(pp));
4762                         (void) ecpp_1284_termination(pp);
4763                         return (FAILURE);
4764                 }
4765 
4766                 pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
4767 
4768                 /* this routine will read the data in Nibble mode */
4769                 return (ecpp_idle_phase(pp));
4770 
4771         case ECPP_ECP_MODE:
4772                 if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) &&
4773                     (ecp_forward2reverse(pp) == FAILURE)) {
4774                         return (FAILURE);
4775                 }
4776 
4777                 return (ecp_peripheral2host(pp));       /* start the transfer */
4778 
4779         case ECPP_DIAG_MODE: {
4780                 mblk_t          *mp;
4781                 int             i;
4782 
4783                 if (ECR_READ(pp) & ECPP_FIFO_EMPTY) {
4784                         ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n");
4785                         return (SUCCESS);
4786                 }
4787 
4788                 /* allocate the FIFO size */
4789                 if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) {
4790                         ecpp_error(pp->dip,
4791                             "ecpp_periph2host: allocb FAILURE.\n");
4792                         return (FAILURE);
4793                 }
4794 
4795                 /*
4796                  * For the time being just read it byte by byte
4797                  */
4798                 i = ECPP_FIFO_SZ;
4799                 while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) {
4800                         *mp->b_wptr++ = TFIFO_READ(pp);
4801                         drv_usecwait(1); /* ECR is sometimes slow to update */
4802                 }
4803 
4804                 if (canputnext(pp->readq)) {
4805                         mutex_exit(&pp->umutex);
4806                         mp->b_datap->db_type = M_DATA;
4807                         ecpp_error(pp->dip,
4808                             "ecpp_periph2host: sending %d bytes\n",
4809                             mp->b_wptr - mp->b_rptr);
4810                         putnext(pp->readq, mp);
4811                         mutex_enter(&pp->umutex);
4812                         return (SUCCESS);
4813                 } else {
4814                         ecpp_error(pp->dip,
4815                             "ecpp_periph2host: !canputnext data lost\n");
4816                         freemsg(mp);
4817                         return (FAILURE);
4818                 }
4819         }
4820 
4821         default:
4822                 ecpp_error(pp->dip, "ecpp_peripheral2host: illegal backchannel");
4823                 return (FAILURE);
4824         }
4825 }
4826 
4827 /*
4828  * Negotiate from ECP Forward Idle to Reverse Idle Phase
4829  *
4830  * (dcr/ecr manipulations follow the ECP Specification)
4831  */
4832 static int
4833 ecp_forward2reverse(struct ecppunit *pp)
4834 {
4835         ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4836             pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE);
4837 
4838         /* place port into PS2 mode */
4839         ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4840 
4841         /* set direction bit (DCR3-0 must be 0100 - National) */
4842         DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4843 
4844         /* enable hardware assist */
4845         ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4846 
4847         drv_usecwait(1);        /* Tp(ecp) = 0.5us */
4848 
4849         /* Event 39: host sets nInit low */
4850         DCR_WRITE(pp, ECPP_REV_DIR);
4851 
4852         /* Event 40: peripheral sets PError low */
4853 
4854         pp->current_phase = ECPP_PHASE_ECP_REV_IDLE;
4855 
4856         ecpp_error(pp->dip, "ecp_forward2reverse ok\n");
4857 
4858         return (SUCCESS);
4859 }
4860 
4861 /*
4862  * Negotiate from ECP Reverse Idle to Forward Idle Phase
4863  *
4864  * (dcr/ecr manipulations follow the ECP Specification)
4865  */
4866 static int
4867 ecp_reverse2forward(struct ecppunit *pp)
4868 {
4869         ASSERT(pp->current_mode == ECPP_ECP_MODE &&
4870             pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
4871 
4872         /* Event 47: host deasserts nInit */
4873         DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT);
4874 
4875         /*
4876          * Event 48: peripheral deasserts nAck
4877          * Event 49: peripheral asserts PError
4878          */
4879         if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) {
4880                 ecpp_error(pp->dip,
4881                     "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp));
4882                 (void) ecpp_1284_termination(pp);
4883                 return (FAILURE);
4884         }
4885 
4886         /* place port into PS2 mode */
4887         ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4888 
4889         /* clear direction bit */
4890         DCR_WRITE(pp, ECPP_nINIT);
4891 
4892         /* reenable hardware assist */
4893         ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK);
4894 
4895         pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE;
4896 
4897         ecpp_error(pp->dip, "ecp_reverse2forward ok\n");
4898 
4899         return (SUCCESS);
4900 }
4901 
4902 /*
4903  * Default negotiation chooses the best mode supported by the peripheral
4904  * Note that backchannel mode may be different from forward mode
4905  */
4906 static void
4907 ecpp_default_negotiation(struct ecppunit *pp)
4908 {
4909         if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) {
4910                 /* 1284 compatible device */
4911                 pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4912                 return;
4913         } else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) {
4914                 /* 1284 compatible device */
4915                 pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO;
4916         } else {
4917                 /* Centronics device */
4918                 pp->io_mode =
4919                     (pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO;
4920         }
4921         ECPP_CONFIG_MODE(pp);
4922 }
4923 
4924 /*
4925  * Negotiate to the mode indicated by newmode
4926  */
4927 static int
4928 ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode)
4929 {
4930         /* any other mode is impossible */
4931         ASSERT(pp->current_mode == ECPP_CENTRONICS ||
4932             pp->current_mode == ECPP_COMPAT_MODE ||
4933             pp->current_mode == ECPP_NIBBLE_MODE ||
4934             pp->current_mode == ECPP_ECP_MODE ||
4935             pp->current_mode == ECPP_DIAG_MODE);
4936 
4937         if (pp->current_mode == newmode) {
4938                 return (SUCCESS);
4939         }
4940 
4941         /* termination from ECP is only allowed from the Forward Idle Phase */
4942         if ((pp->current_mode == ECPP_ECP_MODE) &&
4943             (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) {
4944                 /* this may break into Centronics */
4945                 (void) ecp_reverse2forward(pp);
4946         }
4947 
4948         switch (newmode) {
4949         case ECPP_CENTRONICS:
4950                 (void) ecpp_1284_termination(pp);
4951 
4952                 /* put superio into PIO mode */
4953                 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
4954 
4955                 pp->current_mode = ECPP_CENTRONICS;
4956                 pp->backchannel = ECPP_CENTRONICS;
4957                 ECPP_CONFIG_MODE(pp);
4958 
4959                 pp->to_mode[pp->current_mode]++;
4960                 return (SUCCESS);
4961 
4962         case ECPP_COMPAT_MODE:
4963                 /* ECPP_COMPAT_MODE should support Nibble as a backchannel */
4964                 if (pp->current_mode == ECPP_NIBBLE_MODE) {
4965                         if (ecpp_1284_termination(pp) == SUCCESS) {
4966                                 pp->current_mode = ECPP_COMPAT_MODE;
4967                                 pp->backchannel = ECPP_NIBBLE_MODE;
4968                                 ECPP_CONFIG_MODE(pp);
4969                                 pp->to_mode[pp->current_mode]++;
4970                                 return (SUCCESS);
4971                         } else {
4972                                 return (FAILURE);
4973                         }
4974                 }
4975 
4976                 if ((nibble_negotiation(pp) == SUCCESS) &&
4977                     (ecpp_1284_termination(pp) == SUCCESS)) {
4978                         pp->backchannel = ECPP_NIBBLE_MODE;
4979                         pp->current_mode = ECPP_COMPAT_MODE;
4980                         ECPP_CONFIG_MODE(pp);
4981                         pp->to_mode[pp->current_mode]++;
4982                         return (SUCCESS);
4983                 } else {
4984                         return (FAILURE);
4985                 }
4986 
4987         case ECPP_NIBBLE_MODE:
4988                 if (nibble_negotiation(pp) == FAILURE) {
4989                         return (FAILURE);
4990                 }
4991 
4992                 pp->backchannel = ECPP_NIBBLE_MODE;
4993                 ECPP_CONFIG_MODE(pp);
4994                 pp->to_mode[pp->current_mode]++;
4995 
4996                 return (SUCCESS);
4997 
4998         case ECPP_ECP_MODE:
4999                 if (pp->noecpregs)
5000                         return (FAILURE);
5001                 if (ecp_negotiation(pp) == FAILURE) {
5002                         return (FAILURE);
5003                 }
5004 
5005                 /*
5006                  * National says CTR[3:0] should be 0100b before moving to 011
5007                  */
5008                 DCR_WRITE(pp, ECPP_nINIT);
5009 
5010                 if (ecr_write(pp, ECR_mode_011 |
5011                     ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5012                         ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n");
5013                         return (FAILURE);
5014                 }
5015 
5016                 ECPP_CONFIG_MODE(pp);
5017                 pp->to_mode[pp->current_mode]++;
5018 
5019                 return (SUCCESS);
5020 
5021         case ECPP_DIAG_MODE:
5022                 /*
5023                  * In DIAG mode the application can do nasty things (e.g. drive
5024                  * pins). To keep the peripheral sane, terminate to Compatibility mode
5025                  */
5026                 (void) ecpp_1284_termination(pp);
5027 
5028                 /* put superio into TFIFO mode */
5029                 if (ecr_write(pp, ECR_mode_001 |
5030                     ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5031                         ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n");
5032                         return (FAILURE);
5033                 }
5034 
5035                 pp->current_mode = ECPP_DIAG_MODE;
5036                 pp->backchannel = ECPP_DIAG_MODE;
5037                 ECPP_CONFIG_MODE(pp);
5038                 pp->to_mode[pp->current_mode]++;
5039 
5040                 return (SUCCESS);
5041 
5042         default:
5043                 ecpp_error(pp->dip,
5044                     "ecpp_mode_negotiation: mode %d not supported\n", newmode);
5045                 return (FAILURE);
5046         }
5047 }
5048 
5049 /*
5050  * Standard (9.1): Peripheral data is available only when the host places
5051  * the interface in a mode capable of peripheral-to-host data transfer.
5052  * This requires the host periodically to place the interface in such a mode.
5053  * Polling can be eliminated by leaving the interface in a 1284 idle phase.
5054  */
5055 static uchar_t
5056 ecpp_idle_phase(struct ecppunit *pp)
5057 {
5058         uchar_t         rval = FAILURE;
5059 
5060         /*
5061          * If there is no space on the read queue, do not reverse channel
5062          */
5063         if (!canputnext(pp->readq)) {
5064                 ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n");
5065                 return (SUCCESS);
5066         }
5067 
5068         switch (pp->backchannel) {
5069         case ECPP_CENTRONICS:
5070         case ECPP_COMPAT_MODE:
5071         case ECPP_DIAG_MODE:
5072                 /* nothing */
5073                 ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n");
5074                 return (SUCCESS);
5075 
5076         case ECPP_NIBBLE_MODE:
5077                 /*
5078                  * read as much data as possible, ending up in either
5079                  * Reverse Idle or Host Busy Data Available phase
5080                  */
5081                 ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n");
5082                 if ((pp->current_mode != ECPP_NIBBLE_MODE) &&
5083                     (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) {
5084                         break;
5085                 }
5086 
5087                 rval = read_nibble_backchan(pp);
5088 
5089                 /* put interface into Reverse Idle phase */
5090                 if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL &&
5091                     canputnext(pp->readq)) {
5092                         ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n");
5093 
5094                         /*
5095                          * Event 7: host asserts nAutoFd
5096                          * enable nAck interrupt to get a backchannel request
5097                          */
5098                         DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN);
5099 
5100                         ECPP_UNMASK_INTR(pp);
5101                 }
5102 
5103                 break;
5104 
5105         case ECPP_ECP_MODE:
5106                 /*
5107                  * if data is already available, request the backchannel xfer
5108                  * otherwise stay in Forward Idle and enable nErr interrupts
5109                  */
5110                 ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n");
5111 
5112                 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE ||
5113                     pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);
5114 
5115                 /* put interface into Forward Idle phase */
5116                 if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) &&
5117                     (ecp_reverse2forward(pp) == FAILURE)) {
5118                         return (FAILURE);
5119                 }
5120 
5121                 /*
5122                  * if data already available, put backchannel request on the wq
5123                  * otherwise enable nErr interrupts
5124                  */
5125                 if ((DSR_READ(pp) & ECPP_nERR) == 0) {
5126                         (void) ecpp_backchan_req(pp);
5127                 } else {
5128                         ECR_WRITE(pp,
5129                             (ECR_READ(pp) & ~ECPP_INTR_MASK) | ECPP_INTR_SRV);
5130 
5131                         ECPP_UNMASK_INTR(pp);
5132                 }
5133 
5134                 return (SUCCESS);
5135 
5136         default:
5137                 ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel");
5138         }
5139 
5140         return (rval);
5141 }
5142 
5143 /*
5144  * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE
5145  * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL,
5146  * and continue later as the user consumes data from the read queue
5147  *
5148  * The current phase should be NIBT_AVAIL or NIBT_NAVAIL
5149  * If some events fail during the transfer, termination puts the link
5150  * into Compatibility mode and FAILURE is returned
5151  */
5152 static int
5153 read_nibble_backchan(struct ecppunit *pp)
5154 {
5155         mblk_t          *mp;
5156         int             i;
5157         int             rval = SUCCESS;
5158 
5159         ASSERT(pp->current_mode == ECPP_NIBBLE_MODE);
5160 
5161         pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5162             ? ECPP_PHASE_NIBT_NAVAIL : ECPP_PHASE_NIBT_AVAIL;
5163 
5164         ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp));
5165 
5166         /*
5167          * While data is available, read it in NIBBLE_REV_BLKSZ byte chunks
5168          * and send up the stream
5169          */
5170         while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) {
5171                 /* see if there's space on the queue */
5172                 if (!canputnext(pp->readq)) {
5173                         ecpp_error(pp->dip,
5174                             "read_nibble_backchan: canputnext failed\n");
5175                         return (SUCCESS);
5176                 }
5177 
5178                 if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) {
5179                         ecpp_error(pp->dip,
5180                             "read_nibble_backchan: allocb failed\n");
5181                         return (SUCCESS);
5182                 }
5183 
5184                 /* read a chunk of data from the peripheral byte by byte */
5185                 i = NIBBLE_REV_BLKSZ;
5186                 while (i-- && !(DSR_READ(pp) & ECPP_nERR)) {
5187                         if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) {
5188                                 rval = FAILURE;
5189                                 break;
5190                         }
5191                         mp->b_wptr++;
5192                 }
5193 
5194                 pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE))
5195                     ? ECPP_PHASE_NIBT_NAVAIL
5196                     : ECPP_PHASE_NIBT_AVAIL;
5197 
5198                 if (mp->b_wptr - mp->b_rptr > 0) {
5199                         ecpp_error(pp->dip,
5200                             "read_nibble_backchan: sending %d bytes\n",
5201                             mp->b_wptr - mp->b_rptr);
5202                         pp->nread = 0;
5203                         mutex_exit(&pp->umutex);
5204                         putnext(pp->readq, mp);
5205                         mutex_enter(&pp->umutex);
5206                 } else {
5207                         freemsg(mp);
5208                 }
5209         }
5210 
5211         return (rval);
5212 }
5213 
5214 /*
5215  * 'Request Device ID using nibble mode' negotiation
5216  */
5217 static int
5218 devidnib_negotiation(struct ecppunit *pp)
5219 {
5220         uint8_t dsr;
5221 
5222         if (ecpp_1284_negotiation(pp,
5223             ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) {
5224                 return (FAILURE);
5225         }
5226 
5227         /*
5228          * If peripheral has data available, PE and nErr will
5229          * be set low at Event 5 & 6.
5230          */
5231         if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) {
5232                 pp->current_phase = ECPP_PHASE_NIBT_AVAIL;
5233         } else {
5234                 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL;
5235         }
5236 
5237         ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n",
5238             pp->current_phase);
5239 
5240         /* successful negotiation into Nibble mode */
5241         pp->current_mode = ECPP_NIBBLE_MODE;
5242         pp->backchannel = ECPP_NIBBLE_MODE;
5243 
5244         ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n");
5245 
5246         return (SUCCESS);
5247 }
5248 
5249 /*
5250  * Read 1284 device ID sequence
5251  *
5252  * This function should be called two times:
5253  * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length;
5254  * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer
5255  *
5256  * After 2) port is in Compatible mode
5257  * If the caller fails to make second call, it must reset port to Centronics
5258  *
5259  */
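     /*
      * Illustrative call sequence (a sketch only, error handling omitted):
      *
      *     int len;
      *     uint8_t *buf;
      *
      *     if (ecpp_getdevid(pp, NULL, &len, ECPP_NIBBLE_MODE) == 0) {
      *             buf = kmem_zalloc(len, KM_SLEEP);
      *             if (ecpp_getdevid(pp, buf, &len, ECPP_NIBBLE_MODE) != 0)
      *                     (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS);
      *             kmem_free(buf, len);
      *     }
      */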
5260 static int
5261 ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode)
5262 {
5263         uint8_t lenhi, lenlo;
5264         uint8_t dsr;
5265         int i;
5266 
5267         switch (mode) {
5268         case ECPP_NIBBLE_MODE:
5269                 /* negotiate only if necessary */
5270                 if ((pp->current_mode != mode) || (id == NULL)) {
5271                         if (devidnib_negotiation(pp) == FAILURE) {
5272                                 return (EIO);
5273                         }
5274                 }
5275 
5276                 if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) {
5277                         return (EIO);
5278                 }
5279 
5280                 /*
5281                  * Event 14: host tristates the data bus; the peripheral
5282                  * asserts nErr if data is available. Each data byte
5283                  * arrives on the status lines and requires two reads,
5284                  * since only nibbles are transferred at a time.
5285                  */
5286                 dsr = DSR_READ(pp);
5287 
5288                 if (id == NULL) {
5289                         /*
5290                          * first two bytes are the length of the sequence
5291                          * (incl. these bytes)
5292                          * first byte is MSB
5293                          */
5294                         if ((dsr & ECPP_nERR) ||
5295                             (nibble_peripheral2host(pp, &lenhi) == FAILURE) ||
5296                             (dsr & ECPP_nERR) ||
5297                             (nibble_peripheral2host(pp, &lenlo) == FAILURE)) {
5298                                 ecpp_error(pp->dip,
5299                                     "ecpp_getdevid: id length read error\n");
5300                                 return (EIO);
5301                         }
5302 
5303                         *lenp = (lenhi << 8) | (lenlo);
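                             /* e.g. lenhi 0x00, lenlo 0x2A => 42-byte sequence */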
5304 
5305                         ecpp_error(pp->dip,
5306                             "ecpp_getdevid: id length = %d\n", *lenp);
5307 
5308                         if (*lenp < 2) {
5309                                 return (EIO);
5310                         }
5311                 } else {
5312                         /*
5313                          * read the rest of the data
5314                          */
5315                         i = *lenp;
5316                         while (i && ((dsr & ECPP_nERR) == 0)) {
5317                                 if (nibble_peripheral2host(pp, id++) == FAILURE)
5318                                         break;
5319 
5320                                 i--;
5321                                 dsr = DSR_READ(pp);
5322                         }
5323                         ecpp_error(pp->dip,
5324                             "ecpp_getdevid: read %d bytes\n", *lenp - i);
5325 
5326                         /*
5327                          * 1284: After receiving the sequence, the host is
5328                          * required to return the link to the Compatibility mode
5329                          */
5330                         (void) ecpp_1284_termination(pp);
5331                 }
5332 
5333                 break;
5334 
5335         /* Other modes are not yet supported */
5336         default:
5337                 return (EINVAL);
5338         }
5339 
5340         return (0);
5341 }
5342 
5343 /*
5344  * Various hardware support
5345  *
5346  * First define some stubs for functions that do nothing
5347  */
5348 
5349 /*ARGSUSED*/
5350 static void
5351 empty_config_mode(struct ecppunit *pp)
5352 {
5353 }
5354 
5355 /*ARGSUSED*/
5356 static void
5357 empty_mask_intr(struct ecppunit *pp)
5358 {
5359 }
5360 
5361 #if defined(__x86)
5362 static size_t
5363 x86_getcnt(struct ecppunit *pp)
5364 {
5365         int count;
5366 
5367         (void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count);
5368         return (count);
5369 }
5370 #endif
5371 
5372 /*
5373  *
5374  * National PC87332 and PC97317 SuperIOs support routines
5375  * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur
5376  * and use EBus DMA facilities (Cheerio or RIO)
5377  *
5378  */
5379 
5380 static int
5381 pc87332_map_regs(struct ecppunit *pp)
5382 {
5383         if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0,
5384             sizeof (struct config_reg), &acc_attr,
5385             &pp->uh.ebus.c_handle) != DDI_SUCCESS) {
5386                 ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n");
5387                 goto fail;
5388         }
5389 
5390         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5391             sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5392             != DDI_SUCCESS) {
5393                 ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n");
5394                 goto fail;
5395         }
5396 
5397         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5398             sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5399             != DDI_SUCCESS) {
5400                 ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n");
5401                 goto fail;
5402         }
5403 
5404         if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0,
5405             sizeof (struct cheerio_dma_reg), &acc_attr,
5406             &pp->uh.ebus.d_handle) != DDI_SUCCESS) {
5407                 ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n");
5408                 goto fail;
5409         }
5410 
5411         return (SUCCESS);
5412 
5413 fail:
5414         pc87332_unmap_regs(pp);
5415         return (FAILURE);
5416 }
5417 
5418 static void
5419 pc87332_unmap_regs(struct ecppunit *pp)
5420 {
5421         if (pp->uh.ebus.c_handle) {
5422                 ddi_regs_map_free(&pp->uh.ebus.c_handle);
5423         }
5424         if (pp->uh.ebus.d_handle) {
5425                 ddi_regs_map_free(&pp->uh.ebus.d_handle);
5426         }
5427         if (pp->i_handle) {
5428                 ddi_regs_map_free(&pp->i_handle);
5429         }
5430         if (pp->f_handle) {
5431                 ddi_regs_map_free(&pp->f_handle);
5432         }
5433 }
5434 
5435 static uint8_t
5436 pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5437 {
5438         uint8_t retval;
5439 
5440         PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5441         retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data);
5442 
5443         return (retval);
5444 }
5445 
5446 static void
5447 pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5448 {
5449         PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num);
5450         PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5451 
5452         /*
5453          * A second write to this register is needed: the register behaves
5454          * as a FIFO.  The first value written goes to the data register;
5455          * the second write pushes the value through to the indexed register.
5456          */
5457 
5458         PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val);
5459 }
5460 
5461 static int
5462 pc87332_config_chip(struct ecppunit *pp)
5463 {
5464         uint8_t pmc, fcr;
5465 
5466         pp->current_phase = ECPP_PHASE_INIT;
5467 
5468         /* ECP DMA configuration bit (PMC4) must be set */
5469         pmc = pc87332_read_config_reg(pp, PMC);
5470         if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) {
5471                 pc87332_write_config_reg(pp, PMC,
5472                     pmc | PC87332_PMC_ECP_DMA_CONFIG);
5473         }
5474 
5475         /*
5476          * The Parallel Port Multiplexor pins must be driven.
5477          * Check to see if FCR3 is zero, if not clear FCR3.
5478          */
5479         fcr = pc87332_read_config_reg(pp, FCR);
5480         if (fcr & PC87332_FCR_PPM_FLOAT_CTL) {
5481                 pc87332_write_config_reg(pp, FCR,
5482                     fcr & ~PC87332_FCR_PPM_FLOAT_CTL);
5483         }
5484 
5485         /*
5486          * clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode
5487          * CTR5 cannot be cleared in SPP mode; CTR5 will read back as 1,
5488          * so "FAILURE" in this case is ok.  Better to use dcr_write()
5489          * to ensure reliable writing to DCR.
5490          */
5491         if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) {
5492                 ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n");
5493         }
5494 
5495         /* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */
5496         pc87332_write_config_reg(pp, PCR,
5497             PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN);
5498 
5499         /* put SuperIO in initial state */
5500         if (ecr_write(pp, ECR_mode_001 |
5501             ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5502                 ecpp_error(pp->dip, "ecpp_config_87332: ECR\n");
5503         }
5504 
5505         if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5506                 ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n");
5507                 return (FAILURE);
5508 
5509         }
5510         /* we are in Centronics mode */
5511         pp->current_mode = ECPP_CENTRONICS;
5512 
5513         /* in compatible mode with no data transfer in progress */
5514         pp->current_phase = ECPP_PHASE_C_IDLE;
5515 
5516         return (SUCCESS);
5517 }
5518 
5519 /*
5520  * A new mode was set, do some mode specific reconfiguration
5521  * in this case - set interrupt characteristic
5522  */
5523 static void
5524 pc87332_config_mode(struct ecppunit *pp)
5525 {
5526         if (COMPAT_PIO(pp)) {
5527                 pc87332_write_config_reg(pp, PCR, 0x04); /* level intr only */
5528         } else {
5529                 pc87332_write_config_reg(pp, PCR, 0x14); /* level intr + ECP */
5530         }
5531 }
5532 
5533 static int
5534 pc97317_map_regs(struct ecppunit *pp)
5535 {
5536         if (pc87332_map_regs(pp) != SUCCESS) {
5537                 return (FAILURE);
5538         }
5539 
5540         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg,
5541             0x403, sizeof (struct config2_reg), &acc_attr,
5542             &pp->uh.ebus.c2_handle) != DDI_SUCCESS) {
5543                 ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n");
5544                 pc87332_unmap_regs(pp);
5545                 return (FAILURE);
5546         } else {
5547                 return (SUCCESS);
5548         }
5549 }
5550 
5551 static void
5552 pc97317_unmap_regs(struct ecppunit *pp)
5553 {
5554         if (pp->uh.ebus.c2_handle) {
5555                 ddi_regs_map_free(&pp->uh.ebus.c2_handle);
5556         }
5557 
5558         pc87332_unmap_regs(pp);
5559 }
5560 
5561 /*
5562  * OBP should configure the PC97317 such that it does not need further
5563  * configuration.  During sustaining work, it may be necessary to examine
5564  * or change the configuration registers.  This routine is left in
5565  * the file for that purpose.
5566  */
5567 static int
5568 pc97317_config_chip(struct ecppunit *pp)
5569 {
5570         uint8_t conreg;
5571 
5572         /* set the logical device name */
5573         pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5574 
5575         /* SPP Compatibility */
5576         PP_PUTB(pp->uh.ebus.c2_handle,
5577             &pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2);
5578         PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80);
5579 
5580         /* low interrupt polarity */
5581         pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5582 
5583         /* ECP mode */
5584         pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2);
5585 
5586         if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) {
5587                 ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n");
5588         }
5589 
5590         if (ecr_write(pp, ECR_mode_001 |
5591             ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5592                 ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n");
5593         }
5594 
5595 #ifdef DEBUG
5596         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO);
5597         ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg);
5598 
5599         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB);
5600         ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg);
5601 
5602         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB);
5603         ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg);
5604 
5605         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL);
5606         ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg);
5607 
5608         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE);
5609         ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg);
5610 
5611         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE);
5612         ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg);
5613 
5614         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE);
5615         ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg);
5616 
5617         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN);
5618         ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg);
5619         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN);
5620         ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg);
5621 
5622         conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG);
5623         ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg);
5627 #endif /* DEBUG */
5628 
5629         return (SUCCESS);
5630 }
5631 
5632 /*
5633  * A new mode was set, do some mode specific reconfiguration
5634  * in this case - set interrupt polarity
5635  */
5636 static void
5637 pc97317_config_mode(struct ecppunit *pp)
5638 {
5639         /* set the logical device name */
5640         pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4);
5641 
5642         if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) {
5643                 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02);
5644         } else {
5645                 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00);
5646         }
5647 }
5648 
5649 static void
5650 cheerio_mask_intr(struct ecppunit *pp)
5651 {
5652         /* mask Cheerio interrupts */
5653         AND_SET_LONG_R(pp->uh.ebus.d_handle,
5654             &pp->uh.ebus.dmac->csr, ~DCSR_INT_EN);
5655 }
5656 
5657 static void
5658 cheerio_unmask_intr(struct ecppunit *pp)
5659 {
5660         /* unmask Cheerio interrupts */
5661         OR_SET_LONG_R(pp->uh.ebus.d_handle,
5662             &pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS);
5663 }
5664 
5665 static int
5666 cheerio_dma_start(struct ecppunit *pp)
5667 {
5668         cheerio_reset_dcsr(pp);
5669         SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size);
5670         SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address);
5671 
5672         if (pp->dma_dir == DDI_DMA_READ) {
5673                 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5674                     DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE);
5675         } else {
5676                 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA |
5677                     DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0);
5678         }
5679 
5680         return (SUCCESS);
5681 }
5682 
5683 /*
5684  * Note: BCR is reset to 0, so counter should always be read before dma_stop
5685  */
5686 static int
5687 cheerio_dma_stop(struct ecppunit *pp, size_t *countp)
5688 {
5689         uint8_t ecr;
5690 
5691         /* disable DMA and byte counter */
5692         AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5693             ~(DCSR_EN_DMA | DCSR_EN_CNT| DCSR_INT_EN));
5694 
5695         /* ACK and disable the TC interrupt */
5696         OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr,
5697             DCSR_TC | DCSR_TCI_DIS);
5698 
5699         /* read DMA count if requested */
5700         if (countp) {
5701                 *countp = cheerio_getcnt(pp);
5702         }
5703 
5704         cheerio_reset_dcsr(pp);
5705         SET_DMAC_BCR(pp, 0);
5706 
5707         /* turn off SuperIO's DMA */
5708         ecr = ECR_READ(pp);
5709         if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) {
5710                 return (FAILURE);
5711         }
5712 
5713         /* Disable SuperIO interrupts and DMA */
5714         ecr = ECR_READ(pp);
5715 
5716         return (ecr_write(pp, ecr | ECPP_INTR_SRV));
5717 }
5718 
5719 static size_t
5720 cheerio_getcnt(struct ecppunit *pp)
5721 {
5722         return (GET_DMAC_BCR(pp));
5723 }
5724 
5725 /*
5726  * Reset the DCSR by first setting the RESET bit to 1.  Poll the
5727  * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles.
5728  * If there are no more pending cycles, clear the RESET bit.
5729  */
5730 static void
5731 cheerio_reset_dcsr(struct ecppunit *pp)
5732 {
5733         int     timeout = DMAC_RESET_TIMEOUT;
5734 
5735         SET_DMAC_CSR(pp, DCSR_RESET);
5736 
5737         while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) {
5738                 if (timeout == 0) {
5739                         ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n");
5740                         break;
5741                 } else {
5742                         drv_usecwait(1);
5743                         timeout--;
5744                 }
5745         }
5746 
5747         SET_DMAC_CSR(pp, 0);
5748 }
5749 
5750 /*
5751  *
5752  * Grover Southbridge (M1553) support routines
5753  * Southbridge contains an Intel 8237 DMAC onboard which is used
5754  * to transport data to/from PCI space to superio parallel port
5755  *
5756  */
5757 
5758 
5759 static int
5760 m1553_map_regs(struct ecppunit *pp)
5761 {
5762         if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space,
5763             0, sizeof (struct isaspace), &acc_attr,
5764             &pp->uh.m1553.d_handle) != DDI_SUCCESS) {
5765                 ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n");
5766                 goto fail;
5767         }
5768 
5769         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5770             sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5771             != DDI_SUCCESS) {
5772                 ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n");
5773                 goto fail;
5774         }
5775 
5776         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400,
5777             sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5778             != DDI_SUCCESS) {
5779                 ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n");
5780                 goto fail;
5781         }
5782 
5783         return (SUCCESS);
5784 
5785 fail:
5786         m1553_unmap_regs(pp);
5787         return (FAILURE);
5788 }
5789 
5790 static void
5791 m1553_unmap_regs(struct ecppunit *pp)
5792 {
5793         if (pp->uh.m1553.d_handle) {
5794                 ddi_regs_map_free(&pp->uh.m1553.d_handle);
5795         }
5796         if (pp->i_handle) {
5797                 ddi_regs_map_free(&pp->i_handle);
5798         }
5799         if (pp->f_handle) {
5800                 ddi_regs_map_free(&pp->f_handle);
5801         }
5802 }
5803 
5804 #if defined(__x86)
5805 static int
5806 x86_map_regs(struct ecppunit *pp)
5807 {
5808         int nregs = 0;
5809 
5810         if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0,
5811             sizeof (struct info_reg), &acc_attr, &pp->i_handle)
5812             != DDI_SUCCESS) {
5813                 ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n");
5814                 goto fail;
5815         }
5816         if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) {
5817                 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0,
5818                     sizeof (struct fifo_reg), &acc_attr, &pp->f_handle)
5819                     != DDI_SUCCESS) {
5820                         ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n");
5821                         goto fail;
5822                 } else
5823                         pp->noecpregs = FALSE;
5824         } else {
5825                 pp->noecpregs = TRUE;
5826         }
5827         return (SUCCESS);
5828 fail:
5829         x86_unmap_regs(pp);
5830         return (FAILURE);
5831 }
5832 
5833 static void
5834 x86_unmap_regs(struct ecppunit *pp)
5835 {
5836         if (pp->i_handle) {
5837                 ddi_regs_map_free(&pp->i_handle);
5838         }
5839         if (pp->f_handle) {
5840                 ddi_regs_map_free(&pp->f_handle);
5841         }
5842 }
5843 #endif
5844 
5845 static uint8_t
5846 m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num)
5847 {
5848         uint8_t retval;
5849 
5850         dma8237_write(pp, 0x3F0, reg_num);
5851         retval = dma8237_read(pp, 0x3F1);
5852 
5853         return (retval);
5854 }
5855 
5856 static void
5857 m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val)
5858 {
5859         dma8237_write(pp, 0x3F0, reg_num);
5860         dma8237_write(pp, 0x3F1, val);
5861 }
5862 
5863 static int
5864 m1553_config_chip(struct ecppunit *pp)
5865 {
5866         uint8_t conreg;
5867 
5868         /* Unlock configuration regs with "key sequence" */
5869         dma8237_write(pp, 0x3F0, 0x51);
5870         dma8237_write(pp, 0x3F0, 0x23);
5871 
5872         m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3);
5873         conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO);
5874         ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg);
5875 
5876         conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE);
5877         ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg);
5878 
5879         conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB);
5880         ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg);
5881         conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB);
5882         ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg);
5883 
5884         conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL);
5885         ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg);
5886 
5887         conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN);
5888         ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg);
5889 
5890         /* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */
5891         conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5892         conreg = (conreg & ~0x7F) | 0x0A;
5893         m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg);
5894         conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0);
5895         ecpp_error(pp->dip, "M1553:conregFO(pport conf)=%x\n", conreg);
5896 
5897         m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04);
5898         conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1);
5899         ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg);
5900 
5901         /* lock configuration regs with key */
5902         dma8237_write(pp, 0x3F0, 0xBB);
5903 
5904         /* Set ECR, DCR in known state */
5905         ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV);
5906         DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5907 
5908         ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5909             ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5910 
5911         return (SUCCESS);
5912 }
5913 
5914 #if defined(__x86)
5915 static int
5916 x86_config_chip(struct ecppunit *pp)
5917 {
5918         if (ecr_write(pp, ECR_mode_001 |
5919             ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) {
5920                 ecpp_error(pp->dip, "config chip: failed w/ecr\n");
5921                 pp->noecpregs = TRUE;
5922         }
5923         if (pp->noecpregs)
5924                 pp->fast_compat = FALSE;
5925         DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT);
5926         ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n",
5927             ECR_READ(pp), DSR_READ(pp), DCR_READ(pp));
5928         return (SUCCESS);
5929 }
5930 #endif
5931 
5932 /*
5933  * dma8237_dma_start() programs the selected 8 bit channel
5934  * of DMAC1 with the dma cookie.  pp->dma_cookie must
5935  * be set before this routine is called.
5936  */
5937 static int
5938 dma8237_dma_start(struct ecppunit *pp)
5939 {
5940         uint8_t chn;
5941 
5942         chn = pp->uh.m1553.chn;
5943 
5944         ASSERT(chn <= DMAE_CH3 &&
5945             pp->dma_cookie.dmac_size != 0 &&
5946             pp->dma_cookie.dmac_address != NULL);
5947 
5948         /* At this point Southbridge has not yet asserted DREQ */
5949 
5950         /* DMAC2 cascades; set DMAC1 channel mode per transfer direction */
5951         dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC);
5952         if (pp->dma_dir == DDI_DMA_READ) {
5953                 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5954                     DMAMODE_READ | chn);
5955         } else {
5956                 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE |
5957                     DMAMODE_WRITE | chn);
5958         }
5959 
5960         dma8237_write_addr(pp, pp->dma_cookie.dmac_address);
5961         dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1);
5962 
5963         /*
5964          * The M1553 chip does not permit access to DMA register banks
5965          * while DMA is in flight. As a result, ecpp and floppy drivers
5966          * can potentially corrupt each other's DMA. The interlocking mechanism
5967          * is provided by a parent nexus driver (isadma), which is enabled
5968          * indirectly through a DMAC1_ALLMASK register access:
5969          *
5970          * writing a non-zero value to this register enters a lock,
5971          * writing zero releases the lock.
5972          *
5973          * DMA transfer must only occur after entering a lock.
5974          * If the lock is already owned by other driver, we will block.
5975          *
5976          * The following operation unmasks our channel and masks all others
5977          */
5978         dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn));
5979         pp->uh.m1553.isadma_entered = 1;
5980 
5981         return (SUCCESS);
5982 }
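
/*
 * A minimal sketch of the expected calling sequence (illustrative only;
 * real callers obtain pp->dma_cookie from the DDI DMA framework, e.g.
 * ddi_dma_addr_bind_handle(9F)):
 *
 *	pp->dma_dir = DDI_DMA_WRITE;		forward transfer
 *	pp->dma_cookie = cookie;		must be set up first
 *	(void) dma8237_dma_start(pp);		enters the isadma lock
 *	... wait for the transfer to complete ...
 *	(void) dma8237_dma_stop(pp, &resid);	releases the lock
 *
 * Every dma8237_dma_start() must be paired with a dma8237_dma_stop(),
 * otherwise the isadma lock would starve other DMA users such as the
 * floppy driver.
 */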
5983 
5984 static int
5985 dma8237_dma_stop(struct ecppunit *pp, size_t *countp)
5986 {
5987         uint8_t ecr;
5988 
5989         /* stop DMA */
5990         ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
5991         (void) ecr_write(pp, ecr);
5992 
5993         if (pp->uh.m1553.isadma_entered) {
5994                 /* reset the channel mask so we can issue PIO's to our device */
5995                 dma8237_write(pp, DMAC1_ALLMASK, 0);
5996                 pp->uh.m1553.isadma_entered = 0;
5997 
5998         }
5999 
6000         /* read DMA count if requested */
6001         if (countp) {
6002                 *countp = dma8237_getcnt(pp);
6003                 if (pp->dma_dir == DDI_DMA_READ && *countp > 0) {
6004                         (*countp)++;    /* need correction for reverse xfers */
6005                 }
6006         }
6007         return (SUCCESS);
6008 }
6009 #if defined(__x86)
6010 static int
6011 x86_dma_start(struct ecppunit *pp)
6012 {
6013         uint8_t chn;
6014         struct ddi_dmae_req dmaereq;
6015 
6016         chn = pp->uh.x86.chn;
6017         ASSERT(chn <= DMAE_CH3 &&
6018             pp->dma_cookie.dmac_size != 0 &&
6019             pp->dma_cookie.dmac_address != 0);
6020         bzero(&dmaereq, sizeof (struct ddi_dmae_req));
6021         dmaereq.der_command =
6022             (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE;
6023         if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn)
6024             != DDI_SUCCESS)
6025                 ecpp_error(pp->dip, "prog failed !!!\n");
6026         ecpp_error(pp->dip, "dma_started..\n");
6027         return (SUCCESS);
6028 }
6029 
6030 static int
6031 x86_dma_stop(struct ecppunit *pp, size_t *countp)
6032 {
6033         uint8_t ecr;
6034 
6035         /* stop DMA */
6036         if (pp->uh.x86.chn == 0xff)
6037                 return (FAILURE);
6038         ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV;
6039         (void) ecr_write(pp, ecr);
6040         ecpp_error(pp->dip, "dma_stop\n");
6041 
6042         /* read DMA count if requested */
6043         if (countp) {
6044                 *countp = x86_getcnt(pp);
6045         }
6046         ecpp_error(pp->dip, "dma_stoped..\n");
6047         return (SUCCESS);
6048 }
6049 #endif
6050 
6051 /* channel must be masked */
6052 static void
6053 dma8237_write_addr(struct ecppunit *pp, uint32_t addr)
6054 {
6055         uint8_t c_addr, c_lpage;
6056         uint16_t c_hpage, *p;
6057 
6058         switch (pp->uh.m1553.chn) {
6059         case DMAE_CH0:
6060                 c_addr = DMA_0ADR;
6061                 c_lpage = DMA_0PAGE;
6062                 c_hpage = DMA_0HPG;
6063                 break;
6064 
6065         case DMAE_CH1:
6066                 c_addr = DMA_1ADR;
6067                 c_lpage = DMA_1PAGE;
6068                 c_hpage = DMA_1HPG;
6069                 break;
6070 
6071         case DMAE_CH2:
6072                 c_addr = DMA_2ADR;
6073                 c_lpage = DMA_2PAGE;
6074                 c_hpage = DMA_2HPG;
6075                 break;
6076 
6077         case DMAE_CH3:
6078                 c_addr = DMA_3ADR;
6079                 c_lpage = DMA_3PAGE;
6080                 c_hpage = DMA_3HPG;
6081                 break;
6082 
6083         default:
6084                 return;
6085         }
6086 
6087         p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6088         ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF);
6089 
6090         dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16);
6091         dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24);
6092 
6093 }
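
/*
 * Worked example: for addr == 0x12345678 on channel DMAE_CH1 the split is
 *
 *	isa_reg[DMA_1ADR]  <- 0x5678	(16-bit base address register)
 *	isa_reg[DMA_1PAGE] <- 0x34	(low page: address bits 16-23)
 *	isa_reg[DMA_1HPG]  <- 0x12	(high page: address bits 24-31)
 *
 * The classic 8237 base address register is written as two 8-bit halves
 * through a byte-pointer flip-flop; the single ddi_put16(9F) above assumes
 * the M1553 accepts a 16-bit access to that register.
 */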
6094 
6095 /*
6096  * This function may be useful during debugging,
6097  * so we leave it in but do not include it in the binary.
6098  */
6099 #ifdef INCLUDE_DMA8237_READ_ADDR
6100 static uint32_t
6101 dma8237_read_addr(struct ecppunit *pp)
6102 {
6103         uint8_t rval3, rval4;
6104         uint16_t rval16;
6105         uint32_t rval;
6106         uint8_t c_addr, c_lpage;
6107         uint16_t c_hpage, *p;
6108 
6109         switch (pp->uh.m1553.chn) {
6110         case DMAE_CH0:
6111                 c_addr = DMA_0ADR;
6112                 c_lpage = DMA_0PAGE;
6113                 c_hpage = DMA_0HPG;
6114                 break;
6115 
6116         case DMAE_CH1:
6117                 c_addr = DMA_1ADR;
6118                 c_lpage = DMA_1PAGE;
6119                 c_hpage = DMA_1HPG;
6120                 break;
6121 
6122         case DMAE_CH2:
6123                 c_addr = DMA_2ADR;
6124                 c_lpage = DMA_2PAGE;
6125                 c_hpage = DMA_2HPG;
6126                 break;
6127 
6128         case DMAE_CH3:
6129                 c_addr = DMA_3ADR;
6130                 c_lpage = DMA_3PAGE;
6131                 c_hpage = DMA_3HPG;
6132                 break;
6133 
6134         default:
6135                 return (0);
6136         }
6137 
6138         p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr];
6139         rval16 = ddi_get16(pp->uh.m1553.d_handle, p);
6140 
6141         rval3 = dma8237_read(pp, c_lpage);
6142         rval4 = dma8237_read(pp, c_hpage);
6143 
6144         rval = rval16 | (rval3 << 16) | (rval4 << 24);
6145 
6146         return (rval);
6147 }
6148 #endif
6149 
6150 static void
6151 dma8237_write_count(struct ecppunit *pp, uint32_t count)
6152 {
6153         uint8_t c_wcnt;
6154         uint16_t *p;
6155 
6156         switch (pp->uh.m1553.chn) {
6157         case DMAE_CH0:
6158                 c_wcnt = DMA_0WCNT;
6159                 break;
6160 
6161         case DMAE_CH1:
6162                 c_wcnt = DMA_1WCNT;
6163                 break;
6164 
6165         case DMAE_CH2:
6166                 c_wcnt = DMA_2WCNT;
6167                 break;
6168 
6169         case DMAE_CH3:
6170                 c_wcnt = DMA_3WCNT;
6171                 break;
6172 
6173         default:
6174                 return;
6175         }
6176 
6177         p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6178         ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF);
6179 
6180 }
6181 
6182 static uint32_t
6183 dma8237_read_count(struct ecppunit *pp)
6184 {
6185         uint8_t c_wcnt;
6186         uint16_t *p;
6187 
6188         switch (pp->uh.m1553.chn) {
6189         case DMAE_CH0:
6190                 c_wcnt = DMA_0WCNT;
6191                 break;
6192 
6193         case DMAE_CH1:
6194                 c_wcnt = DMA_1WCNT;
6195                 break;
6196 
6197         case DMAE_CH2:
6198                 c_wcnt = DMA_2WCNT;
6199                 break;
6200 
6201         case DMAE_CH3:
6202                 c_wcnt = DMA_3WCNT;
6203                 break;
6204 
6205         default:
6206                 return (0);
6207         }
6208 
6209         p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt];
6210         return (ddi_get16(pp->uh.m1553.d_handle, p));
6211 
6212 }
6213 
6214 static void
6215 dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val)
6216 {
6217         ddi_put8(pp->uh.m1553.d_handle,
6218             &pp->uh.m1553.isa_space->isa_reg[reg_num], val);
6219 }
6220 
6221 static uint8_t
6222 dma8237_read(struct ecppunit *pp, int reg_num)
6223 {
6224         return (ddi_get8(pp->uh.m1553.d_handle,
6225             &pp->uh.m1553.isa_space->isa_reg[reg_num]));
6226 }
6227 
6228 static size_t
6229 dma8237_getcnt(struct ecppunit *pp)
6230 {
6231         uint32_t cnt;
6232 
6233         if ((cnt = dma8237_read_count(pp)) == 0xffff)
6234                 cnt = 0;
6235         else
6236                 cnt++;
6237         return (cnt);
6238 }
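
/*
 * Count register semantics, worked through: the channel is programmed
 * with (size - 1) and the register decrements past zero, so it reads
 * 0xffff once the transfer is complete.  For example:
 *
 *	register 0xffff -> 0 bytes left	(terminal count reached)
 *	register 0x0000 -> 1 byte left
 *	register 0x0009 -> 10 bytes left
 *
 * dma8237_dma_stop() applies one further increment to this result for
 * reverse transfers.
 */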
6239 
6240 
6241 /*
6242  *
6243  * Kstat support routines
6244  *
6245  */
6246 static void
6247 ecpp_kstat_init(struct ecppunit *pp)
6248 {
6249         struct ecppkstat *ekp;
6250         char buf[16];
6251 
6252         /*
6253          * Allocate, initialize and install interrupt counter kstat
6254          */
6255         (void) snprintf(buf, sizeof (buf), "ecppc%d", pp->instance);
6256         pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
6257             KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
6258         if (pp->intrstats == NULL) {
6259                 ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
6260         } else {
6261                 pp->intrstats->ks_update = ecpp_kstatintr_update;
6262                 pp->intrstats->ks_private = (void *) pp;
6263                 kstat_install(pp->intrstats);
6264         }
6265 
6266         /*
6267          * Allocate, initialize and install misc stats kstat
6268          */
6269         pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
6270             KSTAT_TYPE_NAMED,
6271             sizeof (struct ecppkstat) / sizeof (kstat_named_t),
6272             KSTAT_FLAG_PERSISTENT);
6273         if (pp->ksp == NULL) {
6274                 ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
6275                 return;
6276         }
6277 
6278         ekp = (struct ecppkstat *)pp->ksp->ks_data;
6279 
6280 #define EK_NAMED_INIT(name) \
6281         kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)
6282 
6283         EK_NAMED_INIT(ctx_obytes);
6284         EK_NAMED_INIT(ctxpio_obytes);
6285         EK_NAMED_INIT(nib_ibytes);
6286         EK_NAMED_INIT(ecp_obytes);
6287         EK_NAMED_INIT(ecp_ibytes);
6288         EK_NAMED_INIT(epp_obytes);
6289         EK_NAMED_INIT(epp_ibytes);
6290         EK_NAMED_INIT(diag_obytes);
6291         EK_NAMED_INIT(to_ctx);
6292         EK_NAMED_INIT(to_nib);
6293         EK_NAMED_INIT(to_ecp);
6294         EK_NAMED_INIT(to_epp);
6295         EK_NAMED_INIT(to_diag);
6296         EK_NAMED_INIT(xfer_tout);
6297         EK_NAMED_INIT(ctx_cf);
6298         EK_NAMED_INIT(joblen);
6299         EK_NAMED_INIT(isr_reattempt_high);
6300         EK_NAMED_INIT(mode);
6301         EK_NAMED_INIT(phase);
6302         EK_NAMED_INIT(backchan);
6303         EK_NAMED_INIT(iomode);
6304         EK_NAMED_INIT(state);
6305 
6306         pp->ksp->ks_update = ecpp_kstat_update;
6307         pp->ksp->ks_private = (void *) pp;
6308         kstat_install(pp->ksp);
6309 }
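
/*
 * The statistics installed above can be examined from userland with
 * kstat(1M); an illustrative session for instance 0 might look like
 *
 *	$ kstat -m ecpp -i 0		(all ecpp instance 0 kstats)
 *	$ kstat -m ecpp -c controller	(just the interrupt counters)
 */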
6310 
6311 static int
6312 ecpp_kstat_update(kstat_t *ksp, int rw)
6313 {
6314         struct ecppunit *pp;
6315         struct ecppkstat *ekp;
6316 
6317         /*
6318          * For the time being there is no point
6319          * in supporting writable kstats
6320          */
6321         if (rw == KSTAT_WRITE) {
6322                 return (EACCES);
6323         }
6324 
6325         pp = (struct ecppunit *)ksp->ks_private;
6326         ekp = (struct ecppkstat *)ksp->ks_data;
6327 
6328         mutex_enter(&pp->umutex);
6329 
6330         ekp->ek_ctx_obytes.value.ui32 = pp->obytes[ECPP_CENTRONICS] +
6331             pp->obytes[ECPP_COMPAT_MODE];
6332         ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
6333         ekp->ek_nib_ibytes.value.ui32 = pp->ibytes[ECPP_NIBBLE_MODE];
6334         ekp->ek_ecp_obytes.value.ui32 = pp->obytes[ECPP_ECP_MODE];
6335         ekp->ek_ecp_ibytes.value.ui32 = pp->ibytes[ECPP_ECP_MODE];
6336         ekp->ek_epp_obytes.value.ui32 = pp->obytes[ECPP_EPP_MODE];
6337         ekp->ek_epp_ibytes.value.ui32 = pp->ibytes[ECPP_EPP_MODE];
6338         ekp->ek_diag_obytes.value.ui32 = pp->obytes[ECPP_DIAG_MODE];
6339         ekp->ek_to_ctx.value.ui32 = pp->to_mode[ECPP_CENTRONICS] +
6340             pp->to_mode[ECPP_COMPAT_MODE];
6341         ekp->ek_to_nib.value.ui32 = pp->to_mode[ECPP_NIBBLE_MODE];
6342         ekp->ek_to_ecp.value.ui32 = pp->to_mode[ECPP_ECP_MODE];
6343         ekp->ek_to_epp.value.ui32 = pp->to_mode[ECPP_EPP_MODE];
6344         ekp->ek_to_diag.value.ui32 = pp->to_mode[ECPP_DIAG_MODE];
6345         ekp->ek_xfer_tout.value.ui32 = pp->xfer_tout;
6346         ekp->ek_ctx_cf.value.ui32 = pp->ctx_cf;
6347         ekp->ek_joblen.value.ui32 = pp->joblen;
6348         ekp->ek_isr_reattempt_high.value.ui32 = pp->isr_reattempt_high;
6349         ekp->ek_mode.value.ui32 = pp->current_mode;
6350         ekp->ek_phase.value.ui32 = pp->current_phase;
6351         ekp->ek_backchan.value.ui32 = pp->backchannel;
6352         ekp->ek_iomode.value.ui32 = pp->io_mode;
6353         ekp->ek_state.value.ui32 = pp->e_busy;
6354 
6355         mutex_exit(&pp->umutex);
6356 
6357         return (0);
6358 }
6359 
6360 static int
6361 ecpp_kstatintr_update(kstat_t *ksp, int rw)
6362 {
6363         struct ecppunit *pp;
6364 
6365         /*
6366          * For the time being there is no point
6367          * in supporting writable kstats
6368          */
6369         if (rw == KSTAT_WRITE) {
6370                 return (EACCES);
6371         }
6372 
6373         pp = (struct ecppunit *)ksp->ks_private;
6374 
6375         mutex_enter(&pp->umutex);
6376 
6377         KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
6378         KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
6379         KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;
6380 
6381         mutex_exit(&pp->umutex);
6382 
6383         return (0);
6384 }