/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * IEEE 1284 Parallel Port Device Driver
 *
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>		/* for modldrv */
#include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
#include <sys/open.h>
#include <sys/ddi_impldefs.h>
#include <sys/kstat.h>

#include <sys/prnio.h>
#include <sys/ecppreg.h>	/* hw description */
#include <sys/ecppio.h>		/* ioctl description */
#include <sys/ecppvar.h>	/* driver description */
#include <sys/dma_engine.h>
#include <sys/dma_i8237A.h>

/*
 * Background
 * ==========
 * The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
 * fully interlocked, bidirectional parallel communications between hosts
 * and printers or other peripherals" (1.1).  The standard defines 5 modes
 * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
 * in direction, bandwidth, pin assignment, DMA capability, etc.
 *
 * Negotiation is the mechanism for moving between modes.  Compatibility mode
 * is the default mode, from which negotiations to other modes occur and
 * to which both host and peripheral break in case of interface errors.
 * Compatibility mode provides a unidirectional (forward) channel for
 * communicating with old pre-1284 peripherals.
 *
 * Each mode has a number of phases.  A [mode, phase] pair represents the
 * interface state.  The host initiates all transfers, though the peripheral
 * can request a backchannel transfer by asserting the nErr pin.
 *
 * The ecpp driver implements an IEEE 1284-compliant host using a combination
 * of hardware and software.  The hardware part is represented by a
 * controller, which is part of the SuperIO chip.  Ecpp supports the
 * following SuperIOs: PC82332/PC82336 (U5/U10/U60), PC97317 (U100),
 * M1553 (Grover).
 * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
 *
 * Negotiation is performed in software.  A transfer may be performed either
 * in software by driving output pins for each byte (PIO method), or with
 * hardware assistance - the SuperIO has a 16-byte FIFO, which is filled by
 * the driver (normally using DMA), while the chip performs the actual xfer.
 * PIO is used for Nibble and Compat, DMA is used for ECP and Compat modes.
 *
 * The driver currently supports the following modes:
 *
 * - Compatibility mode: byte-wide forward channel ~50KB/sec;
 *   pp->io_mode defines the PIO or DMA method of transfer;
 * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
 * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
 *
 * Theory of operation
 * ===================
 * The manner in which ecpp drives the 1284 interface is that of a state
 * machine.  State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase
 * {ECPP_PHASE_*} and transfer method {PIO, DMA}.  State is a function of
 * application actions {write(2), ioctl(2)} and peripheral reaction.
 *
 * The 1284 interface state is described by the following variables:
 *	pp->current_mode  -- 1284 mode used for forward transfers;
 *	pp->backchannel   -- 1284 mode used for backward transfers;
 *	pp->current_phase -- 1284 phase;
 *
 * Bidirectional operation in Compatibility mode is provided by a combination:
 * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
 * ECPP_CENTRONICS means no backchannel.
 *
 * The driver's internal state is defined by pp->e_busy as follows:
 *	ECPP_IDLE  -- idle, no active transfers;
 *	ECPP_BUSY  -- transfer is in progress;
 *	ECPP_ERR   -- have data to transfer, but peripheral can't receive data;
 *	ECPP_FLUSH -- flushing the queues;
 *
 * When opened, the driver is in the ECPP_IDLE state and the current mode is
 * ECPP_CENTRONICS.  Default negotiation tries to negotiate to the best mode
 * supported by the printer and sets pp->current_mode and pp->backchannel
 * accordingly.
 *
 * When output data arrives in M_DATA mblks, ecpp_wput() puts them on the
 * queue to let ecpp_wsrv() concatenate small blocks into one big transfer
 * by copying them into pp->ioblock.  If the first mblk's data is bigger than
 * pp->ioblock, then the mblk is used instead of the I/O block (and is
 * pointed to by pp->msg).
 *
 * Before starting the transfer the driver checks whether the peripheral is
 * ready by calling ecpp_check_status(); if it is not, the driver goes to the
 * ECPP_ERR state and schedules ecpp_wsrv_timer(), which qenable()s the wq,
 * effectively rechecking the peripheral readiness and restarting itself
 * until it is ready.  The transfer is then started by calling ecpp_start(),
 * and the driver goes to ECPP_BUSY.
 *
 * While a transfer is in progress all arriving messages will be queued up.
 * The transfer can end in either of two ways:
 * - an interrupt occurs, ecpp_isr() checks whether all the data was
 *   transferred; if so, clean up and go to ECPP_IDLE, otherwise put back
 *   the untransferred data and qenable();
 * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred
 *   data;
 *
 * The PIO transfer method is very CPU intensive: for each sent byte the
 * peripheral state is checked, then the byte is transferred and the driver
 * waits for an nAck interrupt; ecpp_isr() will then look if there is more
 * data and if so triggers the soft interrupt, which transfers the next byte.
 * The PIO method is needed only for legacy printers which are sensitive to
 * the strobe problem (Bugid 4192788).
 *
 * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
 * going idle (ecpp_idle_phase()).  Many routines qenable() the write queue,
 * meaning "check if there are pending requests, process them and go idle".
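 *
 * To illustrate the forward write path described above (a simplified
 * sketch of the flow only, not the actual routines; locking, error and
 * timeout handling omitted, "off" and "total_len" stand in for the real
 * bookkeeping):
 *
 *	ecpp_wput():	putq(q, mp);			M_DATA is only queued
 *	ecpp_wsrv():	while ((mp = getq(q)) != NULL)
 *				bcopy(mp->b_rptr, pp->ioblock + off, len);
 *			if (ecpp_check_status(pp) == FAILURE)
 *				pp->e_busy = ECPP_ERR;	retry via ecpp_wsrv_timer()
 *			else
 *				pp->e_busy = ECPP_BUSY;
 *				ecpp_start(pp, pp->ioblock, total_len);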
 *
 * In its idle state the driver will always try to listen to the backchannel
 * (as advised by 1284).
 *
 * The mechanism for handling backchannel requests is as follows:
 * - when the peripheral has data to send it asserts the nErr pin
 *   (and also nAck in Nibble Mode), which results in an interrupt on the
 *   host;
 * - the ISR creates an M_CTL message containing an ECPP_BACKCHANNEL byte and
 *   puts it back on the write queue;
 * - ecpp_wsrv() gets the M_CTL and calls ecpp_peripheral2host(), which kicks
 *   off the transfer;
 *
 * This is how the Nibble and ECP mode backchannels are implemented.
 * If the read queue gets full, the backchannel request is rejected.
 * As the application reads data and the queue size falls below the low
 * watermark, ecpp_rsrv() gets called and enables the backchannel again.
 *
 * Future enhancements
 * ===================
 *
 * Support new modes: Byte and EPP.
 */

#ifndef ECPP_DEBUG
#define	ECPP_DEBUG 0
#endif	/* ECPP_DEBUG */
int ecpp_debug = ECPP_DEBUG;

int noecp = 0;	/* flag not to use ECP mode */

/* driver entry point fn definitions */
static int	ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	ecpp_close(queue_t *, int, cred_t *);
static uint_t	ecpp_isr(caddr_t);
static uint_t	ecpp_softintr(caddr_t);

/* configuration entry point fn definitions */
static int	ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int	ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
static int	ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);

/* isr support routines */
static uint_t	ecpp_nErr_ihdlr(struct ecppunit *);
static uint_t	ecpp_pio_ihdlr(struct ecppunit *);
static uint_t	ecpp_dma_ihdlr(struct ecppunit *);
static uint_t	ecpp_M1553_intr(struct ecppunit *);

/* configuration support routines */
static void	ecpp_get_props(struct ecppunit *);

/* Streams Routines */
static int	ecpp_wput(queue_t *, mblk_t *);
static int	ecpp_wsrv(queue_t *);
static int	ecpp_rsrv(queue_t *);
static void	ecpp_flush(struct ecppunit *, int);
static void	ecpp_start(struct ecppunit *, caddr_t, size_t);

/* ioctl handling */
static void	ecpp_putioc(queue_t *, mblk_t *);
static void	ecpp_srvioc(queue_t *, mblk_t *);
static void	ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
static void	ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
static void	ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
static void	ecpp_srvioc_devid(queue_t *, mblk_t *,
				struct ecpp_device_id *, int *);
static void	ecpp_srvioc_prnif(queue_t *, mblk_t *);
static void	ecpp_ack_ioctl(queue_t *, mblk_t *);
static void	ecpp_nack_ioctl(queue_t *, mblk_t *, int);

/* kstat routines */
static void	ecpp_kstat_init(struct ecppunit *);
static int	ecpp_kstat_update(kstat_t *, int);
static int	ecpp_kstatintr_update(kstat_t *, int);

/* dma routines */
static void	ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
static uint8_t	ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
static uint8_t	ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);

/* pio routines */
static void	ecpp_pio_writeb(struct ecppunit *);
static void	ecpp_xfer_cleanup(struct ecppunit *);
static uint8_t	ecpp_prep_pio_xfer(struct ecppunit *, caddr_t,
size_t); 233 234 /* misc */ 235 static uchar_t ecpp_reset_port_regs(struct ecppunit *); 236 static void ecpp_xfer_timeout(void *); 237 static void ecpp_fifo_timer(void *); 238 static void ecpp_wsrv_timer(void *); 239 static uchar_t dcr_write(struct ecppunit *, uint8_t); 240 static uchar_t ecr_write(struct ecppunit *, uint8_t); 241 static uchar_t ecpp_check_status(struct ecppunit *); 242 static int ecpp_backchan_req(struct ecppunit *); 243 static void ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *); 244 static uint_t ecpp_get_prn_ifcap(struct ecppunit *); 245 246 /* stubs */ 247 static void empty_config_mode(struct ecppunit *); 248 static void empty_mask_intr(struct ecppunit *); 249 250 /* PC87332 support */ 251 static int pc87332_map_regs(struct ecppunit *); 252 static void pc87332_unmap_regs(struct ecppunit *); 253 static int pc87332_config_chip(struct ecppunit *); 254 static void pc87332_config_mode(struct ecppunit *); 255 static uint8_t pc87332_read_config_reg(struct ecppunit *, uint8_t); 256 static void pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t); 257 static void cheerio_mask_intr(struct ecppunit *); 258 static void cheerio_unmask_intr(struct ecppunit *); 259 static int cheerio_dma_start(struct ecppunit *); 260 static int cheerio_dma_stop(struct ecppunit *, size_t *); 261 static size_t cheerio_getcnt(struct ecppunit *); 262 static void cheerio_reset_dcsr(struct ecppunit *); 263 264 /* PC97317 support */ 265 static int pc97317_map_regs(struct ecppunit *); 266 static void pc97317_unmap_regs(struct ecppunit *); 267 static int pc97317_config_chip(struct ecppunit *); 268 static void pc97317_config_mode(struct ecppunit *); 269 270 /* M1553 Southbridge support */ 271 static int m1553_map_regs(struct ecppunit *pp); 272 static void m1553_unmap_regs(struct ecppunit *pp); 273 static int m1553_config_chip(struct ecppunit *); 274 static uint8_t m1553_read_config_reg(struct ecppunit *, uint8_t); 275 static void m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t); 276 277 /* M1553 Southbridge DMAC 8237 support routines */ 278 static int dma8237_dma_start(struct ecppunit *); 279 static int dma8237_dma_stop(struct ecppunit *, size_t *); 280 static size_t dma8237_getcnt(struct ecppunit *); 281 static void dma8237_write_addr(struct ecppunit *, uint32_t); 282 static void dma8237_write_count(struct ecppunit *, uint32_t); 283 static uint32_t dma8237_read_count(struct ecppunit *); 284 static void dma8237_write(struct ecppunit *, int, uint8_t); 285 static uint8_t dma8237_read(struct ecppunit *, int); 286 #ifdef INCLUDE_DMA8237_READ_ADDR 287 static uint32_t dma8237_read_addr(struct ecppunit *); 288 #endif 289 290 /* i86 PC support rountines */ 291 292 #if defined(__x86) 293 static int x86_dma_start(struct ecppunit *); 294 static int x86_dma_stop(struct ecppunit *, size_t *); 295 static int x86_map_regs(struct ecppunit *); 296 static void x86_unmap_regs(struct ecppunit *); 297 static int x86_config_chip(struct ecppunit *); 298 static size_t x86_getcnt(struct ecppunit *); 299 #endif 300 301 /* IEEE 1284 phase transitions */ 302 static void ecpp_1284_init_interface(struct ecppunit *); 303 static int ecpp_1284_termination(struct ecppunit *); 304 static uchar_t ecpp_idle_phase(struct ecppunit *); 305 static int ecp_forward2reverse(struct ecppunit *); 306 static int ecp_reverse2forward(struct ecppunit *); 307 static int read_nibble_backchan(struct ecppunit *); 308 309 /* reverse transfers */ 310 static uint_t ecpp_peripheral2host(struct ecppunit *); 311 static uchar_t 
ecp_peripheral2host(struct ecppunit *); 312 static uchar_t nibble_peripheral2host(struct ecppunit *pp, uint8_t *); 313 static int ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int); 314 static void ecpp_ecp_read_timeout(void *); 315 static void ecpp_ecp_read_completion(struct ecppunit *); 316 317 /* IEEE 1284 mode transitions */ 318 static void ecpp_default_negotiation(struct ecppunit *); 319 static int ecpp_mode_negotiation(struct ecppunit *, uchar_t); 320 static int ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *); 321 static int ecp_negotiation(struct ecppunit *); 322 static int nibble_negotiation(struct ecppunit *); 323 static int devidnib_negotiation(struct ecppunit *); 324 325 /* IEEE 1284 utility routines */ 326 static int wait_dsr(struct ecppunit *, uint8_t, uint8_t, int); 327 328 /* debugging functions */ 329 static void ecpp_error(dev_info_t *, char *, ...); 330 static uchar_t ecpp_get_error_status(uchar_t); 331 332 /* 333 * Chip-dependent structures 334 */ 335 static ddi_dma_attr_t cheerio_dma_attr = { 336 DMA_ATTR_VERSION, /* version */ 337 0x00000000ull, /* dlim_addr_lo */ 338 0xfffffffeull, /* dlim_addr_hi */ 339 0xffffff, /* DMA counter register */ 340 1, /* DMA address alignment */ 341 0x74, /* burst sizes */ 342 0x0001, /* min effective DMA size */ 343 0xffff, /* maximum transfer size */ 344 0xffff, /* segment boundary */ 345 1, /* s/g list length */ 346 1, /* granularity of device */ 347 0 /* DMA flags */ 348 }; 349 350 static struct ecpp_hw pc87332 = { 351 pc87332_map_regs, 352 pc87332_unmap_regs, 353 pc87332_config_chip, 354 pc87332_config_mode, 355 cheerio_mask_intr, 356 cheerio_unmask_intr, 357 cheerio_dma_start, 358 cheerio_dma_stop, 359 cheerio_getcnt, 360 &cheerio_dma_attr 361 }; 362 363 static struct ecpp_hw pc97317 = { 364 pc97317_map_regs, 365 pc97317_unmap_regs, 366 pc97317_config_chip, 367 pc97317_config_mode, 368 cheerio_mask_intr, 369 cheerio_unmask_intr, 370 cheerio_dma_start, 371 cheerio_dma_stop, 372 cheerio_getcnt, 373 &cheerio_dma_attr 374 }; 375 376 static ddi_dma_attr_t i8237_dma_attr = { 377 DMA_ATTR_VERSION, /* version */ 378 0x00000000ull, /* dlim_addr_lo */ 379 0xfffffffeull, /* dlim_addr_hi */ 380 0xffff, /* DMA counter register */ 381 1, /* DMA address alignment */ 382 0x01, /* burst sizes */ 383 0x0001, /* min effective DMA size */ 384 0xffff, /* maximum transfer size */ 385 0x7fff, /* segment boundary */ 386 1, /* s/g list length */ 387 1, /* granularity of device */ 388 0 /* DMA flags */ 389 }; 390 391 static struct ecpp_hw m1553 = { 392 m1553_map_regs, 393 m1553_unmap_regs, 394 m1553_config_chip, 395 empty_config_mode, /* no config_mode */ 396 empty_mask_intr, /* no mask_intr */ 397 empty_mask_intr, /* no unmask_intr */ 398 dma8237_dma_start, 399 dma8237_dma_stop, 400 dma8237_getcnt, 401 &i8237_dma_attr 402 }; 403 404 #if defined(__x86) 405 static ddi_dma_attr_t sb_dma_attr = { 406 DMA_ATTR_VERSION, /* version */ 407 0x00000000ull, /* dlim_addr_lo */ 408 0xffffff, /* dlim_addr_hi */ 409 0xffff, /* DMA counter register */ 410 1, /* DMA address alignment */ 411 0x01, /* burst sizes */ 412 0x0001, /* min effective DMA size */ 413 0xffffffff, /* maximum transfer size */ 414 0xffff, /* segment boundary */ 415 1, /* s/g list length */ 416 1, /* granularity of device */ 417 0 /* DMA flags */ 418 }; 419 420 static struct ecpp_hw x86 = { 421 x86_map_regs, 422 x86_unmap_regs, 423 x86_config_chip, 424 empty_config_mode, /* no config_mode */ 425 empty_mask_intr, /* no mask_intr */ 426 empty_mask_intr, /* no unmask_intr */ 427 
x86_dma_start, 428 x86_dma_stop, 429 x86_getcnt, 430 &sb_dma_attr 431 }; 432 #endif 433 434 /* 435 * list of supported devices 436 */ 437 struct ecpp_hw_bind ecpp_hw_bind[] = { 438 { "ns87317-ecpp", &pc97317, "PC97317" }, 439 { "pnpALI,1533,3", &m1553, "M1553" }, 440 { "ecpp", &pc87332, "PC87332" }, 441 #if defined(__x86) 442 { "lp", &x86, "i86pc"}, 443 #endif 444 }; 445 446 static ddi_device_acc_attr_t acc_attr = { 447 DDI_DEVICE_ATTR_V0, 448 DDI_STRUCTURE_LE_ACC, 449 DDI_STRICTORDER_ACC 450 }; 451 452 static struct ecpp_transfer_parms default_xfer_parms = { 453 FWD_TIMEOUT_DEFAULT, /* write timeout in seconds */ 454 ECPP_CENTRONICS /* supported mode */ 455 }; 456 457 /* prnio interface info string */ 458 static const char prn_ifinfo[] = PRN_PARALLEL; 459 460 /* prnio timeouts */ 461 static const struct prn_timeouts prn_timeouts_default = { 462 FWD_TIMEOUT_DEFAULT, /* forward timeout */ 463 REV_TIMEOUT_DEFAULT /* reverse timeout */ 464 }; 465 466 static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY; 467 static int ecpp_def_timeout = 90; /* left in for 2.7 compatibility */ 468 469 static void *ecppsoft_statep; 470 471 /* 472 * STREAMS framework manages locks for these structures 473 */ 474 _NOTE(SCHEME_PROTECTS_DATA("unique per call", iocblk)) 475 _NOTE(SCHEME_PROTECTS_DATA("unique per call", datab)) 476 _NOTE(SCHEME_PROTECTS_DATA("unique per call", msgb)) 477 _NOTE(SCHEME_PROTECTS_DATA("unique per call", queue)) 478 _NOTE(SCHEME_PROTECTS_DATA("unique per call", copyreq)) 479 _NOTE(SCHEME_PROTECTS_DATA("unique per call", stroptions)) 480 481 struct module_info ecppinfo = { 482 /* id, name, min pkt siz, max pkt siz, hi water, low water */ 483 42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT 484 }; 485 486 static struct qinit ecpp_rinit = { 487 putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL 488 }; 489 490 static struct qinit ecpp_wint = { 491 ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL 492 }; 493 494 struct streamtab ecpp_str_info = { 495 &ecpp_rinit, &ecpp_wint, NULL, NULL 496 }; 497 498 static struct cb_ops ecpp_cb_ops = { 499 nodev, /* cb_open */ 500 nodev, /* cb_close */ 501 nodev, /* cb_strategy */ 502 nodev, /* cb_print */ 503 nodev, /* cb_dump */ 504 nodev, /* cb_read */ 505 nodev, /* cb_write */ 506 nodev, /* cb_ioctl */ 507 nodev, /* cb_devmap */ 508 nodev, /* cb_mmap */ 509 nodev, /* cb_segmap */ 510 nochpoll, /* cb_chpoll */ 511 ddi_prop_op, /* cb_prop_op */ 512 &ecpp_str_info, /* cb_stream */ 513 (D_NEW | D_MP | D_MTPERQ) /* cb_flag */ 514 }; 515 516 /* 517 * Declare ops vectors for auto configuration. 
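 *
 * For reference, the configuration plumbing declared here and below chains
 * together as follows (a summary of the existing structures, nothing new):
 *
 *	ecppmodlinkage -> ecppmodldrv -> ecpp_ops (dev_ops)
 *	    -> ecpp_cb_ops (cb_ops) -> ecpp_str_info (streamtab)
 *	    -> ecpp_rinit / ecpp_wint (qinit)
 *	    -> ecpp_open, ecpp_close, ecpp_wput, ecpp_wsrv, ecpp_rsrv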
518 */ 519 struct dev_ops ecpp_ops = { 520 DEVO_REV, /* devo_rev */ 521 0, /* devo_refcnt */ 522 ecpp_getinfo, /* devo_getinfo */ 523 nulldev, /* devo_identify */ 524 nulldev, /* devo_probe */ 525 ecpp_attach, /* devo_attach */ 526 ecpp_detach, /* devo_detach */ 527 nodev, /* devo_reset */ 528 &ecpp_cb_ops, /* devo_cb_ops */ 529 (struct bus_ops *)NULL, /* devo_bus_ops */ 530 nulldev, /* devo_power */ 531 ddi_quiesce_not_needed, /* devo_quiesce */ 532 }; 533 534 extern struct mod_ops mod_driverops; 535 536 static struct modldrv ecppmodldrv = { 537 &mod_driverops, /* type of module - driver */ 538 "parallel port driver", 539 &ecpp_ops, 540 }; 541 542 static struct modlinkage ecppmodlinkage = { 543 MODREV_1, 544 { &ecppmodldrv, NULL } 545 }; 546 547 548 /* 549 * 550 * DDI/DKI entry points and supplementary routines 551 * 552 */ 553 554 555 int 556 _init(void) 557 { 558 int error; 559 560 if ((error = mod_install(&ecppmodlinkage)) == 0) { 561 (void) ddi_soft_state_init(&ecppsoft_statep, 562 sizeof (struct ecppunit), 1); 563 } 564 565 return (error); 566 } 567 568 int 569 _fini(void) 570 { 571 int error; 572 573 if ((error = mod_remove(&ecppmodlinkage)) == 0) { 574 ddi_soft_state_fini(&ecppsoft_statep); 575 } 576 577 return (error); 578 } 579 580 int 581 _info(struct modinfo *modinfop) 582 { 583 return (mod_info(&ecppmodlinkage, modinfop)); 584 } 585 586 static int 587 ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 588 { 589 int instance; 590 char name[16]; 591 struct ecppunit *pp; 592 struct ecpp_hw_bind *hw_bind; 593 594 instance = ddi_get_instance(dip); 595 596 switch (cmd) { 597 case DDI_ATTACH: 598 break; 599 600 case DDI_RESUME: 601 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) { 602 return (DDI_FAILURE); 603 } 604 605 mutex_enter(&pp->umutex); 606 607 pp->suspended = FALSE; 608 609 /* 610 * Initialize the chip and restore current mode if needed 611 */ 612 (void) ECPP_CONFIG_CHIP(pp); 613 (void) ecpp_reset_port_regs(pp); 614 615 if (pp->oflag == TRUE) { 616 int current_mode = pp->current_mode; 617 618 (void) ecpp_1284_termination(pp); 619 (void) ecpp_mode_negotiation(pp, current_mode); 620 } 621 622 mutex_exit(&pp->umutex); 623 624 return (DDI_SUCCESS); 625 626 default: 627 return (DDI_FAILURE); 628 } 629 630 if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) { 631 ecpp_error(dip, "ddi_soft_state_zalloc failed\n"); 632 goto fail; 633 } 634 635 pp = ddi_get_soft_state(ecppsoft_statep, instance); 636 637 pp->dip = dip; 638 pp->suspended = FALSE; 639 640 /* 641 * Determine SuperIO type and set chip-dependent variables 642 */ 643 hw_bind = ecpp_determine_sio_type(pp); 644 645 if (hw_bind == NULL) { 646 cmn_err(CE_NOTE, "parallel port controller not supported"); 647 goto fail_sio; 648 } else { 649 pp->hw = hw_bind->hw; 650 ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info); 651 } 652 653 /* 654 * Map registers 655 */ 656 if (ECPP_MAP_REGS(pp) != SUCCESS) { 657 goto fail_map; 658 } 659 660 if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT, 661 NULL, &pp->dma_handle) != DDI_SUCCESS) { 662 ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n"); 663 goto fail_dma; 664 } 665 666 if (ddi_get_iblock_cookie(dip, 0, 667 &pp->ecpp_trap_cookie) != DDI_SUCCESS) { 668 ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n"); 669 goto fail_ibc; 670 } 671 672 mutex_init(&pp->umutex, NULL, MUTEX_DRIVER, 673 (void *)pp->ecpp_trap_cookie); 674 675 cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL); 676 677 if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, 
ecpp_isr, 678 (caddr_t)pp) != DDI_SUCCESS) { 679 ecpp_error(dip, "ecpp_attach: failed to add hard intr\n"); 680 goto fail_intr; 681 } 682 683 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, 684 &pp->softintr_id, 0, 0, ecpp_softintr, 685 (caddr_t)pp) != DDI_SUCCESS) { 686 ecpp_error(dip, "ecpp_attach: failed to add soft intr\n"); 687 goto fail_softintr; 688 } 689 690 (void) sprintf(name, "ecpp%d", instance); 691 692 if (ddi_create_minor_node(dip, name, S_IFCHR, instance, 693 DDI_NT_PRINTER, NULL) == DDI_FAILURE) { 694 ecpp_error(dip, "ecpp_attach: create_minor_node failed\n"); 695 goto fail_minor; 696 } 697 698 pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP); 699 if (pp->ioblock == NULL) { 700 ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n"); 701 goto fail_iob; 702 } else { 703 ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock); 704 } 705 706 ecpp_get_props(pp); 707 #if defined(__x86) 708 if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) { 709 if (ddi_dmae_alloc(dip, pp->uh.x86.chn, 710 DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS) 711 ecpp_error(pp->dip, "dmae_alloc success!\n"); 712 } 713 #endif 714 if (ECPP_CONFIG_CHIP(pp) == FAILURE) { 715 ecpp_error(pp->dip, "config_chip failed.\n"); 716 goto fail_config; 717 } 718 719 ecpp_kstat_init(pp); 720 721 ddi_report_dev(dip); 722 723 return (DDI_SUCCESS); 724 725 fail_config: 726 ddi_prop_remove_all(dip); 727 kmem_free(pp->ioblock, IO_BLOCK_SZ); 728 fail_iob: 729 ddi_remove_minor_node(dip, NULL); 730 fail_minor: 731 ddi_remove_softintr(pp->softintr_id); 732 fail_softintr: 733 ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie); 734 fail_intr: 735 mutex_destroy(&pp->umutex); 736 cv_destroy(&pp->pport_cv); 737 fail_ibc: 738 ddi_dma_free_handle(&pp->dma_handle); 739 fail_dma: 740 ECPP_UNMAP_REGS(pp); 741 fail_map: 742 fail_sio: 743 ddi_soft_state_free(ecppsoft_statep, instance); 744 fail: 745 ecpp_error(dip, "ecpp_attach: failed.\n"); 746 747 return (DDI_FAILURE); 748 } 749 750 static int 751 ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 752 { 753 int instance; 754 struct ecppunit *pp; 755 756 instance = ddi_get_instance(dip); 757 758 switch (cmd) { 759 case DDI_DETACH: 760 break; 761 762 case DDI_SUSPEND: 763 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) { 764 return (DDI_FAILURE); 765 } 766 767 mutex_enter(&pp->umutex); 768 ASSERT(pp->suspended == FALSE); 769 770 pp->suspended = TRUE; /* prevent new transfers */ 771 772 /* 773 * Wait if there's any activity on the port 774 */ 775 if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) { 776 (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex, 777 SUSPEND_TOUT * drv_usectohz(1000000), 778 TR_CLOCK_TICK); 779 if ((pp->e_busy == ECPP_BUSY) || 780 (pp->e_busy == ECPP_FLUSH)) { 781 pp->suspended = FALSE; 782 mutex_exit(&pp->umutex); 783 ecpp_error(pp->dip, 784 "ecpp_detach: suspend timeout\n"); 785 return (DDI_FAILURE); 786 } 787 } 788 789 mutex_exit(&pp->umutex); 790 return (DDI_SUCCESS); 791 792 default: 793 return (DDI_FAILURE); 794 } 795 796 pp = ddi_get_soft_state(ecppsoft_statep, instance); 797 #if defined(__x86) 798 if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) 799 (void) ddi_dmae_release(pp->dip, pp->uh.x86.chn); 800 #endif 801 if (pp->dma_handle != NULL) 802 ddi_dma_free_handle(&pp->dma_handle); 803 804 ddi_remove_minor_node(dip, NULL); 805 806 ddi_remove_softintr(pp->softintr_id); 807 808 ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie); 809 810 if (pp->ksp) { 811 kstat_delete(pp->ksp); 812 } 813 if (pp->intrstats) { 814 kstat_delete(pp->intrstats); 
	}

	cv_destroy(&pp->pport_cv);

	mutex_destroy(&pp->umutex);

	ECPP_UNMAP_REGS(pp);

	kmem_free(pp->ioblock, IO_BLOCK_SZ);

	ddi_prop_remove_all(dip);

	ddi_soft_state_free(ecppsoft_statep, instance);

	return (DDI_SUCCESS);

}

/*
 * ecpp_get_props() reads ecpp.conf for user-definable tunables.
 * If the file or a particular variable is not there, a default value
 * is assigned.
 */

static void
ecpp_get_props(struct ecppunit *pp)
{
	char	*prop;
#if defined(__x86)
	int	len;
	int	value;
#endif
	/*
	 * If fast_centronics is TRUE, non-compliant IEEE 1284
	 * peripherals (Centronics peripherals) will operate in DMA mode.
	 * Transfers between main memory and the device will be via DMA;
	 * peripheral handshaking will be conducted by superio logic.
	 * If ecpp cannot read the variable correctly, fast_centronics will
	 * be set to FALSE.  In this case, transfers and handshaking
	 * will be conducted by PIO for Centronics devices.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-centronics", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_centronics =
		    (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_centronics = FALSE;
	}

	/*
	 * If fast-1284-compatible is set to TRUE, when ecpp communicates
	 * with IEEE 1284 compliant peripherals, data transfers between
	 * main memory and the parallel port will be conducted by DMA.
	 * Handshaking between the port and peripheral will be conducted
	 * by superio logic.  This is the default characteristic.  If
	 * fast-1284-compatible is set to FALSE, transfers and handshaking
	 * will be conducted by PIO.
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_compat = TRUE;
	}

	/*
	 * Some centronics peripherals require the nInit signal to be
	 * toggled to reset the device.  If centronics_init_seq is set
	 * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
	 * Applications have the opportunity to toggle the nInit signal
	 * with ioctl(2) calls as well.  The default is to set it to FALSE.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
		pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->init_seq = FALSE;
	}

	/*
	 * If one of the centronics status signals is in an erroneous
	 * state, ecpp_wsrv() will be reinvoked every centronics-retry ms
	 * to check whether the status is ok to transfer.  If the property
	 * is not found, wsrv_retry will be set to CENTRONICS_RETRY ms.
	 */
	pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-retry", CENTRONICS_RETRY);

	/*
	 * In PIO mode, ecpp_isr() will loop waiting for the busy signal
	 * to be deasserted before transferring the next byte.  wait_for_busy
	 * is specified in microseconds.  If the property is not found,
	 * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
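	 *
	 * For example, an ecpp.conf might override some of the tunables
	 * documented above (hypothetical values; any property omitted
	 * falls back to the defaults named here):
	 *
	 *	fast-centronics="false";
	 *	fast-1284-compatible="true";
	 *	centronics-init-seq="false";
	 *	centronics-retry=750;
	 *	centronics-wait-for-busy=1000;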
912 */ 913 pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0, 914 "centronics-wait-for-busy", WAIT_FOR_BUSY); 915 916 /* 917 * In PIO mode, centronics transfers must hold the data signals 918 * for a data_setup_time milliseconds before the strobe is asserted. 919 */ 920 pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0, 921 "centronics-data-setup-time", DATA_SETUP_TIME); 922 923 /* 924 * In PIO mode, centronics transfers asserts the strobe signal 925 * for a period of strobe_pulse_width milliseconds. 926 */ 927 pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0, 928 "centronics-strobe-pulse-width", STROBE_PULSE_WIDTH); 929 930 /* 931 * Upon a transfer the peripheral, ecpp waits write_timeout seconds 932 * for the transmission to complete. 933 */ 934 default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY, 935 pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout); 936 937 pp->xfer_parms = default_xfer_parms; 938 939 /* 940 * Get dma channel for M1553 941 */ 942 if (pp->hw == &m1553) { 943 pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY, 944 pp->dip, 0, "dma-channel", 0x1); 945 ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn); 946 } 947 #if defined(__x86) 948 len = sizeof (value); 949 /* Get dma channel for i86 pc */ 950 if (pp->hw == &x86) { 951 if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF, 952 DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len) 953 != DDI_PROP_SUCCESS) { 954 ecpp_error(pp->dip, "No dma channel found\n"); 955 pp->uh.x86.chn = 0xff; 956 pp->fast_compat = FALSE; 957 pp->noecpregs = TRUE; 958 } else 959 pp->uh.x86.chn = (uint8_t)value; 960 } 961 #endif 962 /* 963 * these properties are not yet public 964 */ 965 pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0, 966 "ecp-rev-speed", ECP_REV_SPEED); 967 968 pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0, 969 "rev-watchdog", REV_WATCHDOG); 970 971 ecpp_error(pp->dip, 972 "ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n" 973 "ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n" 974 "ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n" 975 "ecpp_get_prop: transfer-timeout=%d\n", 976 pp->fast_centronics, pp->fast_compat, 977 pp->wsrv_retry, pp->wait_for_busy, 978 pp->data_setup_time, pp->strobe_pulse_width, 979 pp->xfer_parms.write_timeout); 980 } 981 982 /*ARGSUSED*/ 983 int 984 ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result) 985 { 986 dev_t dev = (dev_t)arg; 987 struct ecppunit *pp; 988 int instance, ret; 989 990 instance = getminor(dev); 991 992 switch (infocmd) { 993 case DDI_INFO_DEVT2DEVINFO: 994 pp = ddi_get_soft_state(ecppsoft_statep, instance); 995 if (pp != NULL) { 996 *result = pp->dip; 997 ret = DDI_SUCCESS; 998 } else { 999 ret = DDI_FAILURE; 1000 } 1001 break; 1002 1003 case DDI_INFO_DEVT2INSTANCE: 1004 *result = (void *)(uintptr_t)instance; 1005 ret = DDI_SUCCESS; 1006 break; 1007 1008 default: 1009 ret = DDI_FAILURE; 1010 break; 1011 } 1012 1013 return (ret); 1014 } 1015 1016 /*ARGSUSED2*/ 1017 static int 1018 ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp) 1019 { 1020 struct ecppunit *pp; 1021 int instance; 1022 struct stroptions *sop; 1023 mblk_t *mop; 1024 1025 instance = getminor(*dev); 1026 1027 if (instance < 0) { 1028 return (ENXIO); 1029 } 1030 1031 pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance); 1032 1033 if (pp == NULL) { 1034 return (ENXIO); 1035 } 1036 1037 mutex_enter(&pp->umutex); 1038 1039 /* 1040 * Parallel 
port is an exclusive-use device 1041 * thus providing print job integrity 1042 */ 1043 if (pp->oflag == TRUE) { 1044 ecpp_error(pp->dip, "ecpp open failed"); 1045 mutex_exit(&pp->umutex); 1046 return (EBUSY); 1047 } 1048 1049 pp->oflag = TRUE; 1050 1051 /* initialize state variables */ 1052 pp->prn_timeouts = prn_timeouts_default; 1053 pp->xfer_parms = default_xfer_parms; 1054 pp->current_mode = ECPP_CENTRONICS; 1055 pp->backchannel = ECPP_CENTRONICS; 1056 pp->current_phase = ECPP_PHASE_PO; 1057 pp->port = ECPP_PORT_DMA; 1058 pp->instance = instance; 1059 pp->timeout_error = 0; 1060 pp->saved_dsr = DSR_READ(pp); 1061 pp->ecpp_drain_counter = 0; 1062 pp->dma_cancelled = FALSE; 1063 pp->io_mode = ECPP_DMA; 1064 pp->joblen = 0; 1065 pp->tfifo_intr = 0; 1066 pp->softintr_pending = 0; 1067 pp->nread = 0; 1068 1069 /* clear the state flag */ 1070 pp->e_busy = ECPP_IDLE; 1071 1072 pp->readq = RD(q); 1073 pp->writeq = WR(q); 1074 pp->msg = NULL; 1075 1076 RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp; 1077 1078 /* 1079 * Get ready: check host/peripheral, negotiate into default mode 1080 */ 1081 if (ecpp_reset_port_regs(pp) == FAILURE) { 1082 mutex_exit(&pp->umutex); 1083 return (EIO); 1084 } 1085 1086 mutex_exit(&pp->umutex); 1087 1088 /* 1089 * Configure the Stream head and enable the Stream 1090 */ 1091 if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) { 1092 return (EAGAIN); 1093 } 1094 1095 mop->b_datap->db_type = M_SETOPTS; 1096 mop->b_wptr += sizeof (struct stroptions); 1097 1098 /* 1099 * if device is open with O_NONBLOCK flag set, let read(2) return 0 1100 * if no data waiting to be read. Writes will block on flow control. 1101 */ 1102 sop = (struct stroptions *)mop->b_rptr; 1103 sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON; 1104 sop->so_hiwat = ECPPHIWAT; 1105 sop->so_lowat = ECPPLOWAT; 1106 1107 /* enable the stream */ 1108 qprocson(q); 1109 1110 putnext(q, mop); 1111 1112 mutex_enter(&pp->umutex); 1113 1114 ecpp_default_negotiation(pp); 1115 1116 /* go revidle */ 1117 (void) ecpp_idle_phase(pp); 1118 1119 ecpp_error(pp->dip, 1120 "ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n", 1121 pp->current_mode, pp->current_phase, 1122 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 1123 1124 mutex_exit(&pp->umutex); 1125 1126 return (0); 1127 } 1128 1129 /*ARGSUSED1*/ 1130 static int 1131 ecpp_close(queue_t *q, int flag, cred_t *cred_p) 1132 { 1133 struct ecppunit *pp; 1134 timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id; 1135 1136 pp = (struct ecppunit *)q->q_ptr; 1137 1138 ecpp_error(pp->dip, "ecpp_close: entering ...\n"); 1139 1140 mutex_enter(&pp->umutex); 1141 1142 /* 1143 * ecpp_close() will continue to loop until the 1144 * queue has been drained or if the thread 1145 * has received a SIG. Typically, when the queue 1146 * has data, the port will be ECPP_BUSY. However, 1147 * after a dma completes and before the wsrv 1148 * starts the next transfer, the port may be IDLE. 1149 * In this case, ecpp_close() will loop within this 1150 * while(qsize) segment. Since, ecpp_wsrv() runs 1151 * at software interupt level, this shouldn't loop 1152 * very long. 
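	 *
	 * Roughly, the window described above looks like this (an
	 * illustrative timeline, not additional code):
	 *
	 *	wsrv: e_busy = ECPP_BUSY -> dma done (isr) -> e_busy = ECPP_IDLE
	 *	      -> wsrv qenable()d -> next mblk -> e_busy = ECPP_BUSY
	 *
	 * so the loop below may briefly observe ECPP_IDLE while
	 * qsize(WR(q)) != 0 and simply waits again.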
1153 */ 1154 while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) { 1155 if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) { 1156 ecpp_error(pp->dip, "ecpp_close:B: received SIG\n"); 1157 /* 1158 * Returning from a signal such as 1159 * SIGTERM or SIGKILL 1160 */ 1161 ecpp_flush(pp, FWRITE); 1162 break; 1163 } else { 1164 ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n"); 1165 } 1166 } 1167 1168 ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, " 1169 "qsize(WR(q))=%d, qsize(RD(q))=%d\n", 1170 pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q)); 1171 1172 /* 1173 * Cancel all timeouts, disable interrupts 1174 * 1175 * Note that we can`t call untimeout(9F) with mutex held: 1176 * callout may be blocked on the same mutex, and untimeout() will 1177 * cv_wait() while callout is executing, thus creating a deadlock 1178 * So we zero the timeout id's inside mutex and call untimeout later 1179 */ 1180 timeout_id = pp->timeout_id; 1181 fifo_timer_id = pp->fifo_timer_id; 1182 wsrv_timer_id = pp->wsrv_timer_id; 1183 1184 pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0; 1185 1186 pp->softintr_pending = 0; 1187 pp->dma_cancelled = TRUE; 1188 ECPP_MASK_INTR(pp); 1189 1190 mutex_exit(&pp->umutex); 1191 1192 qprocsoff(q); 1193 1194 if (timeout_id) { 1195 (void) untimeout(timeout_id); 1196 } 1197 if (fifo_timer_id) { 1198 (void) untimeout(fifo_timer_id); 1199 } 1200 if (wsrv_timer_id) { 1201 (void) untimeout(wsrv_timer_id); 1202 } 1203 1204 mutex_enter(&pp->umutex); 1205 1206 /* set link to Compatible mode */ 1207 if ((pp->current_mode == ECPP_ECP_MODE) && 1208 (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) { 1209 (void) ecp_reverse2forward(pp); 1210 } 1211 1212 (void) ecpp_1284_termination(pp); 1213 1214 pp->oflag = FALSE; 1215 q->q_ptr = WR(q)->q_ptr = NULL; 1216 pp->readq = pp->writeq = NULL; 1217 pp->msg = NULL; 1218 1219 ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n", 1220 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 1221 1222 mutex_exit(&pp->umutex); 1223 1224 return (0); 1225 } 1226 1227 /* 1228 * standard put procedure for ecpp 1229 */ 1230 static int 1231 ecpp_wput(queue_t *q, mblk_t *mp) 1232 { 1233 struct msgb *nmp; 1234 struct ecppunit *pp; 1235 1236 pp = (struct ecppunit *)q->q_ptr; 1237 1238 if (!mp) { 1239 return (0); 1240 } 1241 1242 if ((mp->b_wptr - mp->b_rptr) <= 0) { 1243 ecpp_error(pp->dip, 1244 "ecpp_wput:bogus packet recieved mp=%x\n", mp); 1245 freemsg(mp); 1246 return (0); 1247 } 1248 1249 switch (DB_TYPE(mp)) { 1250 case M_DATA: 1251 /* 1252 * This is a quick fix for multiple message block problem, 1253 * it will be changed later with better performance code. 1254 */ 1255 if (mp->b_cont) { 1256 /* 1257 * mblk has scattered data ... 
do msgpullup 1258 * if it fails, continue with the current mblk 1259 */ 1260 if ((nmp = msgpullup(mp, -1)) != NULL) { 1261 freemsg(mp); 1262 mp = nmp; 1263 ecpp_error(pp->dip, 1264 "ecpp_wput:msgpullup: mp=%p len=%d\n", 1265 mp, mp->b_wptr - mp->b_rptr); 1266 } 1267 } 1268 1269 /* let ecpp_wsrv() concatenate small blocks */ 1270 (void) putq(q, mp); 1271 1272 break; 1273 1274 case M_CTL: 1275 (void) putq(q, mp); 1276 1277 break; 1278 1279 case M_IOCTL: { 1280 struct iocblk *iocbp; 1281 1282 iocbp = (struct iocblk *)mp->b_rptr; 1283 1284 ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd); 1285 1286 mutex_enter(&pp->umutex); 1287 1288 /* TESTIO and GET_STATUS can be used during transfer */ 1289 if ((pp->e_busy == ECPP_BUSY) && 1290 (iocbp->ioc_cmd != BPPIOC_TESTIO) && 1291 (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) { 1292 mutex_exit(&pp->umutex); 1293 (void) putq(q, mp); 1294 } else { 1295 mutex_exit(&pp->umutex); 1296 ecpp_putioc(q, mp); 1297 } 1298 1299 break; 1300 } 1301 1302 case M_IOCDATA: { 1303 struct copyresp *csp; 1304 1305 ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n"); 1306 1307 csp = (struct copyresp *)mp->b_rptr; 1308 1309 /* 1310 * If copy request failed, quit now 1311 */ 1312 if (csp->cp_rval != 0) { 1313 freemsg(mp); 1314 return (0); 1315 } 1316 1317 switch (csp->cp_cmd) { 1318 case ECPPIOC_SETPARMS: 1319 case ECPPIOC_SETREGS: 1320 case ECPPIOC_SETPORT: 1321 case ECPPIOC_SETDATA: 1322 case PRNIOC_SET_IFCAP: 1323 case PRNIOC_SET_TIMEOUTS: 1324 /* 1325 * need to retrieve and use the data, but if the 1326 * device is busy, wait. 1327 */ 1328 (void) putq(q, mp); 1329 break; 1330 1331 case ECPPIOC_GETPARMS: 1332 case ECPPIOC_GETREGS: 1333 case ECPPIOC_GETPORT: 1334 case ECPPIOC_GETDATA: 1335 case BPPIOC_GETERR: 1336 case BPPIOC_TESTIO: 1337 case PRNIOC_GET_IFCAP: 1338 case PRNIOC_GET_STATUS: 1339 case PRNIOC_GET_1284_STATUS: 1340 case PRNIOC_GET_TIMEOUTS: 1341 /* data transfered to user space okay */ 1342 ecpp_ack_ioctl(q, mp); 1343 break; 1344 1345 case ECPPIOC_GETDEVID: 1346 ecpp_wput_iocdata_devid(q, mp, 1347 offsetof(struct ecpp_device_id, rlen)); 1348 break; 1349 1350 case PRNIOC_GET_1284_DEVID: 1351 ecpp_wput_iocdata_devid(q, mp, 1352 offsetof(struct prn_1284_device_id, id_rlen)); 1353 break; 1354 1355 case PRNIOC_GET_IFINFO: 1356 ecpp_wput_iocdata_devid(q, mp, 1357 offsetof(struct prn_interface_info, if_rlen)); 1358 break; 1359 1360 default: 1361 ecpp_nack_ioctl(q, mp, EINVAL); 1362 break; 1363 } 1364 1365 break; 1366 } 1367 1368 case M_FLUSH: 1369 ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n"); 1370 1371 if (*mp->b_rptr & FLUSHW) { 1372 mutex_enter(&pp->umutex); 1373 ecpp_flush(pp, FWRITE); 1374 mutex_exit(&pp->umutex); 1375 } 1376 1377 if (*mp->b_rptr & FLUSHR) { 1378 mutex_enter(&pp->umutex); 1379 ecpp_flush(pp, FREAD); 1380 mutex_exit(&pp->umutex); 1381 qreply(q, mp); 1382 } else { 1383 freemsg(mp); 1384 } 1385 1386 break; 1387 1388 case M_READ: 1389 /* 1390 * When the user calls read(2), M_READ message is sent to us, 1391 * first byte of which is the number of requested bytes 1392 * We add up user requests and use resulting number 1393 * to calculate the reverse transfer block size 1394 */ 1395 mutex_enter(&pp->umutex); 1396 if (pp->e_busy == ECPP_IDLE) { 1397 pp->nread += *(size_t *)mp->b_rptr; 1398 ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread); 1399 freemsg(mp); 1400 } else { 1401 ecpp_error(pp->dip, "ecpp_wput: M_READ queueing"); 1402 (void) putq(q, mp); 1403 } 1404 mutex_exit(&pp->umutex); 1405 break; 1406 1407 default: 1408 ecpp_error(pp->dip, "ecpp_wput: 
bad messagetype 0x%x\n", 1409 DB_TYPE(mp)); 1410 freemsg(mp); 1411 break; 1412 } 1413 1414 return (0); 1415 } 1416 1417 /* 1418 * Process ECPPIOC_GETDEVID-like ioctls 1419 */ 1420 static void 1421 ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset) 1422 { 1423 struct copyresp *csp; 1424 struct ecpp_copystate *stp; 1425 mblk_t *datamp; 1426 1427 csp = (struct copyresp *)mp->b_rptr; 1428 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 1429 1430 /* determine the state of copyin/copyout process */ 1431 switch (stp->state) { 1432 case ECPP_STRUCTIN: 1433 /* user structure has arrived */ 1434 (void) putq(q, mp); 1435 break; 1436 1437 case ECPP_ADDROUT: 1438 /* 1439 * data transfered to user space okay 1440 * now update user structure 1441 */ 1442 datamp = allocb(sizeof (int), BPRI_MED); 1443 if (datamp == NULL) { 1444 ecpp_nack_ioctl(q, mp, ENOSR); 1445 break; 1446 } 1447 1448 *(int *)datamp->b_rptr = 1449 *(int *)((char *)&stp->un + rlen_offset); 1450 stp->state = ECPP_STRUCTOUT; 1451 1452 mcopyout(mp, csp->cp_private, sizeof (int), 1453 (char *)stp->uaddr + rlen_offset, datamp); 1454 qreply(q, mp); 1455 break; 1456 1457 case ECPP_STRUCTOUT: 1458 /* user structure was updated okay */ 1459 freemsg(csp->cp_private); 1460 ecpp_ack_ioctl(q, mp); 1461 break; 1462 1463 default: 1464 ecpp_nack_ioctl(q, mp, EINVAL); 1465 break; 1466 } 1467 } 1468 1469 static uchar_t 1470 ecpp_get_error_status(uchar_t status) 1471 { 1472 uchar_t pin_status = 0; 1473 1474 if (!(status & ECPP_nERR)) { 1475 pin_status |= BPP_ERR_ERR; 1476 } 1477 1478 if (status & ECPP_PE) { 1479 pin_status |= BPP_PE_ERR; 1480 } 1481 1482 if (!(status & ECPP_SLCT)) { 1483 pin_status |= BPP_SLCT_ERR; 1484 } 1485 1486 if (!(status & ECPP_nBUSY)) { 1487 pin_status |= BPP_SLCT_ERR; 1488 } 1489 1490 return (pin_status); 1491 } 1492 1493 /* 1494 * ioctl handler for output PUT procedure. 
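 *
 * For illustration, consumers reach this code via transparent ioctls on the
 * character device, e.g. (a hypothetical user-level sketch; the device path
 * depends on the instance):
 *
 *	int fd = open("/dev/ecpp0", O_RDWR);
 *	struct ecpp_transfer_parms p;
 *	if (fd >= 0 && ioctl(fd, ECPPIOC_GETPARMS, &p) == 0)
 *		(void) printf("mode=%d timeout=%d\n", p.mode, p.write_timeout);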
1495 */ 1496 static void 1497 ecpp_putioc(queue_t *q, mblk_t *mp) 1498 { 1499 struct iocblk *iocbp; 1500 struct ecppunit *pp; 1501 1502 pp = (struct ecppunit *)q->q_ptr; 1503 1504 iocbp = (struct iocblk *)mp->b_rptr; 1505 1506 /* I_STR ioctls are invalid */ 1507 if (iocbp->ioc_count != TRANSPARENT) { 1508 ecpp_nack_ioctl(q, mp, EINVAL); 1509 return; 1510 } 1511 1512 switch (iocbp->ioc_cmd) { 1513 case ECPPIOC_SETPARMS: { 1514 mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL); 1515 qreply(q, mp); 1516 break; 1517 } 1518 1519 case ECPPIOC_GETPARMS: { 1520 struct ecpp_transfer_parms xfer_parms; 1521 1522 mutex_enter(&pp->umutex); 1523 1524 pp->xfer_parms.mode = pp->current_mode; 1525 xfer_parms = pp->xfer_parms; 1526 1527 mutex_exit(&pp->umutex); 1528 1529 ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms)); 1530 break; 1531 } 1532 1533 case ECPPIOC_SETREGS: { 1534 mutex_enter(&pp->umutex); 1535 if (pp->current_mode != ECPP_DIAG_MODE) { 1536 mutex_exit(&pp->umutex); 1537 ecpp_nack_ioctl(q, mp, EINVAL); 1538 break; 1539 } 1540 mutex_exit(&pp->umutex); 1541 1542 mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL); 1543 qreply(q, mp); 1544 break; 1545 } 1546 1547 case ECPPIOC_GETREGS: { 1548 struct ecpp_regs rg; 1549 1550 mutex_enter(&pp->umutex); 1551 1552 if (pp->current_mode != ECPP_DIAG_MODE) { 1553 mutex_exit(&pp->umutex); 1554 ecpp_nack_ioctl(q, mp, EINVAL); 1555 break; 1556 } 1557 1558 rg.dsr = DSR_READ(pp); 1559 rg.dcr = DCR_READ(pp); 1560 1561 mutex_exit(&pp->umutex); 1562 1563 ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n", 1564 rg.dsr, rg.dcr); 1565 1566 /* these bits must be 1 */ 1567 rg.dsr |= ECPP_SETREGS_DSR_MASK; 1568 rg.dcr |= ECPP_SETREGS_DCR_MASK; 1569 1570 ecpp_putioc_copyout(q, mp, &rg, sizeof (rg)); 1571 break; 1572 } 1573 1574 case ECPPIOC_SETPORT: 1575 case ECPPIOC_SETDATA: { 1576 mutex_enter(&pp->umutex); 1577 if (pp->current_mode != ECPP_DIAG_MODE) { 1578 mutex_exit(&pp->umutex); 1579 ecpp_nack_ioctl(q, mp, EINVAL); 1580 break; 1581 } 1582 mutex_exit(&pp->umutex); 1583 1584 /* 1585 * each of the commands fetches a byte quantity. 
1586 */ 1587 mcopyin(mp, NULL, sizeof (uchar_t), NULL); 1588 qreply(q, mp); 1589 break; 1590 } 1591 1592 case ECPPIOC_GETDATA: 1593 case ECPPIOC_GETPORT: { 1594 uchar_t byte; 1595 1596 mutex_enter(&pp->umutex); 1597 1598 /* must be in diagnostic mode for these commands to work */ 1599 if (pp->current_mode != ECPP_DIAG_MODE) { 1600 mutex_exit(&pp->umutex); 1601 ecpp_nack_ioctl(q, mp, EINVAL); 1602 break; 1603 } 1604 1605 if (iocbp->ioc_cmd == ECPPIOC_GETPORT) { 1606 byte = pp->port; 1607 } else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) { 1608 switch (pp->port) { 1609 case ECPP_PORT_PIO: 1610 byte = DATAR_READ(pp); 1611 break; 1612 case ECPP_PORT_TDMA: 1613 byte = TFIFO_READ(pp); 1614 ecpp_error(pp->dip, "GETDATA=0x%x\n", byte); 1615 break; 1616 default: 1617 ecpp_nack_ioctl(q, mp, EINVAL); 1618 break; 1619 } 1620 } else { 1621 mutex_exit(&pp->umutex); 1622 ecpp_error(pp->dip, "weird command"); 1623 ecpp_nack_ioctl(q, mp, EINVAL); 1624 break; 1625 } 1626 1627 mutex_exit(&pp->umutex); 1628 1629 ecpp_putioc_copyout(q, mp, &byte, sizeof (byte)); 1630 1631 break; 1632 } 1633 1634 case BPPIOC_GETERR: { 1635 struct bpp_error_status bpp_status; 1636 1637 mutex_enter(&pp->umutex); 1638 1639 bpp_status.timeout_occurred = pp->timeout_error; 1640 bpp_status.bus_error = 0; /* not used */ 1641 bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr); 1642 1643 mutex_exit(&pp->umutex); 1644 1645 ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status)); 1646 1647 break; 1648 } 1649 1650 case BPPIOC_TESTIO: { 1651 mutex_enter(&pp->umutex); 1652 1653 if (!((pp->current_mode == ECPP_CENTRONICS) || 1654 (pp->current_mode == ECPP_COMPAT_MODE))) { 1655 ecpp_nack_ioctl(q, mp, EINVAL); 1656 } else { 1657 pp->saved_dsr = DSR_READ(pp); 1658 1659 if ((pp->saved_dsr & ECPP_PE) || 1660 !(pp->saved_dsr & ECPP_SLCT) || 1661 !(pp->saved_dsr & ECPP_nERR)) { 1662 ecpp_nack_ioctl(q, mp, EIO); 1663 } else { 1664 ecpp_ack_ioctl(q, mp); 1665 } 1666 } 1667 1668 mutex_exit(&pp->umutex); 1669 1670 break; 1671 } 1672 1673 case PRNIOC_RESET: 1674 /* 1675 * Initialize interface only if no transfer is in progress 1676 */ 1677 mutex_enter(&pp->umutex); 1678 if (pp->e_busy == ECPP_BUSY) { 1679 mutex_exit(&pp->umutex); 1680 ecpp_nack_ioctl(q, mp, EIO); 1681 } else { 1682 (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS); 1683 1684 DCR_WRITE(pp, ECPP_SLCTIN); 1685 drv_usecwait(2); 1686 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 1687 1688 ecpp_default_negotiation(pp); 1689 1690 mutex_exit(&pp->umutex); 1691 ecpp_ack_ioctl(q, mp); 1692 } 1693 break; 1694 1695 case PRNIOC_GET_IFCAP: { 1696 uint_t ifcap; 1697 1698 mutex_enter(&pp->umutex); 1699 1700 ifcap = ecpp_get_prn_ifcap(pp); 1701 1702 mutex_exit(&pp->umutex); 1703 1704 ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap)); 1705 break; 1706 } 1707 1708 case PRNIOC_SET_IFCAP: { 1709 mcopyin(mp, NULL, sizeof (uint_t), NULL); 1710 qreply(q, mp); 1711 break; 1712 } 1713 1714 case PRNIOC_GET_TIMEOUTS: { 1715 struct prn_timeouts timeouts; 1716 1717 mutex_enter(&pp->umutex); 1718 timeouts = pp->prn_timeouts; 1719 mutex_exit(&pp->umutex); 1720 1721 ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts)); 1722 1723 break; 1724 } 1725 1726 case PRNIOC_SET_TIMEOUTS: 1727 mcopyin(mp, NULL, sizeof (struct prn_timeouts), 1728 *(caddr_t *)(void *)mp->b_cont->b_rptr); 1729 qreply(q, mp); 1730 break; 1731 1732 case PRNIOC_GET_STATUS: { 1733 uint8_t dsr; 1734 uint_t status; 1735 1736 mutex_enter(&pp->umutex); 1737 1738 /* DSR only makes sense in Centronics & Compat mode */ 1739 if (pp->current_mode == 
ECPP_CENTRONICS || 1740 pp->current_mode == ECPP_COMPAT_MODE) { 1741 dsr = DSR_READ(pp); 1742 if ((dsr & ECPP_PE) || 1743 !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) { 1744 status = PRN_ONLINE; 1745 } else { 1746 status = PRN_ONLINE | PRN_READY; 1747 } 1748 } else { 1749 status = PRN_ONLINE | PRN_READY; 1750 } 1751 1752 mutex_exit(&pp->umutex); 1753 1754 ecpp_putioc_copyout(q, mp, &status, sizeof (status)); 1755 break; 1756 } 1757 1758 case PRNIOC_GET_1284_STATUS: { 1759 uint8_t dsr; 1760 uchar_t status; 1761 1762 mutex_enter(&pp->umutex); 1763 1764 /* status only makes sense in Centronics & Compat mode */ 1765 if (pp->current_mode != ECPP_COMPAT_MODE && 1766 pp->current_mode != ECPP_CENTRONICS) { 1767 mutex_exit(&pp->umutex); 1768 ecpp_nack_ioctl(q, mp, EINVAL); 1769 break; 1770 } 1771 1772 dsr = DSR_READ(pp); /* read status */ 1773 1774 mutex_exit(&pp->umutex); 1775 1776 ecpp_error(pp->dip, "PRNIOC_GET_STATUS: %x\n", dsr); 1777 1778 status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) | 1779 (~dsr & ECPP_nBUSY); 1780 1781 ecpp_putioc_copyout(q, mp, &status, sizeof (status)); 1782 break; 1783 } 1784 1785 case ECPPIOC_GETDEVID: 1786 ecpp_putioc_stateful_copyin(q, mp, 1787 sizeof (struct ecpp_device_id)); 1788 break; 1789 1790 case PRNIOC_GET_1284_DEVID: 1791 ecpp_putioc_stateful_copyin(q, mp, 1792 sizeof (struct prn_1284_device_id)); 1793 break; 1794 1795 case PRNIOC_GET_IFINFO: 1796 ecpp_putioc_stateful_copyin(q, mp, 1797 sizeof (struct prn_interface_info)); 1798 break; 1799 1800 default: 1801 ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n", 1802 iocbp->ioc_cmd); 1803 ecpp_nack_ioctl(q, mp, EINVAL); 1804 break; 1805 } 1806 } 1807 1808 /* 1809 * allocate mblk and copyout the requested number of bytes 1810 */ 1811 static void 1812 ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len) 1813 { 1814 mblk_t *tmp; 1815 1816 if ((tmp = allocb(len, BPRI_MED)) == NULL) { 1817 ecpp_nack_ioctl(q, mp, ENOSR); 1818 return; 1819 } 1820 1821 bcopy(buf, tmp->b_wptr, len); 1822 1823 mcopyout(mp, NULL, len, NULL, tmp); 1824 qreply(q, mp); 1825 } 1826 1827 /* 1828 * copyin the structure using struct ecpp_copystate 1829 */ 1830 static void 1831 ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size) 1832 { 1833 mblk_t *tmp; 1834 struct ecpp_copystate *stp; 1835 1836 if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) { 1837 ecpp_nack_ioctl(q, mp, EAGAIN); 1838 return; 1839 } 1840 1841 stp = (struct ecpp_copystate *)tmp->b_rptr; 1842 stp->state = ECPP_STRUCTIN; 1843 stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr; 1844 1845 tmp->b_wptr += sizeof (struct ecpp_copystate); 1846 1847 mcopyin(mp, tmp, size, stp->uaddr); 1848 qreply(q, mp); 1849 } 1850 1851 /* 1852 * read queue is only used when the peripheral sends data faster, 1853 * then the application consumes it; 1854 * once the low water mark is reached, this routine will be scheduled 1855 */ 1856 static int 1857 ecpp_rsrv(queue_t *q) 1858 { 1859 struct msgb *mp; 1860 1861 /* 1862 * send data upstream until next queue is full or the queue is empty 1863 */ 1864 while (canputnext(q) && (mp = getq(q))) { 1865 putnext(q, mp); 1866 } 1867 1868 /* 1869 * if there is still space on the queue, enable backchannel 1870 */ 1871 if (canputnext(q)) { 1872 struct ecppunit *pp = (struct ecppunit *)q->q_ptr; 1873 1874 mutex_enter(&pp->umutex); 1875 1876 if (pp->e_busy == ECPP_IDLE) { 1877 (void) ecpp_idle_phase(pp); 1878 cv_signal(&pp->pport_cv); /* signal ecpp_close() */ 1879 } 1880 1881 mutex_exit(&pp->umutex); 1882 } 1883 1884 return (0); 
1885 } 1886 1887 static int 1888 ecpp_wsrv(queue_t *q) 1889 { 1890 struct ecppunit *pp = (struct ecppunit *)q->q_ptr; 1891 struct msgb *mp; 1892 size_t len, total_len; 1893 size_t my_ioblock_sz; 1894 caddr_t my_ioblock; 1895 caddr_t start_addr; 1896 1897 mutex_enter(&pp->umutex); 1898 1899 ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy); 1900 1901 /* if channel is actively doing work, wait till completed */ 1902 if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) { 1903 mutex_exit(&pp->umutex); 1904 return (0); 1905 } else if (pp->suspended == TRUE) { 1906 /* 1907 * if the system is about to suspend and ecpp_detach() 1908 * is blocked due to active transfers, wake it up and exit 1909 */ 1910 cv_signal(&pp->pport_cv); 1911 mutex_exit(&pp->umutex); 1912 return (0); 1913 } 1914 1915 /* peripheral status should be okay before starting transfer */ 1916 if (pp->e_busy == ECPP_ERR) { 1917 if (ecpp_check_status(pp) == FAILURE) { 1918 if (pp->wsrv_timer_id == 0) { 1919 ecpp_error(pp->dip, "wsrv: start wrsv_timer\n"); 1920 pp->wsrv_timer_id = timeout(ecpp_wsrv_timer, 1921 (caddr_t)pp, 1922 drv_usectohz(pp->wsrv_retry * 1000)); 1923 } else { 1924 ecpp_error(pp->dip, 1925 "ecpp_wsrv: wrsv_timer is active\n"); 1926 } 1927 1928 mutex_exit(&pp->umutex); 1929 return (0); 1930 } else { 1931 pp->e_busy = ECPP_IDLE; 1932 } 1933 } 1934 1935 my_ioblock = pp->ioblock; 1936 my_ioblock_sz = IO_BLOCK_SZ; 1937 1938 /* 1939 * it`s important to null pp->msg here, 1940 * cleaning up from the previous transfer attempts 1941 */ 1942 pp->msg = NULL; 1943 1944 start_addr = NULL; 1945 len = total_len = 0; 1946 /* 1947 * The following loop is implemented to gather the 1948 * many small writes that the lp subsystem makes and 1949 * compile them into one large dma transfer. The len and 1950 * total_len variables are a running count of the number of 1951 * bytes that have been gathered. They are bcopied to the 1952 * ioblock buffer. The pp->e_busy is set to E_BUSY as soon as 1953 * we start gathering packets to indicate the following transfer. 1954 */ 1955 while (mp = getq(q)) { 1956 switch (DB_TYPE(mp)) { 1957 case M_DATA: 1958 pp->e_busy = ECPP_BUSY; 1959 len = mp->b_wptr - mp->b_rptr; 1960 1961 if ((total_len == 0) && (len >= my_ioblock_sz)) { 1962 /* 1963 * if the first M_DATA is bigger than ioblock, 1964 * just use this mblk and start the transfer 1965 */ 1966 total_len = len; 1967 start_addr = (caddr_t)mp->b_rptr; 1968 pp->msg = mp; 1969 goto breakout; 1970 } else if (total_len + len > my_ioblock_sz) { 1971 /* 1972 * current M_DATA does not fit in ioblock, 1973 * put it back and start the transfer 1974 */ 1975 (void) putbq(q, mp); 1976 goto breakout; 1977 } else { 1978 /* 1979 * otherwise add data to ioblock and free mblk 1980 */ 1981 bcopy(mp->b_rptr, my_ioblock, len); 1982 my_ioblock += len; 1983 total_len += len; 1984 start_addr = (caddr_t)pp->ioblock; 1985 freemsg(mp); 1986 } 1987 break; 1988 1989 case M_IOCTL: 1990 /* 1991 * Assume a simple loopback test: an application 1992 * writes data into the TFIFO, reads it using 1993 * ECPPIOC_GETDATA and compares. If the transfer 1994 * times out (which is only possible on Grover), 1995 * the ioctl might be processed before the data 1996 * got to the TFIFO, which leads to miscompare. 1997 * So if we met ioctl, postpone it until after xfer. 
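			 *
			 * That test is roughly (a hypothetical user-level
			 * sketch; assumes ECPP_DIAG_MODE was negotiated and
			 * the TDMA port was selected with ECPPIOC_SETPORT):
			 *
			 *	uchar_t out = 0x5a, in;
			 *	(void) write(fd, &out, 1);
			 *	(void) ioctl(fd, ECPPIOC_GETDATA, &in);
			 *	if (in != out) ... report miscompare ...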
1998 */ 1999 if (total_len > 0) { 2000 (void) putbq(q, mp); 2001 goto breakout; 2002 } 2003 2004 ecpp_error(pp->dip, "M_IOCTL.\n"); 2005 2006 mutex_exit(&pp->umutex); 2007 2008 ecpp_putioc(q, mp); 2009 2010 mutex_enter(&pp->umutex); 2011 2012 break; 2013 2014 case M_IOCDATA: { 2015 struct copyresp *csp = (struct copyresp *)mp->b_rptr; 2016 2017 ecpp_error(pp->dip, "M_IOCDATA\n"); 2018 2019 /* 2020 * If copy request failed, quit now 2021 */ 2022 if (csp->cp_rval != 0) { 2023 freemsg(mp); 2024 break; 2025 } 2026 2027 switch (csp->cp_cmd) { 2028 case ECPPIOC_SETPARMS: 2029 case ECPPIOC_SETREGS: 2030 case ECPPIOC_SETPORT: 2031 case ECPPIOC_SETDATA: 2032 case ECPPIOC_GETDEVID: 2033 case PRNIOC_SET_IFCAP: 2034 case PRNIOC_GET_1284_DEVID: 2035 case PRNIOC_SET_TIMEOUTS: 2036 case PRNIOC_GET_IFINFO: 2037 ecpp_srvioc(q, mp); 2038 break; 2039 2040 default: 2041 ecpp_nack_ioctl(q, mp, EINVAL); 2042 break; 2043 } 2044 2045 break; 2046 } 2047 2048 case M_CTL: 2049 if (pp->e_busy != ECPP_IDLE) { 2050 ecpp_error(pp->dip, "wsrv: M_CTL postponed\n"); 2051 (void) putbq(q, mp); 2052 goto breakout; 2053 } else { 2054 ecpp_error(pp->dip, "wsrv: M_CTL\n"); 2055 } 2056 2057 /* sanity check */ 2058 if ((mp->b_wptr - mp->b_rptr != sizeof (int)) || 2059 (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) { 2060 ecpp_error(pp->dip, "wsrv: bogus M_CTL"); 2061 freemsg(mp); 2062 break; 2063 } else { 2064 freemsg(mp); 2065 } 2066 2067 /* This was a backchannel request */ 2068 (void) ecpp_peripheral2host(pp); 2069 2070 /* exit if transfer have been initiated */ 2071 if (pp->e_busy == ECPP_BUSY) { 2072 goto breakout; 2073 } 2074 break; 2075 2076 case M_READ: 2077 pp->nread += *(size_t *)mp->b_rptr; 2078 freemsg(mp); 2079 ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread); 2080 break; 2081 2082 default: 2083 ecpp_error(pp->dip, "wsrv: should never get here\n"); 2084 freemsg(mp); 2085 break; 2086 } 2087 } 2088 breakout: 2089 /* 2090 * If total_len > 0 then start the transfer, otherwise goto idle state 2091 */ 2092 if (total_len > 0) { 2093 ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len); 2094 pp->e_busy = ECPP_BUSY; 2095 ecpp_start(pp, start_addr, total_len); 2096 } else { 2097 ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy); 2098 2099 /* IDLE if xfer_timeout, or FIFO_EMPTY */ 2100 if (pp->e_busy == ECPP_IDLE) { 2101 (void) ecpp_idle_phase(pp); 2102 cv_signal(&pp->pport_cv); /* signal ecpp_close() */ 2103 } 2104 } 2105 2106 mutex_exit(&pp->umutex); 2107 return (1); 2108 } 2109 2110 /* 2111 * Ioctl processor for queued ioctl data transfer messages. 
2112 */ 2113 static void 2114 ecpp_srvioc(queue_t *q, mblk_t *mp) 2115 { 2116 struct iocblk *iocbp; 2117 struct ecppunit *pp; 2118 2119 iocbp = (struct iocblk *)mp->b_rptr; 2120 pp = (struct ecppunit *)q->q_ptr; 2121 2122 switch (iocbp->ioc_cmd) { 2123 case ECPPIOC_SETPARMS: { 2124 struct ecpp_transfer_parms *xferp; 2125 2126 xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr; 2127 2128 if (xferp->write_timeout <= 0 || 2129 xferp->write_timeout >= ECPP_MAX_TIMEOUT) { 2130 ecpp_nack_ioctl(q, mp, EINVAL); 2131 break; 2132 } 2133 2134 if (!((xferp->mode == ECPP_CENTRONICS) || 2135 (xferp->mode == ECPP_COMPAT_MODE) || 2136 (xferp->mode == ECPP_NIBBLE_MODE) || 2137 (xferp->mode == ECPP_ECP_MODE) || 2138 (xferp->mode == ECPP_DIAG_MODE))) { 2139 ecpp_nack_ioctl(q, mp, EINVAL); 2140 break; 2141 } 2142 2143 pp->xfer_parms = *xferp; 2144 pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout; 2145 2146 ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n", 2147 pp->current_mode, pp->xfer_parms.mode); 2148 2149 if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) { 2150 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2151 } else { 2152 /* 2153 * mode nego was a success. If nibble mode check 2154 * back channel and set into REVIDLE. 2155 */ 2156 if ((pp->current_mode == ECPP_NIBBLE_MODE) && 2157 (read_nibble_backchan(pp) == FAILURE)) { 2158 /* 2159 * problems reading the backchannel 2160 * returned to centronics; 2161 * ioctl fails. 2162 */ 2163 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2164 break; 2165 } 2166 2167 ecpp_ack_ioctl(q, mp); 2168 } 2169 if (pp->current_mode != ECPP_DIAG_MODE) { 2170 pp->port = ECPP_PORT_DMA; 2171 } else { 2172 pp->port = ECPP_PORT_PIO; 2173 } 2174 2175 pp->xfer_parms.mode = pp->current_mode; 2176 2177 break; 2178 } 2179 2180 case ECPPIOC_SETREGS: { 2181 struct ecpp_regs *rg; 2182 uint8_t dcr; 2183 2184 rg = (struct ecpp_regs *)mp->b_cont->b_rptr; 2185 2186 /* must be in diagnostic mode for these commands to work */ 2187 if (pp->current_mode != ECPP_DIAG_MODE) { 2188 ecpp_nack_ioctl(q, mp, EINVAL); 2189 break; 2190 } 2191 2192 /* bits 4-7 must be 1 or return EINVAL */ 2193 if ((rg->dcr & ECPP_SETREGS_DCR_MASK) != 2194 ECPP_SETREGS_DCR_MASK) { 2195 ecpp_nack_ioctl(q, mp, EINVAL); 2196 break; 2197 } 2198 2199 /* get the old dcr */ 2200 dcr = DCR_READ(pp) & ~ECPP_REV_DIR; 2201 /* get the new dcr */ 2202 dcr = (dcr & ECPP_SETREGS_DCR_MASK) | 2203 (rg->dcr & ~ECPP_SETREGS_DCR_MASK); 2204 DCR_WRITE(pp, dcr); 2205 ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr); 2206 ecpp_ack_ioctl(q, mp); 2207 break; 2208 } 2209 2210 case ECPPIOC_SETPORT: { 2211 uchar_t *port; 2212 2213 port = (uchar_t *)mp->b_cont->b_rptr; 2214 2215 /* must be in diagnostic mode for these commands to work */ 2216 if (pp->current_mode != ECPP_DIAG_MODE) { 2217 ecpp_nack_ioctl(q, mp, EINVAL); 2218 break; 2219 } 2220 2221 switch (*port) { 2222 case ECPP_PORT_PIO: 2223 /* put superio into PIO mode */ 2224 ECR_WRITE(pp, 2225 ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 2226 pp->port = *port; 2227 ecpp_ack_ioctl(q, mp); 2228 break; 2229 2230 case ECPP_PORT_TDMA: 2231 ecpp_error(pp->dip, "SETPORT: to TDMA\n"); 2232 pp->tfifo_intr = 1; 2233 /* change to mode 110 */ 2234 ECR_WRITE(pp, 2235 ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV); 2236 pp->port = *port; 2237 ecpp_ack_ioctl(q, mp); 2238 break; 2239 2240 default: 2241 ecpp_nack_ioctl(q, mp, EINVAL); 2242 } 2243 2244 break; 2245 } 2246 2247 case ECPPIOC_SETDATA: { 2248 uchar_t *data; 2249 2250 data = (uchar_t *)mp->b_cont->b_rptr; 2251 
2252 /* must be in diagnostic mode for these commands to work */ 2253 if (pp->current_mode != ECPP_DIAG_MODE) { 2254 ecpp_nack_ioctl(q, mp, EINVAL); 2255 break; 2256 } 2257 2258 switch (pp->port) { 2259 case ECPP_PORT_PIO: 2260 DATAR_WRITE(pp, *data); 2261 ecpp_ack_ioctl(q, mp); 2262 break; 2263 2264 case ECPP_PORT_TDMA: 2265 TFIFO_WRITE(pp, *data); 2266 ecpp_ack_ioctl(q, mp); 2267 break; 2268 2269 default: 2270 ecpp_nack_ioctl(q, mp, EINVAL); 2271 } 2272 2273 break; 2274 } 2275 2276 case ECPPIOC_GETDEVID: { 2277 struct copyresp *csp; 2278 struct ecpp_copystate *stp; 2279 struct ecpp_device_id *dp; 2280 struct ecpp_device_id id; 2281 2282 csp = (struct copyresp *)mp->b_rptr; 2283 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2284 dp = (struct ecpp_device_id *)mp->b_cont->b_rptr; 2285 2286 #ifdef _MULTI_DATAMODEL 2287 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2288 struct ecpp_device_id32 *dp32; 2289 2290 dp32 = (struct ecpp_device_id32 *)dp; 2291 id.mode = dp32->mode; 2292 id.len = dp32->len; 2293 id.addr = (char *)(uintptr_t)dp32->addr; 2294 } else { 2295 #endif /* _MULTI_DATAMODEL */ 2296 id = *dp; 2297 #ifdef _MULTI_DATAMODEL 2298 } 2299 #endif /* _MULTI_DATAMODEL */ 2300 2301 ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen); 2302 break; 2303 } 2304 2305 case PRNIOC_GET_1284_DEVID: { 2306 struct copyresp *csp; 2307 struct ecpp_copystate *stp; 2308 struct prn_1284_device_id *dp; 2309 struct ecpp_device_id id; 2310 2311 csp = (struct copyresp *)mp->b_rptr; 2312 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2313 dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr; 2314 2315 /* imitate struct ecpp_device_id */ 2316 id.mode = ECPP_NIBBLE_MODE; 2317 2318 #ifdef _MULTI_DATAMODEL 2319 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2320 struct prn_1284_device_id32 *dp32; 2321 2322 dp32 = (struct prn_1284_device_id32 *)dp; 2323 id.len = dp32->id_len; 2324 id.addr = (char *)(uintptr_t)dp32->id_data; 2325 } else { 2326 #endif /* _MULTI_DATAMODEL */ 2327 id.len = dp->id_len; 2328 id.addr = (char *)dp->id_data; 2329 #ifdef _MULTI_DATAMODEL 2330 } 2331 #endif /* _MULTI_DATAMODEL */ 2332 2333 ecpp_srvioc_devid(q, mp, &id, 2334 (int *)&stp->un.prn_devid.id_rlen); 2335 break; 2336 } 2337 2338 case PRNIOC_SET_IFCAP: { 2339 uint_t ifcap, new_ifcap; 2340 2341 ifcap = ecpp_get_prn_ifcap(pp); 2342 new_ifcap = *(uint_t *)mp->b_cont->b_rptr; 2343 2344 if (ifcap == new_ifcap) { 2345 ecpp_ack_ioctl(q, mp); 2346 break; 2347 } 2348 2349 /* only changing PRN_BIDI is supported */ 2350 if ((ifcap ^ new_ifcap) & ~PRN_BIDI) { 2351 ecpp_nack_ioctl(q, mp, EINVAL); 2352 break; 2353 } 2354 2355 if (new_ifcap & PRN_BIDI) { /* go bidirectional */ 2356 ecpp_default_negotiation(pp); 2357 } else { /* go unidirectional */ 2358 (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS); 2359 } 2360 2361 ecpp_ack_ioctl(q, mp); 2362 break; 2363 } 2364 2365 case PRNIOC_SET_TIMEOUTS: { 2366 struct prn_timeouts *prn_timeouts; 2367 2368 prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr; 2369 2370 if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) { 2371 ecpp_nack_ioctl(q, mp, EINVAL); 2372 break; 2373 } 2374 2375 pp->prn_timeouts = *prn_timeouts; 2376 pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward; 2377 2378 ecpp_ack_ioctl(q, mp); 2379 break; 2380 } 2381 2382 case PRNIOC_GET_IFINFO: 2383 ecpp_srvioc_prnif(q, mp); 2384 break; 2385 2386 default: /* unexpected ioctl type */ 2387 ecpp_nack_ioctl(q, mp, EINVAL); 2388 break; 2389 } 2390 } 2391 2392 static void 2393 ecpp_srvioc_devid(queue_t *q, mblk_t *mp, 
struct ecpp_device_id *id, int *rlen) 2394 { 2395 struct ecppunit *pp; 2396 struct copyresp *csp; 2397 struct ecpp_copystate *stp; 2398 int error; 2399 int len; 2400 int mode; 2401 mblk_t *datamp; 2402 2403 pp = (struct ecppunit *)q->q_ptr; 2404 csp = (struct copyresp *)mp->b_rptr; 2405 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2406 mode = id->mode; 2407 2408 /* check arguments */ 2409 if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) { 2410 ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n", 2411 mode, id->len); 2412 ecpp_nack_ioctl(q, mp, EINVAL); 2413 return; 2414 } 2415 2416 /* Currently only Nibble mode is supported */ 2417 if (mode != ECPP_NIBBLE_MODE) { 2418 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2419 return; 2420 } 2421 2422 if ((id->addr == NULL) && (id->len != 0)) { 2423 ecpp_nack_ioctl(q, mp, EFAULT); 2424 return; 2425 } 2426 2427 /* read device ID length */ 2428 if (error = ecpp_getdevid(pp, NULL, &len, mode)) { 2429 ecpp_nack_ioctl(q, mp, error); 2430 goto breakout; 2431 } 2432 2433 /* don't take into account two length bytes */ 2434 len -= 2; 2435 *rlen = len; 2436 2437 /* limit transfer to user buffer length */ 2438 if (id->len < len) { 2439 len = id->len; 2440 } 2441 2442 if (len == 0) { 2443 /* just return rlen */ 2444 stp->state = ECPP_ADDROUT; 2445 ecpp_wput_iocdata_devid(q, mp, 2446 (uintptr_t)rlen - (uintptr_t)&stp->un); 2447 goto breakout; 2448 } 2449 2450 if ((datamp = allocb(len, BPRI_MED)) == NULL) { 2451 ecpp_nack_ioctl(q, mp, ENOSR); 2452 goto breakout; 2453 } 2454 2455 /* read ID string */ 2456 error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode); 2457 if (error) { 2458 freemsg(datamp); 2459 ecpp_nack_ioctl(q, mp, error); 2460 goto breakout; 2461 } else { 2462 datamp->b_wptr += len; 2463 2464 stp->state = ECPP_ADDROUT; 2465 mcopyout(mp, csp->cp_private, len, id->addr, datamp); 2466 qreply(q, mp); 2467 } 2468 2469 return; 2470 2471 breakout: 2472 (void) ecpp_1284_termination(pp); 2473 } 2474 2475 /* 2476 * PRNIOC_GET_IFINFO: return prnio interface info string 2477 */ 2478 static void 2479 ecpp_srvioc_prnif(queue_t *q, mblk_t *mp) 2480 { 2481 struct copyresp *csp; 2482 struct ecpp_copystate *stp; 2483 uint_t len; 2484 struct prn_interface_info *ip; 2485 struct prn_interface_info info; 2486 mblk_t *datamp; 2487 #ifdef _MULTI_DATAMODEL 2488 struct iocblk *iocbp = (struct iocblk *)mp->b_rptr; 2489 #endif 2490 2491 csp = (struct copyresp *)mp->b_rptr; 2492 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2493 ip = (struct prn_interface_info *)mp->b_cont->b_rptr; 2494 2495 #ifdef _MULTI_DATAMODEL 2496 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2497 struct prn_interface_info32 *ip32; 2498 2499 ip32 = (struct prn_interface_info32 *)ip; 2500 info.if_len = ip32->if_len; 2501 info.if_data = (char *)(uintptr_t)ip32->if_data; 2502 } else { 2503 #endif /* _MULTI_DATAMODEL */ 2504 info = *ip; 2505 #ifdef _MULTI_DATAMODEL 2506 } 2507 #endif /* _MULTI_DATAMODEL */ 2508 2509 len = strlen(prn_ifinfo); 2510 stp->un.prn_if.if_rlen = len; 2511 stp->state = ECPP_ADDROUT; 2512 2513 /* check arguments */ 2514 if ((info.if_data == NULL) && (info.if_len != 0)) { 2515 ecpp_nack_ioctl(q, mp, EFAULT); 2516 return; 2517 } 2518 2519 if (info.if_len == 0) { 2520 /* just copyout rlen */ 2521 ecpp_wput_iocdata_devid(q, mp, 2522 offsetof(struct prn_interface_info, if_rlen)); 2523 return; 2524 } 2525 2526 /* if needed, trim to the buffer size */ 2527 if (len > info.if_len) { 2528 len = info.if_len; 2529 } 2530 2531 if ((datamp = allocb(len, BPRI_MED)) == NULL) 
{ 2532 ecpp_nack_ioctl(q, mp, ENOSR); 2533 return; 2534 } 2535 2536 bcopy(&prn_ifinfo[0], datamp->b_wptr, len); 2537 datamp->b_wptr += len; 2538 2539 mcopyout(mp, csp->cp_private, len, info.if_data, datamp); 2540 qreply(q, mp); 2541 } 2542 2543 static void 2544 ecpp_flush(struct ecppunit *pp, int cmd) 2545 { 2546 queue_t *q; 2547 uint8_t ecr, dcr; 2548 timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id; 2549 2550 ASSERT(mutex_owned(&pp->umutex)); 2551 2552 if (!(cmd & FWRITE)) { 2553 return; 2554 } 2555 2556 q = pp->writeq; 2557 timeout_id = fifo_timer_id = wsrv_timer_id = 0; 2558 2559 ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy); 2560 2561 /* if there is an ongoing DMA, it needs to be turned off. */ 2562 switch (pp->e_busy) { 2563 case ECPP_BUSY: 2564 /* 2565 * Change the port status to ECPP_FLUSH to 2566 * indicate to ecpp_wsrv that the wq is being flushed. 2567 */ 2568 pp->e_busy = ECPP_FLUSH; 2569 2570 /* 2571 * dma_cancelled indicates to ecpp_isr() that we have 2572 * turned off the DMA. Since the mutex is held, ecpp_isr() 2573 * may be blocked. Once ecpp_flush() finishes and ecpp_isr() 2574 * gains the mutex, ecpp_isr() will have a _reset_ DMAC. Most 2575 * significantly, the DMAC will be reset after ecpp_isr() was 2576 * invoked. Therefore we need to have a flag "dma_cancelled" 2577 * to signify when the described condition has occured. If 2578 * ecpp_isr() notes a dma_cancelled, it will ignore the DMAC csr 2579 * and simply claim the interupt. 2580 */ 2581 2582 pp->dma_cancelled = TRUE; 2583 2584 /* either DMA or PIO transfer */ 2585 if (COMPAT_DMA(pp) || 2586 (pp->current_mode == ECPP_ECP_MODE) || 2587 (pp->current_mode == ECPP_DIAG_MODE)) { 2588 /* 2589 * if the bcr is zero, then DMA is complete and 2590 * we are waiting for the fifo to drain. Therefore, 2591 * turn off dma. 2592 */ 2593 if (ECPP_DMA_STOP(pp, NULL) == FAILURE) { 2594 ecpp_error(pp->dip, 2595 "ecpp_flush: dma_stop failed.\n"); 2596 } 2597 2598 /* 2599 * If the status of the port is ECPP_BUSY, 2600 * the DMA is stopped by either explicitly above, or by 2601 * ecpp_isr() but the FIFO hasn't drained yet. In either 2602 * case, we need to unbind the dma mappings. 2603 */ 2604 if (ddi_dma_unbind_handle( 2605 pp->dma_handle) != DDI_SUCCESS) 2606 ecpp_error(pp->dip, 2607 "ecpp_flush: unbind failed.\n"); 2608 2609 if (pp->msg != NULL) { 2610 freemsg(pp->msg); 2611 pp->msg = NULL; 2612 } 2613 } else { 2614 /* 2615 * PIO transfer: disable nAck interrups 2616 */ 2617 dcr = DCR_READ(pp); 2618 dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN); 2619 DCR_WRITE(pp, dcr); 2620 ECPP_MASK_INTR(pp); 2621 } 2622 2623 /* 2624 * The transfer is cleaned up. There may or may not be data 2625 * in the fifo. We don't care at this point. Ie. SuperIO may 2626 * transfer the remaining bytes in the fifo or not. it doesn't 2627 * matter. All that is important at this stage is that no more 2628 * fifo timers are started. 2629 */ 2630 2631 timeout_id = pp->timeout_id; 2632 fifo_timer_id = pp->fifo_timer_id; 2633 pp->timeout_id = pp->fifo_timer_id = 0; 2634 pp->softintr_pending = 0; 2635 2636 break; 2637 2638 case ECPP_ERR: 2639 /* 2640 * Change the port status to ECPP_FLUSH to 2641 * indicate to ecpp_wsrv that the wq is being flushed. 2642 */ 2643 pp->e_busy = ECPP_FLUSH; 2644 2645 /* 2646 * Most likely there are mblks in the queue, 2647 * but the driver can not transmit because 2648 * of the bad port status. In this case, 2649 * ecpp_flush() should make sure ecpp_wsrv_timer() 2650 * is turned off. 
2651 */ 2652 wsrv_timer_id = pp->wsrv_timer_id; 2653 pp->wsrv_timer_id = 0; 2654 2655 break; 2656 2657 case ECPP_IDLE: 2658 /* No work to do. Ready to flush */ 2659 break; 2660 2661 default: 2662 ecpp_error(pp->dip, 2663 "ecpp_flush: illegal state %x\n", pp->e_busy); 2664 } 2665 2666 /* in DIAG mode clear TFIFO if needed */ 2667 if (pp->current_mode == ECPP_DIAG_MODE) { 2668 ecr = ECR_READ(pp); 2669 if (!(ecr & ECPP_FIFO_EMPTY)) { 2670 ECR_WRITE(pp, 2671 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 2672 ECR_WRITE(pp, ecr); 2673 } 2674 } 2675 2676 /* Discard all messages on the output queue. */ 2677 flushq(q, FLUSHDATA); 2678 2679 /* The port is no longer flushing or dma'ing for that matter. */ 2680 pp->e_busy = ECPP_IDLE; 2681 2682 /* Set the right phase */ 2683 if (pp->current_mode == ECPP_ECP_MODE) { 2684 if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) { 2685 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 2686 } else { 2687 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 2688 } 2689 } 2690 2691 /* cancel timeouts if any */ 2692 mutex_exit(&pp->umutex); 2693 2694 if (timeout_id) { 2695 (void) untimeout(timeout_id); 2696 } 2697 if (fifo_timer_id) { 2698 (void) untimeout(fifo_timer_id); 2699 } 2700 if (wsrv_timer_id) { 2701 (void) untimeout(wsrv_timer_id); 2702 } 2703 2704 mutex_enter(&pp->umutex); 2705 2706 cv_signal(&pp->pport_cv); /* wake up ecpp_close() */ 2707 } 2708 2709 static void 2710 ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len) 2711 { 2712 ASSERT(mutex_owned(&pp->umutex)); 2713 ASSERT(pp->e_busy == ECPP_BUSY); 2714 2715 ecpp_error(pp->dip, 2716 "ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n", 2717 pp->current_mode, pp->current_phase, ECR_READ(pp), len); 2718 2719 pp->dma_dir = DDI_DMA_WRITE; /* this is a forward transfer */ 2720 2721 switch (pp->current_mode) { 2722 case ECPP_NIBBLE_MODE: 2723 (void) ecpp_1284_termination(pp); 2724 2725 /* After termination we are either Compatible or Centronics */ 2726 2727 /* FALLTHRU */ 2728 2729 case ECPP_CENTRONICS: 2730 case ECPP_COMPAT_MODE: 2731 if (pp->io_mode == ECPP_DMA) { 2732 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2733 return; 2734 } 2735 } else { 2736 /* PIO mode */ 2737 if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) { 2738 return; 2739 } 2740 (void) ecpp_pio_writeb(pp); 2741 } 2742 break; 2743 2744 case ECPP_DIAG_MODE: { 2745 int oldlen; 2746 2747 /* put superio into TFIFO mode, if not already */ 2748 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110); 2749 /* 2750 * DMA would block if the TFIFO is not empty 2751 * if by this moment nobody read these bytes, they`re gone 2752 */ 2753 drv_usecwait(1); 2754 if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) { 2755 ecpp_error(pp->dip, 2756 "ecpp_start: TFIFO not empty, clearing\n"); 2757 ECR_WRITE(pp, 2758 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 2759 ECR_WRITE(pp, 2760 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110); 2761 } 2762 2763 /* we can DMA at most 16 bytes into TFIFO */ 2764 oldlen = len; 2765 if (len > ECPP_FIFO_SZ) { 2766 len = ECPP_FIFO_SZ; 2767 } 2768 2769 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2770 return; 2771 } 2772 2773 /* put the rest of data back on the queue */ 2774 if (oldlen > len) { 2775 ecpp_putback_untransfered(pp, addr + len, oldlen - len); 2776 } 2777 2778 break; 2779 } 2780 2781 case ECPP_ECP_MODE: 2782 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE || 2783 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 2784 2785 /* if in Reverse Phase negotiate to Forward */ 2786 if (pp->current_phase == 
ECPP_PHASE_ECP_REV_IDLE) { 2787 if (ecp_reverse2forward(pp) == FAILURE) { 2788 if (pp->msg) { 2789 (void) putbq(pp->writeq, pp->msg); 2790 } else { 2791 ecpp_putback_untransfered(pp, 2792 addr, len); 2793 } 2794 } 2795 } 2796 2797 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2798 return; 2799 } 2800 2801 break; 2802 } 2803 2804 /* schedule transfer timeout */ 2805 pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp, 2806 pp->xfer_parms.write_timeout * drv_usectohz(1000000)); 2807 } 2808 2809 /* 2810 * Transfer a PIO "block" a byte at a time. 2811 * The block is starts at addr and ends at pp->last_byte 2812 */ 2813 static uint8_t 2814 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len) 2815 { 2816 pp->next_byte = addr; 2817 pp->last_byte = (caddr_t)((ulong_t)addr + len); 2818 2819 if (ecpp_check_status(pp) == FAILURE) { 2820 /* 2821 * if status signals are bad, do not start PIO, 2822 * put everything back on the queue. 2823 */ 2824 ecpp_error(pp->dip, 2825 "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len); 2826 2827 if (pp->msg != NULL) { 2828 /* 2829 * this circumstance we want to copy the 2830 * untransfered section of msg to a new mblk, 2831 * then free the orignal one. 2832 */ 2833 ecpp_putback_untransfered(pp, 2834 (void *)pp->msg->b_rptr, len); 2835 ecpp_error(pp->dip, 2836 "ecpp_prep_pio_xfer: len1=%d\n", len); 2837 2838 freemsg(pp->msg); 2839 pp->msg = NULL; 2840 } else { 2841 ecpp_putback_untransfered(pp, pp->ioblock, len); 2842 ecpp_error(pp->dip, 2843 "ecpp_prep_pio_xfer: len2=%d\n", len); 2844 } 2845 qenable(pp->writeq); 2846 2847 return (FAILURE); 2848 } 2849 2850 pp->dma_cancelled = FALSE; 2851 2852 /* pport must be in PIO mode */ 2853 if (ecr_write(pp, ECR_mode_001 | 2854 ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) { 2855 ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n"); 2856 } 2857 2858 ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n", 2859 DCR_READ(pp), ECR_READ(pp)); 2860 2861 return (SUCCESS); 2862 } 2863 2864 static uint8_t 2865 ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len) 2866 { 2867 uint8_t ecr_mode[] = { 2868 0, 2869 ECR_mode_010, /* Centronix */ 2870 ECR_mode_010, /* Compat */ 2871 0, /* Byte */ 2872 0, /* Nibble */ 2873 ECR_mode_011, /* ECP */ 2874 0, /* Failure */ 2875 ECR_mode_110, /* Diag */ 2876 }; 2877 uint8_t ecr; 2878 2879 ASSERT((pp->current_mode <= ECPP_DIAG_MODE) && 2880 (ecr_mode[pp->current_mode] != 0)); 2881 2882 if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) { 2883 qenable(pp->writeq); 2884 return (FAILURE); 2885 } 2886 2887 if (ecpp_check_status(pp) == FAILURE) { 2888 /* 2889 * if status signals are bad, do not start DMA, but 2890 * rather put everything back on the queue. 2891 */ 2892 ecpp_error(pp->dip, 2893 "ecpp_init_dma_xfer: suspending DMA len=%d\n", 2894 pp->dma_cookie.dmac_size); 2895 2896 if (pp->msg != NULL) { 2897 /* 2898 * this circumstance we want to copy the 2899 * untransfered section of msg to a new mblk, 2900 * then free the orignal one. 
2901 */ 2902 ecpp_putback_untransfered(pp, 2903 (void *)pp->msg->b_rptr, len); 2904 ecpp_error(pp->dip, 2905 "ecpp_init_dma_xfer:a:len=%d\n", len); 2906 2907 freemsg(pp->msg); 2908 pp->msg = NULL; 2909 } else { 2910 ecpp_putback_untransfered(pp, pp->ioblock, len); 2911 ecpp_error(pp->dip, 2912 "ecpp_init_dma_xfer:b:len=%d\n", len); 2913 } 2914 2915 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 2916 ecpp_error(pp->dip, 2917 "ecpp_init_dma_xfer: unbind FAILURE.\n"); 2918 } 2919 qenable(pp->writeq); 2920 return (FAILURE); 2921 } 2922 2923 pp->xfercnt = pp->resid = len; 2924 pp->dma_cancelled = FALSE; 2925 pp->tfifo_intr = 0; 2926 2927 /* set the right ECR mode and disable DMA */ 2928 ecr = ecr_mode[pp->current_mode]; 2929 (void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK); 2930 2931 /* prepare DMAC for a transfer */ 2932 if (ECPP_DMA_START(pp) == FAILURE) { 2933 ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n"); 2934 return (FAILURE); 2935 } 2936 2937 /* GO! */ 2938 (void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK); 2939 2940 return (SUCCESS); 2941 } 2942 2943 static uint8_t 2944 ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len) 2945 { 2946 int err; 2947 off_t woff; 2948 size_t wlen; 2949 2950 ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE); 2951 2952 err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL, 2953 addr, len, pp->dma_dir | DDI_DMA_PARTIAL, 2954 DDI_DMA_DONTWAIT, NULL, 2955 &pp->dma_cookie, &pp->dma_cookie_count); 2956 2957 switch (err) { 2958 case DDI_DMA_MAPPED: 2959 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n"); 2960 2961 pp->dma_nwin = 1; 2962 pp->dma_curwin = 1; 2963 break; 2964 2965 case DDI_DMA_PARTIAL_MAP: { 2966 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n"); 2967 2968 if (ddi_dma_numwin(pp->dma_handle, 2969 &pp->dma_nwin) != DDI_SUCCESS) { 2970 (void) ddi_dma_unbind_handle(pp->dma_handle); 2971 return (FAILURE); 2972 } 2973 pp->dma_curwin = 1; 2974 2975 /* 2976 * The very first window is returned by bind_handle, 2977 * but we must do this explicitly here, otherwise 2978 * next getwin would return wrong cookie dmac_size 2979 */ 2980 if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen, 2981 &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) { 2982 ecpp_error(pp->dip, 2983 "ecpp_setup_dma: ddi_dma_getwin failed!"); 2984 (void) ddi_dma_unbind_handle(pp->dma_handle); 2985 return (FAILURE); 2986 } 2987 2988 ecpp_error(pp->dip, 2989 "ecpp_setup_dma: cookies=%d, windows=%d" 2990 " addr=%lx len=%d\n", 2991 pp->dma_cookie_count, pp->dma_nwin, 2992 pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size); 2993 2994 break; 2995 } 2996 2997 default: 2998 ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err); 2999 return (FAILURE); 3000 } 3001 3002 return (SUCCESS); 3003 } 3004 3005 static void 3006 ecpp_ack_ioctl(queue_t *q, mblk_t *mp) 3007 { 3008 struct iocblk *iocbp; 3009 3010 mp->b_datap->db_type = M_IOCACK; 3011 mp->b_wptr = mp->b_rptr + sizeof (struct iocblk); 3012 3013 if (mp->b_cont) { 3014 freemsg(mp->b_cont); 3015 mp->b_cont = NULL; 3016 } 3017 3018 iocbp = (struct iocblk *)mp->b_rptr; 3019 iocbp->ioc_error = 0; 3020 iocbp->ioc_count = 0; 3021 iocbp->ioc_rval = 0; 3022 3023 qreply(q, mp); 3024 } 3025 3026 static void 3027 ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err) 3028 { 3029 struct iocblk *iocbp; 3030 3031 mp->b_datap->db_type = M_IOCNAK; 3032 mp->b_wptr = mp->b_rptr + sizeof (struct iocblk); 3033 iocbp = (struct iocblk *)mp->b_rptr; 3034 iocbp->ioc_error = err; 3035 
3036 if (mp->b_cont) { 3037 freemsg(mp->b_cont); 3038 mp->b_cont = NULL; 3039 } 3040 3041 qreply(q, mp); 3042 } 3043 3044 uint_t 3045 ecpp_isr(caddr_t arg) 3046 { 3047 struct ecppunit *pp = (struct ecppunit *)(void *)arg; 3048 uint32_t dcsr; 3049 uint8_t dsr; 3050 int cheerio_pend_counter; 3051 int retval = DDI_INTR_UNCLAIMED; 3052 hrtime_t now; 3053 3054 mutex_enter(&pp->umutex); 3055 /* 3056 * interrupt may occur while other thread is holding the lock 3057 * and cancels DMA transfer (e.g. ecpp_flush()) 3058 * since it cannot cancel the interrupt thread, 3059 * it just sets dma_cancelled to TRUE, 3060 * telling interrupt handler to exit immediately 3061 */ 3062 if (pp->dma_cancelled == TRUE) { 3063 ecpp_error(pp->dip, "dma-cancel isr\n"); 3064 3065 pp->intr_hard++; 3066 pp->dma_cancelled = FALSE; 3067 3068 mutex_exit(&pp->umutex); 3069 return (DDI_INTR_CLAIMED); 3070 } 3071 3072 /* Southbridge interrupts are handled separately */ 3073 #if defined(__x86) 3074 if (pp->hw == &x86) 3075 #else 3076 if (pp->hw == &m1553) 3077 #endif 3078 { 3079 retval = ecpp_M1553_intr(pp); 3080 if (retval == DDI_INTR_UNCLAIMED) { 3081 goto unexpected; 3082 } 3083 mutex_exit(&pp->umutex); 3084 return (DDI_INTR_CLAIMED); 3085 } 3086 3087 /* 3088 * the intr is through the motherboard. it is faster than PCI route. 3089 * sometimes ecpp_isr() is invoked before cheerio csr is updated. 3090 */ 3091 cheerio_pend_counter = ecpp_isr_max_delay; 3092 dcsr = GET_DMAC_CSR(pp); 3093 3094 while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) { 3095 drv_usecwait(1); 3096 dcsr = GET_DMAC_CSR(pp); 3097 } 3098 3099 /* 3100 * This is a workaround for what seems to be a timing problem 3101 * with the delivery of interrupts and CSR updating with the 3102 * ebus2 csr, superio and the n_ERR pin from the peripheral. 
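	 * (the extra 100us wait below gives the DCSR a chance to reflect
	 * a pending interrupt before we sample it again)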
3103 * 3104 * delay is not needed for PIO mode 3105 */ 3106 if (!COMPAT_PIO(pp)) { 3107 drv_usecwait(100); 3108 dcsr = GET_DMAC_CSR(pp); 3109 } 3110 3111 /* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */ 3112 dsr = DSR_READ(pp); 3113 3114 /* 3115 * check if interrupt is for this device: 3116 * it should be reflected either in cheerio DCSR register 3117 * or in IRQ_ST bit of DSR on 97317 3118 */ 3119 if ((dcsr & DCSR_INT_PEND) == 0) { 3120 if (pp->hw != &pc97317) { 3121 goto unclaimed; 3122 } 3123 /* 3124 * on Excalibur, reading DSR will deassert SuperIO IRQx line 3125 * RIO's DCSR_INT_PEND seems to follow IRQx transitions, 3126 * so if DSR is read after interrupt occured, but before 3127 * we get here, IRQx and hence INT_PEND will be deasserted 3128 * as a result, we can miss a service interrupt in PIO mode 3129 * 3130 * malicious DSR reader is BPPIOC_TESTIO, which is called 3131 * by LP in between data blocks to check printer status 3132 * this workaround lets us not to miss an interrupt 3133 * 3134 * also, nErr interrupt (ECP mode) not always reflected in DCSR 3135 */ 3136 if (((dsr & ECPP_IRQ_ST) == 0) || 3137 ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) || 3138 (((dsr & ECPP_nERR) == 0) && 3139 (pp->current_mode == ECPP_ECP_MODE))) { 3140 dcsr = 0; 3141 } else { 3142 goto unclaimed; 3143 } 3144 } 3145 3146 pp->intr_hard++; 3147 3148 /* the intr is for us - check all possible interrupt sources */ 3149 if (dcsr & DCSR_ERR_PEND) { 3150 size_t bcr; 3151 3152 /* we are expecting a data transfer interrupt */ 3153 ASSERT(pp->e_busy == ECPP_BUSY); 3154 3155 /* 3156 * some kind of DMA error 3157 */ 3158 if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) { 3159 ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n"); 3160 } 3161 3162 ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr); 3163 3164 ecpp_xfer_cleanup(pp); 3165 3166 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 3167 ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n"); 3168 } 3169 3170 mutex_exit(&pp->umutex); 3171 return (DDI_INTR_CLAIMED); 3172 } 3173 3174 if (dcsr & DCSR_TC) { 3175 retval = ecpp_dma_ihdlr(pp); 3176 mutex_exit(&pp->umutex); 3177 return (DDI_INTR_CLAIMED); 3178 } 3179 3180 if (COMPAT_PIO(pp)) { 3181 retval = ecpp_pio_ihdlr(pp); 3182 mutex_exit(&pp->umutex); 3183 return (DDI_INTR_CLAIMED); 3184 } 3185 3186 /* does peripheral need attention? 
*/ 3187 if ((dsr & ECPP_nERR) == 0) { 3188 retval = ecpp_nErr_ihdlr(pp); 3189 mutex_exit(&pp->umutex); 3190 return (DDI_INTR_CLAIMED); 3191 } 3192 3193 pp->intr_hard--; 3194 3195 unexpected: 3196 3197 pp->intr_spurious++; 3198 3199 /* 3200 * The following procedure tries to prevent soft hangs 3201 * in event of peripheral/superio misbehaviour: 3202 * if number of unexpected interrupts in the last SPUR_PERIOD ns 3203 * exceeded SPUR_CRITICAL, then shut up interrupts 3204 */ 3205 now = gethrtime(); 3206 if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) { 3207 /* last unexpected interrupt was long ago */ 3208 pp->lastspur = now; 3209 pp->nspur = 1; 3210 } else { 3211 /* last unexpected interrupt was recently */ 3212 pp->nspur++; 3213 } 3214 3215 if (pp->nspur >= SPUR_CRITICAL) { 3216 ECPP_MASK_INTR(pp); 3217 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV); 3218 pp->nspur = 0; 3219 cmn_err(CE_NOTE, "%s%d: too many interrupt requests", 3220 ddi_get_name(pp->dip), ddi_get_instance(pp->dip)); 3221 } else { 3222 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK); 3223 } 3224 3225 ecpp_error(pp->dip, 3226 "isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n", 3227 dcsr, ECR_READ(pp), dsr, DCR_READ(pp), 3228 pp->current_mode, pp->current_phase); 3229 3230 mutex_exit(&pp->umutex); 3231 return (DDI_INTR_CLAIMED); 3232 3233 unclaimed: 3234 3235 pp->intr_spurious++; 3236 3237 ecpp_error(pp->dip, 3238 "isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n", 3239 dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp), 3240 pp->current_mode, pp->current_phase); 3241 3242 mutex_exit(&pp->umutex); 3243 return (DDI_INTR_UNCLAIMED); 3244 } 3245 3246 /* 3247 * M1553 intr handler 3248 */ 3249 static uint_t 3250 ecpp_M1553_intr(struct ecppunit *pp) 3251 { 3252 int retval = DDI_INTR_UNCLAIMED; 3253 3254 pp->intr_hard++; 3255 3256 if (pp->e_busy == ECPP_BUSY) { 3257 /* Centronics or Compat PIO transfer */ 3258 if (COMPAT_PIO(pp)) { 3259 return (ecpp_pio_ihdlr(pp)); 3260 } 3261 3262 /* Centronics or Compat DMA transfer */ 3263 if (COMPAT_DMA(pp) || 3264 (pp->current_mode == ECPP_ECP_MODE) || 3265 (pp->current_mode == ECPP_DIAG_MODE)) { 3266 return (ecpp_dma_ihdlr(pp)); 3267 } 3268 } 3269 3270 /* Nibble or ECP backchannel request? */ 3271 if ((DSR_READ(pp) & ECPP_nERR) == 0) { 3272 return (ecpp_nErr_ihdlr(pp)); 3273 } 3274 3275 return (retval); 3276 } 3277 3278 /* 3279 * DMA completion interrupt handler 3280 */ 3281 static uint_t 3282 ecpp_dma_ihdlr(struct ecppunit *pp) 3283 { 3284 clock_t tm; 3285 3286 ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n", 3287 pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 3288 3289 /* we are expecting a data transfer interrupt */ 3290 ASSERT(pp->e_busy == ECPP_BUSY); 3291 3292 /* Intr generated while invoking TFIFO mode. 
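	 * (pp->tfifo_intr is set by ECPPIOC_SETPORT when the port is
	 * switched to TDMA, so this interrupt is a side effect of the
	 * mode change rather than a real transfer completion.)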
Exit */ 3293 if (pp->tfifo_intr == 1) { 3294 pp->tfifo_intr = 0; 3295 ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n"); 3296 return (DDI_INTR_CLAIMED); 3297 } 3298 3299 if (ECPP_DMA_STOP(pp, NULL) == FAILURE) { 3300 ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n"); 3301 } 3302 3303 if (pp->current_mode == ECPP_ECP_MODE && 3304 pp->current_phase == ECPP_PHASE_ECP_REV_XFER) { 3305 ecpp_ecp_read_completion(pp); 3306 } else { 3307 /* 3308 * fifo_timer() will do the cleanup when the FIFO drains 3309 */ 3310 if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) || 3311 (pp->current_mode == ECPP_DIAG_MODE)) { 3312 tm = 0; /* no use in waiting if FIFO is already empty */ 3313 } else { 3314 tm = drv_usectohz(FIFO_DRAIN_PERIOD); 3315 } 3316 pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm); 3317 } 3318 3319 /* 3320 * Stop the DMA transfer timeout timer 3321 * this operation will temporarily give up the mutex, 3322 * so we do it in the end of the handler to avoid races 3323 */ 3324 ecpp_untimeout_unblock(pp, &pp->timeout_id); 3325 3326 return (DDI_INTR_CLAIMED); 3327 } 3328 3329 /* 3330 * ecpp_pio_ihdlr() is a PIO interrupt processing routine 3331 * It masks interrupts, updates statistics and initiates next byte transfer 3332 */ 3333 static uint_t 3334 ecpp_pio_ihdlr(struct ecppunit *pp) 3335 { 3336 ASSERT(mutex_owned(&pp->umutex)); 3337 ASSERT(pp->e_busy == ECPP_BUSY); 3338 3339 /* update statistics */ 3340 pp->joblen++; 3341 pp->ctxpio_obytes++; 3342 3343 /* disable nAck interrups */ 3344 ECPP_MASK_INTR(pp); 3345 DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN)); 3346 3347 /* 3348 * If it was the last byte of the data block cleanup, 3349 * otherwise trigger a soft interrupt to send the next byte 3350 */ 3351 if (pp->next_byte >= pp->last_byte) { 3352 ecpp_xfer_cleanup(pp); 3353 ecpp_error(pp->dip, 3354 "ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n", 3355 pp->joblen, pp->ctx_cf); 3356 } else { 3357 if (pp->softintr_pending) { 3358 ecpp_error(pp->dip, 3359 "ecpp_pio_ihdlr:E: next byte in progress\n"); 3360 } else { 3361 pp->softintr_flags = ECPP_SOFTINTR_PIONEXT; 3362 pp->softintr_pending = 1; 3363 ddi_trigger_softintr(pp->softintr_id); 3364 } 3365 } 3366 3367 return (DDI_INTR_CLAIMED); 3368 } 3369 3370 /* 3371 * ecpp_pio_writeb() sends a byte using Centronics handshake 3372 */ 3373 static void 3374 ecpp_pio_writeb(struct ecppunit *pp) 3375 { 3376 uint8_t dcr; 3377 3378 dcr = DCR_READ(pp) & ~ECPP_REV_DIR; 3379 dcr |= ECPP_INTR_EN; 3380 3381 /* send the next byte */ 3382 DATAR_WRITE(pp, *(pp->next_byte++)); 3383 3384 drv_usecwait(pp->data_setup_time); 3385 3386 /* Now Assert (neg logic) nStrobe */ 3387 if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) { 3388 ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n"); 3389 } 3390 3391 /* Enable nAck interrupts */ 3392 (void) DSR_READ(pp); /* ensure IRQ_ST is armed */ 3393 ECPP_UNMASK_INTR(pp); 3394 3395 drv_usecwait(pp->strobe_pulse_width); 3396 3397 if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) { 3398 ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n"); 3399 } 3400 } 3401 3402 /* 3403 * Backchannel request interrupt handler 3404 */ 3405 static uint_t 3406 ecpp_nErr_ihdlr(struct ecppunit *pp) 3407 { 3408 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n", 3409 pp->current_mode, pp->current_phase); 3410 3411 if (pp->oflag != TRUE) { 3412 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n"); 3413 return (DDI_INTR_UNCLAIMED); 3414 } 3415 3416 if (pp->e_busy == ECPP_BUSY) { 3417 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n"); 
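		/*
		 * A forward transfer is in progress, so just silence the
		 * interrupt for now; the peripheral keeps its request
		 * asserted, so it should be noticed again once the port
		 * goes idle.
		 */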
3418 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK); 3419 return (DDI_INTR_CLAIMED); 3420 } 3421 3422 /* mask nErr & nAck interrupts */ 3423 ECPP_MASK_INTR(pp); 3424 DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR)); 3425 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK); 3426 3427 /* going reverse */ 3428 switch (pp->current_mode) { 3429 case ECPP_ECP_MODE: 3430 /* 3431 * Peripheral asserts nPeriphRequest (nFault) 3432 */ 3433 break; 3434 case ECPP_NIBBLE_MODE: 3435 /* 3436 * Event 18: Periph asserts nErr to indicate data avail 3437 * Event 19: After waiting minimum pulse width, 3438 * periph sets nAck high to generate an interrupt 3439 * 3440 * Interface is in Interrupt Phase 3441 */ 3442 pp->current_phase = ECPP_PHASE_NIBT_REVINTR; 3443 3444 break; 3445 default: 3446 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n"); 3447 return (DDI_INTR_UNCLAIMED); 3448 } 3449 3450 (void) ecpp_backchan_req(pp); /* put backchannel request on the wq */ 3451 3452 return (DDI_INTR_CLAIMED); 3453 } 3454 3455 /* 3456 * Softintr handler does work according to softintr_flags: 3457 * in case of ECPP_SOFTINTR_PIONEXT it sends next byte of PIO transfer 3458 */ 3459 static uint_t 3460 ecpp_softintr(caddr_t arg) 3461 { 3462 struct ecppunit *pp = (struct ecppunit *)arg; 3463 uint32_t unx_len, ecpp_reattempts = 0; 3464 3465 mutex_enter(&pp->umutex); 3466 3467 pp->intr_soft++; 3468 3469 if (!pp->softintr_pending) { 3470 mutex_exit(&pp->umutex); 3471 return (DDI_INTR_CLAIMED); 3472 } else { 3473 pp->softintr_pending = 0; 3474 } 3475 3476 if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) { 3477 pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT; 3478 /* 3479 * Sent next byte in PIO mode 3480 */ 3481 ecpp_reattempts = 0; 3482 do { 3483 if (ecpp_check_status(pp) == SUCCESS) { 3484 pp->e_busy = ECPP_BUSY; 3485 break; 3486 } 3487 drv_usecwait(1); 3488 if (pp->isr_reattempt_high < ecpp_reattempts) { 3489 pp->isr_reattempt_high = ecpp_reattempts; 3490 } 3491 } while (++ecpp_reattempts < pp->wait_for_busy); 3492 3493 /* if the peripheral still not recovered suspend the transfer */ 3494 if (pp->e_busy == ECPP_ERR) { 3495 ++pp->ctx_cf; /* check status fail */ 3496 ecpp_error(pp->dip, "ecpp_softintr:check_status:F: " 3497 "dsr=%x jl=%d cf_isr=%d\n", 3498 DSR_READ(pp), pp->joblen, pp->ctx_cf); 3499 3500 /* 3501 * if status signals are bad, 3502 * put everything back on the wq. 3503 */ 3504 unx_len = pp->last_byte - pp->next_byte; 3505 if (pp->msg != NULL) { 3506 ecpp_putback_untransfered(pp, 3507 (void *)pp->msg->b_rptr, unx_len); 3508 ecpp_error(pp->dip, 3509 "ecpp_softintr:e1:unx_len=%d\n", unx_len); 3510 3511 freemsg(pp->msg); 3512 pp->msg = NULL; 3513 } else { 3514 ecpp_putback_untransfered(pp, 3515 pp->next_byte, unx_len); 3516 ecpp_error(pp->dip, 3517 "ecpp_softintr:e2:unx_len=%d\n", unx_len); 3518 } 3519 3520 ecpp_xfer_cleanup(pp); 3521 pp->e_busy = ECPP_ERR; 3522 qenable(pp->writeq); 3523 } else { 3524 /* send the next one */ 3525 pp->e_busy = ECPP_BUSY; 3526 (void) ecpp_pio_writeb(pp); 3527 } 3528 } 3529 3530 mutex_exit(&pp->umutex); 3531 return (DDI_INTR_CLAIMED); 3532 } 3533 3534 3535 /* 3536 * Transfer clean-up: 3537 * shut down the DMAC 3538 * stop the transfer timer 3539 * enable write queue 3540 */ 3541 static void 3542 ecpp_xfer_cleanup(struct ecppunit *pp) 3543 { 3544 ASSERT(mutex_owned(&pp->umutex)); 3545 3546 /* 3547 * if we did not use the ioblock, the mblk that 3548 * was used should be freed. 
3549 */ 3550 if (pp->msg != NULL) { 3551 freemsg(pp->msg); 3552 pp->msg = NULL; 3553 } 3554 3555 /* The port is no longer active */ 3556 pp->e_busy = ECPP_IDLE; 3557 3558 /* Stop the transfer timeout timer */ 3559 ecpp_untimeout_unblock(pp, &pp->timeout_id); 3560 3561 qenable(pp->writeq); 3562 } 3563 3564 /*VARARGS*/ 3565 static void 3566 ecpp_error(dev_info_t *dip, char *fmt, ...) 3567 { 3568 static long last; 3569 static char *lastfmt; 3570 char msg_buffer[255]; 3571 va_list ap; 3572 time_t now; 3573 3574 if (!ecpp_debug) { 3575 return; 3576 } 3577 3578 /* 3579 * This function is supposed to be a quick non-blockable 3580 * wrapper for cmn_err(9F), which provides a sensible degree 3581 * of debug message throttling. Not using any type of lock 3582 * is a requirement, but this also leaves two static variables 3583 * - last and lastfmt - unprotected. However, this will not do 3584 * any harm to driver functionality, it can only weaken throttling. 3585 * The following directive asks warlock to not worry about these 3586 * variables. 3587 */ 3588 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(last, lastfmt)) 3589 3590 /* 3591 * Don't print same error message too often. 3592 */ 3593 now = gethrestime_sec(); 3594 if ((last == (now & ~1)) && (lastfmt == fmt)) 3595 return; 3596 3597 last = now & ~1; 3598 lastfmt = fmt; 3599 3600 va_start(ap, fmt); 3601 (void) vsprintf(msg_buffer, fmt, ap); 3602 cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip), 3603 ddi_get_instance(dip), msg_buffer); 3604 va_end(ap); 3605 } 3606 3607 /* 3608 * Forward transfer timeout 3609 */ 3610 static void 3611 ecpp_xfer_timeout(void *arg) 3612 { 3613 struct ecppunit *pp = arg; 3614 void *unx_addr; 3615 size_t unx_len, xferd; 3616 uint8_t dcr; 3617 timeout_id_t fifo_timer_id; 3618 3619 mutex_enter(&pp->umutex); 3620 3621 if (pp->timeout_id == 0) { 3622 mutex_exit(&pp->umutex); 3623 return; 3624 } else { 3625 pp->timeout_id = 0; 3626 } 3627 3628 pp->xfer_tout++; 3629 3630 pp->dma_cancelled = TRUE; /* prevent race with isr() */ 3631 3632 if (COMPAT_PIO(pp)) { 3633 /* 3634 * PIO mode timeout 3635 */ 3636 3637 /* turn off nAck interrupts */ 3638 dcr = DCR_READ(pp); 3639 (void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN)); 3640 ECPP_MASK_INTR(pp); 3641 3642 pp->softintr_pending = 0; 3643 unx_len = pp->last_byte - pp->next_byte; 3644 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len); 3645 3646 if (unx_len > 0) { 3647 unx_addr = pp->next_byte; 3648 } else { 3649 ecpp_xfer_cleanup(pp); 3650 qenable(pp->writeq); 3651 mutex_exit(&pp->umutex); 3652 return; 3653 } 3654 } else { 3655 /* 3656 * DMA mode timeout 3657 * 3658 * If DMAC fails to shut off, continue anyways and attempt 3659 * to put untransfered data back on queue. 3660 */ 3661 if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) { 3662 ecpp_error(pp->dip, 3663 "ecpp_xfer_timeout: failed dma_stop\n"); 3664 } 3665 3666 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len); 3667 3668 if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) { 3669 ecpp_error(pp->dip, 3670 "ecpp_xfer_timeout: failed unbind\n"); 3671 } 3672 3673 /* 3674 * if the bcr is zero, then DMA is complete and 3675 * we are waiting for the fifo to drain. So let 3676 * ecpp_fifo_timer() look after the clean up. 
3677 */ 3678 if (unx_len == 0) { 3679 qenable(pp->writeq); 3680 mutex_exit(&pp->umutex); 3681 return; 3682 } else { 3683 xferd = pp->dma_cookie.dmac_size - unx_len; 3684 pp->resid -= xferd; 3685 unx_len = pp->resid; 3686 3687 /* update statistics */ 3688 pp->obytes[pp->current_mode] += xferd; 3689 pp->joblen += xferd; 3690 3691 if (pp->msg != NULL) { 3692 unx_addr = (caddr_t)pp->msg->b_wptr - unx_len; 3693 } else { 3694 unx_addr = pp->ioblock + 3695 (pp->xfercnt - unx_len); 3696 } 3697 } 3698 } 3699 3700 /* Following code is common for PIO and DMA modes */ 3701 3702 ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len); 3703 3704 if (pp->msg != NULL) { 3705 freemsg(pp->msg); 3706 pp->msg = NULL; 3707 } 3708 3709 /* mark the error status structure */ 3710 pp->timeout_error = 1; 3711 pp->e_busy = ECPP_ERR; 3712 fifo_timer_id = pp->fifo_timer_id; 3713 pp->fifo_timer_id = 0; 3714 3715 qenable(pp->writeq); 3716 3717 mutex_exit(&pp->umutex); 3718 3719 if (fifo_timer_id) { 3720 (void) untimeout(fifo_timer_id); 3721 } 3722 } 3723 3724 static void 3725 ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len) 3726 { 3727 mblk_t *new_mp; 3728 3729 ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len); 3730 3731 if (len == 0) { 3732 return; 3733 } 3734 3735 new_mp = allocb(len, BPRI_MED); 3736 if (new_mp == NULL) { 3737 ecpp_error(pp->dip, 3738 "ecpp_putback_untransfered: allocb FAILURE.\n"); 3739 return; 3740 } 3741 3742 bcopy(startp, new_mp->b_rptr, len); 3743 new_mp->b_wptr = new_mp->b_rptr + len; 3744 3745 if (!putbq(pp->writeq, new_mp)) { 3746 freemsg(new_mp); 3747 } 3748 } 3749 3750 static uchar_t 3751 ecr_write(struct ecppunit *pp, uint8_t ecr_byte) 3752 { 3753 int i, current_ecr; 3754 3755 for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) { 3756 ECR_WRITE(pp, ecr_byte); 3757 3758 current_ecr = ECR_READ(pp); 3759 3760 /* mask off the lower two read-only bits */ 3761 if ((ecr_byte & 0xFC) == (current_ecr & 0xFC)) 3762 return (SUCCESS); 3763 } 3764 return (FAILURE); 3765 } 3766 3767 static uchar_t 3768 dcr_write(struct ecppunit *pp, uint8_t dcr_byte) 3769 { 3770 uint8_t current_dcr; 3771 int i; 3772 3773 for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) { 3774 DCR_WRITE(pp, dcr_byte); 3775 3776 current_dcr = DCR_READ(pp); 3777 3778 /* compare only bits 0-4 (direction bit return 1) */ 3779 if ((dcr_byte & 0x1F) == (current_dcr & 0x1F)) 3780 return (SUCCESS); 3781 } 3782 ecpp_error(pp->dip, 3783 "(%d)dcr_write: dcr written =%x, dcr readback =%x\n", 3784 i, dcr_byte, current_dcr); 3785 3786 return (FAILURE); 3787 } 3788 3789 static uchar_t 3790 ecpp_reset_port_regs(struct ecppunit *pp) 3791 { 3792 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 3793 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 3794 return (SUCCESS); 3795 } 3796 3797 /* 3798 * The data transferred by the DMA engine goes through the FIFO, 3799 * so that when the DMA counter reaches zero (and an interrupt occurs) 3800 * the FIFO can still contain data. If this is the case, the ISR will 3801 * schedule this callback to wait until the FIFO drains or a timeout occurs. 3802 */ 3803 static void 3804 ecpp_fifo_timer(void *arg) 3805 { 3806 struct ecppunit *pp = arg; 3807 uint8_t ecr; 3808 timeout_id_t timeout_id; 3809 3810 mutex_enter(&pp->umutex); 3811 3812 /* 3813 * If the FIFO timer has been turned off, exit. 
3814 */ 3815 if (pp->fifo_timer_id == 0) { 3816 ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n"); 3817 mutex_exit(&pp->umutex); 3818 return; 3819 } else { 3820 pp->fifo_timer_id = 0; 3821 } 3822 3823 /* 3824 * If the FIFO is not empty restart timer. Wait FIFO_DRAIN_PERIOD 3825 * (250 ms) and check FIFO_EMPTY bit again. Repeat until FIFO is 3826 * empty or until 10 * FIFO_DRAIN_PERIOD expires. 3827 */ 3828 ecr = ECR_READ(pp); 3829 3830 if ((pp->current_mode != ECPP_DIAG_MODE) && 3831 (((ecr & ECPP_FIFO_EMPTY) == 0) && 3832 (pp->ecpp_drain_counter < 10))) { 3833 3834 ecpp_error(pp->dip, 3835 "ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n", 3836 pp->ecpp_drain_counter, ecr); 3837 3838 pp->fifo_timer_id = timeout(ecpp_fifo_timer, 3839 (caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD)); 3840 ++pp->ecpp_drain_counter; 3841 3842 mutex_exit(&pp->umutex); 3843 return; 3844 } 3845 3846 if (pp->current_mode != ECPP_DIAG_MODE) { 3847 /* 3848 * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs 3849 * then don't wait any longer. Simply clean up the transfer. 3850 */ 3851 if (pp->ecpp_drain_counter >= 10) { 3852 ecpp_error(pp->dip, "ecpp_fifo_timer(%d):" 3853 " clearing FIFO,can't wait:ecr=%x\n", 3854 pp->ecpp_drain_counter, ecr); 3855 } else { 3856 ecpp_error(pp->dip, 3857 "ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n", 3858 pp->ecpp_drain_counter, ecr); 3859 } 3860 3861 pp->ecpp_drain_counter = 0; 3862 } 3863 3864 /* 3865 * Main section of routine: 3866 * - stop the DMA transfer timer 3867 * - program DMA with next cookie/window or unbind the DMA mapping 3868 * - update stats 3869 * - if last mblk in queue, signal to close() & return to idle state 3870 */ 3871 3872 /* Stop the DMA transfer timeout timer */ 3873 timeout_id = pp->timeout_id; 3874 pp->timeout_id = 0; 3875 3876 /* data has drained from fifo, it is ok to free dma resource */ 3877 if (pp->current_mode == ECPP_ECP_MODE || 3878 pp->current_mode == ECPP_DIAG_MODE || 3879 COMPAT_DMA(pp)) { 3880 off_t off; 3881 size_t len; 3882 3883 /* update residual */ 3884 pp->resid -= pp->dma_cookie.dmac_size; 3885 3886 /* update statistics */ 3887 pp->joblen += pp->dma_cookie.dmac_size; 3888 if (pp->dma_dir == DDI_DMA_WRITE) { 3889 pp->obytes[pp->current_mode] += 3890 pp->dma_cookie.dmac_size; 3891 } else { 3892 pp->ibytes[pp->current_mode] += 3893 pp->dma_cookie.dmac_size; 3894 } 3895 3896 /* 3897 * Look if any cookies/windows left 3898 */ 3899 if (--pp->dma_cookie_count > 0) { 3900 /* process the next cookie */ 3901 ddi_dma_nextcookie(pp->dma_handle, 3902 &pp->dma_cookie); 3903 } else if (pp->dma_curwin < pp->dma_nwin) { 3904 /* process the next window */ 3905 if (ddi_dma_getwin(pp->dma_handle, 3906 pp->dma_curwin, &off, &len, 3907 &pp->dma_cookie, 3908 &pp->dma_cookie_count) != DDI_SUCCESS) { 3909 ecpp_error(pp->dip, 3910 "ecpp_fifo_timer: ddi_dma_getwin failed\n"); 3911 goto dma_done; 3912 } 3913 3914 pp->dma_curwin++; 3915 } else { 3916 goto dma_done; 3917 } 3918 3919 ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n", 3920 pp->dma_cookie.dmac_address, 3921 pp->dma_cookie.dmac_size); 3922 3923 /* kick off new transfer */ 3924 if (ECPP_DMA_START(pp) != SUCCESS) { 3925 ecpp_error(pp->dip, 3926 "ecpp_fifo_timer: dma_start failed\n"); 3927 goto dma_done; 3928 } 3929 3930 (void) ecr_write(pp, (ecr & 0xe0) | 3931 ECPP_DMA_ENABLE | ECPP_INTR_MASK); 3932 3933 mutex_exit(&pp->umutex); 3934 3935 if (timeout_id) { 3936 (void) untimeout(timeout_id); 3937 } 3938 return; 3939 3940 dma_done: 3941 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 3942 
ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n"); 3943 } else { 3944 ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n"); 3945 } 3946 } 3947 3948 /* 3949 * if we did not use the dmablock, the mblk that 3950 * was used should be freed. 3951 */ 3952 if (pp->msg != NULL) { 3953 freemsg(pp->msg); 3954 pp->msg = NULL; 3955 } 3956 3957 /* The port is no longer active */ 3958 pp->e_busy = ECPP_IDLE; 3959 3960 qenable(pp->writeq); 3961 3962 mutex_exit(&pp->umutex); 3963 3964 if (timeout_id) { 3965 (void) untimeout(timeout_id); 3966 } 3967 } 3968 3969 /* 3970 * In Compatibility mode, check if the peripheral is ready to accept data 3971 */ 3972 static uint8_t 3973 ecpp_check_status(struct ecppunit *pp) 3974 { 3975 uint8_t dsr; 3976 uint8_t statmask; 3977 3978 if (pp->current_mode == ECPP_ECP_MODE || 3979 pp->current_mode == ECPP_DIAG_MODE) 3980 return (SUCCESS); 3981 3982 statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK; 3983 3984 dsr = DSR_READ(pp); 3985 if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) { 3986 pp->e_busy = ECPP_ERR; 3987 return (FAILURE); 3988 } else { 3989 return (SUCCESS); 3990 } 3991 } 3992 3993 /* 3994 * if the peripheral is not ready to accept data, write service routine 3995 * periodically reschedules itself to recheck peripheral status 3996 * and start data transfer as soon as possible 3997 */ 3998 static void 3999 ecpp_wsrv_timer(void *arg) 4000 { 4001 struct ecppunit *pp = arg; 4002 4003 ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n"); 4004 4005 mutex_enter(&pp->umutex); 4006 4007 if (pp->wsrv_timer_id == 0) { 4008 mutex_exit(&pp->umutex); 4009 return; 4010 } else { 4011 pp->wsrv_timer_id = 0; 4012 } 4013 4014 ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n"); 4015 4016 qenable(pp->writeq); 4017 4018 mutex_exit(&pp->umutex); 4019 } 4020 4021 /* 4022 * Allocate a message indicating a backchannel request 4023 * and put it on the write queue 4024 */ 4025 static int 4026 ecpp_backchan_req(struct ecppunit *pp) 4027 { 4028 mblk_t *mp; 4029 4030 if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) { 4031 ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n"); 4032 return (FAILURE); 4033 } else { 4034 mp->b_datap->db_type = M_CTL; 4035 *(int *)mp->b_rptr = ECPP_BACKCHANNEL; 4036 mp->b_wptr = mp->b_rptr + sizeof (int); 4037 if (!putbq(pp->writeq, mp)) { 4038 ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n"); 4039 freemsg(mp); 4040 return (FAILURE); 4041 } 4042 return (SUCCESS); 4043 } 4044 } 4045 4046 /* 4047 * Cancel the function scheduled with timeout(9F) 4048 * This function is to be called with the mutex held 4049 */ 4050 static void 4051 ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id) 4052 { 4053 timeout_id_t saved_id; 4054 4055 ASSERT(mutex_owned(&pp->umutex)); 4056 4057 if (*id) { 4058 saved_id = *id; 4059 *id = 0; 4060 mutex_exit(&pp->umutex); 4061 (void) untimeout(saved_id); 4062 mutex_enter(&pp->umutex); 4063 } 4064 } 4065 4066 /* 4067 * get prnio interface capabilities 4068 */ 4069 static uint_t 4070 ecpp_get_prn_ifcap(struct ecppunit *pp) 4071 { 4072 uint_t ifcap; 4073 4074 ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS; 4075 4076 /* status (DSR) only makes sense in Centronics & Compat modes */ 4077 if (pp->current_mode == ECPP_CENTRONICS || 4078 pp->current_mode == ECPP_COMPAT_MODE) { 4079 ifcap |= PRN_1284_STATUS; 4080 } else if (pp->current_mode == ECPP_NIBBLE_MODE || 4081 pp->current_mode == ECPP_ECP_MODE) { 4082 ifcap |= PRN_BIDI; 4083 } 4084 4085 return (ifcap); 4086 } 4087 4088 /* 4089 * Determine SuperI/O 
type 4090 */ 4091 static struct ecpp_hw_bind * 4092 ecpp_determine_sio_type(struct ecppunit *pp) 4093 { 4094 struct ecpp_hw_bind *hw_bind; 4095 char *name; 4096 int i; 4097 4098 name = ddi_binding_name(pp->dip); 4099 4100 for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) { 4101 if (strcmp(name, ecpp_hw_bind[i].name) == 0) { 4102 hw_bind = &ecpp_hw_bind[i]; 4103 break; 4104 } 4105 } 4106 4107 return (hw_bind); 4108 } 4109 4110 4111 /* 4112 * 4113 * IEEE 1284 support routines: 4114 * negotiation and termination; 4115 * phase transitions; 4116 * device ID; 4117 * 4118 */ 4119 4120 /* 4121 * Interface initialization, abnormal termination into Compatibility mode 4122 * 4123 * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS 4124 */ 4125 static void 4126 ecpp_1284_init_interface(struct ecppunit *pp) 4127 { 4128 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4129 4130 /* 4131 * Toggle the nInit signal if configured in ecpp.conf 4132 * for most peripherals it is not needed 4133 */ 4134 if (pp->init_seq == TRUE) { 4135 DCR_WRITE(pp, ECPP_SLCTIN); 4136 drv_usecwait(50); /* T(ER) = 50us */ 4137 } 4138 4139 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4140 4141 pp->current_mode = pp->backchannel = ECPP_CENTRONICS; 4142 pp->current_phase = ECPP_PHASE_C_IDLE; 4143 ECPP_CONFIG_MODE(pp); 4144 pp->to_mode[pp->current_mode]++; 4145 4146 ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n"); 4147 } 4148 4149 /* 4150 * ECP mode negotiation 4151 */ 4152 static int 4153 ecp_negotiation(struct ecppunit *pp) 4154 { 4155 uint8_t dsr; 4156 4157 /* ECP mode negotiation */ 4158 4159 if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE) 4160 return (FAILURE); 4161 4162 /* Event 5: peripheral deasserts PError and Busy, asserts Select */ 4163 if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) != 4164 (ECPP_nBUSY | ECPP_SLCT)) { 4165 ecpp_error(pp->dip, 4166 "ecp_negotiation: failed event 5 %x\n", DSR_READ(pp)); 4167 (void) ecpp_1284_termination(pp); 4168 return (FAILURE); 4169 } 4170 4171 /* entered Setup Phase */ 4172 pp->current_phase = ECPP_PHASE_ECP_SETUP; 4173 4174 /* Event 30: host asserts nAutoFd */ 4175 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4176 4177 /* Event 31: peripheral asserts PError */ 4178 if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) { 4179 ecpp_error(pp->dip, 4180 "ecp_negotiation: failed event 31 %x\n", DSR_READ(pp)); 4181 (void) ecpp_1284_termination(pp); 4182 return (FAILURE); 4183 } 4184 4185 /* entered Forward Idle Phase */ 4186 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 4187 4188 /* successful negotiation into ECP mode */ 4189 pp->current_mode = ECPP_ECP_MODE; 4190 pp->backchannel = ECPP_ECP_MODE; 4191 4192 ecpp_error(pp->dip, "ecp_negotiation: ok\n"); 4193 4194 return (SUCCESS); 4195 } 4196 4197 /* 4198 * Nibble mode negotiation 4199 */ 4200 static int 4201 nibble_negotiation(struct ecppunit *pp) 4202 { 4203 uint8_t dsr; 4204 4205 if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) { 4206 return (FAILURE); 4207 } 4208 4209 /* 4210 * If peripheral has data available, PE and nErr will 4211 * be set low at Event 5 & 6. 
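	 * The phase recorded below (NIBT_AVAIL vs NIBT_NAVAIL) is what
	 * tells the rest of the driver whether the peripheral already
	 * has reverse-channel data pending.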
4212 */ 4213 if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) { 4214 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 4215 } else { 4216 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL; 4217 } 4218 4219 /* successful negotiation into Nibble mode */ 4220 pp->current_mode = ECPP_NIBBLE_MODE; 4221 pp->backchannel = ECPP_NIBBLE_MODE; 4222 4223 ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n", 4224 pp->current_phase); 4225 4226 return (SUCCESS); 4227 4228 } 4229 4230 /* 4231 * Wait ptimeout usec for periph to set 'mask' bits to 'val' state 4232 * 4233 * return value < 0 indicates timeout 4234 */ 4235 static int 4236 wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout) 4237 { 4238 while (((DSR_READ(pp) & mask) != val) && ptimeout--) { 4239 drv_usecwait(1); 4240 } 4241 4242 return (ptimeout); 4243 } 4244 4245 /* 4246 * 1284 negotiation Events 0..6 4247 * required mode is indicated by extensibility request value 4248 * 4249 * After successful negotiation SUCCESS is returned and 4250 * current mode is set according to xreq, 4251 * otherwise FAILURE is returned and current mode is set to 4252 * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph) 4253 * 4254 * Current phase must be set by the caller (mode-specific negotiation) 4255 * 4256 * If rdsr is not NULL, DSR value after Event 6 is stored here 4257 */ 4258 static int 4259 ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr) 4260 { 4261 int xflag; 4262 4263 ecpp_error(pp->dip, "nego(%x): entering...\n", xreq); 4264 4265 /* negotiation should start in Compatibility mode */ 4266 (void) ecpp_1284_termination(pp); 4267 4268 /* Set host into Compat mode */ 4269 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4270 4271 pp->current_phase = ECPP_PHASE_NEGO; 4272 4273 /* Event 0: host sets extensibility request on data lines */ 4274 DATAR_WRITE(pp, xreq); 4275 4276 /* Event 1: host deassert nSelectin and assert nAutoFd */ 4277 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4278 4279 drv_usecwait(1); /* Tp(ecp) == 0.5us */ 4280 4281 /* 4282 * Event 2: peripheral asserts nAck, deasserts nFault, 4283 * asserts Select, asserts PError 4284 */ 4285 if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK, 4286 ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) { 4287 /* peripheral is not 1284-compliant */ 4288 ecpp_error(pp->dip, 4289 "nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp)); 4290 (void) ecpp_1284_termination(pp); 4291 return (FAILURE); 4292 } 4293 4294 /* 4295 * Event 3: host asserts nStrobe, latching extensibility value into 4296 * peripherals input latch. 4297 */ 4298 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB); 4299 4300 drv_usecwait(2); /* Tp(ecp) = 0.5us */ 4301 4302 /* 4303 * Event 4: hosts deasserts nStrobe and nAutoFD to acknowledge that 4304 * it has recognized an 1284 compatible peripheral 4305 */ 4306 DCR_WRITE(pp, ECPP_nINIT); 4307 4308 /* 4309 * Event 5: Peripheral confirms it supports requested extension 4310 * For Nibble mode Xflag must be low, otherwise it must be high 4311 */ 4312 xflag = (xreq == ECPP_XREQ_NIBBLE) ? 
0 : ECPP_SLCT; 4313 4314 /* 4315 * Event 6: Peripheral sets nAck high 4316 * indicating that status lines are valid 4317 */ 4318 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4319 /* Something wrong with peripheral */ 4320 ecpp_error(pp->dip, 4321 "nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp)); 4322 (void) ecpp_1284_termination(pp); 4323 return (FAILURE); 4324 } 4325 4326 if ((DSR_READ(pp) & ECPP_SLCT) != xflag) { 4327 /* Extensibility value is not supported */ 4328 ecpp_error(pp->dip, 4329 "nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp)); 4330 (void) ecpp_1284_termination(pp); 4331 return (FAILURE); 4332 } 4333 4334 if (rdsr) { 4335 *rdsr = DSR_READ(pp); 4336 } 4337 4338 return (SUCCESS); 4339 } 4340 4341 /* 4342 * 1284 Termination: Events 22..28 - set link to Compatibility mode 4343 * 4344 * This routine is not designed for Immediate termination, 4345 * caller must take care of waiting for a valid state, 4346 * (in particular, in ECP mode current phase must be Forward Idle) 4347 * otherwise interface will be reinitialized 4348 * 4349 * In case of Valid state termination SUCCESS is returned and 4350 * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE 4351 * Otherwise interface is reinitialized, FAILURE is returned and 4352 * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE 4353 */ 4354 static int 4355 ecpp_1284_termination(struct ecppunit *pp) 4356 { 4357 int previous_mode = pp->current_mode; 4358 4359 if (((pp->current_mode == ECPP_COMPAT_MODE || 4360 pp->current_mode == ECPP_CENTRONICS) && 4361 pp->current_phase == ECPP_PHASE_C_IDLE) || 4362 pp->current_mode == ECPP_DIAG_MODE) { 4363 ecpp_error(pp->dip, "termination: not needed\n"); 4364 return (SUCCESS); 4365 } 4366 4367 /* Set host into Compat mode, interrupts disabled */ 4368 ECPP_MASK_INTR(pp); 4369 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4370 4371 pp->current_mode = ECPP_COMPAT_MODE; /* needed by next function */ 4372 4373 ECPP_CONFIG_MODE(pp); 4374 4375 /* 4376 * EPP mode uses simple nInit pulse for termination 4377 */ 4378 if (previous_mode == ECPP_EPP_MODE) { 4379 /* Event 68: host sets nInit low */ 4380 DCR_WRITE(pp, 0); 4381 4382 drv_usecwait(55); /* T(ER) = 50us */ 4383 4384 /* Event 69: host sets nInit high */ 4385 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4386 4387 goto endterm; 4388 } 4389 4390 /* terminate peripheral to Compat mode */ 4391 pp->current_phase = ECPP_PHASE_TERM; 4392 4393 /* Event 22: hosts sets nSelectIn low and nAutoFd high */ 4394 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4395 4396 /* Event 23: peripheral deasserts nFault and nBusy */ 4397 /* Event 24: peripheral asserts nAck */ 4398 if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK, 4399 ECPP_nERR, 35000) < 0) { 4400 ecpp_error(pp->dip, 4401 "termination: failed events 23,24 %x\n", DSR_READ(pp)); 4402 ecpp_1284_init_interface(pp); 4403 return (FAILURE); 4404 } 4405 4406 drv_usecwait(1); /* Tp = 0.5us */ 4407 4408 /* Event 25: hosts sets nAutoFd low */ 4409 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX); 4410 4411 /* Event 26: the peripheral puts itself in Compatible mode */ 4412 4413 /* Event 27: peripheral deasserts nAck */ 4414 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4415 ecpp_error(pp->dip, 4416 "termination: failed event 27 %x\n", DSR_READ(pp)); 4417 ecpp_1284_init_interface(pp); 4418 return (FAILURE); 4419 } 4420 4421 drv_usecwait(1); /* Tp = 0.5us */ 4422 4423 /* Event 28: hosts deasserts nAutoFd */ 4424 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4425 4426 
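	/*
	 * Events 22..28 are complete; allow one more Tp to settle before
	 * declaring the link to be in Compatible mode Idle Phase below.
	 */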
	drv_usecwait(1);	/* Tp = 0.5us */

endterm:
	/* Compatible mode Idle Phase */
	pp->current_phase = ECPP_PHASE_C_IDLE;

	ecpp_error(pp->dip, "termination: completed %x %x\n",
	    DSR_READ(pp), DCR_READ(pp));

	return (SUCCESS);
}

/*
 * Initiate ECP backchannel DMA transfer
 */
static uchar_t
ecp_peripheral2host(struct ecppunit *pp)
{
	mblk_t *mp = NULL;
	size_t len;
	uint32_t xfer_time;

	ASSERT(pp->current_mode == ECPP_ECP_MODE &&
	    pp->current_phase == ECPP_PHASE_ECP_REV_IDLE);

	/*
	 * hardware generates cycles to receive data from the peripheral;
	 * we only need to read from the FIFO
	 */

	/*
	 * If user issued read(2) of rev_resid bytes, xfer exactly this amount
	 * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read
	 * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes
	 */
	if (pp->nread > 0) {
		len = min(pp->nread, ECP_REV_BLKSZ_MAX);
	} else {
		len = ECP_REV_BLKSZ_MAX;
	}

	pp->nread = 0;	/* clear after use */

	/*
	 * Allocate mblk for data, make at most 2 attempts:
	 * if a len-byte block fails, fall back to our block size
	 */
	while ((mp = allocb(len, BPRI_MED)) == NULL) {
		ecpp_error(pp->dip,
		    "ecp_periph2host: failed allocb(%d)\n", len);
		if (len > ECP_REV_BLKSZ) {
			len = ECP_REV_BLKSZ;
		} else {
			break;
		}
	}

	if (mp == NULL) {
		goto fail;
	}

	pp->msg = mp;
	pp->e_busy = ECPP_BUSY;
	pp->dma_dir = DDI_DMA_READ;
	pp->current_phase = ECPP_PHASE_ECP_REV_XFER;

	if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) {
		goto fail;
	}

	/*
	 * there are two problems with defining the ECP backchannel
	 * xfer timeout:
	 *
	 * a) IEEE 1284 allows infinite time between backchannel bytes,
	 *    but we must stop at some point to send the data upstream,
	 *    look if any forward transfer requests are pending, etc;
	 *    all that done, we can continue with backchannel data;
	 *
	 * b) we don't know how much data the peripheral has;
	 *    DMA counter is set to our buffer size, which can be bigger
	 *    than needed - in this case a timeout must detect this;
	 *
	 * The timeout we schedule here serves as both the transfer timeout
	 * and a means of detecting backchannel stalls; in fact, there are
	 * two timeouts in one:
	 *
	 * - transfer timeout is based on the ECP bandwidth of ~1MB/sec and
	 *   equals the time needed to transfer the whole buffer
	 *   (but not less than ECP_REV_MINTOUT ms); if it occurs,
	 *   DMA is stopped and the data is sent upstream;
	 *
	 * - backchannel watchdog, which would look at the DMA counter
	 *   every rev_watchdog ms and stop the transfer only
	 *   if the counter hasn't changed since the last time;
	 *   otherwise it would save the DMA counter value and restart itself;
	 *
	 * transfer timeout is a multiple of rev_watchdog
	 * and implemented as a downward counter
	 *
	 * on Grover, we can't access DMAC registers while DMA is in flight,
	 * so we can't have the watchdog on Grover, only the timeout
	 */

	/* calculate number of watchdog invocations equal to the xfer timeout */
	xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT);
#if defined(__x86)
	pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 :
	    max(xfer_time / pp->rev_watchdog, 1);
#else
	pp->rev_timeout_cnt = (pp->hw == &m1553) ?
1 : 4536 max(xfer_time / pp->rev_watchdog, 1); 4537 #endif 4538 4539 pp->last_dmacnt = len; /* nothing xferred yet */ 4540 4541 pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp, 4542 drv_usectohz(pp->rev_watchdog * 1000)); 4543 4544 ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n" 4545 "xfer_time=%d wdog=%d cnt=%d\n", 4546 len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt); 4547 4548 return (SUCCESS); 4549 4550 fail: 4551 if (mp) { 4552 freemsg(mp); 4553 } 4554 pp->e_busy = ECPP_IDLE; 4555 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4556 4557 return (FAILURE); 4558 } 4559 4560 /* 4561 * ECP backchannel read timeout 4562 * implements both backchannel watchdog and transfer timeout in ECP mode 4563 * if the transfer is still in progress, reschedule itself, 4564 * otherwise call completion routine 4565 */ 4566 static void 4567 ecpp_ecp_read_timeout(void *arg) 4568 { 4569 struct ecppunit *pp = arg; 4570 size_t dmacnt; 4571 4572 mutex_enter(&pp->umutex); 4573 4574 if (pp->timeout_id == 0) { 4575 mutex_exit(&pp->umutex); 4576 return; 4577 } else { 4578 pp->timeout_id = 0; 4579 } 4580 4581 if (--pp->rev_timeout_cnt == 0) { 4582 /* 4583 * Transfer timed out 4584 */ 4585 ecpp_error(pp->dip, "ecp_read_timeout: timeout\n"); 4586 pp->xfer_tout++; 4587 ecpp_ecp_read_completion(pp); 4588 } else { 4589 /* 4590 * Backchannel watchdog: 4591 * look if DMA made any progress from the last time 4592 */ 4593 dmacnt = ECPP_DMA_GETCNT(pp); 4594 if (dmacnt - pp->last_dmacnt == 0) { 4595 /* 4596 * No progress - stop the transfer and send 4597 * whatever has been read so far up the stream 4598 */ 4599 ecpp_error(pp->dip, "ecp_read_timeout: no progress\n"); 4600 pp->xfer_tout++; 4601 ecpp_ecp_read_completion(pp); 4602 } else { 4603 /* 4604 * Something was transferred - restart ourselves 4605 */ 4606 ecpp_error(pp->dip, "ecp_read_timeout: restarting\n"); 4607 pp->last_dmacnt = dmacnt; 4608 pp->timeout_id = timeout(ecpp_ecp_read_timeout, 4609 (caddr_t)pp, 4610 drv_usectohz(pp->rev_watchdog * 1000)); 4611 } 4612 } 4613 4614 mutex_exit(&pp->umutex); 4615 } 4616 4617 /* 4618 * ECP backchannel read completion: 4619 * stop the DMA, free DMA resources and send read data upstream 4620 */ 4621 static void 4622 ecpp_ecp_read_completion(struct ecppunit *pp) 4623 { 4624 size_t xfer_len, unx_len; 4625 mblk_t *mp; 4626 4627 ASSERT(mutex_owned(&pp->umutex)); 4628 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4629 pp->current_phase == ECPP_PHASE_ECP_REV_XFER); 4630 ASSERT(pp->msg != NULL); 4631 4632 /* 4633 * Stop the transfer and unbind DMA handle 4634 */ 4635 if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) { 4636 unx_len = pp->resid; 4637 ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n"); 4638 } 4639 4640 mp = pp->msg; 4641 xfer_len = pp->resid - unx_len; /* how much data was transferred */ 4642 4643 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 4644 ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n"); 4645 } 4646 4647 ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n", 4648 xfer_len, pp->resid); 4649 4650 /* clean up and update statistics */ 4651 pp->msg = NULL; 4652 pp->resid -= xfer_len; 4653 pp->ibytes[pp->current_mode] += xfer_len; 4654 pp->e_busy = ECPP_IDLE; 4655 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4656 4657 /* 4658 * Send the read data up the stream 4659 */ 4660 mp->b_wptr += xfer_len; 4661 if (canputnext(pp->readq)) { 4662 mutex_exit(&pp->umutex); 4663 putnext(pp->readq, mp); 4664 mutex_enter(&pp->umutex); 4665 } else { 4666 ecpp_error(pp->dip, 
"ecp_read_completion: fail canputnext\n"); 4667 if (!putq(pp->readq, mp)) { 4668 freemsg(mp); 4669 } 4670 } 4671 4672 /* if bytes left in the FIFO another transfer is needed */ 4673 if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) { 4674 (void) ecpp_backchan_req(pp); 4675 } 4676 4677 qenable(pp->writeq); 4678 } 4679 4680 /* 4681 * Read one byte in the Nibble mode 4682 */ 4683 static uchar_t 4684 nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte) 4685 { 4686 uint8_t n[2]; /* two nibbles */ 4687 int i; 4688 4689 /* 4690 * One byte is made of two nibbles 4691 */ 4692 for (i = 0; i < 2; i++) { 4693 /* Event 7, 12: host asserts nAutoFd to move to read a nibble */ 4694 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4695 4696 /* Event 8: peripheral puts data on the status lines */ 4697 4698 /* Event 9: peripheral asserts nAck, data available */ 4699 if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) { 4700 ecpp_error(pp->dip, 4701 "nibble_periph2host(%d): failed event 9 %x\n", 4702 i + 1, DSR_READ(pp)); 4703 (void) ecpp_1284_termination(pp); 4704 return (FAILURE); 4705 } 4706 4707 n[i] = DSR_READ(pp); /* get a nibble */ 4708 4709 /* Event 10: host deasserts nAutoFd to say it grabbed data */ 4710 DCR_WRITE(pp, ECPP_nINIT); 4711 4712 /* (2) Event 13: peripheral asserts PE - end of data phase */ 4713 4714 /* Event 11: peripheral deasserts nAck to finish handshake */ 4715 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4716 ecpp_error(pp->dip, 4717 "nibble_periph2host(%d): failed event 11 %x\n", 4718 i + 1, DSR_READ(pp)); 4719 (void) ecpp_1284_termination(pp); 4720 return (FAILURE); 4721 } 4722 } 4723 4724 /* extract data byte from two nibbles - optimized formula */ 4725 *byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) | 4726 ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f); 4727 4728 pp->ibytes[ECPP_NIBBLE_MODE]++; 4729 return (SUCCESS); 4730 } 4731 4732 /* 4733 * process data transfers requested by the peripheral 4734 */ 4735 static uint_t 4736 ecpp_peripheral2host(struct ecppunit *pp) 4737 { 4738 if (!canputnext(pp->readq)) { 4739 ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n"); 4740 return (SUCCESS); 4741 } 4742 4743 switch (pp->backchannel) { 4744 case ECPP_CENTRONICS: 4745 /* no backchannel */ 4746 return (SUCCESS); 4747 4748 case ECPP_NIBBLE_MODE: 4749 ASSERT(pp->current_mode == ECPP_NIBBLE_MODE); 4750 4751 /* 4752 * Event 20: Host sets nAutoFd high to ack request 4753 */ 4754 DCR_WRITE(pp, ECPP_nINIT); 4755 4756 /* Event 21: Periph sets PError low to ack host */ 4757 if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) { 4758 ecpp_error(pp->dip, 4759 "ecpp_periph2host: failed event 21 %x\n", 4760 DSR_READ(pp)); 4761 (void) ecpp_1284_termination(pp); 4762 return (FAILURE); 4763 } 4764 4765 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 4766 4767 /* this routine will read the data in Nibble mode */ 4768 return (ecpp_idle_phase(pp)); 4769 4770 case ECPP_ECP_MODE: 4771 if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) && 4772 (ecp_forward2reverse(pp) == FAILURE)) { 4773 return (FAILURE); 4774 } 4775 4776 return (ecp_peripheral2host(pp)); /* start the transfer */ 4777 4778 case ECPP_DIAG_MODE: { 4779 mblk_t *mp; 4780 int i; 4781 4782 if (ECR_READ(pp) & ECPP_FIFO_EMPTY) { 4783 ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n"); 4784 return (SUCCESS); 4785 } 4786 4787 /* allocate the FIFO size */ 4788 if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) { 4789 ecpp_error(pp->dip, 4790 "ecpp_periph2host: allocb FAILURE.\n"); 4791 return (FAILURE); 4792 } 4793 4794 /* 4795 * For 
the time being just read it byte by byte 4796 */ 4797 i = ECPP_FIFO_SZ; 4798 while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) { 4799 *mp->b_wptr++ = TFIFO_READ(pp); 4800 drv_usecwait(1); /* ECR is sometimes slow to update */ 4801 } 4802 4803 if (canputnext(pp->readq)) { 4804 mutex_exit(&pp->umutex); 4805 mp->b_datap->db_type = M_DATA; 4806 ecpp_error(pp->dip, 4807 "ecpp_periph2host: sending %d bytes\n", 4808 mp->b_wptr - mp->b_rptr); 4809 putnext(pp->readq, mp); 4810 mutex_enter(&pp->umutex); 4811 return (SUCCESS); 4812 } else { 4813 ecpp_error(pp->dip, 4814 "ecpp_periph2host: !canputnext data lost\n"); 4815 freemsg(mp); 4816 return (FAILURE); 4817 } 4818 } 4819 4820 default: 4821 ecpp_error(pp->dip, "ecpp_peripheraltohost: illegal back"); 4822 return (FAILURE); 4823 } 4824 } 4825 4826 /* 4827 * Negotiate from ECP Forward Idle to Reverse Idle Phase 4828 * 4829 * (manipulations with dcr/ecr are according to ECP Specification) 4830 */ 4831 static int 4832 ecp_forward2reverse(struct ecppunit *pp) 4833 { 4834 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4835 pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE); 4836 4837 /* place port into PS2 mode */ 4838 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4839 4840 /* set direction bit (DCR3-0 must be 0100 - National) */ 4841 DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT); 4842 4843 /* enable hardware assist */ 4844 ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4845 4846 drv_usecwait(1); /* Tp(ecp) = 0.5us */ 4847 4848 /* Event 39: host sets nInit low */ 4849 DCR_WRITE(pp, ECPP_REV_DIR); 4850 4851 /* Event 40: peripheral sets PError low */ 4852 4853 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4854 4855 ecpp_error(pp->dip, "ecp_forward2reverse ok\n"); 4856 4857 return (SUCCESS); 4858 } 4859 4860 /* 4861 * Negotiate from ECP Reverse Idle to Forward Idle Phase 4862 * 4863 * (manipulations with dcr/ecr are according to ECP Specification) 4864 */ 4865 static int 4866 ecp_reverse2forward(struct ecppunit *pp) 4867 { 4868 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4869 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 4870 4871 /* Event 47: host deasserts nInit */ 4872 DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT); 4873 4874 /* 4875 * Event 48: peripheral deasserts nAck 4876 * Event 49: peripheral asserts PError 4877 */ 4878 if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) { 4879 ecpp_error(pp->dip, 4880 "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp)); 4881 (void) ecpp_1284_termination(pp); 4882 return (FAILURE); 4883 } 4884 4885 /* place port into PS2 mode */ 4886 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4887 4888 /* clear direction bit */ 4889 DCR_WRITE(pp, ECPP_nINIT); 4890 4891 /* reenable hardware assist */ 4892 ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4893 4894 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 4895 4896 ecpp_error(pp->dip, "ecp_reverse2forward ok\n"); 4897 4898 return (SUCCESS); 4899 } 4900 4901 /* 4902 * Default negotiation chooses the best mode supported by peripheral 4903 * Note that backchannel mode may be different from forward mode 4904 */ 4905 static void 4906 ecpp_default_negotiation(struct ecppunit *pp) 4907 { 4908 if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) { 4909 /* 1284 compatible device */ 4910 pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO; 4911 return; 4912 } else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) { 4913 /* 1284 compatible device */ 4914 pp->io_mode = (pp->fast_compat == TRUE) ? 
ECPP_DMA : ECPP_PIO; 4915 } else { 4916 /* Centronics device */ 4917 pp->io_mode = 4918 (pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO; 4919 } 4920 ECPP_CONFIG_MODE(pp); 4921 } 4922 4923 /* 4924 * Negotiate to the mode indicated by newmode 4925 */ 4926 static int 4927 ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode) 4928 { 4929 /* any other mode is impossible */ 4930 ASSERT(pp->current_mode == ECPP_CENTRONICS || 4931 pp->current_mode == ECPP_COMPAT_MODE || 4932 pp->current_mode == ECPP_NIBBLE_MODE || 4933 pp->current_mode == ECPP_ECP_MODE || 4934 pp->current_mode == ECPP_DIAG_MODE); 4935 4936 if (pp->current_mode == newmode) { 4937 return (SUCCESS); 4938 } 4939 4940 /* termination from ECP is only allowed from the Forward Idle Phase */ 4941 if ((pp->current_mode == ECPP_ECP_MODE) && 4942 (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) { 4943 /* this may break into Centronics */ 4944 (void) ecp_reverse2forward(pp); 4945 } 4946 4947 switch (newmode) { 4948 case ECPP_CENTRONICS: 4949 (void) ecpp_1284_termination(pp); 4950 4951 /* put superio into PIO mode */ 4952 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 4953 4954 pp->current_mode = ECPP_CENTRONICS; 4955 pp->backchannel = ECPP_CENTRONICS; 4956 ECPP_CONFIG_MODE(pp); 4957 4958 pp->to_mode[pp->current_mode]++; 4959 return (SUCCESS); 4960 4961 case ECPP_COMPAT_MODE: 4962 /* ECPP_COMPAT_MODE should support Nibble as a backchannel */ 4963 if (pp->current_mode == ECPP_NIBBLE_MODE) { 4964 if (ecpp_1284_termination(pp) == SUCCESS) { 4965 pp->current_mode = ECPP_COMPAT_MODE; 4966 pp->backchannel = ECPP_NIBBLE_MODE; 4967 ECPP_CONFIG_MODE(pp); 4968 pp->to_mode[pp->current_mode]++; 4969 return (SUCCESS); 4970 } else { 4971 return (FAILURE); 4972 } 4973 } 4974 4975 if ((nibble_negotiation(pp) == SUCCESS) && 4976 (ecpp_1284_termination(pp) == SUCCESS)) { 4977 pp->backchannel = ECPP_NIBBLE_MODE; 4978 pp->current_mode = ECPP_COMPAT_MODE; 4979 ECPP_CONFIG_MODE(pp); 4980 pp->to_mode[pp->current_mode]++; 4981 return (SUCCESS); 4982 } else { 4983 return (FAILURE); 4984 } 4985 4986 case ECPP_NIBBLE_MODE: 4987 if (nibble_negotiation(pp) == FAILURE) { 4988 return (FAILURE); 4989 } 4990 4991 pp->backchannel = ECPP_NIBBLE_MODE; 4992 ECPP_CONFIG_MODE(pp); 4993 pp->to_mode[pp->current_mode]++; 4994 4995 return (SUCCESS); 4996 4997 case ECPP_ECP_MODE: 4998 if (pp->noecpregs) 4999 return (FAILURE); 5000 if (ecp_negotiation(pp) == FAILURE) { 5001 return (FAILURE); 5002 } 5003 5004 /* 5005 * National says CTR[3:0] should be 0100b before moving to 011 5006 */ 5007 DCR_WRITE(pp, ECPP_nINIT); 5008 5009 if (ecr_write(pp, ECR_mode_011 | 5010 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5011 ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n"); 5012 return (FAILURE); 5013 } 5014 5015 ECPP_CONFIG_MODE(pp); 5016 pp->to_mode[pp->current_mode]++; 5017 5018 return (SUCCESS); 5019 5020 case ECPP_DIAG_MODE: 5021 /* 5022 * In DIAG mode application can do nasty things(e.g drive pins) 5023 * To keep peripheral sane, terminate to Compatibility mode 5024 */ 5025 (void) ecpp_1284_termination(pp); 5026 5027 /* put superio into TFIFO mode */ 5028 if (ecr_write(pp, ECR_mode_001 | 5029 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5030 ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n"); 5031 return (FAILURE); 5032 } 5033 5034 pp->current_mode = ECPP_DIAG_MODE; 5035 pp->backchannel = ECPP_DIAG_MODE; 5036 ECPP_CONFIG_MODE(pp); 5037 pp->to_mode[pp->current_mode]++; 5038 5039 return (SUCCESS); 5040 5041 default: 5042 ecpp_error(pp->dip, 5043 
"ecpp_mode_negotiation: mode %d not supported\n", newmode); 5044 return (FAILURE); 5045 } 5046 } 5047 5048 /* 5049 * Standard (9.1): Peripheral data is available only when the host places 5050 * the interface in a mode capable of peripheral-to-host data transfer. 5051 * This requires the host periodically to place the interface in such a mode. 5052 * Polling can be eliminated by leaving the interface in an 1284 idle phase. 5053 */ 5054 static uchar_t 5055 ecpp_idle_phase(struct ecppunit *pp) 5056 { 5057 uchar_t rval = FAILURE; 5058 5059 /* 5060 * If there is no space on the read queue, do not reverse channel 5061 */ 5062 if (!canputnext(pp->readq)) { 5063 ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n"); 5064 return (SUCCESS); 5065 } 5066 5067 switch (pp->backchannel) { 5068 case ECPP_CENTRONICS: 5069 case ECPP_COMPAT_MODE: 5070 case ECPP_DIAG_MODE: 5071 /* nothing */ 5072 ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n"); 5073 return (SUCCESS); 5074 5075 case ECPP_NIBBLE_MODE: 5076 /* 5077 * read as much data as possible, ending up in either 5078 * Reverse Idle or Host Busy Data Available phase 5079 */ 5080 ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n"); 5081 if ((pp->current_mode != ECPP_NIBBLE_MODE) && 5082 (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) { 5083 break; 5084 } 5085 5086 rval = read_nibble_backchan(pp); 5087 5088 /* put interface into Reverse Idle phase */ 5089 if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL && 5090 canputnext(pp->readq)) { 5091 ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n"); 5092 5093 /* 5094 * Event 7: host asserts nAutoFd 5095 * enable nAck interrupt to get a backchannel request 5096 */ 5097 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN); 5098 5099 ECPP_UNMASK_INTR(pp); 5100 } 5101 5102 break; 5103 5104 case ECPP_ECP_MODE: 5105 /* 5106 * if data is already available, request the backchannel xfer 5107 * otherwise stay in Forward Idle and enable nErr interrupts 5108 */ 5109 ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n"); 5110 5111 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE || 5112 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 5113 5114 /* put interface into Forward Idle phase */ 5115 if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) && 5116 (ecp_reverse2forward(pp) == FAILURE)) { 5117 return (FAILURE); 5118 } 5119 5120 /* 5121 * if data already available, put backchannel request on the wq 5122 * otherwise enable nErr interrupts 5123 */ 5124 if ((DSR_READ(pp) & ECPP_nERR) == 0) { 5125 (void) ecpp_backchan_req(pp); 5126 } else { 5127 ECR_WRITE(pp, 5128 ECR_READ(pp) & ~ECPP_INTR_MASK | ECPP_INTR_SRV); 5129 5130 ECPP_UNMASK_INTR(pp); 5131 } 5132 5133 return (SUCCESS); 5134 5135 default: 5136 ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel"); 5137 } 5138 5139 return (rval); 5140 } 5141 5142 /* 5143 * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE 5144 * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL, 5145 * and continue later as the user consumes data from the read queue 5146 * 5147 * The current phase should be NIBT_AVAIL or NIBT_NAVAIL 5148 * If some events fail during transfer, termination puts link 5149 * to Compatibility mode and FAILURE is returned 5150 */ 5151 static int 5152 read_nibble_backchan(struct ecppunit *pp) 5153 { 5154 mblk_t *mp; 5155 int i; 5156 int rval = SUCCESS; 5157 5158 ASSERT(pp->current_mode == ECPP_NIBBLE_MODE); 5159 5160 pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE)) 5161 ? 
ECPP_PHASE_NIBT_NAVAIL : ECPP_PHASE_NIBT_AVAIL; 5162 5163 ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp)); 5164 5165 /* 5166 * While data is available, read it in NIBBLE_REV_BLKSZ byte chunks 5167 * and send up the stream 5168 */ 5169 while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) { 5170 /* see if there's space on the queue */ 5171 if (!canputnext(pp->readq)) { 5172 ecpp_error(pp->dip, 5173 "read_nibble_backchan: canputnext failed\n"); 5174 return (SUCCESS); 5175 } 5176 5177 if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) { 5178 ecpp_error(pp->dip, 5179 "read_nibble_backchan: allocb failed\n"); 5180 return (SUCCESS); 5181 } 5182 5183 /* read a chunk of data from the peripheral byte by byte */ 5184 i = NIBBLE_REV_BLKSZ; 5185 while (i-- && !(DSR_READ(pp) & ECPP_nERR)) { 5186 if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) { 5187 rval = FAILURE; 5188 break; 5189 } 5190 mp->b_wptr++; 5191 } 5192 5193 pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE)) 5194 ? ECPP_PHASE_NIBT_NAVAIL 5195 : ECPP_PHASE_NIBT_AVAIL; 5196 5197 if (mp->b_wptr - mp->b_rptr > 0) { 5198 ecpp_error(pp->dip, 5199 "read_nibble_backchan: sending %d bytes\n", 5200 mp->b_wptr - mp->b_rptr); 5201 pp->nread = 0; 5202 mutex_exit(&pp->umutex); 5203 putnext(pp->readq, mp); 5204 mutex_enter(&pp->umutex); 5205 } else { 5206 freemsg(mp); 5207 } 5208 } 5209 5210 return (rval); 5211 } 5212 5213 /* 5214 * 'Request Device ID using nibble mode' negotiation 5215 */ 5216 static int 5217 devidnib_negotiation(struct ecppunit *pp) 5218 { 5219 uint8_t dsr; 5220 5221 if (ecpp_1284_negotiation(pp, 5222 ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) { 5223 return (FAILURE); 5224 } 5225 5226 /* 5227 * If peripheral has data available, PE and nErr will 5228 * be set low at Event 5 & 6. 5229 */ 5230 if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) { 5231 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 5232 } else { 5233 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL; 5234 } 5235 5236 ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n", 5237 pp->current_phase); 5238 5239 /* successful negotiation into Nibble mode */ 5240 pp->current_mode = ECPP_NIBBLE_MODE; 5241 pp->backchannel = ECPP_NIBBLE_MODE; 5242 5243 ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n"); 5244 5245 return (SUCCESS); 5246 } 5247 5248 /* 5249 * Read 1284 device ID sequence 5250 * 5251 * This function should be called two times: 5252 * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length; 5253 * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer 5254 * 5255 * After 2) port is in Compatible mode 5256 * If the caller fails to make second call, it must reset port to Centronics 5257 * 5258 */ 5259 static int 5260 ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode) 5261 { 5262 uint8_t lenhi, lenlo; 5263 uint8_t dsr; 5264 int i; 5265 5266 switch (mode) { 5267 case ECPP_NIBBLE_MODE: 5268 /* negotiate only if neccessary */ 5269 if ((pp->current_mode != mode) || (id == NULL)) { 5270 if (devidnib_negotiation(pp) == FAILURE) { 5271 return (EIO); 5272 } 5273 } 5274 5275 if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) { 5276 return (EIO); 5277 } 5278 5279 /* 5280 * Event 14: Host tristates data bus, peripheral 5281 * asserts nERR if data available, usually the 5282 * status bits (7-0) and requires two reads since 5283 * only nibbles are transfered. 5284 */ 5285 dsr = DSR_READ(pp); 5286 5287 if (id == NULL) { 5288 /* 5289 * first two bytes are the length of the sequence 5290 * (incl. 
these bytes) 5291 * first byte is MSB 5292 */ 5293 if ((dsr & ECPP_nERR) || 5294 (nibble_peripheral2host(pp, &lenhi) == FAILURE) || 5295 (dsr & ECPP_nERR) || 5296 (nibble_peripheral2host(pp, &lenlo) == FAILURE)) { 5297 ecpp_error(pp->dip, 5298 "ecpp_getdevid: id length read error\n"); 5299 return (EIO); 5300 } 5301 5302 *lenp = (lenhi << 8) | (lenlo); 5303 5304 ecpp_error(pp->dip, 5305 "ecpp_getdevid: id length = %d\n", *lenp); 5306 5307 if (*lenp < 2) { 5308 return (EIO); 5309 } 5310 } else { 5311 /* 5312 * read the rest of the data 5313 */ 5314 i = *lenp; 5315 while (i && ((dsr & ECPP_nERR) == 0)) { 5316 if (nibble_peripheral2host(pp, id++) == FAILURE) 5317 break; 5318 5319 i--; 5320 dsr = DSR_READ(pp); 5321 } 5322 ecpp_error(pp->dip, 5323 "ecpp_getdevid: read %d bytes\n", *lenp - i); 5324 5325 /* 5326 * 1284: After receiving the sequence, the host is 5327 * required to return the link to the Compatibility mode 5328 */ 5329 (void) ecpp_1284_termination(pp); 5330 } 5331 5332 break; 5333 5334 /* Other modes are not yet supported */ 5335 default: 5336 return (EINVAL); 5337 } 5338 5339 return (0); 5340 } 5341 5342 /* 5343 * Various hardware support 5344 * 5345 * First define some stubs for functions that do nothing 5346 */ 5347 5348 /*ARGSUSED*/ 5349 static void 5350 empty_config_mode(struct ecppunit *pp) 5351 { 5352 } 5353 5354 /*ARGSUSED*/ 5355 static void 5356 empty_mask_intr(struct ecppunit *pp) 5357 { 5358 } 5359 5360 #if defined(__x86) 5361 static size_t 5362 x86_getcnt(struct ecppunit *pp) 5363 { 5364 int count; 5365 5366 (void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count); 5367 return (count); 5368 } 5369 #endif 5370 5371 /* 5372 * 5373 * National PC87332 and PC97317 SuperIOs support routines 5374 * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur 5375 * and use EBus DMA facilities (Cheerio or RIO) 5376 * 5377 */ 5378 5379 static int 5380 pc87332_map_regs(struct ecppunit *pp) 5381 { 5382 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0, 5383 sizeof (struct config_reg), &acc_attr, 5384 &pp->uh.ebus.c_handle) != DDI_SUCCESS) { 5385 ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n"); 5386 goto fail; 5387 } 5388 5389 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5390 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5391 != DDI_SUCCESS) { 5392 ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n"); 5393 goto fail; 5394 } 5395 5396 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400, 5397 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5398 != DDI_SUCCESS) { 5399 ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n"); 5400 goto fail; 5401 } 5402 5403 if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0, 5404 sizeof (struct cheerio_dma_reg), &acc_attr, 5405 &pp->uh.ebus.d_handle) != DDI_SUCCESS) { 5406 ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n"); 5407 goto fail; 5408 } 5409 5410 return (SUCCESS); 5411 5412 fail: 5413 pc87332_unmap_regs(pp); 5414 return (FAILURE); 5415 } 5416 5417 static void 5418 pc87332_unmap_regs(struct ecppunit *pp) 5419 { 5420 if (pp->uh.ebus.c_handle) { 5421 ddi_regs_map_free(&pp->uh.ebus.c_handle); 5422 } 5423 if (pp->uh.ebus.d_handle) { 5424 ddi_regs_map_free(&pp->uh.ebus.d_handle); 5425 } 5426 if (pp->i_handle) { 5427 ddi_regs_map_free(&pp->i_handle); 5428 } 5429 if (pp->f_handle) { 5430 ddi_regs_map_free(&pp->f_handle); 5431 } 5432 } 5433 5434 static uint8_t 5435 pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num) 5436 { 5437 uint8_t retval; 5438 
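	/*
	 * The SuperIO configuration registers are accessed as an
	 * index/data pair: write the register number to the index
	 * port, then read or write the value through the data port.
	 */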
5439 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num); 5440 retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data); 5441 5442 return (retval); 5443 } 5444 5445 static void 5446 pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val) 5447 { 5448 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num); 5449 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val); 5450 5451 /* 5452 * second write to this register is needed. the register behaves as 5453 * a fifo. the first value written goes to the data register. the 5454 * second write pushes the initial value to the register indexed. 5455 */ 5456 5457 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val); 5458 } 5459 5460 static int 5461 pc87332_config_chip(struct ecppunit *pp) 5462 { 5463 uint8_t pmc, fcr; 5464 5465 pp->current_phase = ECPP_PHASE_INIT; 5466 5467 /* ECP DMA configuration bit (PMC4) must be set */ 5468 pmc = pc87332_read_config_reg(pp, PMC); 5469 if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) { 5470 pc87332_write_config_reg(pp, PMC, 5471 pmc | PC87332_PMC_ECP_DMA_CONFIG); 5472 } 5473 5474 /* 5475 * The Parallel Port Multiplexor pins must be driven. 5476 * Check to see if FCR3 is zero, if not clear FCR3. 5477 */ 5478 fcr = pc87332_read_config_reg(pp, FCR); 5479 if (fcr & PC87332_FCR_PPM_FLOAT_CTL) { 5480 pc87332_write_config_reg(pp, FCR, 5481 fcr & ~PC87332_FCR_PPM_FLOAT_CTL); 5482 } 5483 5484 /* 5485 * clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode 5486 * CTR5 can not be cleared in SPP mode, CTR5 will return 1. 5487 * "FAILURE" in this case is ok. Better to use dcr_write() 5488 * to ensure reliable writing to DCR. 5489 */ 5490 if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) { 5491 ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n"); 5492 } 5493 5494 /* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */ 5495 pc87332_write_config_reg(pp, PCR, 5496 PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN); 5497 5498 /* put SuperIO in initial state */ 5499 if (ecr_write(pp, ECR_mode_001 | 5500 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5501 ecpp_error(pp->dip, "ecpp_config_87332: ECR\n"); 5502 } 5503 5504 if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) { 5505 ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n"); 5506 return (FAILURE); 5507 5508 } 5509 /* we are in centronic mode */ 5510 pp->current_mode = ECPP_CENTRONICS; 5511 5512 /* in compatible mode with no data transfer in progress */ 5513 pp->current_phase = ECPP_PHASE_C_IDLE; 5514 5515 return (SUCCESS); 5516 } 5517 5518 /* 5519 * A new mode was set, do some mode specific reconfiguration 5520 * in this case - set interrupt characteristic 5521 */ 5522 static void 5523 pc87332_config_mode(struct ecppunit *pp) 5524 { 5525 if (COMPAT_PIO(pp)) { 5526 pc87332_write_config_reg(pp, PCR, 0x04); 5527 } else { 5528 pc87332_write_config_reg(pp, PCR, 0x14); 5529 } 5530 } 5531 5532 static int 5533 pc97317_map_regs(struct ecppunit *pp) 5534 { 5535 if (pc87332_map_regs(pp) != SUCCESS) { 5536 return (FAILURE); 5537 } 5538 5539 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg, 5540 0x403, sizeof (struct config2_reg), &acc_attr, 5541 &pp->uh.ebus.c2_handle) != DDI_SUCCESS) { 5542 ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n"); 5543 pc87332_unmap_regs(pp); 5544 return (FAILURE); 5545 } else { 5546 return (SUCCESS); 5547 } 5548 } 5549 5550 static void 5551 pc97317_unmap_regs(struct ecppunit *pp) 5552 { 5553 if (pp->uh.ebus.c2_handle) { 5554 
ddi_regs_map_free(&pp->uh.ebus.c2_handle); 5555 } 5556 5557 pc87332_unmap_regs(pp); 5558 } 5559 5560 /* 5561 * OBP should configure the PC97317 such that it does not need further 5562 * configuration. Upon sustaining, it may be necessary to examine 5563 * or change the configuration registers. This routine is left in 5564 * the file for that purpose. 5565 */ 5566 static int 5567 pc97317_config_chip(struct ecppunit *pp) 5568 { 5569 uint8_t conreg; 5570 5571 /* set the logical device name */ 5572 pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4); 5573 5574 /* SPP Compatibility */ 5575 PP_PUTB(pp->uh.ebus.c2_handle, 5576 &pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2); 5577 PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80); 5578 5579 /* low interrupt polarity */ 5580 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00); 5581 5582 /* ECP mode */ 5583 pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2); 5584 5585 if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) { 5586 ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n"); 5587 } 5588 5589 if (ecr_write(pp, ECR_mode_001 | 5590 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5591 ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n"); 5592 } 5593 5594 #ifdef DEBUG 5595 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO); 5596 ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg); 5597 5598 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB); 5599 ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg); 5600 5601 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB); 5602 ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg); 5603 5604 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL); 5605 ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg); 5606 5607 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE); 5608 ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg); 5609 5610 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE); 5611 ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg); 5612 5613 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE); 5614 ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg); 5615 5616 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN); 5617 ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg); 5618 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN); 5619 ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg); 5620 5621 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG); 5622 ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg); 5623 5624 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG); 5625 ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg); 5626 #endif /* DEBUG */ 5627 5628 return (SUCCESS); 5629 } 5630 5631 /* 5632 * A new mode was set, do some mode specific reconfiguration 5633 * in this case - set interrupt polarity 5634 */ 5635 static void 5636 pc97317_config_mode(struct ecppunit *pp) 5637 { 5638 /* set the logical device name */ 5639 pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4); 5640 5641 if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) { 5642 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02); 5643 } else { 5644 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00); 5645 } 5646 } 5647 5648 static void 5649 cheerio_mask_intr(struct ecppunit *pp) 5650 { 5651 /* mask Cheerio interrupts */ 5652 
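	/*
	 * Masking is a read-modify-write of the EBus DMA CSR that clears
	 * DCSR_INT_EN; cheerio_unmask_intr() sets it again (together with
	 * DCSR_TCI_DIS) to re-enable interrupt delivery.
	 */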
AND_SET_LONG_R(pp->uh.ebus.d_handle, 5653 &pp->uh.ebus.dmac->csr, ~DCSR_INT_EN); 5654 } 5655 5656 static void 5657 cheerio_unmask_intr(struct ecppunit *pp) 5658 { 5659 /* unmask Cheerio interrupts */ 5660 OR_SET_LONG_R(pp->uh.ebus.d_handle, 5661 &pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS); 5662 } 5663 5664 static int 5665 cheerio_dma_start(struct ecppunit *pp) 5666 { 5667 cheerio_reset_dcsr(pp); 5668 SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size); 5669 SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address); 5670 5671 if (pp->dma_dir == DDI_DMA_READ) { 5672 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA | 5673 DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE); 5674 } else { 5675 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA | 5676 DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0); 5677 } 5678 5679 return (SUCCESS); 5680 } 5681 5682 /* 5683 * Note: BCR is reset to 0, so counter should always be read before dma_stop 5684 */ 5685 static int 5686 cheerio_dma_stop(struct ecppunit *pp, size_t *countp) 5687 { 5688 uint8_t ecr; 5689 5690 /* disable DMA and byte counter */ 5691 AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr, 5692 ~(DCSR_EN_DMA | DCSR_EN_CNT| DCSR_INT_EN)); 5693 5694 /* ACK and disable the TC interrupt */ 5695 OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr, 5696 DCSR_TC | DCSR_TCI_DIS); 5697 5698 /* read DMA count if requested */ 5699 if (countp) { 5700 *countp = cheerio_getcnt(pp); 5701 } 5702 5703 cheerio_reset_dcsr(pp); 5704 SET_DMAC_BCR(pp, 0); 5705 5706 /* turn off SuperIO's DMA */ 5707 ecr = ECR_READ(pp); 5708 if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) { 5709 return (FAILURE); 5710 } 5711 5712 /* Disable SuperIO interrupts and DMA */ 5713 ecr = ECR_READ(pp); 5714 5715 return (ecr_write(pp, ecr | ECPP_INTR_SRV)); 5716 } 5717 5718 static size_t 5719 cheerio_getcnt(struct ecppunit *pp) 5720 { 5721 return (GET_DMAC_BCR(pp)); 5722 } 5723 5724 /* 5725 * Reset the DCSR by first setting the RESET bit to 1. Poll the 5726 * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles. 5727 * If there are no more pending cycles, clear the RESET bit. 
5728 */ 5729 static void 5730 cheerio_reset_dcsr(struct ecppunit *pp) 5731 { 5732 int timeout = DMAC_RESET_TIMEOUT; 5733 5734 SET_DMAC_CSR(pp, DCSR_RESET); 5735 5736 while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) { 5737 if (timeout == 0) { 5738 ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n"); 5739 break; 5740 } else { 5741 drv_usecwait(1); 5742 timeout--; 5743 } 5744 } 5745 5746 SET_DMAC_CSR(pp, 0); 5747 } 5748 5749 /* 5750 * 5751 * Grover Southbridge (M1553) support routines 5752 * Southbridge contains an Intel 8237 DMAC onboard which is used 5753 * to transport data to/from PCI space to superio parallel port 5754 * 5755 */ 5756 5757 5758 static int 5759 m1553_map_regs(struct ecppunit *pp) 5760 { 5761 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space, 5762 0, sizeof (struct isaspace), &acc_attr, 5763 &pp->uh.m1553.d_handle) != DDI_SUCCESS) { 5764 ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n"); 5765 goto fail; 5766 } 5767 5768 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5769 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5770 != DDI_SUCCESS) { 5771 ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n"); 5772 goto fail; 5773 } 5774 5775 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400, 5776 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5777 != DDI_SUCCESS) { 5778 ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n"); 5779 goto fail; 5780 } 5781 5782 return (SUCCESS); 5783 5784 fail: 5785 m1553_unmap_regs(pp); 5786 return (FAILURE); 5787 } 5788 5789 static void 5790 m1553_unmap_regs(struct ecppunit *pp) 5791 { 5792 if (pp->uh.m1553.d_handle) { 5793 ddi_regs_map_free(&pp->uh.m1553.d_handle); 5794 } 5795 if (pp->i_handle) { 5796 ddi_regs_map_free(&pp->i_handle); 5797 } 5798 if (pp->f_handle) { 5799 ddi_regs_map_free(&pp->f_handle); 5800 } 5801 } 5802 5803 #if defined(__x86) 5804 static int 5805 x86_map_regs(struct ecppunit *pp) 5806 { 5807 int nregs = 0; 5808 5809 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5810 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5811 != DDI_SUCCESS) { 5812 ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n"); 5813 goto fail; 5814 } 5815 if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) { 5816 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0, 5817 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5818 != DDI_SUCCESS) { 5819 ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n"); 5820 goto fail; 5821 } else 5822 pp->noecpregs = FALSE; 5823 } else { 5824 pp->noecpregs = TRUE; 5825 } 5826 return (SUCCESS); 5827 fail: 5828 x86_unmap_regs(pp); 5829 return (FAILURE); 5830 } 5831 5832 static void 5833 x86_unmap_regs(struct ecppunit *pp) 5834 { 5835 if (pp->i_handle) { 5836 ddi_regs_map_free(&pp->i_handle); 5837 } 5838 if (pp->f_handle) { 5839 ddi_regs_map_free(&pp->f_handle); 5840 } 5841 } 5842 #endif 5843 5844 static uint8_t 5845 m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num) 5846 { 5847 uint8_t retval; 5848 5849 dma8237_write(pp, 0x3F0, reg_num); 5850 retval = dma8237_read(pp, 0x3F1); 5851 5852 return (retval); 5853 } 5854 5855 static void 5856 m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val) 5857 { 5858 dma8237_write(pp, 0x3F0, reg_num); 5859 dma8237_write(pp, 0x3F1, val); 5860 } 5861 5862 static int 5863 m1553_config_chip(struct ecppunit *pp) 5864 { 5865 uint8_t conreg; 5866 5867 /* Unlock configuration regs with "key sequence" */ 5868 dma8237_write(pp, 0x3F0, 0x51); 5869 dma8237_write(pp, 0x3F0, 0x23); 5870 5871 
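	/*
	 * Config space is now unlocked: select logical device 3 (the
	 * parallel port) and read back the basic resource registers,
	 * logging their values for debugging.
	 */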
m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3); 5872 conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO); 5873 ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg); 5874 5875 conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE); 5876 ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg); 5877 5878 conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB); 5879 ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg); 5880 conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB); 5881 ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg); 5882 5883 conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL); 5884 ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg); 5885 5886 conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN); 5887 ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg); 5888 5889 /* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */ 5890 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0); 5891 conreg = (conreg & ~0x7F) | 0x0A; 5892 m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg); 5893 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0); 5894 ecpp_error(pp->dip, "M1553:conregFO(pport conf)=%x\n", conreg); 5895 5896 m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04); 5897 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1); 5898 ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg); 5899 5900 /* lock configuration regs with key */ 5901 dma8237_write(pp, 0x3F0, 0xBB); 5902 5903 /* Set ECR, DCR in known state */ 5904 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 5905 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 5906 5907 ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n", 5908 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 5909 5910 return (SUCCESS); 5911 } 5912 5913 #if defined(__x86) 5914 static int 5915 x86_config_chip(struct ecppunit *pp) 5916 { 5917 if (ecr_write(pp, ECR_mode_001 | 5918 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5919 ecpp_error(pp->dip, "config chip: failed w/ecr\n"); 5920 pp->noecpregs = TRUE; 5921 } 5922 if (pp->noecpregs) 5923 pp->fast_compat = FALSE; 5924 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 5925 ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n", 5926 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 5927 return (SUCCESS); 5928 } 5929 #endif 5930 5931 /* 5932 * dma8237_dma_start() programs the selected 8 bit channel 5933 * of DMAC1 with the dma cookie. pp->dma_cookie must 5934 * be set before this routine is called. 5935 */ 5936 static int 5937 dma8237_dma_start(struct ecppunit *pp) 5938 { 5939 uint8_t chn; 5940 5941 chn = pp->uh.m1553.chn; 5942 5943 ASSERT(chn <= DMAE_CH3 && 5944 pp->dma_cookie.dmac_size != 0 && 5945 pp->dma_cookie.dmac_address != NULL); 5946 5947 /* At this point Southbridge has not yet asserted DREQ */ 5948 5949 /* set mode to read-from-memory. */ 5950 dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC); 5951 if (pp->dma_dir == DDI_DMA_READ) { 5952 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE | 5953 DMAMODE_READ | chn); 5954 } else { 5955 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE | 5956 DMAMODE_WRITE | chn); 5957 } 5958 5959 dma8237_write_addr(pp, pp->dma_cookie.dmac_address); 5960 dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1); 5961 5962 /* 5963 * M1553 chip does not permit to access DMA register banks 5964 * while DMA is in flight. As a result, ecpp and floppy drivers 5965 * can potentially corrupt each other's DMA. 
The interlocking mechanism 5966 * is provided by a parent nexus driver (isadma), which is enabled 5967 * indirectly through a DMAC1_ALLMASK register access: 5968 * 5969 * writing a non-zero value to this register enters a lock, 5970 * writing zero releases the lock. 5971 * 5972 * DMA transfer must only occur after entering a lock. 5973 * If the lock is already owned by other driver, we will block. 5974 * 5975 * The following operation unmasks our channel and masks all others 5976 */ 5977 dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn)); 5978 pp->uh.m1553.isadma_entered = 1; 5979 5980 return (SUCCESS); 5981 } 5982 5983 static int 5984 dma8237_dma_stop(struct ecppunit *pp, size_t *countp) 5985 { 5986 uint8_t ecr; 5987 5988 /* stop DMA */ 5989 ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV; 5990 (void) ecr_write(pp, ecr); 5991 5992 if (pp->uh.m1553.isadma_entered) { 5993 /* reset the channel mask so we can issue PIO's to our device */ 5994 dma8237_write(pp, DMAC1_ALLMASK, 0); 5995 pp->uh.m1553.isadma_entered = 0; 5996 5997 } 5998 5999 /* read DMA count if requested */ 6000 if (countp) { 6001 *countp = dma8237_getcnt(pp); 6002 if (pp->dma_dir == DDI_DMA_READ && *countp > 0) { 6003 (*countp)++; /* need correction for reverse xfers */ 6004 } 6005 } 6006 return (SUCCESS); 6007 } 6008 #if defined(__x86) 6009 static int 6010 x86_dma_start(struct ecppunit *pp) 6011 { 6012 uint8_t chn; 6013 struct ddi_dmae_req dmaereq; 6014 6015 chn = pp->uh.x86.chn; 6016 ASSERT(chn <= DMAE_CH3 && 6017 pp->dma_cookie.dmac_size != 0 && 6018 pp->dma_cookie.dmac_address != NULL); 6019 bzero(&dmaereq, sizeof (struct ddi_dmae_req)); 6020 dmaereq.der_command = 6021 (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE; 6022 if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn) 6023 != DDI_SUCCESS) 6024 ecpp_error(pp->dip, "prog failed !!!\n"); 6025 ecpp_error(pp->dip, "dma_started..\n"); 6026 return (SUCCESS); 6027 } 6028 6029 static int 6030 x86_dma_stop(struct ecppunit *pp, size_t *countp) 6031 { 6032 uint8_t ecr; 6033 6034 /* stop DMA */ 6035 if (pp->uh.x86.chn == 0xff) 6036 return (FAILURE); 6037 ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV; 6038 (void) ecr_write(pp, ecr); 6039 ecpp_error(pp->dip, "dma_stop\n"); 6040 6041 /* read DMA count if requested */ 6042 if (countp) { 6043 *countp = x86_getcnt(pp); 6044 } 6045 ecpp_error(pp->dip, "dma_stoped..\n"); 6046 return (SUCCESS); 6047 } 6048 #endif 6049 6050 /* channel must be masked */ 6051 static void 6052 dma8237_write_addr(struct ecppunit *pp, uint32_t addr) 6053 { 6054 uint8_t c_addr, c_lpage; 6055 uint16_t c_hpage, *p; 6056 6057 switch (pp->uh.m1553.chn) { 6058 case DMAE_CH0: 6059 c_addr = DMA_0ADR; 6060 c_lpage = DMA_0PAGE; 6061 c_hpage = DMA_0HPG; 6062 break; 6063 6064 case DMAE_CH1: 6065 c_addr = DMA_1ADR; 6066 c_lpage = DMA_1PAGE; 6067 c_hpage = DMA_1HPG; 6068 break; 6069 6070 case DMAE_CH2: 6071 c_addr = DMA_2ADR; 6072 c_lpage = DMA_2PAGE; 6073 c_hpage = DMA_2HPG; 6074 break; 6075 6076 case DMAE_CH3: 6077 c_addr = DMA_3ADR; 6078 c_lpage = DMA_3PAGE; 6079 c_hpage = DMA_3HPG; 6080 break; 6081 6082 default: 6083 return; 6084 } 6085 6086 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr]; 6087 ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF); 6088 6089 dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16); 6090 dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24); 6091 6092 } 6093 6094 /* 6095 * This function may be useful during debugging, 6096 * so we leave it in, but do not include in the binary 6097 */ 6098 #ifdef 
INCLUDE_DMA8237_READ_ADDR 6099 static uint32_t 6100 dma8237_read_addr(struct ecppunit *pp) 6101 { 6102 uint8_t rval3, rval4; 6103 uint16_t rval16; 6104 uint32_t rval; 6105 uint8_t c_addr, c_lpage; 6106 uint16_t c_hpage, *p; 6107 6108 switch (pp->uh.m1553.chn) { 6109 case DMAE_CH0: 6110 c_addr = DMA_0ADR; 6111 c_lpage = DMA_0PAGE; 6112 c_hpage = DMA_0HPG; 6113 break; 6114 6115 case DMAE_CH1: 6116 c_addr = DMA_1ADR; 6117 c_lpage = DMA_1PAGE; 6118 c_hpage = DMA_1HPG; 6119 break; 6120 6121 case DMAE_CH2: 6122 c_addr = DMA_2ADR; 6123 c_lpage = DMA_2PAGE; 6124 c_hpage = DMA_2HPG; 6125 break; 6126 6127 case DMAE_CH3: 6128 c_addr = DMA_3ADR; 6129 c_lpage = DMA_3PAGE; 6130 c_hpage = DMA_3HPG; 6131 break; 6132 6133 default: 6134 return (NULL); 6135 } 6136 6137 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr]; 6138 rval16 = ddi_get16(pp->uh.m1553.d_handle, p); 6139 6140 rval3 = dma8237_read(pp, c_lpage); 6141 rval4 = dma8237_read(pp, c_hpage); 6142 6143 rval = rval16 | (rval3 << 16) | (rval4 <<24); 6144 6145 return (rval); 6146 } 6147 #endif 6148 6149 static void 6150 dma8237_write_count(struct ecppunit *pp, uint32_t count) 6151 { 6152 uint8_t c_wcnt; 6153 uint16_t *p; 6154 6155 switch (pp->uh.m1553.chn) { 6156 case DMAE_CH0: 6157 c_wcnt = DMA_0WCNT; 6158 break; 6159 6160 case DMAE_CH1: 6161 c_wcnt = DMA_1WCNT; 6162 break; 6163 6164 case DMAE_CH2: 6165 c_wcnt = DMA_2WCNT; 6166 break; 6167 6168 case DMAE_CH3: 6169 c_wcnt = DMA_3WCNT; 6170 break; 6171 6172 default: 6173 return; 6174 } 6175 6176 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt]; 6177 ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF); 6178 6179 } 6180 6181 static uint32_t 6182 dma8237_read_count(struct ecppunit *pp) 6183 { 6184 uint8_t c_wcnt; 6185 uint16_t *p; 6186 6187 switch (pp->uh.m1553.chn) { 6188 case DMAE_CH0: 6189 c_wcnt = DMA_0WCNT; 6190 break; 6191 6192 case DMAE_CH1: 6193 c_wcnt = DMA_1WCNT; 6194 break; 6195 6196 case DMAE_CH2: 6197 c_wcnt = DMA_2WCNT; 6198 break; 6199 6200 case DMAE_CH3: 6201 c_wcnt = DMA_3WCNT; 6202 break; 6203 6204 default: 6205 return (NULL); 6206 } 6207 6208 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt]; 6209 return (ddi_get16(pp->uh.m1553.d_handle, p)); 6210 6211 } 6212 6213 static void 6214 dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val) 6215 { 6216 ddi_put8(pp->uh.m1553.d_handle, 6217 &pp->uh.m1553.isa_space->isa_reg[reg_num], val); 6218 } 6219 6220 static uint8_t 6221 dma8237_read(struct ecppunit *pp, int reg_num) 6222 { 6223 return (ddi_get8(pp->uh.m1553.d_handle, 6224 &pp->uh.m1553.isa_space->isa_reg[reg_num])); 6225 } 6226 6227 static size_t 6228 dma8237_getcnt(struct ecppunit *pp) 6229 { 6230 uint32_t cnt; 6231 6232 if ((cnt = dma8237_read_count(pp)) == 0xffff) 6233 cnt = 0; 6234 else 6235 cnt++; 6236 return (cnt); 6237 } 6238 6239 6240 /* 6241 * 6242 * Kstat support routines 6243 * 6244 */ 6245 static void 6246 ecpp_kstat_init(struct ecppunit *pp) 6247 { 6248 struct ecppkstat *ekp; 6249 char buf[16]; 6250 6251 /* 6252 * Allocate, initialize and install interrupt counter kstat 6253 */ 6254 (void) sprintf(buf, "ecppc%d", pp->instance); 6255 pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller", 6256 KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT); 6257 if (pp->intrstats == NULL) { 6258 ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed"); 6259 } else { 6260 pp->intrstats->ks_update = ecpp_kstatintr_update; 6261 pp->intrstats->ks_private = (void *) pp; 6262 kstat_install(pp->intrstats); 6263 } 6264 6265 /* 6266 * Allocate, initialize and 
/*
 *
 * Kstat support routines
 *
 */
static void
ecpp_kstat_init(struct ecppunit *pp)
{
	struct ecppkstat *ekp;
	char buf[16];

	/*
	 * Allocate, initialize and install interrupt counter kstat
	 */
	(void) sprintf(buf, "ecppc%d", pp->instance);
	pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
	if (pp->intrstats == NULL) {
		ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
	} else {
		pp->intrstats->ks_update = ecpp_kstatintr_update;
		pp->intrstats->ks_private = (void *)pp;
		kstat_install(pp->intrstats);
	}

	/*
	 * Allocate, initialize and install misc stats kstat
	 */
	pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (struct ecppkstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);
	if (pp->ksp == NULL) {
		ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
		return;
	}

	ekp = (struct ecppkstat *)pp->ksp->ks_data;

#define	EK_NAMED_INIT(name) \
	kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)

	EK_NAMED_INIT(ctx_obytes);
	EK_NAMED_INIT(ctxpio_obytes);
	EK_NAMED_INIT(nib_ibytes);
	EK_NAMED_INIT(ecp_obytes);
	EK_NAMED_INIT(ecp_ibytes);
	EK_NAMED_INIT(epp_obytes);
	EK_NAMED_INIT(epp_ibytes);
	EK_NAMED_INIT(diag_obytes);
	EK_NAMED_INIT(to_ctx);
	EK_NAMED_INIT(to_nib);
	EK_NAMED_INIT(to_ecp);
	EK_NAMED_INIT(to_epp);
	EK_NAMED_INIT(to_diag);
	EK_NAMED_INIT(xfer_tout);
	EK_NAMED_INIT(ctx_cf);
	EK_NAMED_INIT(joblen);
	EK_NAMED_INIT(isr_reattempt_high);
	EK_NAMED_INIT(mode);
	EK_NAMED_INIT(phase);
	EK_NAMED_INIT(backchan);
	EK_NAMED_INIT(iomode);
	EK_NAMED_INIT(state);

	pp->ksp->ks_update = ecpp_kstat_update;
	pp->ksp->ks_private = (void *)pp;
	kstat_install(pp->ksp);
}

static int
ecpp_kstat_update(kstat_t *ksp, int rw)
{
	struct ecppunit *pp;
	struct ecppkstat *ekp;

	/*
	 * For the time being there is no point
	 * in supporting writable kstats
	 */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	pp = (struct ecppunit *)ksp->ks_private;
	ekp = (struct ecppkstat *)ksp->ks_data;

	mutex_enter(&pp->umutex);

	ekp->ek_ctx_obytes.value.ui32 = pp->obytes[ECPP_CENTRONICS] +
	    pp->obytes[ECPP_COMPAT_MODE];
	ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
	ekp->ek_nib_ibytes.value.ui32 = pp->ibytes[ECPP_NIBBLE_MODE];
	ekp->ek_ecp_obytes.value.ui32 = pp->obytes[ECPP_ECP_MODE];
	ekp->ek_ecp_ibytes.value.ui32 = pp->ibytes[ECPP_ECP_MODE];
	ekp->ek_epp_obytes.value.ui32 = pp->obytes[ECPP_EPP_MODE];
	ekp->ek_epp_ibytes.value.ui32 = pp->ibytes[ECPP_EPP_MODE];
	ekp->ek_diag_obytes.value.ui32 = pp->obytes[ECPP_DIAG_MODE];
	ekp->ek_to_ctx.value.ui32 = pp->to_mode[ECPP_CENTRONICS] +
	    pp->to_mode[ECPP_COMPAT_MODE];
	ekp->ek_to_nib.value.ui32 = pp->to_mode[ECPP_NIBBLE_MODE];
	ekp->ek_to_ecp.value.ui32 = pp->to_mode[ECPP_ECP_MODE];
	ekp->ek_to_epp.value.ui32 = pp->to_mode[ECPP_EPP_MODE];
	ekp->ek_to_diag.value.ui32 = pp->to_mode[ECPP_DIAG_MODE];
	ekp->ek_xfer_tout.value.ui32 = pp->xfer_tout;
	ekp->ek_ctx_cf.value.ui32 = pp->ctx_cf;
	ekp->ek_joblen.value.ui32 = pp->joblen;
	ekp->ek_isr_reattempt_high.value.ui32 = pp->isr_reattempt_high;
	ekp->ek_mode.value.ui32 = pp->current_mode;
	ekp->ek_phase.value.ui32 = pp->current_phase;
	ekp->ek_backchan.value.ui32 = pp->backchannel;
	ekp->ek_iomode.value.ui32 = pp->io_mode;
	ekp->ek_state.value.ui32 = pp->e_busy;

	mutex_exit(&pp->umutex);

	return (0);
}

static int
ecpp_kstatintr_update(kstat_t *ksp, int rw)
{
	struct ecppunit *pp;

	/*
	 * For the time being there is no point
	 * in supporting writable kstats
	 */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	pp = (struct ecppunit *)ksp->ks_private;

	mutex_enter(&pp->umutex);

	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;

	mutex_exit(&pp->umutex);

	return (0);
}
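/*
 * Usage sketch (userland, not part of this driver and not compiled here):
 * the "misc" kstat installed by ecpp_kstat_init() can be read through
 * libkstat(3LIB); the kstat(1M) utility exposes the same data, e.g.
 * "kstat -m ecpp".  The program below is illustrative only -- it walks the
 * kstat chain for the ecpp module instead of assuming the kstat name, and
 * prints a couple of the counters defined above.  Build it separately,
 * e.g. "cc ecppstat.c -lkstat" (file name hypothetical).
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <kstat.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL) {
		perror("kstat_open");
		return (1);
	}

	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		/* match the misc kstat created by ecpp_kstat_init() */
		if (strcmp(ksp->ks_module, "ecpp") != 0 ||
		    strcmp(ksp->ks_class, "misc") != 0 ||
		    ksp->ks_type != KSTAT_TYPE_NAMED)
			continue;

		if (kstat_read(kc, ksp, NULL) == -1)
			continue;

		if ((kn = kstat_data_lookup(ksp, "joblen")) != NULL)
			(void) printf("instance %d joblen %u\n",
			    ksp->ks_instance, kn->value.ui32);
		if ((kn = kstat_data_lookup(ksp, "mode")) != NULL)
			(void) printf("instance %d mode %u\n",
			    ksp->ks_instance, kn->value.ui32);
	}

	(void) kstat_close(kc);
	return (0);
}
#endif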