/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 *
 * IEEE 1284 Parallel Port Device Driver
 *
 */

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/debug.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/conf.h>		/* req. by dev_ops flags MTSAFE etc. */
#include <sys/modctl.h>		/* for modldrv */
#include <sys/stat.h>		/* ddi_create_minor_node S_IFCHR */
#include <sys/open.h>
#include <sys/ddi_impldefs.h>
#include <sys/kstat.h>

#include <sys/prnio.h>
#include <sys/ecppreg.h>	/* hw description */
#include <sys/ecppio.h>		/* ioctl description */
#include <sys/ecppvar.h>	/* driver description */
#include <sys/dma_engine.h>
#include <sys/dma_i8237A.h>

/*
 * Background
 * ==========
 * The IEEE 1284-1994 standard defines "a signalling method for asynchronous,
 * fully interlocked, bidirectional parallel communications between hosts
 * and printers or other peripherals" (1.1).  The standard defines 5 modes
 * of operation - Compatibility, Nibble, Byte, ECP and EPP - which differ
 * in direction, bandwidth, pin assignment, DMA capability, etc.
 *
 * Negotiation is a mechanism for moving between modes.  Compatibility mode
 * is the default mode, from which negotiations to other modes occur and
 * to which both host and peripheral revert in case of interface errors.
 * Compatibility mode provides a unidirectional (forward) channel for
 * communicating with old pre-1284 peripherals.
 *
 * Each mode has a number of phases.  A [mode, phase] pair represents the
 * interface state.  The host initiates all transfers, though the peripheral
 * can request a backchannel transfer by asserting the nErr pin.
 *
 * The ecpp driver implements an IEEE 1284-compliant host using a combination
 * of hardware and software.  The hardware part is represented by a controller,
 * which is part of the SuperIO chip.  Ecpp supports the following SuperIOs:
 * PC82332/PC82336 (U5/U10/U60), PC97317 (U100), M1553 (Grover).
 * Struct ecpp_hw describes each SuperIO and is determined in ecpp_attach().
 *
 * Negotiation is performed in software.  A transfer may be performed either
 * in software by driving output pins for each byte (PIO method), or with
 * hardware assistance - the SuperIO has a 16-byte FIFO, which is filled by
 * the driver (normally using DMA), while the chip performs the actual xfer.
 * PIO is used for Nibble and Compat; DMA is used for ECP and Compat modes.
 *
 * The driver currently supports the following modes:
 *
 * - Compatibility mode: byte-wide forward channel ~50KB/sec;
 *	pp->io_mode defines PIO or DMA method of transfer;
 * - Nibble mode: nibble-wide (4-bit) reverse channel ~30KB/sec;
 * - ECP mode: byte-wide bidirectional channel (~1MB/sec);
 *
 * Theory of operation
 * ===================
 * The manner in which ecpp drives the 1284 interface is that of a state
 * machine.  State is a combination of 1284 mode {ECPP_*_MODE}, 1284 phase
 * {ECPP_PHASE_*} and transfer method {PIO, DMA}.  State is a function of
 * application actions {write(2), ioctl(2)} and peripheral reaction.
 *
 * The 1284 interface state is described by the following variables:
 *	pp->current_mode  -- 1284 mode used for forward transfers;
 *	pp->backchannel   -- 1284 mode used for backward transfers;
 *	pp->current_phase -- 1284 phase;
 *
 * Bidirectional operation in Compatibility mode is provided by a combination:
 * pp->current_mode == ECPP_COMPAT_MODE && pp->backchannel == ECPP_NIBBLE_MODE
 * ECPP_CENTRONICS means no backchannel.
 *
 * Driver internal state is defined by pp->e_busy as follows:
 *	ECPP_IDLE  -- idle, no active transfers;
 *	ECPP_BUSY  -- transfer is in progress;
 *	ECPP_ERR   -- have data to transfer, but peripheral can't receive data;
 *	ECPP_FLUSH -- flushing the queues;
 *
 * When opened, the driver is in the ECPP_IDLE state and the current mode is
 * ECPP_CENTRONICS.  Default negotiation tries to negotiate the best mode
 * supported by the printer and sets pp->current_mode and pp->backchannel
 * accordingly.
 *
 * When output data arrives in M_DATA mblks, ecpp_wput() puts them on the
 * queue to let ecpp_wsrv() concatenate small blocks into one big transfer
 * by copying them into pp->ioblock.  If the first mblk's data is bigger than
 * pp->ioblock, then the mblk is used instead of the i/o block (and pp->msg
 * points to it).
 *
 * Before starting a transfer the driver checks whether the peripheral is
 * ready by calling ecpp_check_status(); if it is not, the driver enters the
 * ECPP_ERR state and schedules ecpp_wsrv_timer(), which qenable()s the wq,
 * effectively rechecking peripheral readiness and rescheduling itself until
 * the peripheral is ready.  The transfer is then started by calling
 * ecpp_start() and the driver enters ECPP_BUSY.
 *
 * While a transfer is in progress all arriving messages are queued up.
 * A transfer can end in either of two ways:
 * - an interrupt occurs and ecpp_isr() checks whether all the data was
 *   transferred; if so, clean up and go ECPP_IDLE, otherwise put back the
 *   untransferred data and qenable();
 * - ecpp_xfer_timeout() cancels the transfer and puts back untransferred data;
 *
 * The PIO transfer method is very CPU intensive: for each sent byte the
 * peripheral state is checked, then the byte is transferred and the driver
 * waits for an nAck interrupt; ecpp_isr() then looks whether there is more
 * data and, if so, triggers the soft interrupt, which transfers the next
 * byte.  The PIO method is needed only for legacy printers which are
 * sensitive to the strobe problem (Bugid 4192788).
 *
 * ecpp_wsrv() is responsible for both starting transfers (ecpp_start()) and
 * going idle (ecpp_idle_phase()).  Many routines qenable() the write queue,
 * meaning "check if there are pending requests, process them and go idle".
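 *
 * A typical instance of that pattern (an illustrative sketch only, not a
 * quote of any particular routine in this file) is:
 *
 *	if (pp->e_busy == ECPP_IDLE)
 *		qenable(pp->writeq);		-- have ecpp_wsrv() run again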
 *
 * In its idle state the driver will always try to listen to the backchannel
 * (as advised by 1284).
 *
 * The mechanism for handling backchannel requests is as follows:
 * - when the peripheral has data to send it asserts the nErr pin
 *   (and also nAck in Nibble Mode), which results in an interrupt on the host;
 * - the ISR creates an M_CTL message containing an ECPP_BACKCHANNEL byte and
 *   puts it back on the write queue;
 * - ecpp_wsrv() gets the M_CTL and calls ecpp_peripheral2host(), which kicks
 *   off the transfer;
 *
 * This is how the Nibble and ECP mode backchannels are implemented.
 * If the read queue gets full, the backchannel request is rejected.
 * As the application reads data and the queue size falls below the low
 * watermark, ecpp_rsrv() gets called and enables the backchannel again.
 *
 * Future enhancements
 * ===================
 *
 * Support new modes: Byte and EPP.
 */

#ifndef ECPP_DEBUG
#define	ECPP_DEBUG	0
#endif	/* ECPP_DEBUG */
int ecpp_debug = ECPP_DEBUG;

int noecp = 0;	/* flag not to use ECP mode */

/* driver entry point fn definitions */
static int	ecpp_open(queue_t *, dev_t *, int, int, cred_t *);
static int	ecpp_close(queue_t *, int, cred_t *);
static uint_t	ecpp_isr(caddr_t);
static uint_t	ecpp_softintr(caddr_t);

/* configuration entry point fn definitions */
static int	ecpp_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int	ecpp_attach(dev_info_t *, ddi_attach_cmd_t);
static int	ecpp_detach(dev_info_t *, ddi_detach_cmd_t);
static struct ecpp_hw_bind *ecpp_determine_sio_type(struct ecppunit *);

/* isr support routines */
static uint_t	ecpp_nErr_ihdlr(struct ecppunit *);
static uint_t	ecpp_pio_ihdlr(struct ecppunit *);
static uint_t	ecpp_dma_ihdlr(struct ecppunit *);
static uint_t	ecpp_M1553_intr(struct ecppunit *);

/* configuration support routines */
static void	ecpp_get_props(struct ecppunit *);

/* Streams Routines */
static int	ecpp_wput(queue_t *, mblk_t *);
static int	ecpp_wsrv(queue_t *);
static int	ecpp_rsrv(queue_t *);
static void	ecpp_flush(struct ecppunit *, int);
static void	ecpp_start(struct ecppunit *, caddr_t, size_t);

/* ioctl handling */
static void	ecpp_putioc(queue_t *, mblk_t *);
static void	ecpp_srvioc(queue_t *, mblk_t *);
static void	ecpp_wput_iocdata_devid(queue_t *, mblk_t *, uintptr_t);
static void	ecpp_putioc_copyout(queue_t *, mblk_t *, void *, int);
static void	ecpp_putioc_stateful_copyin(queue_t *, mblk_t *, size_t);
static void	ecpp_srvioc_devid(queue_t *, mblk_t *,
				struct ecpp_device_id *, int *);
static void	ecpp_srvioc_prnif(queue_t *, mblk_t *);
static void	ecpp_ack_ioctl(queue_t *, mblk_t *);
static void	ecpp_nack_ioctl(queue_t *, mblk_t *, int);

/* kstat routines */
static void	ecpp_kstat_init(struct ecppunit *);
static int	ecpp_kstat_update(kstat_t *, int);
static int	ecpp_kstatintr_update(kstat_t *, int);

/* dma routines */
static void	ecpp_putback_untransfered(struct ecppunit *, void *, uint_t);
static uint8_t	ecpp_setup_dma_resources(struct ecppunit *, caddr_t, size_t);
static uint8_t	ecpp_init_dma_xfer(struct ecppunit *, caddr_t, size_t);

/* pio routines */
static void	ecpp_pio_writeb(struct ecppunit *);
static void	ecpp_xfer_cleanup(struct ecppunit *);
static uint8_t	ecpp_prep_pio_xfer(struct ecppunit *, caddr_t,
size_t); 233 234 /* misc */ 235 static uchar_t ecpp_reset_port_regs(struct ecppunit *); 236 static void ecpp_xfer_timeout(void *); 237 static void ecpp_fifo_timer(void *); 238 static void ecpp_wsrv_timer(void *); 239 static uchar_t dcr_write(struct ecppunit *, uint8_t); 240 static uchar_t ecr_write(struct ecppunit *, uint8_t); 241 static uchar_t ecpp_check_status(struct ecppunit *); 242 static int ecpp_backchan_req(struct ecppunit *); 243 static void ecpp_untimeout_unblock(struct ecppunit *, timeout_id_t *); 244 static uint_t ecpp_get_prn_ifcap(struct ecppunit *); 245 246 /* stubs */ 247 static void empty_config_mode(struct ecppunit *); 248 static void empty_mask_intr(struct ecppunit *); 249 250 /* PC87332 support */ 251 static int pc87332_map_regs(struct ecppunit *); 252 static void pc87332_unmap_regs(struct ecppunit *); 253 static int pc87332_config_chip(struct ecppunit *); 254 static void pc87332_config_mode(struct ecppunit *); 255 static uint8_t pc87332_read_config_reg(struct ecppunit *, uint8_t); 256 static void pc87332_write_config_reg(struct ecppunit *, uint8_t, uint8_t); 257 static void cheerio_mask_intr(struct ecppunit *); 258 static void cheerio_unmask_intr(struct ecppunit *); 259 static int cheerio_dma_start(struct ecppunit *); 260 static int cheerio_dma_stop(struct ecppunit *, size_t *); 261 static size_t cheerio_getcnt(struct ecppunit *); 262 static void cheerio_reset_dcsr(struct ecppunit *); 263 264 /* PC97317 support */ 265 static int pc97317_map_regs(struct ecppunit *); 266 static void pc97317_unmap_regs(struct ecppunit *); 267 static int pc97317_config_chip(struct ecppunit *); 268 static void pc97317_config_mode(struct ecppunit *); 269 270 /* M1553 Southbridge support */ 271 static int m1553_map_regs(struct ecppunit *pp); 272 static void m1553_unmap_regs(struct ecppunit *pp); 273 static int m1553_config_chip(struct ecppunit *); 274 static uint8_t m1553_read_config_reg(struct ecppunit *, uint8_t); 275 static void m1553_write_config_reg(struct ecppunit *, uint8_t, uint8_t); 276 277 /* M1553 Southbridge DMAC 8237 support routines */ 278 static int dma8237_dma_start(struct ecppunit *); 279 static int dma8237_dma_stop(struct ecppunit *, size_t *); 280 static size_t dma8237_getcnt(struct ecppunit *); 281 static void dma8237_write_addr(struct ecppunit *, uint32_t); 282 static void dma8237_write_count(struct ecppunit *, uint32_t); 283 static uint32_t dma8237_read_count(struct ecppunit *); 284 static void dma8237_write(struct ecppunit *, int, uint8_t); 285 static uint8_t dma8237_read(struct ecppunit *, int); 286 #ifdef INCLUDE_DMA8237_READ_ADDR 287 static uint32_t dma8237_read_addr(struct ecppunit *); 288 #endif 289 290 /* i86 PC support rountines */ 291 292 #if defined(__x86) 293 static int x86_dma_start(struct ecppunit *); 294 static int x86_dma_stop(struct ecppunit *, size_t *); 295 static int x86_map_regs(struct ecppunit *); 296 static void x86_unmap_regs(struct ecppunit *); 297 static int x86_config_chip(struct ecppunit *); 298 static size_t x86_getcnt(struct ecppunit *); 299 #endif 300 301 /* IEEE 1284 phase transitions */ 302 static void ecpp_1284_init_interface(struct ecppunit *); 303 static int ecpp_1284_termination(struct ecppunit *); 304 static uchar_t ecpp_idle_phase(struct ecppunit *); 305 static int ecp_forward2reverse(struct ecppunit *); 306 static int ecp_reverse2forward(struct ecppunit *); 307 static int read_nibble_backchan(struct ecppunit *); 308 309 /* reverse transfers */ 310 static uint_t ecpp_peripheral2host(struct ecppunit *); 311 static uchar_t 
ecp_peripheral2host(struct ecppunit *); 312 static uchar_t nibble_peripheral2host(struct ecppunit *pp, uint8_t *); 313 static int ecpp_getdevid(struct ecppunit *, uint8_t *, int *, int); 314 static void ecpp_ecp_read_timeout(void *); 315 static void ecpp_ecp_read_completion(struct ecppunit *); 316 317 /* IEEE 1284 mode transitions */ 318 static void ecpp_default_negotiation(struct ecppunit *); 319 static int ecpp_mode_negotiation(struct ecppunit *, uchar_t); 320 static int ecpp_1284_negotiation(struct ecppunit *, uint8_t, uint8_t *); 321 static int ecp_negotiation(struct ecppunit *); 322 static int nibble_negotiation(struct ecppunit *); 323 static int devidnib_negotiation(struct ecppunit *); 324 325 /* IEEE 1284 utility routines */ 326 static int wait_dsr(struct ecppunit *, uint8_t, uint8_t, int); 327 328 /* debugging functions */ 329 static void ecpp_error(dev_info_t *, char *, ...); 330 static uchar_t ecpp_get_error_status(uchar_t); 331 332 /* 333 * Chip-dependent structures 334 */ 335 static ddi_dma_attr_t cheerio_dma_attr = { 336 DMA_ATTR_VERSION, /* version */ 337 0x00000000ull, /* dlim_addr_lo */ 338 0xfffffffeull, /* dlim_addr_hi */ 339 0xffffff, /* DMA counter register */ 340 1, /* DMA address alignment */ 341 0x74, /* burst sizes */ 342 0x0001, /* min effective DMA size */ 343 0xffff, /* maximum transfer size */ 344 0xffff, /* segment boundary */ 345 1, /* s/g list length */ 346 1, /* granularity of device */ 347 0 /* DMA flags */ 348 }; 349 350 static struct ecpp_hw pc87332 = { 351 pc87332_map_regs, 352 pc87332_unmap_regs, 353 pc87332_config_chip, 354 pc87332_config_mode, 355 cheerio_mask_intr, 356 cheerio_unmask_intr, 357 cheerio_dma_start, 358 cheerio_dma_stop, 359 cheerio_getcnt, 360 &cheerio_dma_attr 361 }; 362 363 static struct ecpp_hw pc97317 = { 364 pc97317_map_regs, 365 pc97317_unmap_regs, 366 pc97317_config_chip, 367 pc97317_config_mode, 368 cheerio_mask_intr, 369 cheerio_unmask_intr, 370 cheerio_dma_start, 371 cheerio_dma_stop, 372 cheerio_getcnt, 373 &cheerio_dma_attr 374 }; 375 376 static ddi_dma_attr_t i8237_dma_attr = { 377 DMA_ATTR_VERSION, /* version */ 378 0x00000000ull, /* dlim_addr_lo */ 379 0xfffffffeull, /* dlim_addr_hi */ 380 0xffff, /* DMA counter register */ 381 1, /* DMA address alignment */ 382 0x01, /* burst sizes */ 383 0x0001, /* min effective DMA size */ 384 0xffff, /* maximum transfer size */ 385 0x7fff, /* segment boundary */ 386 1, /* s/g list length */ 387 1, /* granularity of device */ 388 0 /* DMA flags */ 389 }; 390 391 static struct ecpp_hw m1553 = { 392 m1553_map_regs, 393 m1553_unmap_regs, 394 m1553_config_chip, 395 empty_config_mode, /* no config_mode */ 396 empty_mask_intr, /* no mask_intr */ 397 empty_mask_intr, /* no unmask_intr */ 398 dma8237_dma_start, 399 dma8237_dma_stop, 400 dma8237_getcnt, 401 &i8237_dma_attr 402 }; 403 404 #if defined(__x86) 405 static ddi_dma_attr_t sb_dma_attr = { 406 DMA_ATTR_VERSION, /* version */ 407 0x00000000ull, /* dlim_addr_lo */ 408 0xffffff, /* dlim_addr_hi */ 409 0xffff, /* DMA counter register */ 410 1, /* DMA address alignment */ 411 0x01, /* burst sizes */ 412 0x0001, /* min effective DMA size */ 413 0xffffffff, /* maximum transfer size */ 414 0xffff, /* segment boundary */ 415 1, /* s/g list length */ 416 1, /* granularity of device */ 417 0 /* DMA flags */ 418 }; 419 420 static struct ecpp_hw x86 = { 421 x86_map_regs, 422 x86_unmap_regs, 423 x86_config_chip, 424 empty_config_mode, /* no config_mode */ 425 empty_mask_intr, /* no mask_intr */ 426 empty_mask_intr, /* no unmask_intr */ 427 
x86_dma_start, 428 x86_dma_stop, 429 x86_getcnt, 430 &sb_dma_attr 431 }; 432 #endif 433 434 /* 435 * list of supported devices 436 */ 437 struct ecpp_hw_bind ecpp_hw_bind[] = { 438 { "ns87317-ecpp", &pc97317, "PC97317" }, 439 { "pnpALI,1533,3", &m1553, "M1553" }, 440 { "ecpp", &pc87332, "PC87332" }, 441 #if defined(__x86) 442 { "lp", &x86, "i86pc"}, 443 #endif 444 }; 445 446 static ddi_device_acc_attr_t acc_attr = { 447 DDI_DEVICE_ATTR_V0, 448 DDI_STRUCTURE_LE_ACC, 449 DDI_STRICTORDER_ACC 450 }; 451 452 static struct ecpp_transfer_parms default_xfer_parms = { 453 FWD_TIMEOUT_DEFAULT, /* write timeout in seconds */ 454 ECPP_CENTRONICS /* supported mode */ 455 }; 456 457 /* prnio interface info string */ 458 static const char prn_ifinfo[] = PRN_PARALLEL; 459 460 /* prnio timeouts */ 461 static const struct prn_timeouts prn_timeouts_default = { 462 FWD_TIMEOUT_DEFAULT, /* forward timeout */ 463 REV_TIMEOUT_DEFAULT /* reverse timeout */ 464 }; 465 466 static int ecpp_isr_max_delay = ECPP_ISR_MAX_DELAY; 467 static int ecpp_def_timeout = 90; /* left in for 2.7 compatibility */ 468 469 static void *ecppsoft_statep; 470 471 struct module_info ecppinfo = { 472 /* id, name, min pkt siz, max pkt siz, hi water, low water */ 473 42, "ecpp", 0, IO_BLOCK_SZ, ECPPHIWAT, ECPPLOWAT 474 }; 475 476 static struct qinit ecpp_rinit = { 477 putq, ecpp_rsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL 478 }; 479 480 static struct qinit ecpp_wint = { 481 ecpp_wput, ecpp_wsrv, ecpp_open, ecpp_close, NULL, &ecppinfo, NULL 482 }; 483 484 struct streamtab ecpp_str_info = { 485 &ecpp_rinit, &ecpp_wint, NULL, NULL 486 }; 487 488 static struct cb_ops ecpp_cb_ops = { 489 nodev, /* cb_open */ 490 nodev, /* cb_close */ 491 nodev, /* cb_strategy */ 492 nodev, /* cb_print */ 493 nodev, /* cb_dump */ 494 nodev, /* cb_read */ 495 nodev, /* cb_write */ 496 nodev, /* cb_ioctl */ 497 nodev, /* cb_devmap */ 498 nodev, /* cb_mmap */ 499 nodev, /* cb_segmap */ 500 nochpoll, /* cb_chpoll */ 501 ddi_prop_op, /* cb_prop_op */ 502 &ecpp_str_info, /* cb_stream */ 503 (D_NEW | D_MP | D_MTPERQ) /* cb_flag */ 504 }; 505 506 /* 507 * Declare ops vectors for auto configuration. 
508 */ 509 struct dev_ops ecpp_ops = { 510 DEVO_REV, /* devo_rev */ 511 0, /* devo_refcnt */ 512 ecpp_getinfo, /* devo_getinfo */ 513 nulldev, /* devo_identify */ 514 nulldev, /* devo_probe */ 515 ecpp_attach, /* devo_attach */ 516 ecpp_detach, /* devo_detach */ 517 nodev, /* devo_reset */ 518 &ecpp_cb_ops, /* devo_cb_ops */ 519 (struct bus_ops *)NULL, /* devo_bus_ops */ 520 nulldev, /* devo_power */ 521 ddi_quiesce_not_needed, /* devo_quiesce */ 522 }; 523 524 extern struct mod_ops mod_driverops; 525 526 static struct modldrv ecppmodldrv = { 527 &mod_driverops, /* type of module - driver */ 528 "parallel port driver", 529 &ecpp_ops, 530 }; 531 532 static struct modlinkage ecppmodlinkage = { 533 MODREV_1, 534 &ecppmodldrv, 535 0 536 }; 537 538 539 /* 540 * 541 * DDI/DKI entry points and supplementary routines 542 * 543 */ 544 545 546 int 547 _init(void) 548 { 549 int error; 550 551 if ((error = mod_install(&ecppmodlinkage)) == 0) { 552 (void) ddi_soft_state_init(&ecppsoft_statep, 553 sizeof (struct ecppunit), 1); 554 } 555 556 return (error); 557 } 558 559 int 560 _fini(void) 561 { 562 int error; 563 564 if ((error = mod_remove(&ecppmodlinkage)) == 0) { 565 ddi_soft_state_fini(&ecppsoft_statep); 566 } 567 568 return (error); 569 } 570 571 int 572 _info(struct modinfo *modinfop) 573 { 574 return (mod_info(&ecppmodlinkage, modinfop)); 575 } 576 577 static int 578 ecpp_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 579 { 580 int instance; 581 char name[16]; 582 struct ecppunit *pp; 583 struct ecpp_hw_bind *hw_bind; 584 585 instance = ddi_get_instance(dip); 586 587 switch (cmd) { 588 case DDI_ATTACH: 589 break; 590 591 case DDI_RESUME: 592 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) { 593 return (DDI_FAILURE); 594 } 595 596 mutex_enter(&pp->umutex); 597 598 pp->suspended = FALSE; 599 600 /* 601 * Initialize the chip and restore current mode if needed 602 */ 603 (void) ECPP_CONFIG_CHIP(pp); 604 (void) ecpp_reset_port_regs(pp); 605 606 if (pp->oflag == TRUE) { 607 int current_mode = pp->current_mode; 608 609 (void) ecpp_1284_termination(pp); 610 (void) ecpp_mode_negotiation(pp, current_mode); 611 } 612 613 mutex_exit(&pp->umutex); 614 615 return (DDI_SUCCESS); 616 617 default: 618 return (DDI_FAILURE); 619 } 620 621 if (ddi_soft_state_zalloc(ecppsoft_statep, instance) != 0) { 622 ecpp_error(dip, "ddi_soft_state_zalloc failed\n"); 623 goto fail; 624 } 625 626 pp = ddi_get_soft_state(ecppsoft_statep, instance); 627 628 pp->dip = dip; 629 pp->suspended = FALSE; 630 631 /* 632 * Determine SuperIO type and set chip-dependent variables 633 */ 634 hw_bind = ecpp_determine_sio_type(pp); 635 636 if (hw_bind == NULL) { 637 cmn_err(CE_NOTE, "parallel port controller not supported"); 638 goto fail_sio; 639 } else { 640 pp->hw = hw_bind->hw; 641 ecpp_error(pp->dip, "SuperIO type: %s\n", hw_bind->info); 642 } 643 644 /* 645 * Map registers 646 */ 647 if (ECPP_MAP_REGS(pp) != SUCCESS) { 648 goto fail_map; 649 } 650 651 if (ddi_dma_alloc_handle(dip, pp->hw->attr, DDI_DMA_DONTWAIT, 652 NULL, &pp->dma_handle) != DDI_SUCCESS) { 653 ecpp_error(dip, "ecpp_attach: failed ddi_dma_alloc_handle\n"); 654 goto fail_dma; 655 } 656 657 if (ddi_get_iblock_cookie(dip, 0, 658 &pp->ecpp_trap_cookie) != DDI_SUCCESS) { 659 ecpp_error(dip, "ecpp_attach: failed ddi_get_iblock_cookie\n"); 660 goto fail_ibc; 661 } 662 663 mutex_init(&pp->umutex, NULL, MUTEX_DRIVER, 664 (void *)pp->ecpp_trap_cookie); 665 666 cv_init(&pp->pport_cv, NULL, CV_DRIVER, NULL); 667 668 if (ddi_add_intr(dip, 0, &pp->ecpp_trap_cookie, NULL, 
ecpp_isr, 669 (caddr_t)pp) != DDI_SUCCESS) { 670 ecpp_error(dip, "ecpp_attach: failed to add hard intr\n"); 671 goto fail_intr; 672 } 673 674 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, 675 &pp->softintr_id, 0, 0, ecpp_softintr, 676 (caddr_t)pp) != DDI_SUCCESS) { 677 ecpp_error(dip, "ecpp_attach: failed to add soft intr\n"); 678 goto fail_softintr; 679 } 680 681 (void) sprintf(name, "ecpp%d", instance); 682 683 if (ddi_create_minor_node(dip, name, S_IFCHR, instance, 684 DDI_NT_PRINTER, NULL) == DDI_FAILURE) { 685 ecpp_error(dip, "ecpp_attach: create_minor_node failed\n"); 686 goto fail_minor; 687 } 688 689 pp->ioblock = (caddr_t)kmem_alloc(IO_BLOCK_SZ, KM_SLEEP); 690 if (pp->ioblock == NULL) { 691 ecpp_error(dip, "ecpp_attach: kmem_alloc failed\n"); 692 goto fail_iob; 693 } else { 694 ecpp_error(pp->dip, "ecpp_attach: ioblock=0x%x\n", pp->ioblock); 695 } 696 697 ecpp_get_props(pp); 698 #if defined(__x86) 699 if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) { 700 if (ddi_dmae_alloc(dip, pp->uh.x86.chn, 701 DDI_DMA_DONTWAIT, NULL) == DDI_SUCCESS) 702 ecpp_error(pp->dip, "dmae_alloc success!\n"); 703 } 704 #endif 705 if (ECPP_CONFIG_CHIP(pp) == FAILURE) { 706 ecpp_error(pp->dip, "config_chip failed.\n"); 707 goto fail_config; 708 } 709 710 ecpp_kstat_init(pp); 711 712 ddi_report_dev(dip); 713 714 return (DDI_SUCCESS); 715 716 fail_config: 717 ddi_prop_remove_all(dip); 718 kmem_free(pp->ioblock, IO_BLOCK_SZ); 719 fail_iob: 720 ddi_remove_minor_node(dip, NULL); 721 fail_minor: 722 ddi_remove_softintr(pp->softintr_id); 723 fail_softintr: 724 ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie); 725 fail_intr: 726 mutex_destroy(&pp->umutex); 727 cv_destroy(&pp->pport_cv); 728 fail_ibc: 729 ddi_dma_free_handle(&pp->dma_handle); 730 fail_dma: 731 ECPP_UNMAP_REGS(pp); 732 fail_map: 733 fail_sio: 734 ddi_soft_state_free(ecppsoft_statep, instance); 735 fail: 736 ecpp_error(dip, "ecpp_attach: failed.\n"); 737 738 return (DDI_FAILURE); 739 } 740 741 static int 742 ecpp_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 743 { 744 int instance; 745 struct ecppunit *pp; 746 747 instance = ddi_get_instance(dip); 748 749 switch (cmd) { 750 case DDI_DETACH: 751 break; 752 753 case DDI_SUSPEND: 754 if (!(pp = ddi_get_soft_state(ecppsoft_statep, instance))) { 755 return (DDI_FAILURE); 756 } 757 758 mutex_enter(&pp->umutex); 759 ASSERT(pp->suspended == FALSE); 760 761 pp->suspended = TRUE; /* prevent new transfers */ 762 763 /* 764 * Wait if there's any activity on the port 765 */ 766 if ((pp->e_busy == ECPP_BUSY) || (pp->e_busy == ECPP_FLUSH)) { 767 (void) cv_reltimedwait(&pp->pport_cv, &pp->umutex, 768 SUSPEND_TOUT * drv_usectohz(1000000), 769 TR_CLOCK_TICK); 770 if ((pp->e_busy == ECPP_BUSY) || 771 (pp->e_busy == ECPP_FLUSH)) { 772 pp->suspended = FALSE; 773 mutex_exit(&pp->umutex); 774 ecpp_error(pp->dip, 775 "ecpp_detach: suspend timeout\n"); 776 return (DDI_FAILURE); 777 } 778 } 779 780 mutex_exit(&pp->umutex); 781 return (DDI_SUCCESS); 782 783 default: 784 return (DDI_FAILURE); 785 } 786 787 pp = ddi_get_soft_state(ecppsoft_statep, instance); 788 #if defined(__x86) 789 if (pp->hw == &x86 && pp->uh.x86.chn != 0xff) 790 (void) ddi_dmae_release(pp->dip, pp->uh.x86.chn); 791 #endif 792 if (pp->dma_handle != NULL) 793 ddi_dma_free_handle(&pp->dma_handle); 794 795 ddi_remove_minor_node(dip, NULL); 796 797 ddi_remove_softintr(pp->softintr_id); 798 799 ddi_remove_intr(dip, (uint_t)0, pp->ecpp_trap_cookie); 800 801 if (pp->ksp) { 802 kstat_delete(pp->ksp); 803 } 804 if (pp->intrstats) { 805 kstat_delete(pp->intrstats); 
	}

	cv_destroy(&pp->pport_cv);

	mutex_destroy(&pp->umutex);

	ECPP_UNMAP_REGS(pp);

	kmem_free(pp->ioblock, IO_BLOCK_SZ);

	ddi_prop_remove_all(dip);

	ddi_soft_state_free(ecppsoft_statep, instance);

	return (DDI_SUCCESS);

}

/*
 * ecpp_get_props() reads ecpp.conf for user-definable tunables.
 * If the file or a particular variable is not there, a default value
 * is assigned.
 */

static void
ecpp_get_props(struct ecppunit *pp)
{
	char	*prop;
#if defined(__x86)
	int	len;
	int	value;
#endif
	/*
	 * If fast_centronics is TRUE, non-compliant IEEE 1284
	 * peripherals (Centronics peripherals) will operate in DMA mode.
	 * Transfers between main memory and the device will be via DMA;
	 * peripheral handshaking will be conducted by superio logic.
	 * If ecpp cannot read the variable correctly, fast_centronics will
	 * be set to FALSE.  In this case, transfers and handshaking
	 * will be conducted by PIO for Centronics devices.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-centronics", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_centronics =
		    (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_centronics = FALSE;
	}

	/*
	 * If fast-1284-compatible is set to TRUE, when ecpp communicates
	 * with IEEE 1284 compliant peripherals, data transfers between
	 * main memory and the parallel port will be conducted by DMA.
	 * Handshaking between the port and peripheral will be conducted
	 * by superio logic.  This is the default characteristic.  If
	 * fast-1284-compatible is set to FALSE, transfers and handshaking
	 * will be conducted by PIO.
	 */

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "fast-1284-compatible", &prop) == DDI_PROP_SUCCESS) {
		pp->fast_compat = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->fast_compat = TRUE;
	}

	/*
	 * Some centronics peripherals require the nInit signal to be
	 * toggled to reset the device.  If centronics_init_seq is set
	 * to TRUE, ecpp will toggle the nInit signal upon every ecpp_open().
	 * Applications have the opportunity to toggle the nInit signal
	 * with ioctl(2) calls as well.  The default is to set it to FALSE.
	 */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-init-seq", &prop) == DDI_PROP_SUCCESS) {
		pp->init_seq = (strcmp(prop, "true") == 0) ? TRUE : FALSE;
		ddi_prop_free(prop);
	} else {
		pp->init_seq = FALSE;
	}

	/*
	 * If one of the centronics status signals is in an erroneous
	 * state, ecpp_wsrv() will be reinvoked every centronics-retry ms
	 * to check whether the status is ok to transfer.  If the property
	 * is not found, wsrv_retry will be set to CENTRONICS_RETRY ms.
	 */
	pp->wsrv_retry = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-retry", CENTRONICS_RETRY);

	/*
	 * In PIO mode, ecpp_isr() will loop waiting for the busy signal
	 * to be deasserted before transferring the next byte.  wait_for_busy
	 * is specified in microseconds.  If the property is not found,
	 * ecpp_isr() will wait for a maximum of WAIT_FOR_BUSY us.
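	 *
	 * For example, ecpp.conf could override this default with a line
	 * like the following (the value shown is purely illustrative):
	 *
	 *	centronics-wait-for-busy=1000;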
	 */
	pp->wait_for_busy = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-wait-for-busy", WAIT_FOR_BUSY);

	/*
	 * In PIO mode, centronics transfers must hold the data signals
	 * for data_setup_time milliseconds before the strobe is asserted.
	 */
	pp->data_setup_time = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-data-setup-time", DATA_SETUP_TIME);

	/*
	 * In PIO mode, centronics transfers assert the strobe signal
	 * for a period of strobe_pulse_width milliseconds.
	 */
	pp->strobe_pulse_width = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "centronics-strobe-pulse-width", STROBE_PULSE_WIDTH);

	/*
	 * Upon a transfer to the peripheral, ecpp waits write_timeout seconds
	 * for the transmission to complete.
	 */
	default_xfer_parms.write_timeout = ddi_prop_get_int(DDI_DEV_T_ANY,
	    pp->dip, 0, "ecpp-transfer-timeout", ecpp_def_timeout);

	pp->xfer_parms = default_xfer_parms;

	/*
	 * Get dma channel for M1553
	 */
	if (pp->hw == &m1553) {
		pp->uh.m1553.chn = ddi_prop_get_int(DDI_DEV_T_ANY,
		    pp->dip, 0, "dma-channel", 0x1);
		ecpp_error(pp->dip, "ecpp_get_prop:chn=%x\n", pp->uh.m1553.chn);
	}
#if defined(__x86)
	len = sizeof (value);
	/* Get dma channel for i86 pc */
	if (pp->hw == &x86) {
		if (ddi_prop_op(DDI_DEV_T_ANY, pp->dip, PROP_LEN_AND_VAL_BUF,
		    DDI_PROP_DONTPASS, "dma-channels", (caddr_t)&value, &len)
		    != DDI_PROP_SUCCESS) {
			ecpp_error(pp->dip, "No dma channel found\n");
			pp->uh.x86.chn = 0xff;
			pp->fast_compat = FALSE;
			pp->noecpregs = TRUE;
		} else
			pp->uh.x86.chn = (uint8_t)value;
	}
#endif
	/*
	 * these properties are not yet public
	 */
	pp->ecp_rev_speed = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "ecp-rev-speed", ECP_REV_SPEED);

	pp->rev_watchdog = ddi_prop_get_int(DDI_DEV_T_ANY, pp->dip, 0,
	    "rev-watchdog", REV_WATCHDOG);

	ecpp_error(pp->dip,
	    "ecpp_get_prop: fast_centronics=%x, fast-1284=%x\n"
	    "ecpp_get_prop: wsrv_retry=%d, wait_for_busy=%d\n"
	    "ecpp_get_prop: data_setup=%d, strobe_pulse=%d\n"
	    "ecpp_get_prop: transfer-timeout=%d\n",
	    pp->fast_centronics, pp->fast_compat,
	    pp->wsrv_retry, pp->wait_for_busy,
	    pp->data_setup_time, pp->strobe_pulse_width,
	    pp->xfer_parms.write_timeout);
}

/*ARGSUSED*/
int
ecpp_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	dev_t	dev = (dev_t)arg;
	struct ecppunit *pp;
	int	instance, ret;

	instance = getminor(dev);

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		pp = ddi_get_soft_state(ecppsoft_statep, instance);
		if (pp != NULL) {
			*result = pp->dip;
			ret = DDI_SUCCESS;
		} else {
			ret = DDI_FAILURE;
		}
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)(uintptr_t)instance;
		ret = DDI_SUCCESS;
		break;

	default:
		ret = DDI_FAILURE;
		break;
	}

	return (ret);
}

/*ARGSUSED2*/
static int
ecpp_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *credp)
{
	struct ecppunit *pp;
	int	instance;
	struct stroptions *sop;
	mblk_t	*mop;

	instance = getminor(*dev);

	if (instance < 0) {
		return (ENXIO);
	}

	pp = (struct ecppunit *)ddi_get_soft_state(ecppsoft_statep, instance);

	if (pp == NULL) {
		return (ENXIO);
	}

	mutex_enter(&pp->umutex);

	/*
	 * Parallel port is an
exclusive-use device 1032 * thus providing print job integrity 1033 */ 1034 if (pp->oflag == TRUE) { 1035 ecpp_error(pp->dip, "ecpp open failed"); 1036 mutex_exit(&pp->umutex); 1037 return (EBUSY); 1038 } 1039 1040 pp->oflag = TRUE; 1041 1042 /* initialize state variables */ 1043 pp->prn_timeouts = prn_timeouts_default; 1044 pp->xfer_parms = default_xfer_parms; 1045 pp->current_mode = ECPP_CENTRONICS; 1046 pp->backchannel = ECPP_CENTRONICS; 1047 pp->current_phase = ECPP_PHASE_PO; 1048 pp->port = ECPP_PORT_DMA; 1049 pp->instance = instance; 1050 pp->timeout_error = 0; 1051 pp->saved_dsr = DSR_READ(pp); 1052 pp->ecpp_drain_counter = 0; 1053 pp->dma_cancelled = FALSE; 1054 pp->io_mode = ECPP_DMA; 1055 pp->joblen = 0; 1056 pp->tfifo_intr = 0; 1057 pp->softintr_pending = 0; 1058 pp->nread = 0; 1059 1060 /* clear the state flag */ 1061 pp->e_busy = ECPP_IDLE; 1062 1063 pp->readq = RD(q); 1064 pp->writeq = WR(q); 1065 pp->msg = NULL; 1066 1067 RD(q)->q_ptr = WR(q)->q_ptr = (caddr_t)pp; 1068 1069 /* 1070 * Get ready: check host/peripheral, negotiate into default mode 1071 */ 1072 if (ecpp_reset_port_regs(pp) == FAILURE) { 1073 mutex_exit(&pp->umutex); 1074 return (EIO); 1075 } 1076 1077 mutex_exit(&pp->umutex); 1078 1079 /* 1080 * Configure the Stream head and enable the Stream 1081 */ 1082 if (!(mop = allocb(sizeof (struct stroptions), BPRI_MED))) { 1083 return (EAGAIN); 1084 } 1085 1086 mop->b_datap->db_type = M_SETOPTS; 1087 mop->b_wptr += sizeof (struct stroptions); 1088 1089 /* 1090 * if device is open with O_NONBLOCK flag set, let read(2) return 0 1091 * if no data waiting to be read. Writes will block on flow control. 1092 */ 1093 sop = (struct stroptions *)mop->b_rptr; 1094 sop->so_flags = SO_HIWAT | SO_LOWAT | SO_NDELON | SO_MREADON; 1095 sop->so_hiwat = ECPPHIWAT; 1096 sop->so_lowat = ECPPLOWAT; 1097 1098 /* enable the stream */ 1099 qprocson(q); 1100 1101 putnext(q, mop); 1102 1103 mutex_enter(&pp->umutex); 1104 1105 ecpp_default_negotiation(pp); 1106 1107 /* go revidle */ 1108 (void) ecpp_idle_phase(pp); 1109 1110 ecpp_error(pp->dip, 1111 "ecpp_open: mode=%x, phase=%x ecr=%x, dsr=%x, dcr=%x\n", 1112 pp->current_mode, pp->current_phase, 1113 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 1114 1115 mutex_exit(&pp->umutex); 1116 1117 return (0); 1118 } 1119 1120 /*ARGSUSED1*/ 1121 static int 1122 ecpp_close(queue_t *q, int flag, cred_t *cred_p) 1123 { 1124 struct ecppunit *pp; 1125 timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id; 1126 1127 pp = (struct ecppunit *)q->q_ptr; 1128 1129 ecpp_error(pp->dip, "ecpp_close: entering ...\n"); 1130 1131 mutex_enter(&pp->umutex); 1132 1133 /* 1134 * ecpp_close() will continue to loop until the 1135 * queue has been drained or if the thread 1136 * has received a SIG. Typically, when the queue 1137 * has data, the port will be ECPP_BUSY. However, 1138 * after a dma completes and before the wsrv 1139 * starts the next transfer, the port may be IDLE. 1140 * In this case, ecpp_close() will loop within this 1141 * while(qsize) segment. Since, ecpp_wsrv() runs 1142 * at software interupt level, this shouldn't loop 1143 * very long. 
1144 */ 1145 while (pp->e_busy != ECPP_IDLE || qsize(WR(q))) { 1146 if (!cv_wait_sig(&pp->pport_cv, &pp->umutex)) { 1147 ecpp_error(pp->dip, "ecpp_close:B: received SIG\n"); 1148 /* 1149 * Returning from a signal such as 1150 * SIGTERM or SIGKILL 1151 */ 1152 ecpp_flush(pp, FWRITE); 1153 break; 1154 } else { 1155 ecpp_error(pp->dip, "ecpp_close:rcvd cv-sig\n"); 1156 } 1157 } 1158 1159 ecpp_error(pp->dip, "ecpp_close: joblen=%d, ctx_cf=%d, " 1160 "qsize(WR(q))=%d, qsize(RD(q))=%d\n", 1161 pp->joblen, pp->ctx_cf, qsize(pp->writeq), qsize(q)); 1162 1163 /* 1164 * Cancel all timeouts, disable interrupts 1165 * 1166 * Note that we can`t call untimeout(9F) with mutex held: 1167 * callout may be blocked on the same mutex, and untimeout() will 1168 * cv_wait() while callout is executing, thus creating a deadlock 1169 * So we zero the timeout id's inside mutex and call untimeout later 1170 */ 1171 timeout_id = pp->timeout_id; 1172 fifo_timer_id = pp->fifo_timer_id; 1173 wsrv_timer_id = pp->wsrv_timer_id; 1174 1175 pp->timeout_id = pp->fifo_timer_id = pp->wsrv_timer_id = 0; 1176 1177 pp->softintr_pending = 0; 1178 pp->dma_cancelled = TRUE; 1179 ECPP_MASK_INTR(pp); 1180 1181 mutex_exit(&pp->umutex); 1182 1183 qprocsoff(q); 1184 1185 if (timeout_id) { 1186 (void) untimeout(timeout_id); 1187 } 1188 if (fifo_timer_id) { 1189 (void) untimeout(fifo_timer_id); 1190 } 1191 if (wsrv_timer_id) { 1192 (void) untimeout(wsrv_timer_id); 1193 } 1194 1195 mutex_enter(&pp->umutex); 1196 1197 /* set link to Compatible mode */ 1198 if ((pp->current_mode == ECPP_ECP_MODE) && 1199 (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) { 1200 (void) ecp_reverse2forward(pp); 1201 } 1202 1203 (void) ecpp_1284_termination(pp); 1204 1205 pp->oflag = FALSE; 1206 q->q_ptr = WR(q)->q_ptr = NULL; 1207 pp->readq = pp->writeq = NULL; 1208 pp->msg = NULL; 1209 1210 ecpp_error(pp->dip, "ecpp_close: ecr=%x, dsr=%x, dcr=%x\n", 1211 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 1212 1213 mutex_exit(&pp->umutex); 1214 1215 return (0); 1216 } 1217 1218 /* 1219 * standard put procedure for ecpp 1220 */ 1221 static int 1222 ecpp_wput(queue_t *q, mblk_t *mp) 1223 { 1224 struct msgb *nmp; 1225 struct ecppunit *pp; 1226 1227 pp = (struct ecppunit *)q->q_ptr; 1228 1229 if (!mp) { 1230 return (0); 1231 } 1232 1233 if ((mp->b_wptr - mp->b_rptr) <= 0) { 1234 ecpp_error(pp->dip, 1235 "ecpp_wput:bogus packet recieved mp=%x\n", mp); 1236 freemsg(mp); 1237 return (0); 1238 } 1239 1240 switch (DB_TYPE(mp)) { 1241 case M_DATA: 1242 /* 1243 * This is a quick fix for multiple message block problem, 1244 * it will be changed later with better performance code. 1245 */ 1246 if (mp->b_cont) { 1247 /* 1248 * mblk has scattered data ... 
do msgpullup 1249 * if it fails, continue with the current mblk 1250 */ 1251 if ((nmp = msgpullup(mp, -1)) != NULL) { 1252 freemsg(mp); 1253 mp = nmp; 1254 ecpp_error(pp->dip, 1255 "ecpp_wput:msgpullup: mp=%p len=%d\n", 1256 mp, mp->b_wptr - mp->b_rptr); 1257 } 1258 } 1259 1260 /* let ecpp_wsrv() concatenate small blocks */ 1261 (void) putq(q, mp); 1262 1263 break; 1264 1265 case M_CTL: 1266 (void) putq(q, mp); 1267 1268 break; 1269 1270 case M_IOCTL: { 1271 struct iocblk *iocbp; 1272 1273 iocbp = (struct iocblk *)mp->b_rptr; 1274 1275 ecpp_error(pp->dip, "ecpp_wput:M_IOCTL %x\n", iocbp->ioc_cmd); 1276 1277 mutex_enter(&pp->umutex); 1278 1279 /* TESTIO and GET_STATUS can be used during transfer */ 1280 if ((pp->e_busy == ECPP_BUSY) && 1281 (iocbp->ioc_cmd != BPPIOC_TESTIO) && 1282 (iocbp->ioc_cmd != PRNIOC_GET_STATUS)) { 1283 mutex_exit(&pp->umutex); 1284 (void) putq(q, mp); 1285 } else { 1286 mutex_exit(&pp->umutex); 1287 ecpp_putioc(q, mp); 1288 } 1289 1290 break; 1291 } 1292 1293 case M_IOCDATA: { 1294 struct copyresp *csp; 1295 1296 ecpp_error(pp->dip, "ecpp_wput:M_IOCDATA\n"); 1297 1298 csp = (struct copyresp *)mp->b_rptr; 1299 1300 /* 1301 * If copy request failed, quit now 1302 */ 1303 if (csp->cp_rval != 0) { 1304 freemsg(mp); 1305 return (0); 1306 } 1307 1308 switch (csp->cp_cmd) { 1309 case ECPPIOC_SETPARMS: 1310 case ECPPIOC_SETREGS: 1311 case ECPPIOC_SETPORT: 1312 case ECPPIOC_SETDATA: 1313 case PRNIOC_SET_IFCAP: 1314 case PRNIOC_SET_TIMEOUTS: 1315 /* 1316 * need to retrieve and use the data, but if the 1317 * device is busy, wait. 1318 */ 1319 (void) putq(q, mp); 1320 break; 1321 1322 case ECPPIOC_GETPARMS: 1323 case ECPPIOC_GETREGS: 1324 case ECPPIOC_GETPORT: 1325 case ECPPIOC_GETDATA: 1326 case BPPIOC_GETERR: 1327 case BPPIOC_TESTIO: 1328 case PRNIOC_GET_IFCAP: 1329 case PRNIOC_GET_STATUS: 1330 case PRNIOC_GET_1284_STATUS: 1331 case PRNIOC_GET_TIMEOUTS: 1332 /* data transfered to user space okay */ 1333 ecpp_ack_ioctl(q, mp); 1334 break; 1335 1336 case ECPPIOC_GETDEVID: 1337 ecpp_wput_iocdata_devid(q, mp, 1338 offsetof(struct ecpp_device_id, rlen)); 1339 break; 1340 1341 case PRNIOC_GET_1284_DEVID: 1342 ecpp_wput_iocdata_devid(q, mp, 1343 offsetof(struct prn_1284_device_id, id_rlen)); 1344 break; 1345 1346 case PRNIOC_GET_IFINFO: 1347 ecpp_wput_iocdata_devid(q, mp, 1348 offsetof(struct prn_interface_info, if_rlen)); 1349 break; 1350 1351 default: 1352 ecpp_nack_ioctl(q, mp, EINVAL); 1353 break; 1354 } 1355 1356 break; 1357 } 1358 1359 case M_FLUSH: 1360 ecpp_error(pp->dip, "ecpp_wput:M_FLUSH\n"); 1361 1362 if (*mp->b_rptr & FLUSHW) { 1363 mutex_enter(&pp->umutex); 1364 ecpp_flush(pp, FWRITE); 1365 mutex_exit(&pp->umutex); 1366 } 1367 1368 if (*mp->b_rptr & FLUSHR) { 1369 mutex_enter(&pp->umutex); 1370 ecpp_flush(pp, FREAD); 1371 mutex_exit(&pp->umutex); 1372 qreply(q, mp); 1373 } else { 1374 freemsg(mp); 1375 } 1376 1377 break; 1378 1379 case M_READ: 1380 /* 1381 * When the user calls read(2), M_READ message is sent to us, 1382 * first byte of which is the number of requested bytes 1383 * We add up user requests and use resulting number 1384 * to calculate the reverse transfer block size 1385 */ 1386 mutex_enter(&pp->umutex); 1387 if (pp->e_busy == ECPP_IDLE) { 1388 pp->nread += *(size_t *)mp->b_rptr; 1389 ecpp_error(pp->dip, "ecpp_wput: M_READ %d", pp->nread); 1390 freemsg(mp); 1391 } else { 1392 ecpp_error(pp->dip, "ecpp_wput: M_READ queueing"); 1393 (void) putq(q, mp); 1394 } 1395 mutex_exit(&pp->umutex); 1396 break; 1397 1398 default: 1399 ecpp_error(pp->dip, "ecpp_wput: 
bad messagetype 0x%x\n", 1400 DB_TYPE(mp)); 1401 freemsg(mp); 1402 break; 1403 } 1404 1405 return (0); 1406 } 1407 1408 /* 1409 * Process ECPPIOC_GETDEVID-like ioctls 1410 */ 1411 static void 1412 ecpp_wput_iocdata_devid(queue_t *q, mblk_t *mp, uintptr_t rlen_offset) 1413 { 1414 struct copyresp *csp; 1415 struct ecpp_copystate *stp; 1416 mblk_t *datamp; 1417 1418 csp = (struct copyresp *)mp->b_rptr; 1419 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 1420 1421 /* determine the state of copyin/copyout process */ 1422 switch (stp->state) { 1423 case ECPP_STRUCTIN: 1424 /* user structure has arrived */ 1425 (void) putq(q, mp); 1426 break; 1427 1428 case ECPP_ADDROUT: 1429 /* 1430 * data transfered to user space okay 1431 * now update user structure 1432 */ 1433 datamp = allocb(sizeof (int), BPRI_MED); 1434 if (datamp == NULL) { 1435 ecpp_nack_ioctl(q, mp, ENOSR); 1436 break; 1437 } 1438 1439 *(int *)datamp->b_rptr = 1440 *(int *)((char *)&stp->un + rlen_offset); 1441 stp->state = ECPP_STRUCTOUT; 1442 1443 mcopyout(mp, csp->cp_private, sizeof (int), 1444 (char *)stp->uaddr + rlen_offset, datamp); 1445 qreply(q, mp); 1446 break; 1447 1448 case ECPP_STRUCTOUT: 1449 /* user structure was updated okay */ 1450 freemsg(csp->cp_private); 1451 ecpp_ack_ioctl(q, mp); 1452 break; 1453 1454 default: 1455 ecpp_nack_ioctl(q, mp, EINVAL); 1456 break; 1457 } 1458 } 1459 1460 static uchar_t 1461 ecpp_get_error_status(uchar_t status) 1462 { 1463 uchar_t pin_status = 0; 1464 1465 if (!(status & ECPP_nERR)) { 1466 pin_status |= BPP_ERR_ERR; 1467 } 1468 1469 if (status & ECPP_PE) { 1470 pin_status |= BPP_PE_ERR; 1471 } 1472 1473 if (!(status & ECPP_SLCT)) { 1474 pin_status |= BPP_SLCT_ERR; 1475 } 1476 1477 if (!(status & ECPP_nBUSY)) { 1478 pin_status |= BPP_SLCT_ERR; 1479 } 1480 1481 return (pin_status); 1482 } 1483 1484 /* 1485 * ioctl handler for output PUT procedure. 
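 *
 * For example (a user-level sketch; the values are illustrative only),
 * an application might issue
 *
 *	struct ecpp_transfer_parms p = { 60, ECPP_ECP_MODE };
 *	ioctl(fd, ECPPIOC_SETPARMS, &p);
 *
 * which arrives here as a TRANSPARENT M_IOCTL; mcopyin() is issued below,
 * and the copied-in data comes back as M_IOCDATA, which ecpp_wput() queues
 * so that ecpp_srvioc() can process it.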
1486 */ 1487 static void 1488 ecpp_putioc(queue_t *q, mblk_t *mp) 1489 { 1490 struct iocblk *iocbp; 1491 struct ecppunit *pp; 1492 1493 pp = (struct ecppunit *)q->q_ptr; 1494 1495 iocbp = (struct iocblk *)mp->b_rptr; 1496 1497 /* I_STR ioctls are invalid */ 1498 if (iocbp->ioc_count != TRANSPARENT) { 1499 ecpp_nack_ioctl(q, mp, EINVAL); 1500 return; 1501 } 1502 1503 switch (iocbp->ioc_cmd) { 1504 case ECPPIOC_SETPARMS: { 1505 mcopyin(mp, NULL, sizeof (struct ecpp_transfer_parms), NULL); 1506 qreply(q, mp); 1507 break; 1508 } 1509 1510 case ECPPIOC_GETPARMS: { 1511 struct ecpp_transfer_parms xfer_parms; 1512 1513 mutex_enter(&pp->umutex); 1514 1515 pp->xfer_parms.mode = pp->current_mode; 1516 xfer_parms = pp->xfer_parms; 1517 1518 mutex_exit(&pp->umutex); 1519 1520 ecpp_putioc_copyout(q, mp, &xfer_parms, sizeof (xfer_parms)); 1521 break; 1522 } 1523 1524 case ECPPIOC_SETREGS: { 1525 mutex_enter(&pp->umutex); 1526 if (pp->current_mode != ECPP_DIAG_MODE) { 1527 mutex_exit(&pp->umutex); 1528 ecpp_nack_ioctl(q, mp, EINVAL); 1529 break; 1530 } 1531 mutex_exit(&pp->umutex); 1532 1533 mcopyin(mp, NULL, sizeof (struct ecpp_regs), NULL); 1534 qreply(q, mp); 1535 break; 1536 } 1537 1538 case ECPPIOC_GETREGS: { 1539 struct ecpp_regs rg; 1540 1541 mutex_enter(&pp->umutex); 1542 1543 if (pp->current_mode != ECPP_DIAG_MODE) { 1544 mutex_exit(&pp->umutex); 1545 ecpp_nack_ioctl(q, mp, EINVAL); 1546 break; 1547 } 1548 1549 rg.dsr = DSR_READ(pp); 1550 rg.dcr = DCR_READ(pp); 1551 1552 mutex_exit(&pp->umutex); 1553 1554 ecpp_error(pp->dip, "ECPPIOC_GETREGS: dsr=%x,dcr=%x\n", 1555 rg.dsr, rg.dcr); 1556 1557 /* these bits must be 1 */ 1558 rg.dsr |= ECPP_SETREGS_DSR_MASK; 1559 rg.dcr |= ECPP_SETREGS_DCR_MASK; 1560 1561 ecpp_putioc_copyout(q, mp, &rg, sizeof (rg)); 1562 break; 1563 } 1564 1565 case ECPPIOC_SETPORT: 1566 case ECPPIOC_SETDATA: { 1567 mutex_enter(&pp->umutex); 1568 if (pp->current_mode != ECPP_DIAG_MODE) { 1569 mutex_exit(&pp->umutex); 1570 ecpp_nack_ioctl(q, mp, EINVAL); 1571 break; 1572 } 1573 mutex_exit(&pp->umutex); 1574 1575 /* 1576 * each of the commands fetches a byte quantity. 
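		 *
		 * For example (a user-level sketch; diagnostic mode must have
		 * been entered first, e.g. via ECPPIOC_SETPARMS with the mode
		 * set to ECPP_DIAG_MODE; the data value is illustrative only):
		 *
		 *	uchar_t port = ECPP_PORT_PIO;
		 *	uchar_t data = 0x55;
		 *	ioctl(fd, ECPPIOC_SETPORT, &port);
		 *	ioctl(fd, ECPPIOC_SETDATA, &data);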
1577 */ 1578 mcopyin(mp, NULL, sizeof (uchar_t), NULL); 1579 qreply(q, mp); 1580 break; 1581 } 1582 1583 case ECPPIOC_GETDATA: 1584 case ECPPIOC_GETPORT: { 1585 uchar_t byte; 1586 1587 mutex_enter(&pp->umutex); 1588 1589 /* must be in diagnostic mode for these commands to work */ 1590 if (pp->current_mode != ECPP_DIAG_MODE) { 1591 mutex_exit(&pp->umutex); 1592 ecpp_nack_ioctl(q, mp, EINVAL); 1593 break; 1594 } 1595 1596 if (iocbp->ioc_cmd == ECPPIOC_GETPORT) { 1597 byte = pp->port; 1598 } else if (iocbp->ioc_cmd == ECPPIOC_GETDATA) { 1599 switch (pp->port) { 1600 case ECPP_PORT_PIO: 1601 byte = DATAR_READ(pp); 1602 break; 1603 case ECPP_PORT_TDMA: 1604 byte = TFIFO_READ(pp); 1605 ecpp_error(pp->dip, "GETDATA=0x%x\n", byte); 1606 break; 1607 default: 1608 ecpp_nack_ioctl(q, mp, EINVAL); 1609 break; 1610 } 1611 } else { 1612 mutex_exit(&pp->umutex); 1613 ecpp_error(pp->dip, "weird command"); 1614 ecpp_nack_ioctl(q, mp, EINVAL); 1615 break; 1616 } 1617 1618 mutex_exit(&pp->umutex); 1619 1620 ecpp_putioc_copyout(q, mp, &byte, sizeof (byte)); 1621 1622 break; 1623 } 1624 1625 case BPPIOC_GETERR: { 1626 struct bpp_error_status bpp_status; 1627 1628 mutex_enter(&pp->umutex); 1629 1630 bpp_status.timeout_occurred = pp->timeout_error; 1631 bpp_status.bus_error = 0; /* not used */ 1632 bpp_status.pin_status = ecpp_get_error_status(pp->saved_dsr); 1633 1634 mutex_exit(&pp->umutex); 1635 1636 ecpp_putioc_copyout(q, mp, &bpp_status, sizeof (bpp_status)); 1637 1638 break; 1639 } 1640 1641 case BPPIOC_TESTIO: { 1642 mutex_enter(&pp->umutex); 1643 1644 if (!((pp->current_mode == ECPP_CENTRONICS) || 1645 (pp->current_mode == ECPP_COMPAT_MODE))) { 1646 ecpp_nack_ioctl(q, mp, EINVAL); 1647 } else { 1648 pp->saved_dsr = DSR_READ(pp); 1649 1650 if ((pp->saved_dsr & ECPP_PE) || 1651 !(pp->saved_dsr & ECPP_SLCT) || 1652 !(pp->saved_dsr & ECPP_nERR)) { 1653 ecpp_nack_ioctl(q, mp, EIO); 1654 } else { 1655 ecpp_ack_ioctl(q, mp); 1656 } 1657 } 1658 1659 mutex_exit(&pp->umutex); 1660 1661 break; 1662 } 1663 1664 case PRNIOC_RESET: 1665 /* 1666 * Initialize interface only if no transfer is in progress 1667 */ 1668 mutex_enter(&pp->umutex); 1669 if (pp->e_busy == ECPP_BUSY) { 1670 mutex_exit(&pp->umutex); 1671 ecpp_nack_ioctl(q, mp, EIO); 1672 } else { 1673 (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS); 1674 1675 DCR_WRITE(pp, ECPP_SLCTIN); 1676 drv_usecwait(2); 1677 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 1678 1679 ecpp_default_negotiation(pp); 1680 1681 mutex_exit(&pp->umutex); 1682 ecpp_ack_ioctl(q, mp); 1683 } 1684 break; 1685 1686 case PRNIOC_GET_IFCAP: { 1687 uint_t ifcap; 1688 1689 mutex_enter(&pp->umutex); 1690 1691 ifcap = ecpp_get_prn_ifcap(pp); 1692 1693 mutex_exit(&pp->umutex); 1694 1695 ecpp_putioc_copyout(q, mp, &ifcap, sizeof (ifcap)); 1696 break; 1697 } 1698 1699 case PRNIOC_SET_IFCAP: { 1700 mcopyin(mp, NULL, sizeof (uint_t), NULL); 1701 qreply(q, mp); 1702 break; 1703 } 1704 1705 case PRNIOC_GET_TIMEOUTS: { 1706 struct prn_timeouts timeouts; 1707 1708 mutex_enter(&pp->umutex); 1709 timeouts = pp->prn_timeouts; 1710 mutex_exit(&pp->umutex); 1711 1712 ecpp_putioc_copyout(q, mp, &timeouts, sizeof (timeouts)); 1713 1714 break; 1715 } 1716 1717 case PRNIOC_SET_TIMEOUTS: 1718 mcopyin(mp, NULL, sizeof (struct prn_timeouts), 1719 *(caddr_t *)(void *)mp->b_cont->b_rptr); 1720 qreply(q, mp); 1721 break; 1722 1723 case PRNIOC_GET_STATUS: { 1724 uint8_t dsr; 1725 uint_t status; 1726 1727 mutex_enter(&pp->umutex); 1728 1729 /* DSR only makes sense in Centronics & Compat mode */ 1730 if (pp->current_mode == 
ECPP_CENTRONICS || 1731 pp->current_mode == ECPP_COMPAT_MODE) { 1732 dsr = DSR_READ(pp); 1733 if ((dsr & ECPP_PE) || 1734 !(dsr & ECPP_SLCT) || !(dsr & ECPP_nERR)) { 1735 status = PRN_ONLINE; 1736 } else { 1737 status = PRN_ONLINE | PRN_READY; 1738 } 1739 } else { 1740 status = PRN_ONLINE | PRN_READY; 1741 } 1742 1743 mutex_exit(&pp->umutex); 1744 1745 ecpp_putioc_copyout(q, mp, &status, sizeof (status)); 1746 break; 1747 } 1748 1749 case PRNIOC_GET_1284_STATUS: { 1750 uint8_t dsr; 1751 uchar_t status; 1752 1753 mutex_enter(&pp->umutex); 1754 1755 /* status only makes sense in Centronics & Compat mode */ 1756 if (pp->current_mode != ECPP_COMPAT_MODE && 1757 pp->current_mode != ECPP_CENTRONICS) { 1758 mutex_exit(&pp->umutex); 1759 ecpp_nack_ioctl(q, mp, EINVAL); 1760 break; 1761 } 1762 1763 dsr = DSR_READ(pp); /* read status */ 1764 1765 mutex_exit(&pp->umutex); 1766 1767 ecpp_error(pp->dip, "PRNIOC_GET_STATUS: %x\n", dsr); 1768 1769 status = (dsr & (ECPP_SLCT | ECPP_PE | ECPP_nERR)) | 1770 (~dsr & ECPP_nBUSY); 1771 1772 ecpp_putioc_copyout(q, mp, &status, sizeof (status)); 1773 break; 1774 } 1775 1776 case ECPPIOC_GETDEVID: 1777 ecpp_putioc_stateful_copyin(q, mp, 1778 sizeof (struct ecpp_device_id)); 1779 break; 1780 1781 case PRNIOC_GET_1284_DEVID: 1782 ecpp_putioc_stateful_copyin(q, mp, 1783 sizeof (struct prn_1284_device_id)); 1784 break; 1785 1786 case PRNIOC_GET_IFINFO: 1787 ecpp_putioc_stateful_copyin(q, mp, 1788 sizeof (struct prn_interface_info)); 1789 break; 1790 1791 default: 1792 ecpp_error(pp->dip, "putioc: unknown IOCTL: %x\n", 1793 iocbp->ioc_cmd); 1794 ecpp_nack_ioctl(q, mp, EINVAL); 1795 break; 1796 } 1797 } 1798 1799 /* 1800 * allocate mblk and copyout the requested number of bytes 1801 */ 1802 static void 1803 ecpp_putioc_copyout(queue_t *q, mblk_t *mp, void *buf, int len) 1804 { 1805 mblk_t *tmp; 1806 1807 if ((tmp = allocb(len, BPRI_MED)) == NULL) { 1808 ecpp_nack_ioctl(q, mp, ENOSR); 1809 return; 1810 } 1811 1812 bcopy(buf, tmp->b_wptr, len); 1813 1814 mcopyout(mp, NULL, len, NULL, tmp); 1815 qreply(q, mp); 1816 } 1817 1818 /* 1819 * copyin the structure using struct ecpp_copystate 1820 */ 1821 static void 1822 ecpp_putioc_stateful_copyin(queue_t *q, mblk_t *mp, size_t size) 1823 { 1824 mblk_t *tmp; 1825 struct ecpp_copystate *stp; 1826 1827 if ((tmp = allocb(sizeof (struct ecpp_copystate), BPRI_MED)) == NULL) { 1828 ecpp_nack_ioctl(q, mp, EAGAIN); 1829 return; 1830 } 1831 1832 stp = (struct ecpp_copystate *)tmp->b_rptr; 1833 stp->state = ECPP_STRUCTIN; 1834 stp->uaddr = *(caddr_t *)mp->b_cont->b_rptr; 1835 1836 tmp->b_wptr += sizeof (struct ecpp_copystate); 1837 1838 mcopyin(mp, tmp, size, stp->uaddr); 1839 qreply(q, mp); 1840 } 1841 1842 /* 1843 * read queue is only used when the peripheral sends data faster, 1844 * then the application consumes it; 1845 * once the low water mark is reached, this routine will be scheduled 1846 */ 1847 static int 1848 ecpp_rsrv(queue_t *q) 1849 { 1850 struct msgb *mp; 1851 1852 /* 1853 * send data upstream until next queue is full or the queue is empty 1854 */ 1855 while (canputnext(q) && (mp = getq(q))) { 1856 putnext(q, mp); 1857 } 1858 1859 /* 1860 * if there is still space on the queue, enable backchannel 1861 */ 1862 if (canputnext(q)) { 1863 struct ecppunit *pp = (struct ecppunit *)q->q_ptr; 1864 1865 mutex_enter(&pp->umutex); 1866 1867 if (pp->e_busy == ECPP_IDLE) { 1868 (void) ecpp_idle_phase(pp); 1869 cv_signal(&pp->pport_cv); /* signal ecpp_close() */ 1870 } 1871 1872 mutex_exit(&pp->umutex); 1873 } 1874 1875 return (0); 
1876 } 1877 1878 static int 1879 ecpp_wsrv(queue_t *q) 1880 { 1881 struct ecppunit *pp = (struct ecppunit *)q->q_ptr; 1882 struct msgb *mp; 1883 size_t len, total_len; 1884 size_t my_ioblock_sz; 1885 caddr_t my_ioblock; 1886 caddr_t start_addr; 1887 1888 mutex_enter(&pp->umutex); 1889 1890 ecpp_error(pp->dip, "ecpp_wsrv: e_busy=%x\n", pp->e_busy); 1891 1892 /* if channel is actively doing work, wait till completed */ 1893 if (pp->e_busy == ECPP_BUSY || pp->e_busy == ECPP_FLUSH) { 1894 mutex_exit(&pp->umutex); 1895 return (0); 1896 } else if (pp->suspended == TRUE) { 1897 /* 1898 * if the system is about to suspend and ecpp_detach() 1899 * is blocked due to active transfers, wake it up and exit 1900 */ 1901 cv_signal(&pp->pport_cv); 1902 mutex_exit(&pp->umutex); 1903 return (0); 1904 } 1905 1906 /* peripheral status should be okay before starting transfer */ 1907 if (pp->e_busy == ECPP_ERR) { 1908 if (ecpp_check_status(pp) == FAILURE) { 1909 if (pp->wsrv_timer_id == 0) { 1910 ecpp_error(pp->dip, "wsrv: start wrsv_timer\n"); 1911 pp->wsrv_timer_id = timeout(ecpp_wsrv_timer, 1912 (caddr_t)pp, 1913 drv_usectohz(pp->wsrv_retry * 1000)); 1914 } else { 1915 ecpp_error(pp->dip, 1916 "ecpp_wsrv: wrsv_timer is active\n"); 1917 } 1918 1919 mutex_exit(&pp->umutex); 1920 return (0); 1921 } else { 1922 pp->e_busy = ECPP_IDLE; 1923 } 1924 } 1925 1926 my_ioblock = pp->ioblock; 1927 my_ioblock_sz = IO_BLOCK_SZ; 1928 1929 /* 1930 * it`s important to null pp->msg here, 1931 * cleaning up from the previous transfer attempts 1932 */ 1933 pp->msg = NULL; 1934 1935 start_addr = NULL; 1936 len = total_len = 0; 1937 /* 1938 * The following loop is implemented to gather the 1939 * many small writes that the lp subsystem makes and 1940 * compile them into one large dma transfer. The len and 1941 * total_len variables are a running count of the number of 1942 * bytes that have been gathered. They are bcopied to the 1943 * ioblock buffer. The pp->e_busy is set to E_BUSY as soon as 1944 * we start gathering packets to indicate the following transfer. 1945 */ 1946 while (mp = getq(q)) { 1947 switch (DB_TYPE(mp)) { 1948 case M_DATA: 1949 pp->e_busy = ECPP_BUSY; 1950 len = mp->b_wptr - mp->b_rptr; 1951 1952 if ((total_len == 0) && (len >= my_ioblock_sz)) { 1953 /* 1954 * if the first M_DATA is bigger than ioblock, 1955 * just use this mblk and start the transfer 1956 */ 1957 total_len = len; 1958 start_addr = (caddr_t)mp->b_rptr; 1959 pp->msg = mp; 1960 goto breakout; 1961 } else if (total_len + len > my_ioblock_sz) { 1962 /* 1963 * current M_DATA does not fit in ioblock, 1964 * put it back and start the transfer 1965 */ 1966 (void) putbq(q, mp); 1967 goto breakout; 1968 } else { 1969 /* 1970 * otherwise add data to ioblock and free mblk 1971 */ 1972 bcopy(mp->b_rptr, my_ioblock, len); 1973 my_ioblock += len; 1974 total_len += len; 1975 start_addr = (caddr_t)pp->ioblock; 1976 freemsg(mp); 1977 } 1978 break; 1979 1980 case M_IOCTL: 1981 /* 1982 * Assume a simple loopback test: an application 1983 * writes data into the TFIFO, reads it using 1984 * ECPPIOC_GETDATA and compares. If the transfer 1985 * times out (which is only possible on Grover), 1986 * the ioctl might be processed before the data 1987 * got to the TFIFO, which leads to miscompare. 1988 * So if we met ioctl, postpone it until after xfer. 
1989 */ 1990 if (total_len > 0) { 1991 (void) putbq(q, mp); 1992 goto breakout; 1993 } 1994 1995 ecpp_error(pp->dip, "M_IOCTL.\n"); 1996 1997 mutex_exit(&pp->umutex); 1998 1999 ecpp_putioc(q, mp); 2000 2001 mutex_enter(&pp->umutex); 2002 2003 break; 2004 2005 case M_IOCDATA: { 2006 struct copyresp *csp = (struct copyresp *)mp->b_rptr; 2007 2008 ecpp_error(pp->dip, "M_IOCDATA\n"); 2009 2010 /* 2011 * If copy request failed, quit now 2012 */ 2013 if (csp->cp_rval != 0) { 2014 freemsg(mp); 2015 break; 2016 } 2017 2018 switch (csp->cp_cmd) { 2019 case ECPPIOC_SETPARMS: 2020 case ECPPIOC_SETREGS: 2021 case ECPPIOC_SETPORT: 2022 case ECPPIOC_SETDATA: 2023 case ECPPIOC_GETDEVID: 2024 case PRNIOC_SET_IFCAP: 2025 case PRNIOC_GET_1284_DEVID: 2026 case PRNIOC_SET_TIMEOUTS: 2027 case PRNIOC_GET_IFINFO: 2028 ecpp_srvioc(q, mp); 2029 break; 2030 2031 default: 2032 ecpp_nack_ioctl(q, mp, EINVAL); 2033 break; 2034 } 2035 2036 break; 2037 } 2038 2039 case M_CTL: 2040 if (pp->e_busy != ECPP_IDLE) { 2041 ecpp_error(pp->dip, "wsrv: M_CTL postponed\n"); 2042 (void) putbq(q, mp); 2043 goto breakout; 2044 } else { 2045 ecpp_error(pp->dip, "wsrv: M_CTL\n"); 2046 } 2047 2048 /* sanity check */ 2049 if ((mp->b_wptr - mp->b_rptr != sizeof (int)) || 2050 (*(int *)mp->b_rptr != ECPP_BACKCHANNEL)) { 2051 ecpp_error(pp->dip, "wsrv: bogus M_CTL"); 2052 freemsg(mp); 2053 break; 2054 } else { 2055 freemsg(mp); 2056 } 2057 2058 /* This was a backchannel request */ 2059 (void) ecpp_peripheral2host(pp); 2060 2061 /* exit if transfer have been initiated */ 2062 if (pp->e_busy == ECPP_BUSY) { 2063 goto breakout; 2064 } 2065 break; 2066 2067 case M_READ: 2068 pp->nread += *(size_t *)mp->b_rptr; 2069 freemsg(mp); 2070 ecpp_error(pp->dip, "wsrv: M_READ %d", pp->nread); 2071 break; 2072 2073 default: 2074 ecpp_error(pp->dip, "wsrv: should never get here\n"); 2075 freemsg(mp); 2076 break; 2077 } 2078 } 2079 breakout: 2080 /* 2081 * If total_len > 0 then start the transfer, otherwise goto idle state 2082 */ 2083 if (total_len > 0) { 2084 ecpp_error(pp->dip, "wsrv:starting: total_len=%d\n", total_len); 2085 pp->e_busy = ECPP_BUSY; 2086 ecpp_start(pp, start_addr, total_len); 2087 } else { 2088 ecpp_error(pp->dip, "wsrv:finishing: ebusy=%x\n", pp->e_busy); 2089 2090 /* IDLE if xfer_timeout, or FIFO_EMPTY */ 2091 if (pp->e_busy == ECPP_IDLE) { 2092 (void) ecpp_idle_phase(pp); 2093 cv_signal(&pp->pport_cv); /* signal ecpp_close() */ 2094 } 2095 } 2096 2097 mutex_exit(&pp->umutex); 2098 return (1); 2099 } 2100 2101 /* 2102 * Ioctl processor for queued ioctl data transfer messages. 
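 *
 * For illustration only -- a minimal user-level sketch of how this path
 * is typically reached via ECPPIOC_SETPARMS (device path and error
 * handling are assumptions, not something this driver mandates):
 *
 *	struct ecpp_transfer_parms xp;
 *	int fd = open("/dev/ecpp0", O_RDWR);
 *
 *	xp.write_timeout = 60;		seconds, 0 < t < ECPP_MAX_TIMEOUT
 *	xp.mode = ECPP_ECP_MODE;	or COMPAT/NIBBLE/DIAG/CENTRONICS
 *	if (ioctl(fd, ECPPIOC_SETPARMS, &xp) == -1)
 *		perror("SETPARMS");	EPROTONOSUPPORT: negotiation failed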
2103 */ 2104 static void 2105 ecpp_srvioc(queue_t *q, mblk_t *mp) 2106 { 2107 struct iocblk *iocbp; 2108 struct ecppunit *pp; 2109 2110 iocbp = (struct iocblk *)mp->b_rptr; 2111 pp = (struct ecppunit *)q->q_ptr; 2112 2113 switch (iocbp->ioc_cmd) { 2114 case ECPPIOC_SETPARMS: { 2115 struct ecpp_transfer_parms *xferp; 2116 2117 xferp = (struct ecpp_transfer_parms *)mp->b_cont->b_rptr; 2118 2119 if (xferp->write_timeout <= 0 || 2120 xferp->write_timeout >= ECPP_MAX_TIMEOUT) { 2121 ecpp_nack_ioctl(q, mp, EINVAL); 2122 break; 2123 } 2124 2125 if (!((xferp->mode == ECPP_CENTRONICS) || 2126 (xferp->mode == ECPP_COMPAT_MODE) || 2127 (xferp->mode == ECPP_NIBBLE_MODE) || 2128 (xferp->mode == ECPP_ECP_MODE) || 2129 (xferp->mode == ECPP_DIAG_MODE))) { 2130 ecpp_nack_ioctl(q, mp, EINVAL); 2131 break; 2132 } 2133 2134 pp->xfer_parms = *xferp; 2135 pp->prn_timeouts.tmo_forward = pp->xfer_parms.write_timeout; 2136 2137 ecpp_error(pp->dip, "srvioc: current_mode =%x new mode=%x\n", 2138 pp->current_mode, pp->xfer_parms.mode); 2139 2140 if (ecpp_mode_negotiation(pp, pp->xfer_parms.mode) == FAILURE) { 2141 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2142 } else { 2143 /* 2144 * mode nego was a success. If nibble mode check 2145 * back channel and set into REVIDLE. 2146 */ 2147 if ((pp->current_mode == ECPP_NIBBLE_MODE) && 2148 (read_nibble_backchan(pp) == FAILURE)) { 2149 /* 2150 * problems reading the backchannel 2151 * returned to centronics; 2152 * ioctl fails. 2153 */ 2154 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2155 break; 2156 } 2157 2158 ecpp_ack_ioctl(q, mp); 2159 } 2160 if (pp->current_mode != ECPP_DIAG_MODE) { 2161 pp->port = ECPP_PORT_DMA; 2162 } else { 2163 pp->port = ECPP_PORT_PIO; 2164 } 2165 2166 pp->xfer_parms.mode = pp->current_mode; 2167 2168 break; 2169 } 2170 2171 case ECPPIOC_SETREGS: { 2172 struct ecpp_regs *rg; 2173 uint8_t dcr; 2174 2175 rg = (struct ecpp_regs *)mp->b_cont->b_rptr; 2176 2177 /* must be in diagnostic mode for these commands to work */ 2178 if (pp->current_mode != ECPP_DIAG_MODE) { 2179 ecpp_nack_ioctl(q, mp, EINVAL); 2180 break; 2181 } 2182 2183 /* bits 4-7 must be 1 or return EINVAL */ 2184 if ((rg->dcr & ECPP_SETREGS_DCR_MASK) != 2185 ECPP_SETREGS_DCR_MASK) { 2186 ecpp_nack_ioctl(q, mp, EINVAL); 2187 break; 2188 } 2189 2190 /* get the old dcr */ 2191 dcr = DCR_READ(pp) & ~ECPP_REV_DIR; 2192 /* get the new dcr */ 2193 dcr = (dcr & ECPP_SETREGS_DCR_MASK) | 2194 (rg->dcr & ~ECPP_SETREGS_DCR_MASK); 2195 DCR_WRITE(pp, dcr); 2196 ecpp_error(pp->dip, "ECPPIOC_SETREGS:dcr=%x\n", dcr); 2197 ecpp_ack_ioctl(q, mp); 2198 break; 2199 } 2200 2201 case ECPPIOC_SETPORT: { 2202 uchar_t *port; 2203 2204 port = (uchar_t *)mp->b_cont->b_rptr; 2205 2206 /* must be in diagnostic mode for these commands to work */ 2207 if (pp->current_mode != ECPP_DIAG_MODE) { 2208 ecpp_nack_ioctl(q, mp, EINVAL); 2209 break; 2210 } 2211 2212 switch (*port) { 2213 case ECPP_PORT_PIO: 2214 /* put superio into PIO mode */ 2215 ECR_WRITE(pp, 2216 ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 2217 pp->port = *port; 2218 ecpp_ack_ioctl(q, mp); 2219 break; 2220 2221 case ECPP_PORT_TDMA: 2222 ecpp_error(pp->dip, "SETPORT: to TDMA\n"); 2223 pp->tfifo_intr = 1; 2224 /* change to mode 110 */ 2225 ECR_WRITE(pp, 2226 ECR_mode_110 | ECPP_INTR_MASK | ECPP_INTR_SRV); 2227 pp->port = *port; 2228 ecpp_ack_ioctl(q, mp); 2229 break; 2230 2231 default: 2232 ecpp_nack_ioctl(q, mp, EINVAL); 2233 } 2234 2235 break; 2236 } 2237 2238 case ECPPIOC_SETDATA: { 2239 uchar_t *data; 2240 2241 data = (uchar_t *)mp->b_cont->b_rptr; 2242 
2243 /* must be in diagnostic mode for these commands to work */ 2244 if (pp->current_mode != ECPP_DIAG_MODE) { 2245 ecpp_nack_ioctl(q, mp, EINVAL); 2246 break; 2247 } 2248 2249 switch (pp->port) { 2250 case ECPP_PORT_PIO: 2251 DATAR_WRITE(pp, *data); 2252 ecpp_ack_ioctl(q, mp); 2253 break; 2254 2255 case ECPP_PORT_TDMA: 2256 TFIFO_WRITE(pp, *data); 2257 ecpp_ack_ioctl(q, mp); 2258 break; 2259 2260 default: 2261 ecpp_nack_ioctl(q, mp, EINVAL); 2262 } 2263 2264 break; 2265 } 2266 2267 case ECPPIOC_GETDEVID: { 2268 struct copyresp *csp; 2269 struct ecpp_copystate *stp; 2270 struct ecpp_device_id *dp; 2271 struct ecpp_device_id id; 2272 2273 csp = (struct copyresp *)mp->b_rptr; 2274 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2275 dp = (struct ecpp_device_id *)mp->b_cont->b_rptr; 2276 2277 #ifdef _MULTI_DATAMODEL 2278 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2279 struct ecpp_device_id32 *dp32; 2280 2281 dp32 = (struct ecpp_device_id32 *)dp; 2282 id.mode = dp32->mode; 2283 id.len = dp32->len; 2284 id.addr = (char *)(uintptr_t)dp32->addr; 2285 } else { 2286 #endif /* _MULTI_DATAMODEL */ 2287 id = *dp; 2288 #ifdef _MULTI_DATAMODEL 2289 } 2290 #endif /* _MULTI_DATAMODEL */ 2291 2292 ecpp_srvioc_devid(q, mp, &id, &stp->un.devid.rlen); 2293 break; 2294 } 2295 2296 case PRNIOC_GET_1284_DEVID: { 2297 struct copyresp *csp; 2298 struct ecpp_copystate *stp; 2299 struct prn_1284_device_id *dp; 2300 struct ecpp_device_id id; 2301 2302 csp = (struct copyresp *)mp->b_rptr; 2303 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2304 dp = (struct prn_1284_device_id *)mp->b_cont->b_rptr; 2305 2306 /* imitate struct ecpp_device_id */ 2307 id.mode = ECPP_NIBBLE_MODE; 2308 2309 #ifdef _MULTI_DATAMODEL 2310 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2311 struct prn_1284_device_id32 *dp32; 2312 2313 dp32 = (struct prn_1284_device_id32 *)dp; 2314 id.len = dp32->id_len; 2315 id.addr = (char *)(uintptr_t)dp32->id_data; 2316 } else { 2317 #endif /* _MULTI_DATAMODEL */ 2318 id.len = dp->id_len; 2319 id.addr = (char *)dp->id_data; 2320 #ifdef _MULTI_DATAMODEL 2321 } 2322 #endif /* _MULTI_DATAMODEL */ 2323 2324 ecpp_srvioc_devid(q, mp, &id, 2325 (int *)&stp->un.prn_devid.id_rlen); 2326 break; 2327 } 2328 2329 case PRNIOC_SET_IFCAP: { 2330 uint_t ifcap, new_ifcap; 2331 2332 ifcap = ecpp_get_prn_ifcap(pp); 2333 new_ifcap = *(uint_t *)mp->b_cont->b_rptr; 2334 2335 if (ifcap == new_ifcap) { 2336 ecpp_ack_ioctl(q, mp); 2337 break; 2338 } 2339 2340 /* only changing PRN_BIDI is supported */ 2341 if ((ifcap ^ new_ifcap) & ~PRN_BIDI) { 2342 ecpp_nack_ioctl(q, mp, EINVAL); 2343 break; 2344 } 2345 2346 if (new_ifcap & PRN_BIDI) { /* go bidirectional */ 2347 ecpp_default_negotiation(pp); 2348 } else { /* go unidirectional */ 2349 (void) ecpp_mode_negotiation(pp, ECPP_CENTRONICS); 2350 } 2351 2352 ecpp_ack_ioctl(q, mp); 2353 break; 2354 } 2355 2356 case PRNIOC_SET_TIMEOUTS: { 2357 struct prn_timeouts *prn_timeouts; 2358 2359 prn_timeouts = (struct prn_timeouts *)mp->b_cont->b_rptr; 2360 2361 if (prn_timeouts->tmo_forward > ECPP_MAX_TIMEOUT) { 2362 ecpp_nack_ioctl(q, mp, EINVAL); 2363 break; 2364 } 2365 2366 pp->prn_timeouts = *prn_timeouts; 2367 pp->xfer_parms.write_timeout = (int)prn_timeouts->tmo_forward; 2368 2369 ecpp_ack_ioctl(q, mp); 2370 break; 2371 } 2372 2373 case PRNIOC_GET_IFINFO: 2374 ecpp_srvioc_prnif(q, mp); 2375 break; 2376 2377 default: /* unexpected ioctl type */ 2378 ecpp_nack_ioctl(q, mp, EINVAL); 2379 break; 2380 } 2381 } 2382 2383 static void 2384 ecpp_srvioc_devid(queue_t *q, mblk_t *mp, 
struct ecpp_device_id *id, int *rlen) 2385 { 2386 struct ecppunit *pp; 2387 struct copyresp *csp; 2388 struct ecpp_copystate *stp; 2389 int error; 2390 int len; 2391 int mode; 2392 mblk_t *datamp; 2393 2394 pp = (struct ecppunit *)q->q_ptr; 2395 csp = (struct copyresp *)mp->b_rptr; 2396 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2397 mode = id->mode; 2398 2399 /* check arguments */ 2400 if ((mode < ECPP_CENTRONICS) || (mode > ECPP_ECP_MODE)) { 2401 ecpp_error(pp->dip, "ecpp_srvioc_devid: mode=%x, len=%x\n", 2402 mode, id->len); 2403 ecpp_nack_ioctl(q, mp, EINVAL); 2404 return; 2405 } 2406 2407 /* Currently only Nibble mode is supported */ 2408 if (mode != ECPP_NIBBLE_MODE) { 2409 ecpp_nack_ioctl(q, mp, EPROTONOSUPPORT); 2410 return; 2411 } 2412 2413 if ((id->addr == NULL) && (id->len != 0)) { 2414 ecpp_nack_ioctl(q, mp, EFAULT); 2415 return; 2416 } 2417 2418 /* read device ID length */ 2419 if (error = ecpp_getdevid(pp, NULL, &len, mode)) { 2420 ecpp_nack_ioctl(q, mp, error); 2421 goto breakout; 2422 } 2423 2424 /* don't take into account two length bytes */ 2425 len -= 2; 2426 *rlen = len; 2427 2428 /* limit transfer to user buffer length */ 2429 if (id->len < len) { 2430 len = id->len; 2431 } 2432 2433 if (len == 0) { 2434 /* just return rlen */ 2435 stp->state = ECPP_ADDROUT; 2436 ecpp_wput_iocdata_devid(q, mp, 2437 (uintptr_t)rlen - (uintptr_t)&stp->un); 2438 goto breakout; 2439 } 2440 2441 if ((datamp = allocb(len, BPRI_MED)) == NULL) { 2442 ecpp_nack_ioctl(q, mp, ENOSR); 2443 goto breakout; 2444 } 2445 2446 /* read ID string */ 2447 error = ecpp_getdevid(pp, datamp->b_rptr, &len, mode); 2448 if (error) { 2449 freemsg(datamp); 2450 ecpp_nack_ioctl(q, mp, error); 2451 goto breakout; 2452 } else { 2453 datamp->b_wptr += len; 2454 2455 stp->state = ECPP_ADDROUT; 2456 mcopyout(mp, csp->cp_private, len, id->addr, datamp); 2457 qreply(q, mp); 2458 } 2459 2460 return; 2461 2462 breakout: 2463 (void) ecpp_1284_termination(pp); 2464 } 2465 2466 /* 2467 * PRNIOC_GET_IFINFO: return prnio interface info string 2468 */ 2469 static void 2470 ecpp_srvioc_prnif(queue_t *q, mblk_t *mp) 2471 { 2472 struct copyresp *csp; 2473 struct ecpp_copystate *stp; 2474 uint_t len; 2475 struct prn_interface_info *ip; 2476 struct prn_interface_info info; 2477 mblk_t *datamp; 2478 #ifdef _MULTI_DATAMODEL 2479 struct iocblk *iocbp = (struct iocblk *)mp->b_rptr; 2480 #endif 2481 2482 csp = (struct copyresp *)mp->b_rptr; 2483 stp = (struct ecpp_copystate *)csp->cp_private->b_rptr; 2484 ip = (struct prn_interface_info *)mp->b_cont->b_rptr; 2485 2486 #ifdef _MULTI_DATAMODEL 2487 if (IOC_CONVERT_FROM(iocbp) == IOC_ILP32) { 2488 struct prn_interface_info32 *ip32; 2489 2490 ip32 = (struct prn_interface_info32 *)ip; 2491 info.if_len = ip32->if_len; 2492 info.if_data = (char *)(uintptr_t)ip32->if_data; 2493 } else { 2494 #endif /* _MULTI_DATAMODEL */ 2495 info = *ip; 2496 #ifdef _MULTI_DATAMODEL 2497 } 2498 #endif /* _MULTI_DATAMODEL */ 2499 2500 len = strlen(prn_ifinfo); 2501 stp->un.prn_if.if_rlen = len; 2502 stp->state = ECPP_ADDROUT; 2503 2504 /* check arguments */ 2505 if ((info.if_data == NULL) && (info.if_len != 0)) { 2506 ecpp_nack_ioctl(q, mp, EFAULT); 2507 return; 2508 } 2509 2510 if (info.if_len == 0) { 2511 /* just copyout rlen */ 2512 ecpp_wput_iocdata_devid(q, mp, 2513 offsetof(struct prn_interface_info, if_rlen)); 2514 return; 2515 } 2516 2517 /* if needed, trim to the buffer size */ 2518 if (len > info.if_len) { 2519 len = info.if_len; 2520 } 2521 2522 if ((datamp = allocb(len, BPRI_MED)) == NULL) 
{ 2523 ecpp_nack_ioctl(q, mp, ENOSR); 2524 return; 2525 } 2526 2527 bcopy(&prn_ifinfo[0], datamp->b_wptr, len); 2528 datamp->b_wptr += len; 2529 2530 mcopyout(mp, csp->cp_private, len, info.if_data, datamp); 2531 qreply(q, mp); 2532 } 2533 2534 static void 2535 ecpp_flush(struct ecppunit *pp, int cmd) 2536 { 2537 queue_t *q; 2538 uint8_t ecr, dcr; 2539 timeout_id_t timeout_id, fifo_timer_id, wsrv_timer_id; 2540 2541 ASSERT(mutex_owned(&pp->umutex)); 2542 2543 if (!(cmd & FWRITE)) { 2544 return; 2545 } 2546 2547 q = pp->writeq; 2548 timeout_id = fifo_timer_id = wsrv_timer_id = 0; 2549 2550 ecpp_error(pp->dip, "ecpp_flush e_busy=%x\n", pp->e_busy); 2551 2552 /* if there is an ongoing DMA, it needs to be turned off. */ 2553 switch (pp->e_busy) { 2554 case ECPP_BUSY: 2555 /* 2556 * Change the port status to ECPP_FLUSH to 2557 * indicate to ecpp_wsrv that the wq is being flushed. 2558 */ 2559 pp->e_busy = ECPP_FLUSH; 2560 2561 /* 2562 * dma_cancelled indicates to ecpp_isr() that we have 2563 * turned off the DMA. Since the mutex is held, ecpp_isr() 2564 * may be blocked. Once ecpp_flush() finishes and ecpp_isr() 2565 * gains the mutex, ecpp_isr() will have a _reset_ DMAC. Most 2566 * significantly, the DMAC will be reset after ecpp_isr() was 2567 * invoked. Therefore we need to have a flag "dma_cancelled" 2568 * to signify when the described condition has occured. If 2569 * ecpp_isr() notes a dma_cancelled, it will ignore the DMAC csr 2570 * and simply claim the interupt. 2571 */ 2572 2573 pp->dma_cancelled = TRUE; 2574 2575 /* either DMA or PIO transfer */ 2576 if (COMPAT_DMA(pp) || 2577 (pp->current_mode == ECPP_ECP_MODE) || 2578 (pp->current_mode == ECPP_DIAG_MODE)) { 2579 /* 2580 * if the bcr is zero, then DMA is complete and 2581 * we are waiting for the fifo to drain. Therefore, 2582 * turn off dma. 2583 */ 2584 if (ECPP_DMA_STOP(pp, NULL) == FAILURE) { 2585 ecpp_error(pp->dip, 2586 "ecpp_flush: dma_stop failed.\n"); 2587 } 2588 2589 /* 2590 * If the status of the port is ECPP_BUSY, 2591 * the DMA is stopped by either explicitly above, or by 2592 * ecpp_isr() but the FIFO hasn't drained yet. In either 2593 * case, we need to unbind the dma mappings. 2594 */ 2595 if (ddi_dma_unbind_handle( 2596 pp->dma_handle) != DDI_SUCCESS) 2597 ecpp_error(pp->dip, 2598 "ecpp_flush: unbind failed.\n"); 2599 2600 if (pp->msg != NULL) { 2601 freemsg(pp->msg); 2602 pp->msg = NULL; 2603 } 2604 } else { 2605 /* 2606 * PIO transfer: disable nAck interrups 2607 */ 2608 dcr = DCR_READ(pp); 2609 dcr &= ~(ECPP_REV_DIR | ECPP_INTR_EN); 2610 DCR_WRITE(pp, dcr); 2611 ECPP_MASK_INTR(pp); 2612 } 2613 2614 /* 2615 * The transfer is cleaned up. There may or may not be data 2616 * in the fifo. We don't care at this point. Ie. SuperIO may 2617 * transfer the remaining bytes in the fifo or not. it doesn't 2618 * matter. All that is important at this stage is that no more 2619 * fifo timers are started. 2620 */ 2621 2622 timeout_id = pp->timeout_id; 2623 fifo_timer_id = pp->fifo_timer_id; 2624 pp->timeout_id = pp->fifo_timer_id = 0; 2625 pp->softintr_pending = 0; 2626 2627 break; 2628 2629 case ECPP_ERR: 2630 /* 2631 * Change the port status to ECPP_FLUSH to 2632 * indicate to ecpp_wsrv that the wq is being flushed. 2633 */ 2634 pp->e_busy = ECPP_FLUSH; 2635 2636 /* 2637 * Most likely there are mblks in the queue, 2638 * but the driver can not transmit because 2639 * of the bad port status. In this case, 2640 * ecpp_flush() should make sure ecpp_wsrv_timer() 2641 * is turned off. 
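		 *
		 * Note that only the timer id is captured here; the actual
		 * untimeout() is issued near the end of this routine, after
		 * umutex has been dropped, so that an ecpp_wsrv_timer()
		 * instance already blocked on the mutex cannot deadlock
		 * against this thread.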
2642 */ 2643 wsrv_timer_id = pp->wsrv_timer_id; 2644 pp->wsrv_timer_id = 0; 2645 2646 break; 2647 2648 case ECPP_IDLE: 2649 /* No work to do. Ready to flush */ 2650 break; 2651 2652 default: 2653 ecpp_error(pp->dip, 2654 "ecpp_flush: illegal state %x\n", pp->e_busy); 2655 } 2656 2657 /* in DIAG mode clear TFIFO if needed */ 2658 if (pp->current_mode == ECPP_DIAG_MODE) { 2659 ecr = ECR_READ(pp); 2660 if (!(ecr & ECPP_FIFO_EMPTY)) { 2661 ECR_WRITE(pp, 2662 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 2663 ECR_WRITE(pp, ecr); 2664 } 2665 } 2666 2667 /* Discard all messages on the output queue. */ 2668 flushq(q, FLUSHDATA); 2669 2670 /* The port is no longer flushing or dma'ing for that matter. */ 2671 pp->e_busy = ECPP_IDLE; 2672 2673 /* Set the right phase */ 2674 if (pp->current_mode == ECPP_ECP_MODE) { 2675 if (pp->current_phase == ECPP_PHASE_ECP_REV_XFER) { 2676 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 2677 } else { 2678 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 2679 } 2680 } 2681 2682 /* cancel timeouts if any */ 2683 mutex_exit(&pp->umutex); 2684 2685 if (timeout_id) { 2686 (void) untimeout(timeout_id); 2687 } 2688 if (fifo_timer_id) { 2689 (void) untimeout(fifo_timer_id); 2690 } 2691 if (wsrv_timer_id) { 2692 (void) untimeout(wsrv_timer_id); 2693 } 2694 2695 mutex_enter(&pp->umutex); 2696 2697 cv_signal(&pp->pport_cv); /* wake up ecpp_close() */ 2698 } 2699 2700 static void 2701 ecpp_start(struct ecppunit *pp, caddr_t addr, size_t len) 2702 { 2703 ASSERT(mutex_owned(&pp->umutex)); 2704 ASSERT(pp->e_busy == ECPP_BUSY); 2705 2706 ecpp_error(pp->dip, 2707 "ecpp_start:current_mode=%x,current_phase=%x,ecr=%x,len=%d\n", 2708 pp->current_mode, pp->current_phase, ECR_READ(pp), len); 2709 2710 pp->dma_dir = DDI_DMA_WRITE; /* this is a forward transfer */ 2711 2712 switch (pp->current_mode) { 2713 case ECPP_NIBBLE_MODE: 2714 (void) ecpp_1284_termination(pp); 2715 2716 /* After termination we are either Compatible or Centronics */ 2717 2718 /* FALLTHRU */ 2719 2720 case ECPP_CENTRONICS: 2721 case ECPP_COMPAT_MODE: 2722 if (pp->io_mode == ECPP_DMA) { 2723 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2724 return; 2725 } 2726 } else { 2727 /* PIO mode */ 2728 if (ecpp_prep_pio_xfer(pp, addr, len) == FAILURE) { 2729 return; 2730 } 2731 (void) ecpp_pio_writeb(pp); 2732 } 2733 break; 2734 2735 case ECPP_DIAG_MODE: { 2736 int oldlen; 2737 2738 /* put superio into TFIFO mode, if not already */ 2739 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110); 2740 /* 2741 * DMA would block if the TFIFO is not empty 2742 * if by this moment nobody read these bytes, they`re gone 2743 */ 2744 drv_usecwait(1); 2745 if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) { 2746 ecpp_error(pp->dip, 2747 "ecpp_start: TFIFO not empty, clearing\n"); 2748 ECR_WRITE(pp, 2749 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 2750 ECR_WRITE(pp, 2751 ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_110); 2752 } 2753 2754 /* we can DMA at most 16 bytes into TFIFO */ 2755 oldlen = len; 2756 if (len > ECPP_FIFO_SZ) { 2757 len = ECPP_FIFO_SZ; 2758 } 2759 2760 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2761 return; 2762 } 2763 2764 /* put the rest of data back on the queue */ 2765 if (oldlen > len) { 2766 ecpp_putback_untransfered(pp, addr + len, oldlen - len); 2767 } 2768 2769 break; 2770 } 2771 2772 case ECPP_ECP_MODE: 2773 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE || 2774 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 2775 2776 /* if in Reverse Phase negotiate to Forward */ 2777 if (pp->current_phase == 
ECPP_PHASE_ECP_REV_IDLE) { 2778 if (ecp_reverse2forward(pp) == FAILURE) { 2779 if (pp->msg) { 2780 (void) putbq(pp->writeq, pp->msg); 2781 } else { 2782 ecpp_putback_untransfered(pp, 2783 addr, len); 2784 } 2785 } 2786 } 2787 2788 if (ecpp_init_dma_xfer(pp, addr, len) == FAILURE) { 2789 return; 2790 } 2791 2792 break; 2793 } 2794 2795 /* schedule transfer timeout */ 2796 pp->timeout_id = timeout(ecpp_xfer_timeout, (caddr_t)pp, 2797 pp->xfer_parms.write_timeout * drv_usectohz(1000000)); 2798 } 2799 2800 /* 2801 * Transfer a PIO "block" a byte at a time. 2802 * The block is starts at addr and ends at pp->last_byte 2803 */ 2804 static uint8_t 2805 ecpp_prep_pio_xfer(struct ecppunit *pp, caddr_t addr, size_t len) 2806 { 2807 pp->next_byte = addr; 2808 pp->last_byte = (caddr_t)((ulong_t)addr + len); 2809 2810 if (ecpp_check_status(pp) == FAILURE) { 2811 /* 2812 * if status signals are bad, do not start PIO, 2813 * put everything back on the queue. 2814 */ 2815 ecpp_error(pp->dip, 2816 "ecpp_prep_pio_xfer:suspend PIO len=%d\n", len); 2817 2818 if (pp->msg != NULL) { 2819 /* 2820 * this circumstance we want to copy the 2821 * untransfered section of msg to a new mblk, 2822 * then free the orignal one. 2823 */ 2824 ecpp_putback_untransfered(pp, 2825 (void *)pp->msg->b_rptr, len); 2826 ecpp_error(pp->dip, 2827 "ecpp_prep_pio_xfer: len1=%d\n", len); 2828 2829 freemsg(pp->msg); 2830 pp->msg = NULL; 2831 } else { 2832 ecpp_putback_untransfered(pp, pp->ioblock, len); 2833 ecpp_error(pp->dip, 2834 "ecpp_prep_pio_xfer: len2=%d\n", len); 2835 } 2836 qenable(pp->writeq); 2837 2838 return (FAILURE); 2839 } 2840 2841 pp->dma_cancelled = FALSE; 2842 2843 /* pport must be in PIO mode */ 2844 if (ecr_write(pp, ECR_mode_001 | 2845 ECPP_INTR_MASK | ECPP_INTR_SRV) != SUCCESS) { 2846 ecpp_error(pp->dip, "ecpp_prep_pio_xfer: failed w/ECR.\n"); 2847 } 2848 2849 ecpp_error(pp->dip, "ecpp_prep_pio_xfer: dcr=%x ecr=%x\n", 2850 DCR_READ(pp), ECR_READ(pp)); 2851 2852 return (SUCCESS); 2853 } 2854 2855 static uint8_t 2856 ecpp_init_dma_xfer(struct ecppunit *pp, caddr_t addr, size_t len) 2857 { 2858 uint8_t ecr_mode[] = { 2859 0, 2860 ECR_mode_010, /* Centronix */ 2861 ECR_mode_010, /* Compat */ 2862 0, /* Byte */ 2863 0, /* Nibble */ 2864 ECR_mode_011, /* ECP */ 2865 0, /* Failure */ 2866 ECR_mode_110, /* Diag */ 2867 }; 2868 uint8_t ecr; 2869 2870 ASSERT((pp->current_mode <= ECPP_DIAG_MODE) && 2871 (ecr_mode[pp->current_mode] != 0)); 2872 2873 if (ecpp_setup_dma_resources(pp, addr, len) == FAILURE) { 2874 qenable(pp->writeq); 2875 return (FAILURE); 2876 } 2877 2878 if (ecpp_check_status(pp) == FAILURE) { 2879 /* 2880 * if status signals are bad, do not start DMA, but 2881 * rather put everything back on the queue. 2882 */ 2883 ecpp_error(pp->dip, 2884 "ecpp_init_dma_xfer: suspending DMA len=%d\n", 2885 pp->dma_cookie.dmac_size); 2886 2887 if (pp->msg != NULL) { 2888 /* 2889 * this circumstance we want to copy the 2890 * untransfered section of msg to a new mblk, 2891 * then free the orignal one. 
2892 */ 2893 ecpp_putback_untransfered(pp, 2894 (void *)pp->msg->b_rptr, len); 2895 ecpp_error(pp->dip, 2896 "ecpp_init_dma_xfer:a:len=%d\n", len); 2897 2898 freemsg(pp->msg); 2899 pp->msg = NULL; 2900 } else { 2901 ecpp_putback_untransfered(pp, pp->ioblock, len); 2902 ecpp_error(pp->dip, 2903 "ecpp_init_dma_xfer:b:len=%d\n", len); 2904 } 2905 2906 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 2907 ecpp_error(pp->dip, 2908 "ecpp_init_dma_xfer: unbind FAILURE.\n"); 2909 } 2910 qenable(pp->writeq); 2911 return (FAILURE); 2912 } 2913 2914 pp->xfercnt = pp->resid = len; 2915 pp->dma_cancelled = FALSE; 2916 pp->tfifo_intr = 0; 2917 2918 /* set the right ECR mode and disable DMA */ 2919 ecr = ecr_mode[pp->current_mode]; 2920 (void) ecr_write(pp, ecr | ECPP_INTR_SRV | ECPP_INTR_MASK); 2921 2922 /* prepare DMAC for a transfer */ 2923 if (ECPP_DMA_START(pp) == FAILURE) { 2924 ecpp_error(pp->dip, "ecpp_init_dma_xfer: dma_start FAILED.\n"); 2925 return (FAILURE); 2926 } 2927 2928 /* GO! */ 2929 (void) ecr_write(pp, ecr | ECPP_DMA_ENABLE | ECPP_INTR_MASK); 2930 2931 return (SUCCESS); 2932 } 2933 2934 static uint8_t 2935 ecpp_setup_dma_resources(struct ecppunit *pp, caddr_t addr, size_t len) 2936 { 2937 int err; 2938 off_t woff; 2939 size_t wlen; 2940 2941 ASSERT(pp->dma_dir == DDI_DMA_READ || pp->dma_dir == DDI_DMA_WRITE); 2942 2943 err = ddi_dma_addr_bind_handle(pp->dma_handle, NULL, 2944 addr, len, pp->dma_dir | DDI_DMA_PARTIAL, 2945 DDI_DMA_DONTWAIT, NULL, 2946 &pp->dma_cookie, &pp->dma_cookie_count); 2947 2948 switch (err) { 2949 case DDI_DMA_MAPPED: 2950 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_MAPPED\n"); 2951 2952 pp->dma_nwin = 1; 2953 pp->dma_curwin = 1; 2954 break; 2955 2956 case DDI_DMA_PARTIAL_MAP: { 2957 ecpp_error(pp->dip, "ecpp_setup_dma: DMA_PARTIAL_MAP\n"); 2958 2959 if (ddi_dma_numwin(pp->dma_handle, 2960 &pp->dma_nwin) != DDI_SUCCESS) { 2961 (void) ddi_dma_unbind_handle(pp->dma_handle); 2962 return (FAILURE); 2963 } 2964 pp->dma_curwin = 1; 2965 2966 /* 2967 * The very first window is returned by bind_handle, 2968 * but we must do this explicitly here, otherwise 2969 * next getwin would return wrong cookie dmac_size 2970 */ 2971 if (ddi_dma_getwin(pp->dma_handle, 0, &woff, &wlen, 2972 &pp->dma_cookie, &pp->dma_cookie_count) != DDI_SUCCESS) { 2973 ecpp_error(pp->dip, 2974 "ecpp_setup_dma: ddi_dma_getwin failed!"); 2975 (void) ddi_dma_unbind_handle(pp->dma_handle); 2976 return (FAILURE); 2977 } 2978 2979 ecpp_error(pp->dip, 2980 "ecpp_setup_dma: cookies=%d, windows=%d" 2981 " addr=%lx len=%d\n", 2982 pp->dma_cookie_count, pp->dma_nwin, 2983 pp->dma_cookie.dmac_address, pp->dma_cookie.dmac_size); 2984 2985 break; 2986 } 2987 2988 default: 2989 ecpp_error(pp->dip, "ecpp_setup_dma: err=%x\n", err); 2990 return (FAILURE); 2991 } 2992 2993 return (SUCCESS); 2994 } 2995 2996 static void 2997 ecpp_ack_ioctl(queue_t *q, mblk_t *mp) 2998 { 2999 struct iocblk *iocbp; 3000 3001 mp->b_datap->db_type = M_IOCACK; 3002 mp->b_wptr = mp->b_rptr + sizeof (struct iocblk); 3003 3004 if (mp->b_cont) { 3005 freemsg(mp->b_cont); 3006 mp->b_cont = NULL; 3007 } 3008 3009 iocbp = (struct iocblk *)mp->b_rptr; 3010 iocbp->ioc_error = 0; 3011 iocbp->ioc_count = 0; 3012 iocbp->ioc_rval = 0; 3013 3014 qreply(q, mp); 3015 } 3016 3017 static void 3018 ecpp_nack_ioctl(queue_t *q, mblk_t *mp, int err) 3019 { 3020 struct iocblk *iocbp; 3021 3022 mp->b_datap->db_type = M_IOCNAK; 3023 mp->b_wptr = mp->b_rptr + sizeof (struct iocblk); 3024 iocbp = (struct iocblk *)mp->b_rptr; 3025 iocbp->ioc_error = err; 3026 
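	/*
	 * the M_IOCNAK built here travels back upstream; the application's
	 * ioctl(2) then fails with errno set to the ioc_error value above
	 */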
3027 if (mp->b_cont) { 3028 freemsg(mp->b_cont); 3029 mp->b_cont = NULL; 3030 } 3031 3032 qreply(q, mp); 3033 } 3034 3035 uint_t 3036 ecpp_isr(caddr_t arg) 3037 { 3038 struct ecppunit *pp = (struct ecppunit *)(void *)arg; 3039 uint32_t dcsr; 3040 uint8_t dsr; 3041 int cheerio_pend_counter; 3042 int retval = DDI_INTR_UNCLAIMED; 3043 hrtime_t now; 3044 3045 mutex_enter(&pp->umutex); 3046 /* 3047 * interrupt may occur while other thread is holding the lock 3048 * and cancels DMA transfer (e.g. ecpp_flush()) 3049 * since it cannot cancel the interrupt thread, 3050 * it just sets dma_cancelled to TRUE, 3051 * telling interrupt handler to exit immediately 3052 */ 3053 if (pp->dma_cancelled == TRUE) { 3054 ecpp_error(pp->dip, "dma-cancel isr\n"); 3055 3056 pp->intr_hard++; 3057 pp->dma_cancelled = FALSE; 3058 3059 mutex_exit(&pp->umutex); 3060 return (DDI_INTR_CLAIMED); 3061 } 3062 3063 /* Southbridge interrupts are handled separately */ 3064 #if defined(__x86) 3065 if (pp->hw == &x86) 3066 #else 3067 if (pp->hw == &m1553) 3068 #endif 3069 { 3070 retval = ecpp_M1553_intr(pp); 3071 if (retval == DDI_INTR_UNCLAIMED) { 3072 goto unexpected; 3073 } 3074 mutex_exit(&pp->umutex); 3075 return (DDI_INTR_CLAIMED); 3076 } 3077 3078 /* 3079 * the intr is through the motherboard. it is faster than PCI route. 3080 * sometimes ecpp_isr() is invoked before cheerio csr is updated. 3081 */ 3082 cheerio_pend_counter = ecpp_isr_max_delay; 3083 dcsr = GET_DMAC_CSR(pp); 3084 3085 while (!(dcsr & DCSR_INT_PEND) && cheerio_pend_counter-- > 0) { 3086 drv_usecwait(1); 3087 dcsr = GET_DMAC_CSR(pp); 3088 } 3089 3090 /* 3091 * This is a workaround for what seems to be a timing problem 3092 * with the delivery of interrupts and CSR updating with the 3093 * ebus2 csr, superio and the n_ERR pin from the peripheral. 
3094 * 3095 * delay is not needed for PIO mode 3096 */ 3097 if (!COMPAT_PIO(pp)) { 3098 drv_usecwait(100); 3099 dcsr = GET_DMAC_CSR(pp); 3100 } 3101 3102 /* on 97317 in Extended mode IRQ_ST of DSR is deasserted when read */ 3103 dsr = DSR_READ(pp); 3104 3105 /* 3106 * check if interrupt is for this device: 3107 * it should be reflected either in cheerio DCSR register 3108 * or in IRQ_ST bit of DSR on 97317 3109 */ 3110 if ((dcsr & DCSR_INT_PEND) == 0) { 3111 if (pp->hw != &pc97317) { 3112 goto unclaimed; 3113 } 3114 /* 3115 * on Excalibur, reading DSR will deassert SuperIO IRQx line 3116 * RIO's DCSR_INT_PEND seems to follow IRQx transitions, 3117 * so if DSR is read after interrupt occured, but before 3118 * we get here, IRQx and hence INT_PEND will be deasserted 3119 * as a result, we can miss a service interrupt in PIO mode 3120 * 3121 * malicious DSR reader is BPPIOC_TESTIO, which is called 3122 * by LP in between data blocks to check printer status 3123 * this workaround lets us not to miss an interrupt 3124 * 3125 * also, nErr interrupt (ECP mode) not always reflected in DCSR 3126 */ 3127 if (((dsr & ECPP_IRQ_ST) == 0) || 3128 ((COMPAT_PIO(pp)) && (pp->e_busy == ECPP_BUSY)) || 3129 (((dsr & ECPP_nERR) == 0) && 3130 (pp->current_mode == ECPP_ECP_MODE))) { 3131 dcsr = 0; 3132 } else { 3133 goto unclaimed; 3134 } 3135 } 3136 3137 pp->intr_hard++; 3138 3139 /* the intr is for us - check all possible interrupt sources */ 3140 if (dcsr & DCSR_ERR_PEND) { 3141 size_t bcr; 3142 3143 /* we are expecting a data transfer interrupt */ 3144 ASSERT(pp->e_busy == ECPP_BUSY); 3145 3146 /* 3147 * some kind of DMA error 3148 */ 3149 if (ECPP_DMA_STOP(pp, &bcr) == FAILURE) { 3150 ecpp_error(pp->dip, "ecpp_isr: dma_stop failed\n"); 3151 } 3152 3153 ecpp_error(pp->dip, "ecpp_isr: DMAC ERROR bcr=%d\n", bcr); 3154 3155 ecpp_xfer_cleanup(pp); 3156 3157 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 3158 ecpp_error(pp->dip, "ecpp_isr(e): unbind failed\n"); 3159 } 3160 3161 mutex_exit(&pp->umutex); 3162 return (DDI_INTR_CLAIMED); 3163 } 3164 3165 if (dcsr & DCSR_TC) { 3166 retval = ecpp_dma_ihdlr(pp); 3167 mutex_exit(&pp->umutex); 3168 return (DDI_INTR_CLAIMED); 3169 } 3170 3171 if (COMPAT_PIO(pp)) { 3172 retval = ecpp_pio_ihdlr(pp); 3173 mutex_exit(&pp->umutex); 3174 return (DDI_INTR_CLAIMED); 3175 } 3176 3177 /* does peripheral need attention? 
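	 * (nErr driven low by the peripheral signals a backchannel
	 * request; it is handed off to ecpp_nErr_ihdlr() below)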
*/ 3178 if ((dsr & ECPP_nERR) == 0) { 3179 retval = ecpp_nErr_ihdlr(pp); 3180 mutex_exit(&pp->umutex); 3181 return (DDI_INTR_CLAIMED); 3182 } 3183 3184 pp->intr_hard--; 3185 3186 unexpected: 3187 3188 pp->intr_spurious++; 3189 3190 /* 3191 * The following procedure tries to prevent soft hangs 3192 * in event of peripheral/superio misbehaviour: 3193 * if number of unexpected interrupts in the last SPUR_PERIOD ns 3194 * exceeded SPUR_CRITICAL, then shut up interrupts 3195 */ 3196 now = gethrtime(); 3197 if (pp->lastspur == 0 || now - pp->lastspur > SPUR_PERIOD) { 3198 /* last unexpected interrupt was long ago */ 3199 pp->lastspur = now; 3200 pp->nspur = 1; 3201 } else { 3202 /* last unexpected interrupt was recently */ 3203 pp->nspur++; 3204 } 3205 3206 if (pp->nspur >= SPUR_CRITICAL) { 3207 ECPP_MASK_INTR(pp); 3208 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK | ECPP_INTR_SRV); 3209 pp->nspur = 0; 3210 cmn_err(CE_NOTE, "%s%d: too many interrupt requests", 3211 ddi_get_name(pp->dip), ddi_get_instance(pp->dip)); 3212 } else { 3213 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_SRV | ECPP_INTR_MASK); 3214 } 3215 3216 ecpp_error(pp->dip, 3217 "isr:unknown: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n", 3218 dcsr, ECR_READ(pp), dsr, DCR_READ(pp), 3219 pp->current_mode, pp->current_phase); 3220 3221 mutex_exit(&pp->umutex); 3222 return (DDI_INTR_CLAIMED); 3223 3224 unclaimed: 3225 3226 pp->intr_spurious++; 3227 3228 ecpp_error(pp->dip, 3229 "isr:UNCL: dcsr=%x ecr=%x dsr=%x dcr=%x\nmode=%x phase=%x\n", 3230 dcsr, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp), 3231 pp->current_mode, pp->current_phase); 3232 3233 mutex_exit(&pp->umutex); 3234 return (DDI_INTR_UNCLAIMED); 3235 } 3236 3237 /* 3238 * M1553 intr handler 3239 */ 3240 static uint_t 3241 ecpp_M1553_intr(struct ecppunit *pp) 3242 { 3243 int retval = DDI_INTR_UNCLAIMED; 3244 3245 pp->intr_hard++; 3246 3247 if (pp->e_busy == ECPP_BUSY) { 3248 /* Centronics or Compat PIO transfer */ 3249 if (COMPAT_PIO(pp)) { 3250 return (ecpp_pio_ihdlr(pp)); 3251 } 3252 3253 /* Centronics or Compat DMA transfer */ 3254 if (COMPAT_DMA(pp) || 3255 (pp->current_mode == ECPP_ECP_MODE) || 3256 (pp->current_mode == ECPP_DIAG_MODE)) { 3257 return (ecpp_dma_ihdlr(pp)); 3258 } 3259 } 3260 3261 /* Nibble or ECP backchannel request? */ 3262 if ((DSR_READ(pp) & ECPP_nERR) == 0) { 3263 return (ecpp_nErr_ihdlr(pp)); 3264 } 3265 3266 return (retval); 3267 } 3268 3269 /* 3270 * DMA completion interrupt handler 3271 */ 3272 static uint_t 3273 ecpp_dma_ihdlr(struct ecppunit *pp) 3274 { 3275 clock_t tm; 3276 3277 ecpp_error(pp->dip, "ecpp_dma_ihdlr(%x): ecr=%x, dsr=%x, dcr=%x\n", 3278 pp->current_mode, ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 3279 3280 /* we are expecting a data transfer interrupt */ 3281 ASSERT(pp->e_busy == ECPP_BUSY); 3282 3283 /* Intr generated while invoking TFIFO mode. 
Exit */ 3284 if (pp->tfifo_intr == 1) { 3285 pp->tfifo_intr = 0; 3286 ecpp_error(pp->dip, "ecpp_dma_ihdlr: tfifo_intr is 1\n"); 3287 return (DDI_INTR_CLAIMED); 3288 } 3289 3290 if (ECPP_DMA_STOP(pp, NULL) == FAILURE) { 3291 ecpp_error(pp->dip, "ecpp_dma_ihdlr: dma_stop failed\n"); 3292 } 3293 3294 if (pp->current_mode == ECPP_ECP_MODE && 3295 pp->current_phase == ECPP_PHASE_ECP_REV_XFER) { 3296 ecpp_ecp_read_completion(pp); 3297 } else { 3298 /* 3299 * fifo_timer() will do the cleanup when the FIFO drains 3300 */ 3301 if ((ECR_READ(pp) & ECPP_FIFO_EMPTY) || 3302 (pp->current_mode == ECPP_DIAG_MODE)) { 3303 tm = 0; /* no use in waiting if FIFO is already empty */ 3304 } else { 3305 tm = drv_usectohz(FIFO_DRAIN_PERIOD); 3306 } 3307 pp->fifo_timer_id = timeout(ecpp_fifo_timer, (caddr_t)pp, tm); 3308 } 3309 3310 /* 3311 * Stop the DMA transfer timeout timer 3312 * this operation will temporarily give up the mutex, 3313 * so we do it in the end of the handler to avoid races 3314 */ 3315 ecpp_untimeout_unblock(pp, &pp->timeout_id); 3316 3317 return (DDI_INTR_CLAIMED); 3318 } 3319 3320 /* 3321 * ecpp_pio_ihdlr() is a PIO interrupt processing routine 3322 * It masks interrupts, updates statistics and initiates next byte transfer 3323 */ 3324 static uint_t 3325 ecpp_pio_ihdlr(struct ecppunit *pp) 3326 { 3327 ASSERT(mutex_owned(&pp->umutex)); 3328 ASSERT(pp->e_busy == ECPP_BUSY); 3329 3330 /* update statistics */ 3331 pp->joblen++; 3332 pp->ctxpio_obytes++; 3333 3334 /* disable nAck interrups */ 3335 ECPP_MASK_INTR(pp); 3336 DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_REV_DIR | ECPP_INTR_EN)); 3337 3338 /* 3339 * If it was the last byte of the data block cleanup, 3340 * otherwise trigger a soft interrupt to send the next byte 3341 */ 3342 if (pp->next_byte >= pp->last_byte) { 3343 ecpp_xfer_cleanup(pp); 3344 ecpp_error(pp->dip, 3345 "ecpp_pio_ihdlr: pp->joblen=%d,pp->ctx_cf=%d,\n", 3346 pp->joblen, pp->ctx_cf); 3347 } else { 3348 if (pp->softintr_pending) { 3349 ecpp_error(pp->dip, 3350 "ecpp_pio_ihdlr:E: next byte in progress\n"); 3351 } else { 3352 pp->softintr_flags = ECPP_SOFTINTR_PIONEXT; 3353 pp->softintr_pending = 1; 3354 ddi_trigger_softintr(pp->softintr_id); 3355 } 3356 } 3357 3358 return (DDI_INTR_CLAIMED); 3359 } 3360 3361 /* 3362 * ecpp_pio_writeb() sends a byte using Centronics handshake 3363 */ 3364 static void 3365 ecpp_pio_writeb(struct ecppunit *pp) 3366 { 3367 uint8_t dcr; 3368 3369 dcr = DCR_READ(pp) & ~ECPP_REV_DIR; 3370 dcr |= ECPP_INTR_EN; 3371 3372 /* send the next byte */ 3373 DATAR_WRITE(pp, *(pp->next_byte++)); 3374 3375 drv_usecwait(pp->data_setup_time); 3376 3377 /* Now Assert (neg logic) nStrobe */ 3378 if (dcr_write(pp, dcr | ECPP_STB) == FAILURE) { 3379 ecpp_error(pp->dip, "ecpp_pio_writeb:1: failed w/DCR\n"); 3380 } 3381 3382 /* Enable nAck interrupts */ 3383 (void) DSR_READ(pp); /* ensure IRQ_ST is armed */ 3384 ECPP_UNMASK_INTR(pp); 3385 3386 drv_usecwait(pp->strobe_pulse_width); 3387 3388 if (dcr_write(pp, dcr & ~ECPP_STB) == FAILURE) { 3389 ecpp_error(pp->dip, "ecpp_pio_writeb:2: failed w/DCR\n"); 3390 } 3391 } 3392 3393 /* 3394 * Backchannel request interrupt handler 3395 */ 3396 static uint_t 3397 ecpp_nErr_ihdlr(struct ecppunit *pp) 3398 { 3399 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: mode=%x, phase=%x\n", 3400 pp->current_mode, pp->current_phase); 3401 3402 if (pp->oflag != TRUE) { 3403 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: not open!\n"); 3404 return (DDI_INTR_UNCLAIMED); 3405 } 3406 3407 if (pp->e_busy == ECPP_BUSY) { 3408 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: busy\n"); 
3409 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK); 3410 return (DDI_INTR_CLAIMED); 3411 } 3412 3413 /* mask nErr & nAck interrupts */ 3414 ECPP_MASK_INTR(pp); 3415 DCR_WRITE(pp, DCR_READ(pp) & ~(ECPP_INTR_EN | ECPP_REV_DIR)); 3416 ECR_WRITE(pp, ECR_READ(pp) | ECPP_INTR_MASK); 3417 3418 /* going reverse */ 3419 switch (pp->current_mode) { 3420 case ECPP_ECP_MODE: 3421 /* 3422 * Peripheral asserts nPeriphRequest (nFault) 3423 */ 3424 break; 3425 case ECPP_NIBBLE_MODE: 3426 /* 3427 * Event 18: Periph asserts nErr to indicate data avail 3428 * Event 19: After waiting minimum pulse width, 3429 * periph sets nAck high to generate an interrupt 3430 * 3431 * Interface is in Interrupt Phase 3432 */ 3433 pp->current_phase = ECPP_PHASE_NIBT_REVINTR; 3434 3435 break; 3436 default: 3437 ecpp_error(pp->dip, "ecpp_nErr_ihdlr: wrong mode!\n"); 3438 return (DDI_INTR_UNCLAIMED); 3439 } 3440 3441 (void) ecpp_backchan_req(pp); /* put backchannel request on the wq */ 3442 3443 return (DDI_INTR_CLAIMED); 3444 } 3445 3446 /* 3447 * Softintr handler does work according to softintr_flags: 3448 * in case of ECPP_SOFTINTR_PIONEXT it sends next byte of PIO transfer 3449 */ 3450 static uint_t 3451 ecpp_softintr(caddr_t arg) 3452 { 3453 struct ecppunit *pp = (struct ecppunit *)arg; 3454 uint32_t unx_len, ecpp_reattempts = 0; 3455 3456 mutex_enter(&pp->umutex); 3457 3458 pp->intr_soft++; 3459 3460 if (!pp->softintr_pending) { 3461 mutex_exit(&pp->umutex); 3462 return (DDI_INTR_CLAIMED); 3463 } else { 3464 pp->softintr_pending = 0; 3465 } 3466 3467 if (pp->softintr_flags & ECPP_SOFTINTR_PIONEXT) { 3468 pp->softintr_flags &= ~ECPP_SOFTINTR_PIONEXT; 3469 /* 3470 * Sent next byte in PIO mode 3471 */ 3472 ecpp_reattempts = 0; 3473 do { 3474 if (ecpp_check_status(pp) == SUCCESS) { 3475 pp->e_busy = ECPP_BUSY; 3476 break; 3477 } 3478 drv_usecwait(1); 3479 if (pp->isr_reattempt_high < ecpp_reattempts) { 3480 pp->isr_reattempt_high = ecpp_reattempts; 3481 } 3482 } while (++ecpp_reattempts < pp->wait_for_busy); 3483 3484 /* if the peripheral still not recovered suspend the transfer */ 3485 if (pp->e_busy == ECPP_ERR) { 3486 ++pp->ctx_cf; /* check status fail */ 3487 ecpp_error(pp->dip, "ecpp_softintr:check_status:F: " 3488 "dsr=%x jl=%d cf_isr=%d\n", 3489 DSR_READ(pp), pp->joblen, pp->ctx_cf); 3490 3491 /* 3492 * if status signals are bad, 3493 * put everything back on the wq. 3494 */ 3495 unx_len = pp->last_byte - pp->next_byte; 3496 if (pp->msg != NULL) { 3497 ecpp_putback_untransfered(pp, 3498 (void *)pp->msg->b_rptr, unx_len); 3499 ecpp_error(pp->dip, 3500 "ecpp_softintr:e1:unx_len=%d\n", unx_len); 3501 3502 freemsg(pp->msg); 3503 pp->msg = NULL; 3504 } else { 3505 ecpp_putback_untransfered(pp, 3506 pp->next_byte, unx_len); 3507 ecpp_error(pp->dip, 3508 "ecpp_softintr:e2:unx_len=%d\n", unx_len); 3509 } 3510 3511 ecpp_xfer_cleanup(pp); 3512 pp->e_busy = ECPP_ERR; 3513 qenable(pp->writeq); 3514 } else { 3515 /* send the next one */ 3516 pp->e_busy = ECPP_BUSY; 3517 (void) ecpp_pio_writeb(pp); 3518 } 3519 } 3520 3521 mutex_exit(&pp->umutex); 3522 return (DDI_INTR_CLAIMED); 3523 } 3524 3525 3526 /* 3527 * Transfer clean-up: 3528 * shut down the DMAC 3529 * stop the transfer timer 3530 * enable write queue 3531 */ 3532 static void 3533 ecpp_xfer_cleanup(struct ecppunit *pp) 3534 { 3535 ASSERT(mutex_owned(&pp->umutex)); 3536 3537 /* 3538 * if we did not use the ioblock, the mblk that 3539 * was used should be freed. 
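	 * (for forward transfers pp->msg is set by ecpp_wsrv() when a
	 * single large M_DATA is used directly in place of pp->ioblock)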
3540 */ 3541 if (pp->msg != NULL) { 3542 freemsg(pp->msg); 3543 pp->msg = NULL; 3544 } 3545 3546 /* The port is no longer active */ 3547 pp->e_busy = ECPP_IDLE; 3548 3549 /* Stop the transfer timeout timer */ 3550 ecpp_untimeout_unblock(pp, &pp->timeout_id); 3551 3552 qenable(pp->writeq); 3553 } 3554 3555 /*VARARGS*/ 3556 static void 3557 ecpp_error(dev_info_t *dip, char *fmt, ...) 3558 { 3559 static long last; 3560 static char *lastfmt; 3561 char msg_buffer[255]; 3562 va_list ap; 3563 time_t now; 3564 3565 if (!ecpp_debug) { 3566 return; 3567 } 3568 3569 /* 3570 * Don't print same error message too often. 3571 */ 3572 now = gethrestime_sec(); 3573 if ((last == (now & ~1)) && (lastfmt == fmt)) 3574 return; 3575 3576 last = now & ~1; 3577 lastfmt = fmt; 3578 3579 va_start(ap, fmt); 3580 (void) vsprintf(msg_buffer, fmt, ap); 3581 cmn_err(CE_CONT, "%s%d: %s", ddi_get_name(dip), 3582 ddi_get_instance(dip), msg_buffer); 3583 va_end(ap); 3584 } 3585 3586 /* 3587 * Forward transfer timeout 3588 */ 3589 static void 3590 ecpp_xfer_timeout(void *arg) 3591 { 3592 struct ecppunit *pp = arg; 3593 void *unx_addr; 3594 size_t unx_len, xferd; 3595 uint8_t dcr; 3596 timeout_id_t fifo_timer_id; 3597 3598 mutex_enter(&pp->umutex); 3599 3600 if (pp->timeout_id == 0) { 3601 mutex_exit(&pp->umutex); 3602 return; 3603 } else { 3604 pp->timeout_id = 0; 3605 } 3606 3607 pp->xfer_tout++; 3608 3609 pp->dma_cancelled = TRUE; /* prevent race with isr() */ 3610 3611 if (COMPAT_PIO(pp)) { 3612 /* 3613 * PIO mode timeout 3614 */ 3615 3616 /* turn off nAck interrupts */ 3617 dcr = DCR_READ(pp); 3618 (void) dcr_write(pp, dcr & ~(ECPP_REV_DIR | ECPP_INTR_EN)); 3619 ECPP_MASK_INTR(pp); 3620 3621 pp->softintr_pending = 0; 3622 unx_len = pp->last_byte - pp->next_byte; 3623 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len); 3624 3625 if (unx_len > 0) { 3626 unx_addr = pp->next_byte; 3627 } else { 3628 ecpp_xfer_cleanup(pp); 3629 qenable(pp->writeq); 3630 mutex_exit(&pp->umutex); 3631 return; 3632 } 3633 } else { 3634 /* 3635 * DMA mode timeout 3636 * 3637 * If DMAC fails to shut off, continue anyways and attempt 3638 * to put untransfered data back on queue. 3639 */ 3640 if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) { 3641 ecpp_error(pp->dip, 3642 "ecpp_xfer_timeout: failed dma_stop\n"); 3643 } 3644 3645 ecpp_error(pp->dip, "xfer_timeout: unx_len=%d\n", unx_len); 3646 3647 if (ddi_dma_unbind_handle(pp->dma_handle) == DDI_FAILURE) { 3648 ecpp_error(pp->dip, 3649 "ecpp_xfer_timeout: failed unbind\n"); 3650 } 3651 3652 /* 3653 * if the bcr is zero, then DMA is complete and 3654 * we are waiting for the fifo to drain. So let 3655 * ecpp_fifo_timer() look after the clean up. 
3656 */ 3657 if (unx_len == 0) { 3658 qenable(pp->writeq); 3659 mutex_exit(&pp->umutex); 3660 return; 3661 } else { 3662 xferd = pp->dma_cookie.dmac_size - unx_len; 3663 pp->resid -= xferd; 3664 unx_len = pp->resid; 3665 3666 /* update statistics */ 3667 pp->obytes[pp->current_mode] += xferd; 3668 pp->joblen += xferd; 3669 3670 if (pp->msg != NULL) { 3671 unx_addr = (caddr_t)pp->msg->b_wptr - unx_len; 3672 } else { 3673 unx_addr = pp->ioblock + 3674 (pp->xfercnt - unx_len); 3675 } 3676 } 3677 } 3678 3679 /* Following code is common for PIO and DMA modes */ 3680 3681 ecpp_putback_untransfered(pp, (caddr_t)unx_addr, unx_len); 3682 3683 if (pp->msg != NULL) { 3684 freemsg(pp->msg); 3685 pp->msg = NULL; 3686 } 3687 3688 /* mark the error status structure */ 3689 pp->timeout_error = 1; 3690 pp->e_busy = ECPP_ERR; 3691 fifo_timer_id = pp->fifo_timer_id; 3692 pp->fifo_timer_id = 0; 3693 3694 qenable(pp->writeq); 3695 3696 mutex_exit(&pp->umutex); 3697 3698 if (fifo_timer_id) { 3699 (void) untimeout(fifo_timer_id); 3700 } 3701 } 3702 3703 static void 3704 ecpp_putback_untransfered(struct ecppunit *pp, void *startp, uint_t len) 3705 { 3706 mblk_t *new_mp; 3707 3708 ecpp_error(pp->dip, "ecpp_putback_untrans=%d\n", len); 3709 3710 if (len == 0) { 3711 return; 3712 } 3713 3714 new_mp = allocb(len, BPRI_MED); 3715 if (new_mp == NULL) { 3716 ecpp_error(pp->dip, 3717 "ecpp_putback_untransfered: allocb FAILURE.\n"); 3718 return; 3719 } 3720 3721 bcopy(startp, new_mp->b_rptr, len); 3722 new_mp->b_wptr = new_mp->b_rptr + len; 3723 3724 if (!putbq(pp->writeq, new_mp)) { 3725 freemsg(new_mp); 3726 } 3727 } 3728 3729 static uchar_t 3730 ecr_write(struct ecppunit *pp, uint8_t ecr_byte) 3731 { 3732 int i, current_ecr; 3733 3734 for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) { 3735 ECR_WRITE(pp, ecr_byte); 3736 3737 current_ecr = ECR_READ(pp); 3738 3739 /* mask off the lower two read-only bits */ 3740 if ((ecr_byte & 0xFC) == (current_ecr & 0xFC)) 3741 return (SUCCESS); 3742 } 3743 return (FAILURE); 3744 } 3745 3746 static uchar_t 3747 dcr_write(struct ecppunit *pp, uint8_t dcr_byte) 3748 { 3749 uint8_t current_dcr; 3750 int i; 3751 3752 for (i = ECPP_REG_WRITE_MAX_LOOP; i > 0; i--) { 3753 DCR_WRITE(pp, dcr_byte); 3754 3755 current_dcr = DCR_READ(pp); 3756 3757 /* compare only bits 0-4 (direction bit return 1) */ 3758 if ((dcr_byte & 0x1F) == (current_dcr & 0x1F)) 3759 return (SUCCESS); 3760 } 3761 ecpp_error(pp->dip, 3762 "(%d)dcr_write: dcr written =%x, dcr readback =%x\n", 3763 i, dcr_byte, current_dcr); 3764 3765 return (FAILURE); 3766 } 3767 3768 static uchar_t 3769 ecpp_reset_port_regs(struct ecppunit *pp) 3770 { 3771 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 3772 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 3773 return (SUCCESS); 3774 } 3775 3776 /* 3777 * The data transferred by the DMA engine goes through the FIFO, 3778 * so that when the DMA counter reaches zero (and an interrupt occurs) 3779 * the FIFO can still contain data. If this is the case, the ISR will 3780 * schedule this callback to wait until the FIFO drains or a timeout occurs. 3781 */ 3782 static void 3783 ecpp_fifo_timer(void *arg) 3784 { 3785 struct ecppunit *pp = arg; 3786 uint8_t ecr; 3787 timeout_id_t timeout_id; 3788 3789 mutex_enter(&pp->umutex); 3790 3791 /* 3792 * If the FIFO timer has been turned off, exit. 
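	 * (ecpp_flush() and ecpp_xfer_timeout() clear fifo_timer_id under
	 * umutex before calling untimeout(); an instance of this timer
	 * that already fired and was waiting on the mutex sees the
	 * cleared id here and simply returns)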
3793 */ 3794 if (pp->fifo_timer_id == 0) { 3795 ecpp_error(pp->dip, "ecpp_fifo_timer: untimedout\n"); 3796 mutex_exit(&pp->umutex); 3797 return; 3798 } else { 3799 pp->fifo_timer_id = 0; 3800 } 3801 3802 /* 3803 * If the FIFO is not empty restart timer. Wait FIFO_DRAIN_PERIOD 3804 * (250 ms) and check FIFO_EMPTY bit again. Repeat until FIFO is 3805 * empty or until 10 * FIFO_DRAIN_PERIOD expires. 3806 */ 3807 ecr = ECR_READ(pp); 3808 3809 if ((pp->current_mode != ECPP_DIAG_MODE) && 3810 (((ecr & ECPP_FIFO_EMPTY) == 0) && 3811 (pp->ecpp_drain_counter < 10))) { 3812 3813 ecpp_error(pp->dip, 3814 "ecpp_fifo_timer(%d):FIFO not empty:ecr=%x\n", 3815 pp->ecpp_drain_counter, ecr); 3816 3817 pp->fifo_timer_id = timeout(ecpp_fifo_timer, 3818 (caddr_t)pp, drv_usectohz(FIFO_DRAIN_PERIOD)); 3819 ++pp->ecpp_drain_counter; 3820 3821 mutex_exit(&pp->umutex); 3822 return; 3823 } 3824 3825 if (pp->current_mode != ECPP_DIAG_MODE) { 3826 /* 3827 * If the FIFO won't drain after 10 FIFO_DRAIN_PERIODs 3828 * then don't wait any longer. Simply clean up the transfer. 3829 */ 3830 if (pp->ecpp_drain_counter >= 10) { 3831 ecpp_error(pp->dip, "ecpp_fifo_timer(%d):" 3832 " clearing FIFO,can't wait:ecr=%x\n", 3833 pp->ecpp_drain_counter, ecr); 3834 } else { 3835 ecpp_error(pp->dip, 3836 "ecpp_fifo_timer(%d):FIFO empty:ecr=%x\n", 3837 pp->ecpp_drain_counter, ecr); 3838 } 3839 3840 pp->ecpp_drain_counter = 0; 3841 } 3842 3843 /* 3844 * Main section of routine: 3845 * - stop the DMA transfer timer 3846 * - program DMA with next cookie/window or unbind the DMA mapping 3847 * - update stats 3848 * - if last mblk in queue, signal to close() & return to idle state 3849 */ 3850 3851 /* Stop the DMA transfer timeout timer */ 3852 timeout_id = pp->timeout_id; 3853 pp->timeout_id = 0; 3854 3855 /* data has drained from fifo, it is ok to free dma resource */ 3856 if (pp->current_mode == ECPP_ECP_MODE || 3857 pp->current_mode == ECPP_DIAG_MODE || 3858 COMPAT_DMA(pp)) { 3859 off_t off; 3860 size_t len; 3861 3862 /* update residual */ 3863 pp->resid -= pp->dma_cookie.dmac_size; 3864 3865 /* update statistics */ 3866 pp->joblen += pp->dma_cookie.dmac_size; 3867 if (pp->dma_dir == DDI_DMA_WRITE) { 3868 pp->obytes[pp->current_mode] += 3869 pp->dma_cookie.dmac_size; 3870 } else { 3871 pp->ibytes[pp->current_mode] += 3872 pp->dma_cookie.dmac_size; 3873 } 3874 3875 /* 3876 * Look if any cookies/windows left 3877 */ 3878 if (--pp->dma_cookie_count > 0) { 3879 /* process the next cookie */ 3880 ddi_dma_nextcookie(pp->dma_handle, 3881 &pp->dma_cookie); 3882 } else if (pp->dma_curwin < pp->dma_nwin) { 3883 /* process the next window */ 3884 if (ddi_dma_getwin(pp->dma_handle, 3885 pp->dma_curwin, &off, &len, 3886 &pp->dma_cookie, 3887 &pp->dma_cookie_count) != DDI_SUCCESS) { 3888 ecpp_error(pp->dip, 3889 "ecpp_fifo_timer: ddi_dma_getwin failed\n"); 3890 goto dma_done; 3891 } 3892 3893 pp->dma_curwin++; 3894 } else { 3895 goto dma_done; 3896 } 3897 3898 ecpp_error(pp->dip, "ecpp_fifo_timer: next addr=%llx len=%d\n", 3899 pp->dma_cookie.dmac_address, 3900 pp->dma_cookie.dmac_size); 3901 3902 /* kick off new transfer */ 3903 if (ECPP_DMA_START(pp) != SUCCESS) { 3904 ecpp_error(pp->dip, 3905 "ecpp_fifo_timer: dma_start failed\n"); 3906 goto dma_done; 3907 } 3908 3909 (void) ecr_write(pp, (ecr & 0xe0) | 3910 ECPP_DMA_ENABLE | ECPP_INTR_MASK); 3911 3912 mutex_exit(&pp->umutex); 3913 3914 if (timeout_id) { 3915 (void) untimeout(timeout_id); 3916 } 3917 return; 3918 3919 dma_done: 3920 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 3921 
ecpp_error(pp->dip, "ecpp_fifo_timer: unbind failed\n"); 3922 } else { 3923 ecpp_error(pp->dip, "ecpp_fifo_timer: unbind ok\n"); 3924 } 3925 } 3926 3927 /* 3928 * if we did not use the dmablock, the mblk that 3929 * was used should be freed. 3930 */ 3931 if (pp->msg != NULL) { 3932 freemsg(pp->msg); 3933 pp->msg = NULL; 3934 } 3935 3936 /* The port is no longer active */ 3937 pp->e_busy = ECPP_IDLE; 3938 3939 qenable(pp->writeq); 3940 3941 mutex_exit(&pp->umutex); 3942 3943 if (timeout_id) { 3944 (void) untimeout(timeout_id); 3945 } 3946 } 3947 3948 /* 3949 * In Compatibility mode, check if the peripheral is ready to accept data 3950 */ 3951 static uint8_t 3952 ecpp_check_status(struct ecppunit *pp) 3953 { 3954 uint8_t dsr; 3955 uint8_t statmask; 3956 3957 if (pp->current_mode == ECPP_ECP_MODE || 3958 pp->current_mode == ECPP_DIAG_MODE) 3959 return (SUCCESS); 3960 3961 statmask = ECPP_nERR | ECPP_SLCT | ECPP_nBUSY | ECPP_nACK; 3962 3963 dsr = DSR_READ(pp); 3964 if ((dsr & ECPP_PE) || ((dsr & statmask) != statmask)) { 3965 pp->e_busy = ECPP_ERR; 3966 return (FAILURE); 3967 } else { 3968 return (SUCCESS); 3969 } 3970 } 3971 3972 /* 3973 * if the peripheral is not ready to accept data, write service routine 3974 * periodically reschedules itself to recheck peripheral status 3975 * and start data transfer as soon as possible 3976 */ 3977 static void 3978 ecpp_wsrv_timer(void *arg) 3979 { 3980 struct ecppunit *pp = arg; 3981 3982 ecpp_error(pp->dip, "ecpp_wsrv_timer: starting\n"); 3983 3984 mutex_enter(&pp->umutex); 3985 3986 if (pp->wsrv_timer_id == 0) { 3987 mutex_exit(&pp->umutex); 3988 return; 3989 } else { 3990 pp->wsrv_timer_id = 0; 3991 } 3992 3993 ecpp_error(pp->dip, "ecpp_wsrv_timer: qenabling...\n"); 3994 3995 qenable(pp->writeq); 3996 3997 mutex_exit(&pp->umutex); 3998 } 3999 4000 /* 4001 * Allocate a message indicating a backchannel request 4002 * and put it on the write queue 4003 */ 4004 static int 4005 ecpp_backchan_req(struct ecppunit *pp) 4006 { 4007 mblk_t *mp; 4008 4009 if ((mp = allocb(sizeof (int), BPRI_MED)) == NULL) { 4010 ecpp_error(pp->dip, "ecpp_backchan_req: allocb failed\n"); 4011 return (FAILURE); 4012 } else { 4013 mp->b_datap->db_type = M_CTL; 4014 *(int *)mp->b_rptr = ECPP_BACKCHANNEL; 4015 mp->b_wptr = mp->b_rptr + sizeof (int); 4016 if (!putbq(pp->writeq, mp)) { 4017 ecpp_error(pp->dip, "ecpp_backchan_req:putbq failed\n"); 4018 freemsg(mp); 4019 return (FAILURE); 4020 } 4021 return (SUCCESS); 4022 } 4023 } 4024 4025 /* 4026 * Cancel the function scheduled with timeout(9F) 4027 * This function is to be called with the mutex held 4028 */ 4029 static void 4030 ecpp_untimeout_unblock(struct ecppunit *pp, timeout_id_t *id) 4031 { 4032 timeout_id_t saved_id; 4033 4034 ASSERT(mutex_owned(&pp->umutex)); 4035 4036 if (*id) { 4037 saved_id = *id; 4038 *id = 0; 4039 mutex_exit(&pp->umutex); 4040 (void) untimeout(saved_id); 4041 mutex_enter(&pp->umutex); 4042 } 4043 } 4044 4045 /* 4046 * get prnio interface capabilities 4047 */ 4048 static uint_t 4049 ecpp_get_prn_ifcap(struct ecppunit *pp) 4050 { 4051 uint_t ifcap; 4052 4053 ifcap = PRN_1284_DEVID | PRN_TIMEOUTS | PRN_STREAMS; 4054 4055 /* status (DSR) only makes sense in Centronics & Compat modes */ 4056 if (pp->current_mode == ECPP_CENTRONICS || 4057 pp->current_mode == ECPP_COMPAT_MODE) { 4058 ifcap |= PRN_1284_STATUS; 4059 } else if (pp->current_mode == ECPP_NIBBLE_MODE || 4060 pp->current_mode == ECPP_ECP_MODE) { 4061 ifcap |= PRN_BIDI; 4062 } 4063 4064 return (ifcap); 4065 } 4066 4067 /* 4068 * Determine SuperI/O 
type 4069 */ 4070 static struct ecpp_hw_bind * 4071 ecpp_determine_sio_type(struct ecppunit *pp) 4072 { 4073 struct ecpp_hw_bind *hw_bind; 4074 char *name; 4075 int i; 4076 4077 name = ddi_binding_name(pp->dip); 4078 4079 for (hw_bind = NULL, i = 0; i < NELEM(ecpp_hw_bind); i++) { 4080 if (strcmp(name, ecpp_hw_bind[i].name) == 0) { 4081 hw_bind = &ecpp_hw_bind[i]; 4082 break; 4083 } 4084 } 4085 4086 return (hw_bind); 4087 } 4088 4089 4090 /* 4091 * 4092 * IEEE 1284 support routines: 4093 * negotiation and termination; 4094 * phase transitions; 4095 * device ID; 4096 * 4097 */ 4098 4099 /* 4100 * Interface initialization, abnormal termination into Compatibility mode 4101 * 4102 * Peripheral may be non-1284, so we set current mode to ECPP_CENTRONICS 4103 */ 4104 static void 4105 ecpp_1284_init_interface(struct ecppunit *pp) 4106 { 4107 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4108 4109 /* 4110 * Toggle the nInit signal if configured in ecpp.conf 4111 * for most peripherals it is not needed 4112 */ 4113 if (pp->init_seq == TRUE) { 4114 DCR_WRITE(pp, ECPP_SLCTIN); 4115 drv_usecwait(50); /* T(ER) = 50us */ 4116 } 4117 4118 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4119 4120 pp->current_mode = pp->backchannel = ECPP_CENTRONICS; 4121 pp->current_phase = ECPP_PHASE_C_IDLE; 4122 ECPP_CONFIG_MODE(pp); 4123 pp->to_mode[pp->current_mode]++; 4124 4125 ecpp_error(pp->dip, "ecpp_1284_init_interface: ok\n"); 4126 } 4127 4128 /* 4129 * ECP mode negotiation 4130 */ 4131 static int 4132 ecp_negotiation(struct ecppunit *pp) 4133 { 4134 uint8_t dsr; 4135 4136 /* ECP mode negotiation */ 4137 4138 if (ecpp_1284_negotiation(pp, ECPP_XREQ_ECP, &dsr) == FAILURE) 4139 return (FAILURE); 4140 4141 /* Event 5: peripheral deasserts PError and Busy, asserts Select */ 4142 if ((dsr & (ECPP_PE | ECPP_nBUSY | ECPP_SLCT)) != 4143 (ECPP_nBUSY | ECPP_SLCT)) { 4144 ecpp_error(pp->dip, 4145 "ecp_negotiation: failed event 5 %x\n", DSR_READ(pp)); 4146 (void) ecpp_1284_termination(pp); 4147 return (FAILURE); 4148 } 4149 4150 /* entered Setup Phase */ 4151 pp->current_phase = ECPP_PHASE_ECP_SETUP; 4152 4153 /* Event 30: host asserts nAutoFd */ 4154 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4155 4156 /* Event 31: peripheral asserts PError */ 4157 if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) { 4158 ecpp_error(pp->dip, 4159 "ecp_negotiation: failed event 31 %x\n", DSR_READ(pp)); 4160 (void) ecpp_1284_termination(pp); 4161 return (FAILURE); 4162 } 4163 4164 /* entered Forward Idle Phase */ 4165 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 4166 4167 /* successful negotiation into ECP mode */ 4168 pp->current_mode = ECPP_ECP_MODE; 4169 pp->backchannel = ECPP_ECP_MODE; 4170 4171 ecpp_error(pp->dip, "ecp_negotiation: ok\n"); 4172 4173 return (SUCCESS); 4174 } 4175 4176 /* 4177 * Nibble mode negotiation 4178 */ 4179 static int 4180 nibble_negotiation(struct ecppunit *pp) 4181 { 4182 uint8_t dsr; 4183 4184 if (ecpp_1284_negotiation(pp, ECPP_XREQ_NIBBLE, &dsr) == FAILURE) { 4185 return (FAILURE); 4186 } 4187 4188 /* 4189 * If peripheral has data available, PE and nErr will 4190 * be set low at Event 5 & 6. 
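	 * (the check below treats PE == 0 and nErr == 0 as reverse data
	 * pending and enters ECPP_PHASE_NIBT_AVAIL; otherwise the phase
	 * is set to ECPP_PHASE_NIBT_NAVAIL)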
4191 */ 4192 if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) { 4193 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 4194 } else { 4195 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL; 4196 } 4197 4198 /* successful negotiation into Nibble mode */ 4199 pp->current_mode = ECPP_NIBBLE_MODE; 4200 pp->backchannel = ECPP_NIBBLE_MODE; 4201 4202 ecpp_error(pp->dip, "nibble_negotiation: ok (phase=%x)\n", 4203 pp->current_phase); 4204 4205 return (SUCCESS); 4206 4207 } 4208 4209 /* 4210 * Wait up to ptimeout usec for the peripheral to set 'mask' bits to 'val' state 4211 * 4212 * return value < 0 indicates timeout 4213 */ 4214 static int 4215 wait_dsr(struct ecppunit *pp, uint8_t mask, uint8_t val, int ptimeout) 4216 { 4217 while (((DSR_READ(pp) & mask) != val) && ptimeout--) { 4218 drv_usecwait(1); 4219 } 4220 4221 return (ptimeout); 4222 } 4223 4224 /* 4225 * 1284 negotiation Events 0..6 4226 * the requested mode is indicated by the extensibility request value 4227 * 4228 * After successful negotiation SUCCESS is returned and 4229 * current mode is set according to xreq, 4230 * otherwise FAILURE is returned and current mode is set to 4231 * either COMPAT (1284 periph) or CENTRONICS (non-1284 periph) 4232 * 4233 * Current phase must be set by the caller (mode-specific negotiation) 4234 * 4235 * If rdsr is not NULL, DSR value after Event 6 is stored here 4236 */ 4237 static int 4238 ecpp_1284_negotiation(struct ecppunit *pp, uint8_t xreq, uint8_t *rdsr) 4239 { 4240 int xflag; 4241 4242 ecpp_error(pp->dip, "nego(%x): entering...\n", xreq); 4243 4244 /* negotiation should start in Compatibility mode */ 4245 (void) ecpp_1284_termination(pp); 4246 4247 /* Set host into Compat mode */ 4248 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4249 4250 pp->current_phase = ECPP_PHASE_NEGO; 4251 4252 /* Event 0: host sets extensibility request on data lines */ 4253 DATAR_WRITE(pp, xreq); 4254 4255 /* Event 1: host deasserts nSelectIn and asserts nAutoFd */ 4256 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4257 4258 drv_usecwait(1); /* Tp(ecp) == 0.5us */ 4259 4260 /* 4261 * Event 2: peripheral asserts nAck, deasserts nFault, 4262 * asserts Select, asserts PError 4263 */ 4264 if (wait_dsr(pp, ECPP_nERR | ECPP_SLCT | ECPP_PE | ECPP_nACK, 4265 ECPP_nERR | ECPP_SLCT | ECPP_PE, 35000) < 0) { 4266 /* peripheral is not 1284-compliant */ 4267 ecpp_error(pp->dip, 4268 "nego(%x): failed event 2 %x\n", xreq, DSR_READ(pp)); 4269 (void) ecpp_1284_termination(pp); 4270 return (FAILURE); 4271 } 4272 4273 /* 4274 * Event 3: host asserts nStrobe, latching the extensibility value into 4275 * the peripheral's input latch. 4276 */ 4277 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_STB); 4278 4279 drv_usecwait(2); /* Tp(ecp) = 0.5us */ 4280 4281 /* 4282 * Event 4: host deasserts nStrobe and nAutoFd to acknowledge that 4283 * it has recognized a 1284-compatible peripheral 4284 */ 4285 DCR_WRITE(pp, ECPP_nINIT); 4286 4287 /* 4288 * Event 5: Peripheral confirms it supports the requested extension 4289 * For Nibble mode Xflag must be low, otherwise it must be high 4290 */ 4291 xflag = (xreq == ECPP_XREQ_NIBBLE) ? 
0 : ECPP_SLCT; 4292 4293 /* 4294 * Event 6: Peripheral sets nAck high, 4295 * indicating that status lines are valid 4296 */ 4297 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4298 /* Something wrong with peripheral */ 4299 ecpp_error(pp->dip, 4300 "nego(%x): failed event 6 %x\n", xreq, DSR_READ(pp)); 4301 (void) ecpp_1284_termination(pp); 4302 return (FAILURE); 4303 } 4304 4305 if ((DSR_READ(pp) & ECPP_SLCT) != xflag) { 4306 /* Extensibility value is not supported */ 4307 ecpp_error(pp->dip, 4308 "nego(%x): failed event 5 %x\n", xreq, DSR_READ(pp)); 4309 (void) ecpp_1284_termination(pp); 4310 return (FAILURE); 4311 } 4312 4313 if (rdsr) { 4314 *rdsr = DSR_READ(pp); 4315 } 4316 4317 return (SUCCESS); 4318 } 4319 4320 /* 4321 * 1284 Termination: Events 22..28 - set link to Compatibility mode 4322 * 4323 * This routine is not designed for Immediate termination; 4324 * the caller must take care of waiting for a valid state 4325 * (in particular, in ECP mode the current phase must be Forward Idle), 4326 * otherwise the interface will be reinitialized 4327 * 4328 * In case of Valid state termination SUCCESS is returned and 4329 * current_mode is ECPP_COMPAT_MODE, current phase is ECPP_PHASE_C_IDLE 4330 * Otherwise the interface is reinitialized, FAILURE is returned and 4331 * current mode is ECPP_CENTRONICS, current phase is ECPP_PHASE_C_IDLE 4332 */ 4333 static int 4334 ecpp_1284_termination(struct ecppunit *pp) 4335 { 4336 int previous_mode = pp->current_mode; 4337 4338 if (((pp->current_mode == ECPP_COMPAT_MODE || 4339 pp->current_mode == ECPP_CENTRONICS) && 4340 pp->current_phase == ECPP_PHASE_C_IDLE) || 4341 pp->current_mode == ECPP_DIAG_MODE) { 4342 ecpp_error(pp->dip, "termination: not needed\n"); 4343 return (SUCCESS); 4344 } 4345 4346 /* Set host into Compat mode, interrupts disabled */ 4347 ECPP_MASK_INTR(pp); 4348 ECR_WRITE(pp, ECPP_INTR_SRV | ECPP_INTR_MASK | ECR_mode_001); 4349 4350 pp->current_mode = ECPP_COMPAT_MODE; /* needed by next function */ 4351 4352 ECPP_CONFIG_MODE(pp); 4353 4354 /* 4355 * EPP mode uses a simple nInit pulse for termination 4356 */ 4357 if (previous_mode == ECPP_EPP_MODE) { 4358 /* Event 68: host sets nInit low */ 4359 DCR_WRITE(pp, 0); 4360 4361 drv_usecwait(55); /* T(ER) = 50us */ 4362 4363 /* Event 69: host sets nInit high */ 4364 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4365 4366 goto endterm; 4367 } 4368 4369 /* terminate peripheral to Compat mode */ 4370 pp->current_phase = ECPP_PHASE_TERM; 4371 4372 /* Event 22: host sets nSelectIn low and nAutoFd high */ 4373 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4374 4375 /* Event 23: peripheral deasserts nFault and nBusy */ 4376 /* Event 24: peripheral asserts nAck */ 4377 if (wait_dsr(pp, ECPP_nERR | ECPP_nBUSY | ECPP_nACK, 4378 ECPP_nERR, 35000) < 0) { 4379 ecpp_error(pp->dip, 4380 "termination: failed events 23,24 %x\n", DSR_READ(pp)); 4381 ecpp_1284_init_interface(pp); 4382 return (FAILURE); 4383 } 4384 4385 drv_usecwait(1); /* Tp = 0.5us */ 4386 4387 /* Event 25: host sets nAutoFd low */ 4388 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN | ECPP_AFX); 4389 4390 /* Event 26: the peripheral puts itself in Compatible mode */ 4391 4392 /* Event 27: peripheral deasserts nAck */ 4393 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4394 ecpp_error(pp->dip, 4395 "termination: failed event 27 %x\n", DSR_READ(pp)); 4396 ecpp_1284_init_interface(pp); 4397 return (FAILURE); 4398 } 4399 4400 drv_usecwait(1); /* Tp = 0.5us */ 4401 4402 /* Event 28: host deasserts nAutoFd */ 4403 DCR_WRITE(pp, ECPP_nINIT | ECPP_SLCTIN); 4404 4405 
drv_usecwait(1); /* Tp = 0.5us */ 4406 4407 endterm: 4408 /* Compatible mode Idle Phase */ 4409 pp->current_phase = ECPP_PHASE_C_IDLE; 4410 4411 ecpp_error(pp->dip, "termination: completed %x %x\n", 4412 DSR_READ(pp), DCR_READ(pp)); 4413 4414 return (SUCCESS); 4415 } 4416 4417 /* 4418 * Initiate ECP backchannel DMA transfer 4419 */ 4420 static uchar_t 4421 ecp_peripheral2host(struct ecppunit *pp) 4422 { 4423 mblk_t *mp = NULL; 4424 size_t len; 4425 uint32_t xfer_time; 4426 4427 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4428 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 4429 4430 /* 4431 * the hardware generates cycles to receive data from the peripheral; 4432 * we only need to read from the FIFO 4433 */ 4434 4435 /* 4436 * If user issued read(2) of rev_resid bytes, xfer exactly this amount 4437 * unless it exceeds ECP_REV_BLKSZ_MAX; otherwise try to read 4438 * ECP_REV_BLKSZ_MAX or at least ECP_REV_BLKSZ bytes 4439 */ 4440 if (pp->nread > 0) { 4441 len = min(pp->nread, ECP_REV_BLKSZ_MAX); 4442 } else { 4443 len = ECP_REV_BLKSZ_MAX; 4444 } 4445 4446 pp->nread = 0; /* clear after use */ 4447 4448 /* 4449 * Allocate mblk for data, making at most 2 attempts: 4450 * if a len-byte block fails, try our block size 4451 */ 4452 while ((mp = allocb(len, BPRI_MED)) == NULL) { 4453 ecpp_error(pp->dip, 4454 "ecp_periph2host: failed allocb(%d)\n", len); 4455 if (len > ECP_REV_BLKSZ) { 4456 len = ECP_REV_BLKSZ; 4457 } else { 4458 break; 4459 } 4460 } 4461 4462 if (mp == NULL) { 4463 goto fail; 4464 } 4465 4466 pp->msg = mp; 4467 pp->e_busy = ECPP_BUSY; 4468 pp->dma_dir = DDI_DMA_READ; 4469 pp->current_phase = ECPP_PHASE_ECP_REV_XFER; 4470 4471 if (ecpp_init_dma_xfer(pp, (caddr_t)mp->b_rptr, len) == FAILURE) { 4472 goto fail; 4473 } 4474 4475 /* 4476 * there are two problems with defining an ECP backchannel xfer timeout: 4477 * 4478 * a) IEEE 1284 allows infinite time between backchannel bytes, 4479 * but we must stop at some point to send the data upstream, 4480 * check whether any forward transfer requests are pending, etc; 4481 * all that done, we can continue with backchannel data; 4482 * 4483 * b) we don't know how much data the peripheral has; 4484 * the DMA counter is set to our buffer size, which can be bigger 4485 * than needed - in this case a timeout must detect this; 4486 * 4487 * The timeout we schedule here serves as both the transfer timeout 4488 * and a means of detecting backchannel stalls; in fact, there are 4489 * two timeouts in one: 4490 * 4491 * - transfer timeout is based on the ECP bandwidth of ~1MB/sec and 4492 * equals the time needed to transfer the whole buffer 4493 * (but not less than ECP_REV_MINTOUT ms); if it occurs, 4494 * DMA is stopped and the data is sent upstream; 4495 * 4496 * - backchannel watchdog, which would look at the DMA counter 4497 * every rev_watchdog ms and stop the transfer only 4498 * if the counter hasn't changed since the last time; 4499 * otherwise it would save the DMA counter value and restart itself; 4500 * 4501 * the transfer timeout is a multiple of rev_watchdog 4502 * and implemented as a downward counter 4503 * 4504 * on Grover, we can't access DMAC registers while DMA is in flight, 4505 * so we can't have the watchdog on Grover, only the timeout 4506 */ 4507 4508 /* calculate number of watchdog invocations equal to the xfer timeout */ 4509 xfer_time = max((1000 * len) / pp->ecp_rev_speed, ECP_REV_MINTOUT); 4510 #if defined(__x86) 4511 pp->rev_timeout_cnt = (pp->hw == &x86) ? 1 : 4512 max(xfer_time / pp->rev_watchdog, 1); 4513 #else 4514 pp->rev_timeout_cnt = (pp->hw == &m1553) ? 
1 : 4515 max(xfer_time / pp->rev_watchdog, 1); 4516 #endif 4517 4518 pp->last_dmacnt = len; /* nothing xferred yet */ 4519 4520 pp->timeout_id = timeout(ecpp_ecp_read_timeout, (caddr_t)pp, 4521 drv_usectohz(pp->rev_watchdog * 1000)); 4522 4523 ecpp_error(pp->dip, "ecp_periph2host: DMA started len=%d\n" 4524 "xfer_time=%d wdog=%d cnt=%d\n", 4525 len, xfer_time, pp->rev_watchdog, pp->rev_timeout_cnt); 4526 4527 return (SUCCESS); 4528 4529 fail: 4530 if (mp) { 4531 freemsg(mp); 4532 } 4533 pp->e_busy = ECPP_IDLE; 4534 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4535 4536 return (FAILURE); 4537 } 4538 4539 /* 4540 * ECP backchannel read timeout 4541 * implements both backchannel watchdog and transfer timeout in ECP mode 4542 * if the transfer is still in progress, reschedule itself, 4543 * otherwise call completion routine 4544 */ 4545 static void 4546 ecpp_ecp_read_timeout(void *arg) 4547 { 4548 struct ecppunit *pp = arg; 4549 size_t dmacnt; 4550 4551 mutex_enter(&pp->umutex); 4552 4553 if (pp->timeout_id == 0) { 4554 mutex_exit(&pp->umutex); 4555 return; 4556 } else { 4557 pp->timeout_id = 0; 4558 } 4559 4560 if (--pp->rev_timeout_cnt == 0) { 4561 /* 4562 * Transfer timed out 4563 */ 4564 ecpp_error(pp->dip, "ecp_read_timeout: timeout\n"); 4565 pp->xfer_tout++; 4566 ecpp_ecp_read_completion(pp); 4567 } else { 4568 /* 4569 * Backchannel watchdog: 4570 * look if DMA made any progress from the last time 4571 */ 4572 dmacnt = ECPP_DMA_GETCNT(pp); 4573 if (dmacnt - pp->last_dmacnt == 0) { 4574 /* 4575 * No progress - stop the transfer and send 4576 * whatever has been read so far up the stream 4577 */ 4578 ecpp_error(pp->dip, "ecp_read_timeout: no progress\n"); 4579 pp->xfer_tout++; 4580 ecpp_ecp_read_completion(pp); 4581 } else { 4582 /* 4583 * Something was transferred - restart ourselves 4584 */ 4585 ecpp_error(pp->dip, "ecp_read_timeout: restarting\n"); 4586 pp->last_dmacnt = dmacnt; 4587 pp->timeout_id = timeout(ecpp_ecp_read_timeout, 4588 (caddr_t)pp, 4589 drv_usectohz(pp->rev_watchdog * 1000)); 4590 } 4591 } 4592 4593 mutex_exit(&pp->umutex); 4594 } 4595 4596 /* 4597 * ECP backchannel read completion: 4598 * stop the DMA, free DMA resources and send read data upstream 4599 */ 4600 static void 4601 ecpp_ecp_read_completion(struct ecppunit *pp) 4602 { 4603 size_t xfer_len, unx_len; 4604 mblk_t *mp; 4605 4606 ASSERT(mutex_owned(&pp->umutex)); 4607 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4608 pp->current_phase == ECPP_PHASE_ECP_REV_XFER); 4609 ASSERT(pp->msg != NULL); 4610 4611 /* 4612 * Stop the transfer and unbind DMA handle 4613 */ 4614 if (ECPP_DMA_STOP(pp, &unx_len) == FAILURE) { 4615 unx_len = pp->resid; 4616 ecpp_error(pp->dip, "ecp_read_completion: failed dma_stop\n"); 4617 } 4618 4619 mp = pp->msg; 4620 xfer_len = pp->resid - unx_len; /* how much data was transferred */ 4621 4622 if (ddi_dma_unbind_handle(pp->dma_handle) != DDI_SUCCESS) { 4623 ecpp_error(pp->dip, "ecp_read_completion: unbind failed.\n"); 4624 } 4625 4626 ecpp_error(pp->dip, "ecp_read_completion: xfered %d bytes of %d\n", 4627 xfer_len, pp->resid); 4628 4629 /* clean up and update statistics */ 4630 pp->msg = NULL; 4631 pp->resid -= xfer_len; 4632 pp->ibytes[pp->current_mode] += xfer_len; 4633 pp->e_busy = ECPP_IDLE; 4634 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4635 4636 /* 4637 * Send the read data up the stream 4638 */ 4639 mp->b_wptr += xfer_len; 4640 if (canputnext(pp->readq)) { 4641 mutex_exit(&pp->umutex); 4642 putnext(pp->readq, mp); 4643 mutex_enter(&pp->umutex); 4644 } else { 4645 ecpp_error(pp->dip, 
"ecp_read_completion: fail canputnext\n"); 4646 if (!putq(pp->readq, mp)) { 4647 freemsg(mp); 4648 } 4649 } 4650 4651 /* if bytes left in the FIFO another transfer is needed */ 4652 if (!(ECR_READ(pp) & ECPP_FIFO_EMPTY)) { 4653 (void) ecpp_backchan_req(pp); 4654 } 4655 4656 qenable(pp->writeq); 4657 } 4658 4659 /* 4660 * Read one byte in the Nibble mode 4661 */ 4662 static uchar_t 4663 nibble_peripheral2host(struct ecppunit *pp, uint8_t *byte) 4664 { 4665 uint8_t n[2]; /* two nibbles */ 4666 int i; 4667 4668 /* 4669 * One byte is made of two nibbles 4670 */ 4671 for (i = 0; i < 2; i++) { 4672 /* Event 7, 12: host asserts nAutoFd to move to read a nibble */ 4673 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX); 4674 4675 /* Event 8: peripheral puts data on the status lines */ 4676 4677 /* Event 9: peripheral asserts nAck, data available */ 4678 if (wait_dsr(pp, ECPP_nACK, 0, 35000) < 0) { 4679 ecpp_error(pp->dip, 4680 "nibble_periph2host(%d): failed event 9 %x\n", 4681 i + 1, DSR_READ(pp)); 4682 (void) ecpp_1284_termination(pp); 4683 return (FAILURE); 4684 } 4685 4686 n[i] = DSR_READ(pp); /* get a nibble */ 4687 4688 /* Event 10: host deasserts nAutoFd to say it grabbed data */ 4689 DCR_WRITE(pp, ECPP_nINIT); 4690 4691 /* (2) Event 13: peripheral asserts PE - end of data phase */ 4692 4693 /* Event 11: peripheral deasserts nAck to finish handshake */ 4694 if (wait_dsr(pp, ECPP_nACK, ECPP_nACK, 35000) < 0) { 4695 ecpp_error(pp->dip, 4696 "nibble_periph2host(%d): failed event 11 %x\n", 4697 i + 1, DSR_READ(pp)); 4698 (void) ecpp_1284_termination(pp); 4699 return (FAILURE); 4700 } 4701 } 4702 4703 /* extract data byte from two nibbles - optimized formula */ 4704 *byte = ((((n[1] & ~ECPP_nACK) << 1) | (~n[1] & ECPP_nBUSY)) & 0xf0) | 4705 ((((n[0] & ~ECPP_nACK) >> 3) | ((~n[0] & ECPP_nBUSY) >> 4)) & 0x0f); 4706 4707 pp->ibytes[ECPP_NIBBLE_MODE]++; 4708 return (SUCCESS); 4709 } 4710 4711 /* 4712 * process data transfers requested by the peripheral 4713 */ 4714 static uint_t 4715 ecpp_peripheral2host(struct ecppunit *pp) 4716 { 4717 if (!canputnext(pp->readq)) { 4718 ecpp_error(pp->dip, "ecpp_peripheral2host: readq full\n"); 4719 return (SUCCESS); 4720 } 4721 4722 switch (pp->backchannel) { 4723 case ECPP_CENTRONICS: 4724 /* no backchannel */ 4725 return (SUCCESS); 4726 4727 case ECPP_NIBBLE_MODE: 4728 ASSERT(pp->current_mode == ECPP_NIBBLE_MODE); 4729 4730 /* 4731 * Event 20: Host sets nAutoFd high to ack request 4732 */ 4733 DCR_WRITE(pp, ECPP_nINIT); 4734 4735 /* Event 21: Periph sets PError low to ack host */ 4736 if (wait_dsr(pp, ECPP_PE, 0, 35000) < 0) { 4737 ecpp_error(pp->dip, 4738 "ecpp_periph2host: failed event 21 %x\n", 4739 DSR_READ(pp)); 4740 (void) ecpp_1284_termination(pp); 4741 return (FAILURE); 4742 } 4743 4744 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 4745 4746 /* this routine will read the data in Nibble mode */ 4747 return (ecpp_idle_phase(pp)); 4748 4749 case ECPP_ECP_MODE: 4750 if ((pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE) && 4751 (ecp_forward2reverse(pp) == FAILURE)) { 4752 return (FAILURE); 4753 } 4754 4755 return (ecp_peripheral2host(pp)); /* start the transfer */ 4756 4757 case ECPP_DIAG_MODE: { 4758 mblk_t *mp; 4759 int i; 4760 4761 if (ECR_READ(pp) & ECPP_FIFO_EMPTY) { 4762 ecpp_error(pp->dip, "ecpp_periph2host: fifo empty\n"); 4763 return (SUCCESS); 4764 } 4765 4766 /* allocate the FIFO size */ 4767 if ((mp = allocb(ECPP_FIFO_SZ, BPRI_MED)) == NULL) { 4768 ecpp_error(pp->dip, 4769 "ecpp_periph2host: allocb FAILURE.\n"); 4770 return (FAILURE); 4771 } 4772 4773 /* 4774 * For 
the time being just read it byte by byte 4775 */ 4776 i = ECPP_FIFO_SZ; 4777 while (i-- && (!(ECR_READ(pp) & ECPP_FIFO_EMPTY))) { 4778 *mp->b_wptr++ = TFIFO_READ(pp); 4779 drv_usecwait(1); /* ECR is sometimes slow to update */ 4780 } 4781 4782 if (canputnext(pp->readq)) { 4783 mutex_exit(&pp->umutex); 4784 mp->b_datap->db_type = M_DATA; 4785 ecpp_error(pp->dip, 4786 "ecpp_periph2host: sending %d bytes\n", 4787 mp->b_wptr - mp->b_rptr); 4788 putnext(pp->readq, mp); 4789 mutex_enter(&pp->umutex); 4790 return (SUCCESS); 4791 } else { 4792 ecpp_error(pp->dip, 4793 "ecpp_periph2host: !canputnext data lost\n"); 4794 freemsg(mp); 4795 return (FAILURE); 4796 } 4797 } 4798 4799 default: 4800 ecpp_error(pp->dip, "ecpp_peripheraltohost: illegal back"); 4801 return (FAILURE); 4802 } 4803 } 4804 4805 /* 4806 * Negotiate from ECP Forward Idle to Reverse Idle Phase 4807 * 4808 * (manipulations with dcr/ecr are according to ECP Specification) 4809 */ 4810 static int 4811 ecp_forward2reverse(struct ecppunit *pp) 4812 { 4813 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4814 pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE); 4815 4816 /* place port into PS2 mode */ 4817 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4818 4819 /* set direction bit (DCR3-0 must be 0100 - National) */ 4820 DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT); 4821 4822 /* enable hardware assist */ 4823 ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4824 4825 drv_usecwait(1); /* Tp(ecp) = 0.5us */ 4826 4827 /* Event 39: host sets nInit low */ 4828 DCR_WRITE(pp, ECPP_REV_DIR); 4829 4830 /* Event 40: peripheral sets PError low */ 4831 4832 pp->current_phase = ECPP_PHASE_ECP_REV_IDLE; 4833 4834 ecpp_error(pp->dip, "ecp_forward2reverse ok\n"); 4835 4836 return (SUCCESS); 4837 } 4838 4839 /* 4840 * Negotiate from ECP Reverse Idle to Forward Idle Phase 4841 * 4842 * (manipulations with dcr/ecr are according to ECP Specification) 4843 */ 4844 static int 4845 ecp_reverse2forward(struct ecppunit *pp) 4846 { 4847 ASSERT(pp->current_mode == ECPP_ECP_MODE && 4848 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 4849 4850 /* Event 47: host deasserts nInit */ 4851 DCR_WRITE(pp, ECPP_REV_DIR | ECPP_nINIT); 4852 4853 /* 4854 * Event 48: peripheral deasserts nAck 4855 * Event 49: peripheral asserts PError 4856 */ 4857 if (wait_dsr(pp, ECPP_PE, ECPP_PE, 35000) < 0) { 4858 ecpp_error(pp->dip, 4859 "ecp_reverse2forward: failed event 49 %x\n", DSR_READ(pp)); 4860 (void) ecpp_1284_termination(pp); 4861 return (FAILURE); 4862 } 4863 4864 /* place port into PS2 mode */ 4865 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4866 4867 /* clear direction bit */ 4868 DCR_WRITE(pp, ECPP_nINIT); 4869 4870 /* reenable hardware assist */ 4871 ECR_WRITE(pp, ECR_mode_011 | ECPP_INTR_SRV | ECPP_INTR_MASK); 4872 4873 pp->current_phase = ECPP_PHASE_ECP_FWD_IDLE; 4874 4875 ecpp_error(pp->dip, "ecp_reverse2forward ok\n"); 4876 4877 return (SUCCESS); 4878 } 4879 4880 /* 4881 * Default negotiation chooses the best mode supported by peripheral 4882 * Note that backchannel mode may be different from forward mode 4883 */ 4884 static void 4885 ecpp_default_negotiation(struct ecppunit *pp) 4886 { 4887 if (!noecp && (ecpp_mode_negotiation(pp, ECPP_ECP_MODE) == SUCCESS)) { 4888 /* 1284 compatible device */ 4889 pp->io_mode = (pp->fast_compat == TRUE) ? ECPP_DMA : ECPP_PIO; 4890 return; 4891 } else if (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == SUCCESS) { 4892 /* 1284 compatible device */ 4893 pp->io_mode = (pp->fast_compat == TRUE) ? 
ECPP_DMA : ECPP_PIO; 4894 } else { 4895 /* Centronics device */ 4896 pp->io_mode = 4897 (pp->fast_centronics == TRUE) ? ECPP_DMA : ECPP_PIO; 4898 } 4899 ECPP_CONFIG_MODE(pp); 4900 } 4901 4902 /* 4903 * Negotiate to the mode indicated by newmode 4904 */ 4905 static int 4906 ecpp_mode_negotiation(struct ecppunit *pp, uchar_t newmode) 4907 { 4908 /* any other mode is impossible */ 4909 ASSERT(pp->current_mode == ECPP_CENTRONICS || 4910 pp->current_mode == ECPP_COMPAT_MODE || 4911 pp->current_mode == ECPP_NIBBLE_MODE || 4912 pp->current_mode == ECPP_ECP_MODE || 4913 pp->current_mode == ECPP_DIAG_MODE); 4914 4915 if (pp->current_mode == newmode) { 4916 return (SUCCESS); 4917 } 4918 4919 /* termination from ECP is only allowed from the Forward Idle Phase */ 4920 if ((pp->current_mode == ECPP_ECP_MODE) && 4921 (pp->current_phase != ECPP_PHASE_ECP_FWD_IDLE)) { 4922 /* this may break into Centronics */ 4923 (void) ecp_reverse2forward(pp); 4924 } 4925 4926 switch (newmode) { 4927 case ECPP_CENTRONICS: 4928 (void) ecpp_1284_termination(pp); 4929 4930 /* put superio into PIO mode */ 4931 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 4932 4933 pp->current_mode = ECPP_CENTRONICS; 4934 pp->backchannel = ECPP_CENTRONICS; 4935 ECPP_CONFIG_MODE(pp); 4936 4937 pp->to_mode[pp->current_mode]++; 4938 return (SUCCESS); 4939 4940 case ECPP_COMPAT_MODE: 4941 /* ECPP_COMPAT_MODE should support Nibble as a backchannel */ 4942 if (pp->current_mode == ECPP_NIBBLE_MODE) { 4943 if (ecpp_1284_termination(pp) == SUCCESS) { 4944 pp->current_mode = ECPP_COMPAT_MODE; 4945 pp->backchannel = ECPP_NIBBLE_MODE; 4946 ECPP_CONFIG_MODE(pp); 4947 pp->to_mode[pp->current_mode]++; 4948 return (SUCCESS); 4949 } else { 4950 return (FAILURE); 4951 } 4952 } 4953 4954 if ((nibble_negotiation(pp) == SUCCESS) && 4955 (ecpp_1284_termination(pp) == SUCCESS)) { 4956 pp->backchannel = ECPP_NIBBLE_MODE; 4957 pp->current_mode = ECPP_COMPAT_MODE; 4958 ECPP_CONFIG_MODE(pp); 4959 pp->to_mode[pp->current_mode]++; 4960 return (SUCCESS); 4961 } else { 4962 return (FAILURE); 4963 } 4964 4965 case ECPP_NIBBLE_MODE: 4966 if (nibble_negotiation(pp) == FAILURE) { 4967 return (FAILURE); 4968 } 4969 4970 pp->backchannel = ECPP_NIBBLE_MODE; 4971 ECPP_CONFIG_MODE(pp); 4972 pp->to_mode[pp->current_mode]++; 4973 4974 return (SUCCESS); 4975 4976 case ECPP_ECP_MODE: 4977 if (pp->noecpregs) 4978 return (FAILURE); 4979 if (ecp_negotiation(pp) == FAILURE) { 4980 return (FAILURE); 4981 } 4982 4983 /* 4984 * National says CTR[3:0] should be 0100b before moving to 011 4985 */ 4986 DCR_WRITE(pp, ECPP_nINIT); 4987 4988 if (ecr_write(pp, ECR_mode_011 | 4989 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 4990 ecpp_error(pp->dip, "mode_nego:ECP: failed w/ecr\n"); 4991 return (FAILURE); 4992 } 4993 4994 ECPP_CONFIG_MODE(pp); 4995 pp->to_mode[pp->current_mode]++; 4996 4997 return (SUCCESS); 4998 4999 case ECPP_DIAG_MODE: 5000 /* 5001 * In DIAG mode application can do nasty things(e.g drive pins) 5002 * To keep peripheral sane, terminate to Compatibility mode 5003 */ 5004 (void) ecpp_1284_termination(pp); 5005 5006 /* put superio into TFIFO mode */ 5007 if (ecr_write(pp, ECR_mode_001 | 5008 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5009 ecpp_error(pp->dip, "put to TFIFO: failed w/ecr\n"); 5010 return (FAILURE); 5011 } 5012 5013 pp->current_mode = ECPP_DIAG_MODE; 5014 pp->backchannel = ECPP_DIAG_MODE; 5015 ECPP_CONFIG_MODE(pp); 5016 pp->to_mode[pp->current_mode]++; 5017 5018 return (SUCCESS); 5019 5020 default: 5021 ecpp_error(pp->dip, 5022 
"ecpp_mode_negotiation: mode %d not supported\n", newmode); 5023 return (FAILURE); 5024 } 5025 } 5026 5027 /* 5028 * Standard (9.1): Peripheral data is available only when the host places 5029 * the interface in a mode capable of peripheral-to-host data transfer. 5030 * This requires the host periodically to place the interface in such a mode. 5031 * Polling can be eliminated by leaving the interface in an 1284 idle phase. 5032 */ 5033 static uchar_t 5034 ecpp_idle_phase(struct ecppunit *pp) 5035 { 5036 uchar_t rval = FAILURE; 5037 5038 /* 5039 * If there is no space on the read queue, do not reverse channel 5040 */ 5041 if (!canputnext(pp->readq)) { 5042 ecpp_error(pp->dip, "ecpp_idle_phase: readq full\n"); 5043 return (SUCCESS); 5044 } 5045 5046 switch (pp->backchannel) { 5047 case ECPP_CENTRONICS: 5048 case ECPP_COMPAT_MODE: 5049 case ECPP_DIAG_MODE: 5050 /* nothing */ 5051 ecpp_error(pp->dip, "ecpp_idle_phase: compat idle\n"); 5052 return (SUCCESS); 5053 5054 case ECPP_NIBBLE_MODE: 5055 /* 5056 * read as much data as possible, ending up in either 5057 * Reverse Idle or Host Busy Data Available phase 5058 */ 5059 ecpp_error(pp->dip, "ecpp_idle_phase: nibble backchannel\n"); 5060 if ((pp->current_mode != ECPP_NIBBLE_MODE) && 5061 (ecpp_mode_negotiation(pp, ECPP_NIBBLE_MODE) == FAILURE)) { 5062 break; 5063 } 5064 5065 rval = read_nibble_backchan(pp); 5066 5067 /* put interface into Reverse Idle phase */ 5068 if (pp->current_phase == ECPP_PHASE_NIBT_NAVAIL && 5069 canputnext(pp->readq)) { 5070 ecpp_error(pp->dip, "ecpp_idle_phase: going revidle\n"); 5071 5072 /* 5073 * Event 7: host asserts nAutoFd 5074 * enable nAck interrupt to get a backchannel request 5075 */ 5076 DCR_WRITE(pp, ECPP_nINIT | ECPP_AFX | ECPP_INTR_EN); 5077 5078 ECPP_UNMASK_INTR(pp); 5079 } 5080 5081 break; 5082 5083 case ECPP_ECP_MODE: 5084 /* 5085 * if data is already available, request the backchannel xfer 5086 * otherwise stay in Forward Idle and enable nErr interrupts 5087 */ 5088 ecpp_error(pp->dip, "ecpp_idle_phase: ECP forward\n"); 5089 5090 ASSERT(pp->current_phase == ECPP_PHASE_ECP_FWD_IDLE || 5091 pp->current_phase == ECPP_PHASE_ECP_REV_IDLE); 5092 5093 /* put interface into Forward Idle phase */ 5094 if ((pp->current_phase == ECPP_PHASE_ECP_REV_IDLE) && 5095 (ecp_reverse2forward(pp) == FAILURE)) { 5096 return (FAILURE); 5097 } 5098 5099 /* 5100 * if data already available, put backchannel request on the wq 5101 * otherwise enable nErr interrupts 5102 */ 5103 if ((DSR_READ(pp) & ECPP_nERR) == 0) { 5104 (void) ecpp_backchan_req(pp); 5105 } else { 5106 ECR_WRITE(pp, 5107 ECR_READ(pp) & ~ECPP_INTR_MASK | ECPP_INTR_SRV); 5108 5109 ECPP_UNMASK_INTR(pp); 5110 } 5111 5112 return (SUCCESS); 5113 5114 default: 5115 ecpp_error(pp->dip, "ecpp_idle_phase: illegal backchannel"); 5116 } 5117 5118 return (rval); 5119 } 5120 5121 /* 5122 * This routine will leave the port in ECPP_PHASE_NIBT_REVIDLE 5123 * Due to flow control, though, it may stop at ECPP_PHASE_NIBT_AVAIL, 5124 * and continue later as the user consumes data from the read queue 5125 * 5126 * The current phase should be NIBT_AVAIL or NIBT_NAVAIL 5127 * If some events fail during transfer, termination puts link 5128 * to Compatibility mode and FAILURE is returned 5129 */ 5130 static int 5131 read_nibble_backchan(struct ecppunit *pp) 5132 { 5133 mblk_t *mp; 5134 int i; 5135 int rval = SUCCESS; 5136 5137 ASSERT(pp->current_mode == ECPP_NIBBLE_MODE); 5138 5139 pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE)) 5140 ? 
ECPP_PHASE_NIBT_NAVAIL : ECPP_PHASE_NIBT_AVAIL; 5141 5142 ecpp_error(pp->dip, "read_nibble_backchan: %x\n", DSR_READ(pp)); 5143 5144 /* 5145 * While data is available, read it in NIBBLE_REV_BLKSZ-byte chunks 5146 * and send them up the stream 5147 */ 5148 while (pp->current_phase == ECPP_PHASE_NIBT_AVAIL && rval == SUCCESS) { 5149 /* see if there's space on the queue */ 5150 if (!canputnext(pp->readq)) { 5151 ecpp_error(pp->dip, 5152 "read_nibble_backchan: canputnext failed\n"); 5153 return (SUCCESS); 5154 } 5155 5156 if ((mp = allocb(NIBBLE_REV_BLKSZ, BPRI_MED)) == NULL) { 5157 ecpp_error(pp->dip, 5158 "read_nibble_backchan: allocb failed\n"); 5159 return (SUCCESS); 5160 } 5161 5162 /* read a chunk of data from the peripheral byte by byte */ 5163 i = NIBBLE_REV_BLKSZ; 5164 while (i-- && !(DSR_READ(pp) & ECPP_nERR)) { 5165 if (nibble_peripheral2host(pp, mp->b_wptr) != SUCCESS) { 5166 rval = FAILURE; 5167 break; 5168 } 5169 mp->b_wptr++; 5170 } 5171 5172 pp->current_phase = (DSR_READ(pp) & (ECPP_nERR | ECPP_PE)) 5173 ? ECPP_PHASE_NIBT_NAVAIL 5174 : ECPP_PHASE_NIBT_AVAIL; 5175 5176 if (mp->b_wptr - mp->b_rptr > 0) { 5177 ecpp_error(pp->dip, 5178 "read_nibble_backchan: sending %d bytes\n", 5179 mp->b_wptr - mp->b_rptr); 5180 pp->nread = 0; 5181 mutex_exit(&pp->umutex); 5182 putnext(pp->readq, mp); 5183 mutex_enter(&pp->umutex); 5184 } else { 5185 freemsg(mp); 5186 } 5187 } 5188 5189 return (rval); 5190 } 5191 5192 /* 5193 * 'Request Device ID using nibble mode' negotiation 5194 */ 5195 static int 5196 devidnib_negotiation(struct ecppunit *pp) 5197 { 5198 uint8_t dsr; 5199 5200 if (ecpp_1284_negotiation(pp, 5201 ECPP_XREQ_NIBBLE | ECPP_XREQ_ID, &dsr) == FAILURE) { 5202 return (FAILURE); 5203 } 5204 5205 /* 5206 * If peripheral has data available, PE and nErr will 5207 * be set low at Event 5 & 6. 5208 */ 5209 if ((dsr & (ECPP_PE | ECPP_nERR)) == 0) { 5210 pp->current_phase = ECPP_PHASE_NIBT_AVAIL; 5211 } else { 5212 pp->current_phase = ECPP_PHASE_NIBT_NAVAIL; 5213 } 5214 5215 ecpp_error(pp->dip, "ecpp_devidnib_nego: current_phase=%x\n", 5216 pp->current_phase); 5217 5218 /* successful negotiation into Nibble mode */ 5219 pp->current_mode = ECPP_NIBBLE_MODE; 5220 pp->backchannel = ECPP_NIBBLE_MODE; 5221 5222 ecpp_error(pp->dip, "ecpp_devidnib_nego: ok\n"); 5223 5224 return (SUCCESS); 5225 } 5226 5227 /* 5228 * Read 1284 device ID sequence 5229 * 5230 * This function should be called two times: 5231 * 1) ecpp_getdevid(pp, NULL, &len) - to retrieve ID length; 5232 * 2) ecpp_getdevid(pp, buffer, &len) - to read len bytes into buffer 5233 * 5234 * After 2) the port is in Compatible mode 5235 * If the caller fails to make the second call, it must reset the port to Centronics 5236 * 5237 */ 5238 static int 5239 ecpp_getdevid(struct ecppunit *pp, uint8_t *id, int *lenp, int mode) 5240 { 5241 uint8_t lenhi, lenlo; 5242 uint8_t dsr; 5243 int i; 5244 5245 switch (mode) { 5246 case ECPP_NIBBLE_MODE: 5247 /* negotiate only if necessary */ 5248 if ((pp->current_mode != mode) || (id == NULL)) { 5249 if (devidnib_negotiation(pp) == FAILURE) { 5250 return (EIO); 5251 } 5252 } 5253 5254 if (pp->current_phase != ECPP_PHASE_NIBT_AVAIL) { 5255 return (EIO); 5256 } 5257 5258 /* 5259 * Event 14: Host tristates data bus, peripheral 5260 * asserts nERR if data available, usually the 5261 * status bits (7-0), and requires two reads since 5262 * only nibbles are transferred. 5263 */ 5264 dsr = DSR_READ(pp); 5265 5266 if (id == NULL) { 5267 /* 5268 * first two bytes are the length of the sequence 5269 * (incl. 
these bytes) 5270 * first byte is MSB 5271 */ 5272 if ((dsr & ECPP_nERR) || 5273 (nibble_peripheral2host(pp, &lenhi) == FAILURE) || 5274 (dsr & ECPP_nERR) || 5275 (nibble_peripheral2host(pp, &lenlo) == FAILURE)) { 5276 ecpp_error(pp->dip, 5277 "ecpp_getdevid: id length read error\n"); 5278 return (EIO); 5279 } 5280 5281 *lenp = (lenhi << 8) | (lenlo); 5282 5283 ecpp_error(pp->dip, 5284 "ecpp_getdevid: id length = %d\n", *lenp); 5285 5286 if (*lenp < 2) { 5287 return (EIO); 5288 } 5289 } else { 5290 /* 5291 * read the rest of the data 5292 */ 5293 i = *lenp; 5294 while (i && ((dsr & ECPP_nERR) == 0)) { 5295 if (nibble_peripheral2host(pp, id++) == FAILURE) 5296 break; 5297 5298 i--; 5299 dsr = DSR_READ(pp); 5300 } 5301 ecpp_error(pp->dip, 5302 "ecpp_getdevid: read %d bytes\n", *lenp - i); 5303 5304 /* 5305 * 1284: After receiving the sequence, the host is 5306 * required to return the link to the Compatibility mode 5307 */ 5308 (void) ecpp_1284_termination(pp); 5309 } 5310 5311 break; 5312 5313 /* Other modes are not yet supported */ 5314 default: 5315 return (EINVAL); 5316 } 5317 5318 return (0); 5319 } 5320 5321 /* 5322 * Various hardware support 5323 * 5324 * First define some stubs for functions that do nothing 5325 */ 5326 5327 /*ARGSUSED*/ 5328 static void 5329 empty_config_mode(struct ecppunit *pp) 5330 { 5331 } 5332 5333 /*ARGSUSED*/ 5334 static void 5335 empty_mask_intr(struct ecppunit *pp) 5336 { 5337 } 5338 5339 #if defined(__x86) 5340 static size_t 5341 x86_getcnt(struct ecppunit *pp) 5342 { 5343 int count; 5344 5345 (void) ddi_dmae_getcnt(pp->dip, pp->uh.x86.chn, &count); 5346 return (count); 5347 } 5348 #endif 5349 5350 /* 5351 * 5352 * National PC87332 and PC97317 SuperIOs support routines 5353 * These chips are used in PCI-based Darwin, Quark, Quasar, Excalibur 5354 * and use EBus DMA facilities (Cheerio or RIO) 5355 * 5356 */ 5357 5358 static int 5359 pc87332_map_regs(struct ecppunit *pp) 5360 { 5361 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.ebus.c_reg, 0, 5362 sizeof (struct config_reg), &acc_attr, 5363 &pp->uh.ebus.c_handle) != DDI_SUCCESS) { 5364 ecpp_error(pp->dip, "pc87332_map_regs: failed c_reg\n"); 5365 goto fail; 5366 } 5367 5368 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5369 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5370 != DDI_SUCCESS) { 5371 ecpp_error(pp->dip, "pc87332_map_regs: failed i_reg\n"); 5372 goto fail; 5373 } 5374 5375 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400, 5376 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5377 != DDI_SUCCESS) { 5378 ecpp_error(pp->dip, "pc87332_map_regs: failed f_reg\n"); 5379 goto fail; 5380 } 5381 5382 if (ddi_regs_map_setup(pp->dip, 2, (caddr_t *)&pp->uh.ebus.dmac, 0, 5383 sizeof (struct cheerio_dma_reg), &acc_attr, 5384 &pp->uh.ebus.d_handle) != DDI_SUCCESS) { 5385 ecpp_error(pp->dip, "pc87332_map_regs: failed dmac\n"); 5386 goto fail; 5387 } 5388 5389 return (SUCCESS); 5390 5391 fail: 5392 pc87332_unmap_regs(pp); 5393 return (FAILURE); 5394 } 5395 5396 static void 5397 pc87332_unmap_regs(struct ecppunit *pp) 5398 { 5399 if (pp->uh.ebus.c_handle) { 5400 ddi_regs_map_free(&pp->uh.ebus.c_handle); 5401 } 5402 if (pp->uh.ebus.d_handle) { 5403 ddi_regs_map_free(&pp->uh.ebus.d_handle); 5404 } 5405 if (pp->i_handle) { 5406 ddi_regs_map_free(&pp->i_handle); 5407 } 5408 if (pp->f_handle) { 5409 ddi_regs_map_free(&pp->f_handle); 5410 } 5411 } 5412 5413 static uint8_t 5414 pc87332_read_config_reg(struct ecppunit *pp, uint8_t reg_num) 5415 { 5416 uint8_t retval; 5417 
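/* index/data register pair: select the config register via the index port, then read its value back through the data port */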
5418 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num); 5419 retval = PP_GETB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data); 5420 5421 return (retval); 5422 } 5423 5424 static void 5425 pc87332_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val) 5426 { 5427 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->index, reg_num); 5428 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val); 5429 5430 /* 5431 * second write to this register is needed. the register behaves as 5432 * a fifo. the first value written goes to the data register. the 5433 * second write pushes the initial value to the register indexed. 5434 */ 5435 5436 PP_PUTB(pp->uh.ebus.c_handle, &pp->uh.ebus.c_reg->data, val); 5437 } 5438 5439 static int 5440 pc87332_config_chip(struct ecppunit *pp) 5441 { 5442 uint8_t pmc, fcr; 5443 5444 pp->current_phase = ECPP_PHASE_INIT; 5445 5446 /* ECP DMA configuration bit (PMC4) must be set */ 5447 pmc = pc87332_read_config_reg(pp, PMC); 5448 if (!(pmc & PC87332_PMC_ECP_DMA_CONFIG)) { 5449 pc87332_write_config_reg(pp, PMC, 5450 pmc | PC87332_PMC_ECP_DMA_CONFIG); 5451 } 5452 5453 /* 5454 * The Parallel Port Multiplexor pins must be driven. 5455 * Check to see if FCR3 is zero, if not clear FCR3. 5456 */ 5457 fcr = pc87332_read_config_reg(pp, FCR); 5458 if (fcr & PC87332_FCR_PPM_FLOAT_CTL) { 5459 pc87332_write_config_reg(pp, FCR, 5460 fcr & ~PC87332_FCR_PPM_FLOAT_CTL); 5461 } 5462 5463 /* 5464 * clear bits 3-0 in CTR (aka DCR) prior to enabling ECP mode 5465 * CTR5 can not be cleared in SPP mode, CTR5 will return 1. 5466 * "FAILURE" in this case is ok. Better to use dcr_write() 5467 * to ensure reliable writing to DCR. 5468 */ 5469 if (dcr_write(pp, ECPP_DCR_SET | ECPP_nINIT) == FAILURE) { 5470 ecpp_error(pp->dip, "ecpp_config_87332: DCR config\n"); 5471 } 5472 5473 /* enable ECP mode, level intr (note that DCR bits 3-0 == 0x0) */ 5474 pc87332_write_config_reg(pp, PCR, 5475 PC87332_PCR_INTR_LEVL | PC87332_PCR_ECP_EN); 5476 5477 /* put SuperIO in initial state */ 5478 if (ecr_write(pp, ECR_mode_001 | 5479 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5480 ecpp_error(pp->dip, "ecpp_config_87332: ECR\n"); 5481 } 5482 5483 if (dcr_write(pp, ECPP_DCR_SET | ECPP_SLCTIN | ECPP_nINIT) == FAILURE) { 5484 ecpp_error(pp->dip, "ecpp_config_87332: w/DCR failed2.\n"); 5485 return (FAILURE); 5486 5487 } 5488 /* we are in centronic mode */ 5489 pp->current_mode = ECPP_CENTRONICS; 5490 5491 /* in compatible mode with no data transfer in progress */ 5492 pp->current_phase = ECPP_PHASE_C_IDLE; 5493 5494 return (SUCCESS); 5495 } 5496 5497 /* 5498 * A new mode was set, do some mode specific reconfiguration 5499 * in this case - set interrupt characteristic 5500 */ 5501 static void 5502 pc87332_config_mode(struct ecppunit *pp) 5503 { 5504 if (COMPAT_PIO(pp)) { 5505 pc87332_write_config_reg(pp, PCR, 0x04); 5506 } else { 5507 pc87332_write_config_reg(pp, PCR, 0x14); 5508 } 5509 } 5510 5511 static int 5512 pc97317_map_regs(struct ecppunit *pp) 5513 { 5514 if (pc87332_map_regs(pp) != SUCCESS) { 5515 return (FAILURE); 5516 } 5517 5518 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->uh.ebus.c2_reg, 5519 0x403, sizeof (struct config2_reg), &acc_attr, 5520 &pp->uh.ebus.c2_handle) != DDI_SUCCESS) { 5521 ecpp_error(pp->dip, "pc97317_map_regs: failed c2_reg\n"); 5522 pc87332_unmap_regs(pp); 5523 return (FAILURE); 5524 } else { 5525 return (SUCCESS); 5526 } 5527 } 5528 5529 static void 5530 pc97317_unmap_regs(struct ecppunit *pp) 5531 { 5532 if (pp->uh.ebus.c2_handle) { 5533 
ddi_regs_map_free(&pp->uh.ebus.c2_handle); 5534 } 5535 5536 pc87332_unmap_regs(pp); 5537 } 5538 5539 /* 5540 * OBP should configure the PC97317 such that it does not need further 5541 * configuration. Upon sustaining, it may be necessary to examine 5542 * or change the configuration registers. This routine is left in 5543 * the file for that purpose. 5544 */ 5545 static int 5546 pc97317_config_chip(struct ecppunit *pp) 5547 { 5548 uint8_t conreg; 5549 5550 /* set the logical device name */ 5551 pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4); 5552 5553 /* SPP Compatibility */ 5554 PP_PUTB(pp->uh.ebus.c2_handle, 5555 &pp->uh.ebus.c2_reg->eir, PC97317_CONFIG2_CONTROL2); 5556 PP_PUTB(pp->uh.ebus.c2_handle, &pp->uh.ebus.c2_reg->edr, 0x80); 5557 5558 /* low interrupt polarity */ 5559 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00); 5560 5561 /* ECP mode */ 5562 pc87332_write_config_reg(pp, PC97317_CONFIG_PP_CONFIG, 0xf2); 5563 5564 if (dcr_write(pp, ECPP_SLCTIN | ECPP_nINIT) == FAILURE) { 5565 ecpp_error(pp->dip, "pc97317_config_chip: failed w/DCR\n"); 5566 } 5567 5568 if (ecr_write(pp, ECR_mode_001 | 5569 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5570 ecpp_error(pp->dip, "pc97317_config_chip: failed w/ECR\n"); 5571 } 5572 5573 #ifdef DEBUG 5574 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DEV_NO); 5575 ecpp_error(pp->dip, "97317:conreg7(logical dev)=%x\n", conreg); 5576 5577 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_MSB); 5578 ecpp_error(pp->dip, "97317:conreg60(addrHi)=%x\n", conreg); 5579 5580 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_BASE_ADDR_LSB); 5581 ecpp_error(pp->dip, "97317:conreg61(addrLo)=%x\n", conreg); 5582 5583 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_SEL); 5584 ecpp_error(pp->dip, "97317:conreg70(IRQL)=%x\n", conreg); 5585 5586 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_INTR_TYPE); 5587 ecpp_error(pp->dip, "97317:conreg71(intr type)=%x\n", conreg); 5588 5589 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_ACTIVATE); 5590 ecpp_error(pp->dip, "97317:conreg30(Active)=%x\n", conreg); 5591 5592 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_IO_RANGE); 5593 ecpp_error(pp->dip, "97317:conreg31(IO Range Check)=%x\n", conreg); 5594 5595 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA0_CHAN); 5596 ecpp_error(pp->dip, "97317:conreg74(DMA0 Chan)=%x\n", conreg); 5597 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_DMA1_CHAN); 5598 ecpp_error(pp->dip, "97317:conreg75(DMA1 Chan)=%x\n", conreg); 5599 5600 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG); 5601 ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg); 5602 5603 conreg = pc87332_read_config_reg(pp, PC97317_CONFIG_PP_CONFIG); 5604 ecpp_error(pp->dip, "97317:conregFO(pport conf)=%x\n", conreg); 5605 #endif /* DEBUG */ 5606 5607 return (SUCCESS); 5608 } 5609 5610 /* 5611 * A new mode was set, do some mode specific reconfiguration 5612 * in this case - set interrupt polarity 5613 */ 5614 static void 5615 pc97317_config_mode(struct ecppunit *pp) 5616 { 5617 /* set the logical device name */ 5618 pc87332_write_config_reg(pp, PC97317_CONFIG_DEV_NO, 0x4); 5619 5620 if (COMPAT_PIO(pp) || pp->current_mode == ECPP_NIBBLE_MODE) { 5621 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x02); 5622 } else { 5623 pc87332_write_config_reg(pp, PC97317_CONFIG_INTR_TYPE, 0x00); 5624 } 5625 } 5626 5627 static void 5628 cheerio_mask_intr(struct ecppunit *pp) 5629 { 5630 /* mask Cheerio interrupts */ 5631 
AND_SET_LONG_R(pp->uh.ebus.d_handle, 5632 &pp->uh.ebus.dmac->csr, ~DCSR_INT_EN); 5633 } 5634 5635 static void 5636 cheerio_unmask_intr(struct ecppunit *pp) 5637 { 5638 /* unmask Cheerio interrupts */ 5639 OR_SET_LONG_R(pp->uh.ebus.d_handle, 5640 &pp->uh.ebus.dmac->csr, DCSR_INT_EN | DCSR_TCI_DIS); 5641 } 5642 5643 static int 5644 cheerio_dma_start(struct ecppunit *pp) 5645 { 5646 cheerio_reset_dcsr(pp); 5647 SET_DMAC_BCR(pp, pp->dma_cookie.dmac_size); 5648 SET_DMAC_ACR(pp, pp->dma_cookie.dmac_address); 5649 5650 if (pp->dma_dir == DDI_DMA_READ) { 5651 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA | 5652 DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0 | DCSR_WRITE); 5653 } else { 5654 SET_DMAC_CSR(pp, DCSR_INT_EN | DCSR_EN_CNT | DCSR_EN_DMA | 5655 DCSR_CSR_DRAIN | DCSR_BURST_1 | DCSR_BURST_0); 5656 } 5657 5658 return (SUCCESS); 5659 } 5660 5661 /* 5662 * Note: BCR is reset to 0, so counter should always be read before dma_stop 5663 */ 5664 static int 5665 cheerio_dma_stop(struct ecppunit *pp, size_t *countp) 5666 { 5667 uint8_t ecr; 5668 5669 /* disable DMA and byte counter */ 5670 AND_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr, 5671 ~(DCSR_EN_DMA | DCSR_EN_CNT| DCSR_INT_EN)); 5672 5673 /* ACK and disable the TC interrupt */ 5674 OR_SET_LONG_R(pp->uh.ebus.d_handle, &pp->uh.ebus.dmac->csr, 5675 DCSR_TC | DCSR_TCI_DIS); 5676 5677 /* read DMA count if requested */ 5678 if (countp) { 5679 *countp = cheerio_getcnt(pp); 5680 } 5681 5682 cheerio_reset_dcsr(pp); 5683 SET_DMAC_BCR(pp, 0); 5684 5685 /* turn off SuperIO's DMA */ 5686 ecr = ECR_READ(pp); 5687 if (ecr_write(pp, ecr & ~ECPP_DMA_ENABLE) == FAILURE) { 5688 return (FAILURE); 5689 } 5690 5691 /* Disable SuperIO interrupts and DMA */ 5692 ecr = ECR_READ(pp); 5693 5694 return (ecr_write(pp, ecr | ECPP_INTR_SRV)); 5695 } 5696 5697 static size_t 5698 cheerio_getcnt(struct ecppunit *pp) 5699 { 5700 return (GET_DMAC_BCR(pp)); 5701 } 5702 5703 /* 5704 * Reset the DCSR by first setting the RESET bit to 1. Poll the 5705 * DCSR_CYC_PEND bit to make sure there are no more pending DMA cycles. 5706 * If there are no more pending cycles, clear the RESET bit. 
5707 */ 5708 static void 5709 cheerio_reset_dcsr(struct ecppunit *pp) 5710 { 5711 int timeout = DMAC_RESET_TIMEOUT; 5712 5713 SET_DMAC_CSR(pp, DCSR_RESET); 5714 5715 while (GET_DMAC_CSR(pp) & DCSR_CYC_PEND) { 5716 if (timeout == 0) { 5717 ecpp_error(pp->dip, "cheerio_reset_dcsr: timeout\n"); 5718 break; 5719 } else { 5720 drv_usecwait(1); 5721 timeout--; 5722 } 5723 } 5724 5725 SET_DMAC_CSR(pp, 0); 5726 } 5727 5728 /* 5729 * 5730 * Grover Southbridge (M1553) support routines 5731 * Southbridge contains an Intel 8237 DMAC onboard which is used 5732 * to transport data to/from PCI space to superio parallel port 5733 * 5734 */ 5735 5736 5737 static int 5738 m1553_map_regs(struct ecppunit *pp) 5739 { 5740 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->uh.m1553.isa_space, 5741 0, sizeof (struct isaspace), &acc_attr, 5742 &pp->uh.m1553.d_handle) != DDI_SUCCESS) { 5743 ecpp_error(pp->dip, "m1553_map_regs: failed isa space\n"); 5744 goto fail; 5745 } 5746 5747 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5748 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5749 != DDI_SUCCESS) { 5750 ecpp_error(pp->dip, "m1553_map_regs: failed i_reg\n"); 5751 goto fail; 5752 } 5753 5754 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->f_reg, 0x400, 5755 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5756 != DDI_SUCCESS) { 5757 ecpp_error(pp->dip, "m1553_map_regs: failed f_reg\n"); 5758 goto fail; 5759 } 5760 5761 return (SUCCESS); 5762 5763 fail: 5764 m1553_unmap_regs(pp); 5765 return (FAILURE); 5766 } 5767 5768 static void 5769 m1553_unmap_regs(struct ecppunit *pp) 5770 { 5771 if (pp->uh.m1553.d_handle) { 5772 ddi_regs_map_free(&pp->uh.m1553.d_handle); 5773 } 5774 if (pp->i_handle) { 5775 ddi_regs_map_free(&pp->i_handle); 5776 } 5777 if (pp->f_handle) { 5778 ddi_regs_map_free(&pp->f_handle); 5779 } 5780 } 5781 5782 #if defined(__x86) 5783 static int 5784 x86_map_regs(struct ecppunit *pp) 5785 { 5786 int nregs = 0; 5787 5788 if (ddi_regs_map_setup(pp->dip, 0, (caddr_t *)&pp->i_reg, 0, 5789 sizeof (struct info_reg), &acc_attr, &pp->i_handle) 5790 != DDI_SUCCESS) { 5791 ecpp_error(pp->dip, "x86_map_regs: failed i_reg\n"); 5792 goto fail; 5793 } 5794 if (ddi_dev_nregs(pp->dip, &nregs) == DDI_SUCCESS && nregs == 2) { 5795 if (ddi_regs_map_setup(pp->dip, 1, (caddr_t *)&pp->f_reg, 0, 5796 sizeof (struct fifo_reg), &acc_attr, &pp->f_handle) 5797 != DDI_SUCCESS) { 5798 ecpp_error(pp->dip, "x86_map_regs: failed f_reg\n"); 5799 goto fail; 5800 } else 5801 pp->noecpregs = FALSE; 5802 } else { 5803 pp->noecpregs = TRUE; 5804 } 5805 return (SUCCESS); 5806 fail: 5807 x86_unmap_regs(pp); 5808 return (FAILURE); 5809 } 5810 5811 static void 5812 x86_unmap_regs(struct ecppunit *pp) 5813 { 5814 if (pp->i_handle) { 5815 ddi_regs_map_free(&pp->i_handle); 5816 } 5817 if (pp->f_handle) { 5818 ddi_regs_map_free(&pp->f_handle); 5819 } 5820 } 5821 #endif 5822 5823 static uint8_t 5824 m1553_read_config_reg(struct ecppunit *pp, uint8_t reg_num) 5825 { 5826 uint8_t retval; 5827 5828 dma8237_write(pp, 0x3F0, reg_num); 5829 retval = dma8237_read(pp, 0x3F1); 5830 5831 return (retval); 5832 } 5833 5834 static void 5835 m1553_write_config_reg(struct ecppunit *pp, uint8_t reg_num, uint8_t val) 5836 { 5837 dma8237_write(pp, 0x3F0, reg_num); 5838 dma8237_write(pp, 0x3F1, val); 5839 } 5840 5841 static int 5842 m1553_config_chip(struct ecppunit *pp) 5843 { 5844 uint8_t conreg; 5845 5846 /* Unlock configuration regs with "key sequence" */ 5847 dma8237_write(pp, 0x3F0, 0x51); 5848 dma8237_write(pp, 0x3F0, 0x23); 5849 5850 
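/* select the parallel port logical device (0x3) and trace its current PnP configuration */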
m1553_write_config_reg(pp, PnP_CONFIG_DEV_NO, 0x3); 5851 conreg = m1553_read_config_reg(pp, PnP_CONFIG_DEV_NO); 5852 ecpp_error(pp->dip, "M1553:conreg7(logical dev)=%x\n", conreg); 5853 5854 conreg = m1553_read_config_reg(pp, PnP_CONFIG_ACTIVATE); 5855 ecpp_error(pp->dip, "M1553:conreg30(Active)=%x\n", conreg); 5856 5857 conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_MSB); 5858 ecpp_error(pp->dip, "M1553:conreg60(addrHi)=%x\n", conreg); 5859 conreg = m1553_read_config_reg(pp, PnP_CONFIG_BASE_ADDR_LSB); 5860 ecpp_error(pp->dip, "M1553:conreg61(addrLo)=%x\n", conreg); 5861 5862 conreg = m1553_read_config_reg(pp, PnP_CONFIG_INTR_SEL); 5863 ecpp_error(pp->dip, "M1553:conreg70(IRQL)=%x\n", conreg); 5864 5865 conreg = m1553_read_config_reg(pp, PnP_CONFIG_DMA0_CHAN); 5866 ecpp_error(pp->dip, "M1553:conreg74(DMA0 Chan)=%x\n", conreg); 5867 5868 /* set FIFO threshold 1 and ECP mode, preserve bit 7 (IRQ polarity) */ 5869 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0); 5870 conreg = (conreg & ~0x7F) | 0x0A; 5871 m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG0, conreg); 5872 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG0); 5873 ecpp_error(pp->dip, "M1553:conregFO(pport conf)=%x\n", conreg); 5874 5875 m1553_write_config_reg(pp, PnP_CONFIG_PP_CONFIG1, 0x04); 5876 conreg = m1553_read_config_reg(pp, PnP_CONFIG_PP_CONFIG1); 5877 ecpp_error(pp->dip, "M1553:conregF1(outconf)=%x\n", conreg); 5878 5879 /* lock configuration regs with key */ 5880 dma8237_write(pp, 0x3F0, 0xBB); 5881 5882 /* Set ECR, DCR in known state */ 5883 ECR_WRITE(pp, ECR_mode_001 | ECPP_INTR_MASK | ECPP_INTR_SRV); 5884 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 5885 5886 ecpp_error(pp->dip, "m1553_config_chip: ecr=%x, dsr=%x, dcr=%x\n", 5887 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 5888 5889 return (SUCCESS); 5890 } 5891 5892 #if defined(__x86) 5893 static int 5894 x86_config_chip(struct ecppunit *pp) 5895 { 5896 if (ecr_write(pp, ECR_mode_001 | 5897 ECPP_INTR_MASK | ECPP_INTR_SRV) == FAILURE) { 5898 ecpp_error(pp->dip, "config chip: failed w/ecr\n"); 5899 pp->noecpregs = TRUE; 5900 } 5901 if (pp->noecpregs) 5902 pp->fast_compat = FALSE; 5903 DCR_WRITE(pp, ECPP_SLCTIN | ECPP_nINIT); 5904 ecpp_error(pp->dip, "x86_config_chip: ecr=%x, dsr=%x, dcr=%x\n", 5905 ECR_READ(pp), DSR_READ(pp), DCR_READ(pp)); 5906 return (SUCCESS); 5907 } 5908 #endif 5909 5910 /* 5911 * dma8237_dma_start() programs the selected 8 bit channel 5912 * of DMAC1 with the dma cookie. pp->dma_cookie must 5913 * be set before this routine is called. 5914 */ 5915 static int 5916 dma8237_dma_start(struct ecppunit *pp) 5917 { 5918 uint8_t chn; 5919 5920 chn = pp->uh.m1553.chn; 5921 5922 ASSERT(chn <= DMAE_CH3 && 5923 pp->dma_cookie.dmac_size != 0 && 5924 pp->dma_cookie.dmac_address != NULL); 5925 5926 /* At this point Southbridge has not yet asserted DREQ */ 5927 5928 /* set mode to read-from-memory. */ 5929 dma8237_write(pp, DMAC2_MODE, DMAMODE_CASC); 5930 if (pp->dma_dir == DDI_DMA_READ) { 5931 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE | 5932 DMAMODE_READ | chn); 5933 } else { 5934 dma8237_write(pp, DMAC1_MODE, DMAMODE_SINGLE | 5935 DMAMODE_WRITE | chn); 5936 } 5937 5938 dma8237_write_addr(pp, pp->dma_cookie.dmac_address); 5939 dma8237_write_count(pp, pp->dma_cookie.dmac_size - 1); 5940 5941 /* 5942 * M1553 chip does not permit to access DMA register banks 5943 * while DMA is in flight. As a result, ecpp and floppy drivers 5944 * can potentially corrupt each other's DMA. 
The interlocking mechanism 5945 * is provided by a parent nexus driver (isadma), which is enabled 5946 * indirectly through a DMAC1_ALLMASK register access: 5947 * 5948 * writing a non-zero value to this register enters a lock, 5949 * writing zero releases the lock. 5950 * 5951 * DMA transfer must only occur after entering a lock. 5952 * If the lock is already owned by other driver, we will block. 5953 * 5954 * The following operation unmasks our channel and masks all others 5955 */ 5956 dma8237_write(pp, DMAC1_ALLMASK, ~(1 << chn)); 5957 pp->uh.m1553.isadma_entered = 1; 5958 5959 return (SUCCESS); 5960 } 5961 5962 static int 5963 dma8237_dma_stop(struct ecppunit *pp, size_t *countp) 5964 { 5965 uint8_t ecr; 5966 5967 /* stop DMA */ 5968 ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV; 5969 (void) ecr_write(pp, ecr); 5970 5971 if (pp->uh.m1553.isadma_entered) { 5972 /* reset the channel mask so we can issue PIO's to our device */ 5973 dma8237_write(pp, DMAC1_ALLMASK, 0); 5974 pp->uh.m1553.isadma_entered = 0; 5975 5976 } 5977 5978 /* read DMA count if requested */ 5979 if (countp) { 5980 *countp = dma8237_getcnt(pp); 5981 if (pp->dma_dir == DDI_DMA_READ && *countp > 0) { 5982 (*countp)++; /* need correction for reverse xfers */ 5983 } 5984 } 5985 return (SUCCESS); 5986 } 5987 #if defined(__x86) 5988 static int 5989 x86_dma_start(struct ecppunit *pp) 5990 { 5991 uint8_t chn; 5992 struct ddi_dmae_req dmaereq; 5993 5994 chn = pp->uh.x86.chn; 5995 ASSERT(chn <= DMAE_CH3 && 5996 pp->dma_cookie.dmac_size != 0 && 5997 pp->dma_cookie.dmac_address != NULL); 5998 bzero(&dmaereq, sizeof (struct ddi_dmae_req)); 5999 dmaereq.der_command = 6000 (pp->dma_dir & DDI_DMA_READ) ? DMAE_CMD_READ : DMAE_CMD_WRITE; 6001 if (ddi_dmae_prog(pp->dip, &dmaereq, &pp->dma_cookie, chn) 6002 != DDI_SUCCESS) 6003 ecpp_error(pp->dip, "prog failed !!!\n"); 6004 ecpp_error(pp->dip, "dma_started..\n"); 6005 return (SUCCESS); 6006 } 6007 6008 static int 6009 x86_dma_stop(struct ecppunit *pp, size_t *countp) 6010 { 6011 uint8_t ecr; 6012 6013 /* stop DMA */ 6014 if (pp->uh.x86.chn == 0xff) 6015 return (FAILURE); 6016 ecr = (ECR_READ(pp) & 0xe0) | ECPP_INTR_MASK | ECPP_INTR_SRV; 6017 (void) ecr_write(pp, ecr); 6018 ecpp_error(pp->dip, "dma_stop\n"); 6019 6020 /* read DMA count if requested */ 6021 if (countp) { 6022 *countp = x86_getcnt(pp); 6023 } 6024 ecpp_error(pp->dip, "dma_stoped..\n"); 6025 return (SUCCESS); 6026 } 6027 #endif 6028 6029 /* channel must be masked */ 6030 static void 6031 dma8237_write_addr(struct ecppunit *pp, uint32_t addr) 6032 { 6033 uint8_t c_addr, c_lpage; 6034 uint16_t c_hpage, *p; 6035 6036 switch (pp->uh.m1553.chn) { 6037 case DMAE_CH0: 6038 c_addr = DMA_0ADR; 6039 c_lpage = DMA_0PAGE; 6040 c_hpage = DMA_0HPG; 6041 break; 6042 6043 case DMAE_CH1: 6044 c_addr = DMA_1ADR; 6045 c_lpage = DMA_1PAGE; 6046 c_hpage = DMA_1HPG; 6047 break; 6048 6049 case DMAE_CH2: 6050 c_addr = DMA_2ADR; 6051 c_lpage = DMA_2PAGE; 6052 c_hpage = DMA_2HPG; 6053 break; 6054 6055 case DMAE_CH3: 6056 c_addr = DMA_3ADR; 6057 c_lpage = DMA_3PAGE; 6058 c_hpage = DMA_3HPG; 6059 break; 6060 6061 default: 6062 return; 6063 } 6064 6065 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr]; 6066 ddi_put16(pp->uh.m1553.d_handle, p, addr & 0xFFFF); 6067 6068 dma8237_write(pp, c_lpage, (addr & 0xFF0000) >> 16); 6069 dma8237_write(pp, c_hpage, (addr & 0xFF000000) >> 24); 6070 6071 } 6072 6073 /* 6074 * This function may be useful during debugging, 6075 * so we leave it in, but do not include in the binary 6076 */ 6077 #ifdef 
INCLUDE_DMA8237_READ_ADDR 6078 static uint32_t 6079 dma8237_read_addr(struct ecppunit *pp) 6080 { 6081 uint8_t rval3, rval4; 6082 uint16_t rval16; 6083 uint32_t rval; 6084 uint8_t c_addr, c_lpage; 6085 uint16_t c_hpage, *p; 6086 6087 switch (pp->uh.m1553.chn) { 6088 case DMAE_CH0: 6089 c_addr = DMA_0ADR; 6090 c_lpage = DMA_0PAGE; 6091 c_hpage = DMA_0HPG; 6092 break; 6093 6094 case DMAE_CH1: 6095 c_addr = DMA_1ADR; 6096 c_lpage = DMA_1PAGE; 6097 c_hpage = DMA_1HPG; 6098 break; 6099 6100 case DMAE_CH2: 6101 c_addr = DMA_2ADR; 6102 c_lpage = DMA_2PAGE; 6103 c_hpage = DMA_2HPG; 6104 break; 6105 6106 case DMAE_CH3: 6107 c_addr = DMA_3ADR; 6108 c_lpage = DMA_3PAGE; 6109 c_hpage = DMA_3HPG; 6110 break; 6111 6112 default: 6113 return (NULL); 6114 } 6115 6116 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_addr]; 6117 rval16 = ddi_get16(pp->uh.m1553.d_handle, p); 6118 6119 rval3 = dma8237_read(pp, c_lpage); 6120 rval4 = dma8237_read(pp, c_hpage); 6121 6122 rval = rval16 | (rval3 << 16) | (rval4 <<24); 6123 6124 return (rval); 6125 } 6126 #endif 6127 6128 static void 6129 dma8237_write_count(struct ecppunit *pp, uint32_t count) 6130 { 6131 uint8_t c_wcnt; 6132 uint16_t *p; 6133 6134 switch (pp->uh.m1553.chn) { 6135 case DMAE_CH0: 6136 c_wcnt = DMA_0WCNT; 6137 break; 6138 6139 case DMAE_CH1: 6140 c_wcnt = DMA_1WCNT; 6141 break; 6142 6143 case DMAE_CH2: 6144 c_wcnt = DMA_2WCNT; 6145 break; 6146 6147 case DMAE_CH3: 6148 c_wcnt = DMA_3WCNT; 6149 break; 6150 6151 default: 6152 return; 6153 } 6154 6155 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt]; 6156 ddi_put16(pp->uh.m1553.d_handle, p, count & 0xFFFF); 6157 6158 } 6159 6160 static uint32_t 6161 dma8237_read_count(struct ecppunit *pp) 6162 { 6163 uint8_t c_wcnt; 6164 uint16_t *p; 6165 6166 switch (pp->uh.m1553.chn) { 6167 case DMAE_CH0: 6168 c_wcnt = DMA_0WCNT; 6169 break; 6170 6171 case DMAE_CH1: 6172 c_wcnt = DMA_1WCNT; 6173 break; 6174 6175 case DMAE_CH2: 6176 c_wcnt = DMA_2WCNT; 6177 break; 6178 6179 case DMAE_CH3: 6180 c_wcnt = DMA_3WCNT; 6181 break; 6182 6183 default: 6184 return (NULL); 6185 } 6186 6187 p = (uint16_t *)&pp->uh.m1553.isa_space->isa_reg[c_wcnt]; 6188 return (ddi_get16(pp->uh.m1553.d_handle, p)); 6189 6190 } 6191 6192 static void 6193 dma8237_write(struct ecppunit *pp, int reg_num, uint8_t val) 6194 { 6195 ddi_put8(pp->uh.m1553.d_handle, 6196 &pp->uh.m1553.isa_space->isa_reg[reg_num], val); 6197 } 6198 6199 static uint8_t 6200 dma8237_read(struct ecppunit *pp, int reg_num) 6201 { 6202 return (ddi_get8(pp->uh.m1553.d_handle, 6203 &pp->uh.m1553.isa_space->isa_reg[reg_num])); 6204 } 6205 6206 static size_t 6207 dma8237_getcnt(struct ecppunit *pp) 6208 { 6209 uint32_t cnt; 6210 6211 if ((cnt = dma8237_read_count(pp)) == 0xffff) 6212 cnt = 0; 6213 else 6214 cnt++; 6215 return (cnt); 6216 } 6217 6218 6219 /* 6220 * 6221 * Kstat support routines 6222 * 6223 */ 6224 static void 6225 ecpp_kstat_init(struct ecppunit *pp) 6226 { 6227 struct ecppkstat *ekp; 6228 char buf[16]; 6229 6230 /* 6231 * Allocate, initialize and install interrupt counter kstat 6232 */ 6233 (void) sprintf(buf, "ecppc%d", pp->instance); 6234 pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller", 6235 KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT); 6236 if (pp->intrstats == NULL) { 6237 ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed"); 6238 } else { 6239 pp->intrstats->ks_update = ecpp_kstatintr_update; 6240 pp->intrstats->ks_private = (void *) pp; 6241 kstat_install(pp->intrstats); 6242 } 6243 6244 /* 6245 * Allocate, initialize and 
static size_t
dma8237_getcnt(struct ecppunit *pp)
{
	uint32_t cnt;

	if ((cnt = dma8237_read_count(pp)) == 0xffff)
		cnt = 0;
	else
		cnt++;

	return (cnt);
}


/*
 *
 * Kstat support routines
 *
 */
static void
ecpp_kstat_init(struct ecppunit *pp)
{
	struct ecppkstat *ekp;
	char buf[16];

	/*
	 * Allocate, initialize and install interrupt counter kstat
	 */
	(void) sprintf(buf, "ecppc%d", pp->instance);
	pp->intrstats = kstat_create("ecpp", pp->instance, buf, "controller",
	    KSTAT_TYPE_INTR, 1, KSTAT_FLAG_PERSISTENT);
	if (pp->intrstats == NULL) {
		ecpp_error(pp->dip, "ecpp_kstat_init:1: kstat_create failed");
	} else {
		pp->intrstats->ks_update = ecpp_kstatintr_update;
		pp->intrstats->ks_private = (void *) pp;
		kstat_install(pp->intrstats);
	}

	/*
	 * Allocate, initialize and install misc stats kstat
	 */
	pp->ksp = kstat_create("ecpp", pp->instance, NULL, "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (struct ecppkstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT);
	if (pp->ksp == NULL) {
		ecpp_error(pp->dip, "ecpp_kstat_init:2: kstat_create failed");
		return;
	}

	ekp = (struct ecppkstat *)pp->ksp->ks_data;

#define	EK_NAMED_INIT(name) \
	kstat_named_init(&ekp->ek_##name, #name, KSTAT_DATA_UINT32)

	EK_NAMED_INIT(ctx_obytes);
	EK_NAMED_INIT(ctxpio_obytes);
	EK_NAMED_INIT(nib_ibytes);
	EK_NAMED_INIT(ecp_obytes);
	EK_NAMED_INIT(ecp_ibytes);
	EK_NAMED_INIT(epp_obytes);
	EK_NAMED_INIT(epp_ibytes);
	EK_NAMED_INIT(diag_obytes);
	EK_NAMED_INIT(to_ctx);
	EK_NAMED_INIT(to_nib);
	EK_NAMED_INIT(to_ecp);
	EK_NAMED_INIT(to_epp);
	EK_NAMED_INIT(to_diag);
	EK_NAMED_INIT(xfer_tout);
	EK_NAMED_INIT(ctx_cf);
	EK_NAMED_INIT(joblen);
	EK_NAMED_INIT(isr_reattempt_high);
	EK_NAMED_INIT(mode);
	EK_NAMED_INIT(phase);
	EK_NAMED_INIT(backchan);
	EK_NAMED_INIT(iomode);
	EK_NAMED_INIT(state);

	pp->ksp->ks_update = ecpp_kstat_update;
	pp->ksp->ks_private = (void *) pp;
	kstat_install(pp->ksp);
}
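/*
 * Usage note (illustrative): the statistics installed above are visible
 * from userland via kstat(1M); for example, "kstat -m ecpp -i 0" would
 * list both the "controller" interrupt counters and the named "misc"
 * statistics for instance 0.  Exact names depend on the instance number.
 */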
static int
ecpp_kstat_update(kstat_t *ksp, int rw)
{
	struct ecppunit *pp;
	struct ecppkstat *ekp;

	/*
	 * For the time being there is no point
	 * in supporting writable kstats
	 */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	pp = (struct ecppunit *)ksp->ks_private;
	ekp = (struct ecppkstat *)ksp->ks_data;

	mutex_enter(&pp->umutex);

	ekp->ek_ctx_obytes.value.ui32 = pp->obytes[ECPP_CENTRONICS] +
	    pp->obytes[ECPP_COMPAT_MODE];
	ekp->ek_ctxpio_obytes.value.ui32 = pp->ctxpio_obytes;
	ekp->ek_nib_ibytes.value.ui32 = pp->ibytes[ECPP_NIBBLE_MODE];
	ekp->ek_ecp_obytes.value.ui32 = pp->obytes[ECPP_ECP_MODE];
	ekp->ek_ecp_ibytes.value.ui32 = pp->ibytes[ECPP_ECP_MODE];
	ekp->ek_epp_obytes.value.ui32 = pp->obytes[ECPP_EPP_MODE];
	ekp->ek_epp_ibytes.value.ui32 = pp->ibytes[ECPP_EPP_MODE];
	ekp->ek_diag_obytes.value.ui32 = pp->obytes[ECPP_DIAG_MODE];
	ekp->ek_to_ctx.value.ui32 = pp->to_mode[ECPP_CENTRONICS] +
	    pp->to_mode[ECPP_COMPAT_MODE];
	ekp->ek_to_nib.value.ui32 = pp->to_mode[ECPP_NIBBLE_MODE];
	ekp->ek_to_ecp.value.ui32 = pp->to_mode[ECPP_ECP_MODE];
	ekp->ek_to_epp.value.ui32 = pp->to_mode[ECPP_EPP_MODE];
	ekp->ek_to_diag.value.ui32 = pp->to_mode[ECPP_DIAG_MODE];
	ekp->ek_xfer_tout.value.ui32 = pp->xfer_tout;
	ekp->ek_ctx_cf.value.ui32 = pp->ctx_cf;
	ekp->ek_joblen.value.ui32 = pp->joblen;
	ekp->ek_isr_reattempt_high.value.ui32 = pp->isr_reattempt_high;
	ekp->ek_mode.value.ui32 = pp->current_mode;
	ekp->ek_phase.value.ui32 = pp->current_phase;
	ekp->ek_backchan.value.ui32 = pp->backchannel;
	ekp->ek_iomode.value.ui32 = pp->io_mode;
	ekp->ek_state.value.ui32 = pp->e_busy;

	mutex_exit(&pp->umutex);

	return (0);
}

static int
ecpp_kstatintr_update(kstat_t *ksp, int rw)
{
	struct ecppunit *pp;

	/*
	 * For the time being there is no point
	 * in supporting writable kstats
	 */
	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	pp = (struct ecppunit *)ksp->ks_private;

	mutex_enter(&pp->umutex);

	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_HARD] = pp->intr_hard;
	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SPURIOUS] = pp->intr_spurious;
	KSTAT_INTR_PTR(ksp)->intrs[KSTAT_INTR_SOFT] = pp->intr_soft;

	mutex_exit(&pp->umutex);

	return (0);
}