Annotation of sys/dev/ic/pdq.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: pdq.c,v 1.13 2007/02/14 00:53:48 jsg Exp $ */
2: /* $NetBSD: pdq.c,v 1.9 1996/10/13 01:37:26 christos Exp $ */
3:
4: /*-
5: * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. The name of the author may not be used to endorse or promote products
14: * derived from this software without specific prior written permission
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26: *
27: * Id: pdq.c,v 1.27 1996/06/07 20:02:25 thomas Exp
28: *
29: */
30:
31: /*
32: * DEC PDQ FDDI Controller O/S independent code
33: *
 34: * This module should work with any PDQ-based board. Note that changes for
35: * MIPS and Alpha architectures (or any other architecture which requires
36: * a flushing of memory or write buffers and/or has incoherent caches)
37: * have yet to be made.
38: *
39: * However, it is expected that the PDQ_CSR_WRITE macro will cause a
40: * flushing of the write buffers.
41: */
42:
43: #define PDQ_HWSUPPORT /* for pdq.h */
44:
45: #include "pdqvar.h"
46: #include "pdqreg.h"
47:
/* Round n up to the next multiple of x (x must be a power of two). */
#define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
/* Alignment (bytes) required for command/response buffers. */
#define PDQ_CMD_RX_ALIGNMENT 16

/* Debug printf: compiled to a no-op unless built for test/verbose use. */
#if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
#define PDQ_PRINTF(x) printf x
#else
#define PDQ_PRINTF(x) do { } while (0)
#endif
56:
/* Printable halt reasons, indexed by the adapter's halt code. */
static const char * const pdq_halt_codes[] = {
    "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
    "Software Fault", "Hardware Fault", "PC Trace Path Test",
    "DMA Error", "Image CRC Error", "Adapter Processer Error"
};

/* Printable adapter states, indexed by PDQ_PSTS_ADAPTER_STATE(). */
static const char * const pdq_adapter_states[] = {
    "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
    "Link Available", "Link Unavailable", "Halted", "Ring Member"
};

/*
 * The following are used in conjunction with
 * unsolicited events
 */
static const char * const pdq_entities[] = {
    "Station", "Link", "Phy Port"
};

static const char * const pdq_station_events[] = {
    "Trace Received"
};

static const char * const pdq_station_arguments[] = {
    "Reason"
};

/* Link event names; NULL slots are event codes with no printable name. */
static const char * const pdq_link_events[] = {
    "Transmit Underrun",
    "Transmit Failed",
    "Block Check Error (CRC)",
    "Frame Status Error",
    "PDU Length Error",
    NULL,
    NULL,
    "Receive Data Overrun",
    NULL,
    "No User Buffer",
    "Ring Initialization Initiated",
    "Ring Initialization Received",
    "Ring Beacon Initiated",
    "Duplicate Address Failure",
    "Duplicate Token Detected",
    "Ring Purger Error",
    "FCI Strip Error",
    "Trace Initiated",
    "Directed Beacon Received",
};

static const char * const pdq_link_arguments[] = {
    "Reason",
    "Data Link Header",
    "Source",
    "Upstream Neighbor"
};

static const char * const pdq_phy_events[] = {
    "LEM Error Monitor Reject",
    "Elasticy Buffer Error",
    "Link Confidence Test Reject"
};

static const char * const pdq_phy_arguments[] = {
    "Direction"
};

/* Argument-name tables, indexed by event entity (station/link/phy). */
static const char * const * const pdq_event_arguments[] = {
    pdq_station_arguments,
    pdq_link_arguments,
    pdq_phy_arguments
};

/* Event-name tables, indexed by event entity (station/link/phy). */
static const char * const * const pdq_event_codes[] = {
    pdq_station_events,
    pdq_link_events,
    pdq_phy_events
};

/* Station type names, indexed by status_chars_get.station_type. */
static const char * const pdq_station_types[] = {
    "SAS", "DAC", "SAC", "NAC", "DAS"
};

/* SMT version strings, indexed by status_chars_get.smt_version_id. */
static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };

/* One character per PHY type (indexed by phy_type[]). */
static const char pdq_phy_types[] = "ABSM";

/* PMD type names for codes 0-3. */
static const char * const pdq_pmd_types0[] = {
    "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
    "ANSI Sonet"
};

/* PMD type names for codes 100-103. */
static const char * const pdq_pmd_types100[] = {
    "Low Power", "Thin Wire", "Shielded Twisted Pair",
    "Unshielded Twisted Pair"
};

/* Indexed by pmd_type / 100, then pmd_type % 100. */
static const char * const * const pdq_pmd_types[] = {
    pdq_pmd_types0, pdq_pmd_types100
};

/* Board/bus descriptions, indexed by pdq->pdq_type. */
static const char * const pdq_descriptions[] = {
    "DEFPA PCI",
    "DEFEA EISA",
    "DEFTA TC",
    "DEFAA Futurebus",
    "DEFQA Q-bus",
};
164:
165: static void
166: pdq_print_fddi_chars(
167: pdq_t *pdq,
168: const pdq_response_status_chars_get_t *rsp)
169: {
170: const char hexchars[] = "0123456789abcdef";
171:
172: printf(PDQ_OS_PREFIX "DEC %s FDDI %s Controller\n",
173: PDQ_OS_PREFIX_ARGS, pdq_descriptions[pdq->pdq_type],
174: pdq_station_types[rsp->status_chars_get.station_type]);
175:
176: printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
177: PDQ_OS_PREFIX_ARGS,
178: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
179: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
180: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
181: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
182: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
183: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
184: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
185: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
186: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
187: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
188: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
189: hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
190: pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
191: pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
192: rsp->status_chars_get.module_rev.fwrev_bytes[0]);
193:
194: if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
195: printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
196: }
197:
198: printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
199: PDQ_OS_PREFIX_ARGS,
200: rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
201: pdq_phy_types[rsp->status_chars_get.phy_type[0]],
202: pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
203:
204: if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
205: printf(", FDDI Port[B] = %c (PMD = %s)",
206: pdq_phy_types[rsp->status_chars_get.phy_type[1]],
207: pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
208:
209: printf("\n");
210: }
211:
212: static void
213: pdq_init_csrs(
214: pdq_csrs_t *csrs,
215: pdq_bus_t bus,
216: pdq_bus_memaddr_t csr_base,
217: size_t csrsize)
218: {
219: csrs->csr_bus = bus;
220: csrs->csr_base = csr_base;
221: csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
222: csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
223: csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
224: csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
225: csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
226: csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
227: csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
228: csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
229: csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
230: csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
231: csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
232: csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
233: csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
234: }
235:
236: static void
237: pdq_init_pci_csrs(
238: pdq_pci_csrs_t *csrs,
239: pdq_bus_t bus,
240: pdq_bus_memaddr_t csr_base,
241: size_t csrsize)
242: {
243: csrs->csr_bus = bus;
244: csrs->csr_base = csr_base;
245: csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
246: csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
247: csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
248: csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
249: }
250:
251: static void
252: pdq_flush_databuf_queue(
253: pdq_databuf_queue_t *q)
254: {
255: PDQ_OS_DATABUF_T *pdu;
256: for (;;) {
257: PDQ_OS_DATABUF_DEQUEUE(q, pdu);
258: if (pdu == NULL)
259: return;
260: PDQ_OS_DATABUF_FREE(pdu);
261: }
262: }
263:
264: static pdq_boolean_t
265: pdq_do_port_control(
266: const pdq_csrs_t * const csrs,
267: pdq_uint32_t cmd)
268: {
269: int cnt = 0;
270: PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
271: PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
272: while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
273: cnt++;
274: PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
275: if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
276: PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
277: return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
278: }
279: /* adapter failure */
280: PDQ_ASSERT(0);
281: return PDQ_FALSE;
282: }
283:
284: static void
285: pdq_read_mla(
286: const pdq_csrs_t * const csrs,
287: pdq_lanaddr_t *hwaddr)
288: {
289: pdq_uint32_t data;
290:
291: PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
292: pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
293: data = PDQ_CSR_READ(csrs, csr_host_data);
294:
295: hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
296: hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
297: hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
298: hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
299:
300: PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
301: pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
302: data = PDQ_CSR_READ(csrs, csr_host_data);
303:
304: hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
305: hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
306: }
307:
308: static void
309: pdq_read_fwrev(
310: const pdq_csrs_t * const csrs,
311: pdq_fwrev_t *fwrev)
312: {
313: pdq_uint32_t data;
314:
315: pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
316: data = PDQ_CSR_READ(csrs, csr_host_data);
317:
318: fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
319: fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
320: fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
321: fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
322: }
323:
324: static pdq_boolean_t
325: pdq_read_error_log(
326: pdq_t *pdq,
327: pdq_response_error_log_get_t *log_entry)
328: {
329: const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
330: pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
331:
332: pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
333:
334: while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
335: *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
336: if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
337: break;
338: }
339: return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
340: }
341:
342: static pdq_chip_rev_t
343: pdq_read_chiprev(
344: const pdq_csrs_t * const csrs)
345: {
346: pdq_uint32_t data;
347:
348: PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
349: pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
350: data = PDQ_CSR_READ(csrs, csr_host_data);
351:
352: return (pdq_chip_rev_t) data;
353: }
354:
/*
 * Per-opcode command/response buffer sizes and printable names,
 * indexed by pdq_cmd_code_t.  Opcodes whose support is compiled out
 * keep zero-length placeholder entries so the index stays aligned.
 */
static const struct {
    size_t cmd_len;		/* bytes needed for the request */
    size_t rsp_len;		/* bytes needed for the response */
    const char *cmd_name;	/* name used in diagnostics */
} pdq_cmd_info[] = {
    { sizeof(pdq_cmd_generic_t),		/* 0 - PDQC_START */
      sizeof(pdq_response_generic_t),
      "Start"
    },
    { sizeof(pdq_cmd_filter_set_t),		/* 1 - PDQC_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 2 - PDQC_FILTER_GET */
      sizeof(pdq_response_filter_get_t),
      "Filter Get"
    },
    { sizeof(pdq_cmd_chars_set_t),		/* 3 - PDQC_CHARS_SET */
      sizeof(pdq_response_generic_t),
      "Chars Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 4 - PDQC_STATUS_CHARS_GET */
      sizeof(pdq_response_status_chars_get_t),
      "Status Chars Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 5 - PDQC_COUNTERS_GET */
      sizeof(pdq_response_counters_get_t),
      "Counters Get"
    },
    { sizeof(pdq_cmd_counters_set_t),		/* 6 - PDQC_COUNTERS_SET */
      sizeof(pdq_response_generic_t),
      "Counters Set"
    },
#else
    { 0, 0, "Counters Get" },
    { 0, 0, "Counters Set" },
#endif
    { sizeof(pdq_cmd_addr_filter_set_t),	/* 7 - PDQC_ADDR_FILTER_SET */
      sizeof(pdq_response_generic_t),
      "Addr Filter Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 8 - PDQC_ADDR_FILTER_GET */
      sizeof(pdq_response_addr_filter_get_t),
      "Addr Filter Get"
    },
#if 0
    { sizeof(pdq_cmd_generic_t),		/* 9 - PDQC_ERROR_LOG_CLEAR */
      sizeof(pdq_response_generic_t),
      "Error Log Clear"
    },
    { sizeof(pdq_cmd_generic_t),		/* 10 - PDQC_ERROR_LOG_SET */
      sizeof(pdq_response_generic_t),
      "Error Log Set"
    },
    { sizeof(pdq_cmd_generic_t),		/* 11 - PDQC_FDDI_MIB_GET */
      sizeof(pdq_response_generic_t),
      "FDDI MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 12 - PDQC_DEC_EXT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "DEC Ext MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 13 - PDQC_DEC_SPECIFIC_GET */
      sizeof(pdq_response_generic_t),
      "DEC Specific Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 14 - PDQC_SNMP_SET */
      sizeof(pdq_response_generic_t),
      "SNMP Set"
    },
    { 0, 0, "N/A" },
    { sizeof(pdq_cmd_generic_t),		/* 16 - PDQC_SMT_MIB_GET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Get"
    },
    { sizeof(pdq_cmd_generic_t),		/* 17 - PDQC_SMT_MIB_SET */
      sizeof(pdq_response_generic_t),
      "SMT MIB Set",
    },
#endif
};
437:
/*
 * Queue the highest-numbered pending command to the adapter.  Only one
 * command may be outstanding at a time (ci_command_active gates this);
 * the command is built in the shared command buffer, its request and
 * response descriptors (pre-initialized except for lengths) are filled
 * in, and the request/response producer doorbell CSRs are rung.
 */
static void
pdq_queue_commands(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    pdq_cmd_code_t op;
    pdq_uint32_t cmdlen, rsplen, mask;

    /*
     * If there are commands or responses active or there aren't
     * any pending commands, then don't queue any more.
     */
    if (ci->ci_command_active || ci->ci_pending_commands == 0)
	return;

    /*
     * Determine which command needs to be queued.
     * Scan the pending-command bitmask downward from the highest
     * opcode; op ends up as the highest set bit's opcode.
     */
    op = PDQC_SMT_MIB_SET;
    for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
	op = (pdq_cmd_code_t) ((int) op - 1);
    /*
     * Obtain the sizes needed for the command and response.
     * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
     * always properly aligned.
     */
    cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
    rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
    if (cmdlen < rsplen)
	cmdlen = rsplen;
    /*
     * Since only one command at a time will be queued, there will always
     * be enough space.
     */

    /*
     * Obtain and fill in the descriptor for the command (descriptor is
     * pre-initialized)
     */
    dbp->pdqdb_command_requests[ci->ci_request_producer].txd_seg_len = cmdlen;
    PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));

    /*
     * Obtain and fill in the descriptor for the response (descriptor is
     * pre-initialized).  rxd_seg_len_hi is in 16-byte units.
     */
    dbp->pdqdb_command_responses[ci->ci_response_producer].rxd_seg_len_hi = cmdlen / 16;
    PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));

    /*
     * Clear the command area, set the opcode, and the command from the pending
     * mask.
     */

    PDQ_OS_MEMZERO(ci->ci_bufstart, cmdlen);
    *(pdq_cmd_code_t *) ci->ci_bufstart = op;
    ci->ci_pending_commands &= ~mask;

    /*
     * Fill in the command area, if needed.
     */
    switch (op) {
	case PDQC_FILTER_SET: {
	    /* Program the packet filters from the driver's flag bits. */
	    pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_bufstart;
	    unsigned idx = 0;
	    filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
	    filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
	    filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
	    idx++;
	    filter_set->filter_set_items[idx].item_code = PDQI_EOL;
	    break;
	}
	case PDQC_ADDR_FILTER_SET: {
	    /* First entry is the broadcast address; the OS layer fills
	     * in up to 61 more multicast addresses after it. */
	    pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_bufstart;
	    pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
	    addr->lanaddr_bytes[0] = 0xFF;
	    addr->lanaddr_bytes[1] = 0xFF;
	    addr->lanaddr_bytes[2] = 0xFF;
	    addr->lanaddr_bytes[3] = 0xFF;
	    addr->lanaddr_bytes[4] = 0xFF;
	    addr->lanaddr_bytes[5] = 0xFF;
	    addr++;
	    pdq_os_addr_fill(pdq, addr, 61);
	    break;
	}
	default:
	    break;
    }
    /*
     * At this point the command is done. All that needs to be done is to
     * produce it to the PDQ.
     */
    PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
	pdq_cmd_info[op].cmd_name));

    ci->ci_command_active++;
    PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
    PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
}
547:
/*
 * Consume the response (if any) to the single outstanding command,
 * advance the request/response completion indices, and either queue
 * the next pending command or update the doorbell CSRs with the new
 * completion state.
 */
static void
pdq_process_command_responses(
    pdq_t * const pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_command_info_t * const ci = &pdq->pdq_command_info;
    volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
    const pdq_response_generic_t *rspgen;

    /*
     * We have to process the command and response in tandem so
     * just wait for the response to be consumed.  If it has been
     * consumed then the command must have been as well.
     */

    if (cbp->pdqcb_command_response == ci->ci_response_completion)
	return;

    PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);

    rspgen = (const pdq_response_generic_t *) ci->ci_bufstart;
    PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
    PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d)\n",
	pdq_cmd_info[rspgen->generic_op].cmd_name,
	rspgen->generic_status));

    /* One-shot: print the adapter characteristics if that was requested. */
    if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
	pdq->pdq_flags &= ~PDQ_PRINTCHARS;
	pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
    }

    PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
    PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
    ci->ci_command_active = 0;

    if (ci->ci_pending_commands != 0) {
	/* More work: queue the next command (which rings the doorbells). */
	pdq_queue_commands(pdq);
    } else {
	/* Nothing pending: just publish the new completion indices. */
	PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
	    ci->ci_response_producer | (ci->ci_response_completion << 8));
	PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
	    ci->ci_request_producer | (ci->ci_request_completion << 8));
    }
}
593:
594: /*
595: * This following routine processes unsolicited events.
596: * In addition, it also fills the unsolicited queue with
597: * event buffers so it can be used to initialize the queue
598: * as well.
599: */
/*
 * This following routine processes unsolicited events.
 * In addition, it also fills the unsolicited queue with
 * event buffers so it can be used to initialize the queue
 * as well.
 */
static void
pdq_process_unsolicited_events(
    pdq_t *pdq)
{
    const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
    pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    const pdq_unsolicited_event_t *event;
    pdq_rxdesc_t *rxd;

    /*
     * Process each unsolicited event (if any).
     */

    while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
	rxd = &dbp->pdqdb_unsolicited_events[ui->ui_completion];
	event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

	switch (event->event_type) {
	    case PDQ_UNSOLICITED_EVENT: {
		/* Log the event by entity (station/link/phy) and code. */
		printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
		    PDQ_OS_PREFIX_ARGS,
		    pdq_entities[event->event_entity],
		    pdq_event_codes[event->event_entity][event->event_code.value]);
		if (event->event_entity == PDQ_ENTITY_PHY_PORT)
		    printf("[%d]", event->event_index);
		printf("\n");
		break;
	    }
	    case PDQ_UNSOLICITED_COUNTERS: {
		/* Counter updates are currently ignored. */
		break;
	    }
	}
	PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
	ui->ui_free++;
    }

    /*
     * Now give back the event buffers back to the PDQ.
     */
    PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
    ui->ui_free = 0;

    PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
	ui->ui_producer | (ui->ui_completion << 8));
}
647:
/*
 * Walk the receive ring from rx->rx_completion up to completion_goal.
 * Each received PDU occupies PDQ_RX_SEGCNT ring buffers.  Good frames
 * have fresh buffers allocated to replace theirs and are handed to the
 * OS via pdq_os_receive_pdu(); bad frames are logged and their buffers
 * recycled in place.  Finally the ring is refilled with descriptors
 * for any freed slots.
 *
 * NOTE(review): the 0x200000 test and the 0x1FFF length mask decode
 * fields of rxs_status; presumably bit 21 set means "receive error" —
 * confirm against the pdq_rxstatus_t layout in pdqreg.h.
 */
static void
pdq_process_received_data(
    pdq_t *pdq,
    pdq_rx_info_t *rx,
    pdq_rxdesc_t *receives,
    pdq_uint32_t completion_goal,
    pdq_uint32_t ring_mask)
{
    pdq_uint32_t completion = rx->rx_completion;
    pdq_uint32_t producer = rx->rx_producer;
    PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
    pdq_rxdesc_t *rxd;
    pdq_uint32_t idx;

    while (completion != completion_goal) {
	PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;	/* first/last/new buffer */
	pdq_uint8_t *dataptr;
	pdq_uint32_t fc, datalen, pdulen, segcnt;
	pdq_rxstatus_t status;

	fpdu = lpdu = buffers[completion];
	PDQ_ASSERT(fpdu != NULL);

	/* The adapter writes the receive status word at the head of
	 * the first buffer, before the frame-control byte. */
	dataptr = PDQ_OS_DATABUF_PTR(fpdu);
	status = *(pdq_rxstatus_t *) dataptr;
	if ((status.rxs_status & 0x200000) == 0) {
	    datalen = status.rxs_status & 0x1FFF;
	    fc = dataptr[PDQ_RX_FC_OFFSET];
	    /* Validate the length against the FDDI frame class. */
	    switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
		case PDQ_FDDI_LLC_ASYNC:
		case PDQ_FDDI_LLC_SYNC:
		case PDQ_FDDI_IMP_ASYNC:
		case PDQ_FDDI_IMP_SYNC: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
			PDQ_PRINTF(("discard: bad length %d\n", datalen));
			goto discard_frame;
		    }
		    break;
		}
		case PDQ_FDDI_SMT: {
		    if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
			goto discard_frame;
		    break;
		}
		default: {
		    PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
		    goto discard_frame;
		}
	    }
	    /*
	     * Update the lengths of the data buffers now that we know
	     * the real length.
	     */
	    pdulen = datalen - 4 /* CRC */;
	    segcnt = (pdulen + PDQ_RX_FC_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
	    /* Replace the first buffer; if allocation fails, keep the
	     * old buffers and recycle the whole slot. */
	    PDQ_OS_DATABUF_ALLOC(npdu);
	    if (npdu == NULL) {
		PDQ_PRINTF(("discard: no databuf #0\n"));
		goto discard_frame;
	    }
	    buffers[completion] = npdu;
	    /* Chain the remaining segments onto fpdu, replacing each
	     * ring buffer with a freshly allocated one. */
	    for (idx = 1; idx < segcnt; idx++) {
		PDQ_OS_DATABUF_ALLOC(npdu);
		if (npdu == NULL) {
		    /* Out of buffers mid-chain: free what we built. */
		    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
		    PDQ_OS_DATABUF_FREE(fpdu);
		    goto discard_frame;
		}
		PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
		lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
		buffers[(completion + idx) & ring_mask] = npdu;
	    }
	    PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
	    /* Move the replacement buffers to the producer slots. */
	    for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
		buffers[(producer + idx) & ring_mask] =
		    buffers[(completion + idx) & ring_mask];
		buffers[(completion + idx) & ring_mask] = NULL;
	    }
	    /* Strip the status word so the PDU starts at the FC byte. */
	    PDQ_OS_DATABUF_ADJ(fpdu, PDQ_RX_FC_OFFSET);
	    if (segcnt == 1) {
		PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
	    } else {
		PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_RX_FC_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
	    }
	    pdq_os_receive_pdu(pdq, fpdu, pdulen);
	    rx->rx_free += PDQ_RX_SEGCNT;
	    PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
	    PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
	    continue;
	} else {
	    /* Receive error: decode and count, then recycle the slot. */
	    PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
		status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
		status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
	    if (status.rxs_rcc_reason == 7)
		goto discard_frame;
	    if (status.rxs_rcc_reason != 0) {
		/* hardware fault */
	    }
	    if (status.rxs_rcc_badcrc) {
		printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
		    PDQ_OS_PREFIX_ARGS,
		    dataptr[PDQ_RX_FC_OFFSET+1],
		    dataptr[PDQ_RX_FC_OFFSET+2],
		    dataptr[PDQ_RX_FC_OFFSET+3],
		    dataptr[PDQ_RX_FC_OFFSET+4],
		    dataptr[PDQ_RX_FC_OFFSET+5],
		    dataptr[PDQ_RX_FC_OFFSET+6]);
		/* rx->rx_badcrc++; */
	    } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
		/* rx->rx_frame_status_errors++; */
	    } else {
		/* hardware fault */
	    }
	}
      discard_frame:
	/*
	 * Discarded frames go right back on the queue; therefore
	 * ring entries were freed.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    buffers[producer] = buffers[completion];
	    buffers[completion] = NULL;
	    rxd = &receives[rx->rx_producer];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(buffers[rx->rx_producer]));
	    PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
	    PDQ_ADVANCE(producer, 1, ring_mask);
	    PDQ_ADVANCE(completion, 1, ring_mask);
	}
    }
    rx->rx_completion = completion;

    /* Refill: build complete PDQ_RX_SEGCNT-buffer descriptors for as
     * many freed slots as we can allocate buffers for. */
    while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
	PDQ_OS_DATABUF_T *pdu;
	/*
	 * Allocate the needed number of data buffers.
	 * Try to obtain them from our free queue before
	 * asking the system for more.
	 */
	for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
	    if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
		PDQ_OS_DATABUF_ALLOC(pdu);
		if (pdu == NULL)
		    break;
		buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
	    }
	    rxd = &receives[(rx->rx_producer + idx) & ring_mask];
	    if (idx == 0) {
		rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
	    } else {
		rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
	    }
	    rxd->rxd_pa_hi = 0;
	    rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
	    rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(pdu));
	}
	if (idx < PDQ_RX_SEGCNT) {
	    /*
	     * We didn't get all databufs required to complete a new
	     * receive buffer.  Keep the ones we got and retry a bit
	     * later for the rest.
	     */
	    break;
	}
	PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
	rx->rx_free -= PDQ_RX_SEGCNT;
    }
}
822:
823: pdq_boolean_t
824: pdq_queue_transmit_data(
825: pdq_t *pdq,
826: PDQ_OS_DATABUF_T *pdu)
827: {
828: pdq_tx_info_t *tx = &pdq->pdq_tx_info;
829: pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
830: pdq_uint32_t producer = tx->tx_producer;
831: pdq_txdesc_t *eop = NULL;
832: PDQ_OS_DATABUF_T *pdu0;
833: pdq_uint32_t freecnt;
834:
835: if (tx->tx_free < 1)
836: return PDQ_FALSE;
837:
838: dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
839: PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
840:
841: for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
842: pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
843: const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
844:
845: /*
846: * The first segment is limited to the space remaining in
847: * page. All segments after that can be up to a full page
848: * in size.
849: */
850: fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
851: while (datalen > 0 && freecnt > 0) {
852: pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
853:
854: /*
855: * Initialize the transmit descriptor
856: */
857: eop = &dbp->pdqdb_transmits[producer];
858: eop->txd_seg_len = seglen;
859: eop->txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, dataptr);
860: eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
861:
862: datalen -= seglen;
863: dataptr += seglen;
864: fraglen = PDQ_OS_PAGESIZE;
865: freecnt--;
866: PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
867: }
868: pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
869: }
870: if (pdu0 != NULL) {
871: PDQ_ASSERT(freecnt == 0);
872: /*
873: * If we still have data to process then the ring was too full
874: * to store the PDU. Return FALSE so the caller will requeue
875: * the PDU for later.
876: */
877: return PDQ_FALSE;
878: }
879: /*
880: * Everything went fine. Finish it up.
881: */
882: tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
883: eop->txd_eop = 1;
884: PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
885: tx->tx_producer = producer;
886: tx->tx_free = freecnt;
887: PDQ_DO_TYPE2_PRODUCER(pdq);
888: return PDQ_TRUE;
889: }
890:
/*
 * Reclaim transmit descriptors the adapter has consumed: for each
 * completed PDU, dequeue it, notify the OS layer via
 * pdq_os_transmit_done(), and return its descriptors to the free pool.
 * If anything completed, restart the (possibly stalled) transmitter.
 */
static void
pdq_process_transmitted_data(
    pdq_t *pdq)
{
    pdq_tx_info_t *tx = &pdq->pdq_tx_info;
    volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
    pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
    pdq_uint32_t completion = tx->tx_completion;

    while (completion != cbp->pdqcb_transmits) {
	PDQ_OS_DATABUF_T *pdu;
	/* Number of ring entries this PDU used (recorded at queue time). */
	pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
	PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
	PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
	PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
	pdq_os_transmit_done(pdq, pdu);
	tx->tx_free += descriptor_count;

	PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
    }
    if (tx->tx_completion != completion) {
	tx->tx_completion = completion;
	pdq_os_restart_transmitter(pdq);
    }
    PDQ_DO_TYPE2_PRODUCER(pdq);
}
917:
/*
 * Discard every PDU still queued for transmission and reset the
 * transmit ring to an empty state.  The dropped packets never made it
 * onto the wire, so pdq_os_transmit_done() is deliberately not called
 * for them.
 */
void
pdq_flush_transmitter(
	pdq_t *pdq)
{
	volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
	pdq_tx_info_t *tx = &pdq->pdq_tx_info;

	/* Drain and free every PDU awaiting completion. */
	for (;;) {
		PDQ_OS_DATABUF_T *pdu;
		PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
		if (pdu == NULL)
			break;
		/*
		 * Don't call transmit done since the packet never made it
		 * out on the wire.
		 */
		PDQ_OS_DATABUF_FREE(pdu);
	}

	/*
	 * Mark the whole ring free and make the completion index (and the
	 * consumer block's transmit index) catch up to the producer.
	 */
	tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
	tx->tx_completion = cbp->pdqcb_transmits = tx->tx_producer;

	PDQ_DO_TYPE2_PRODUCER(pdq);
}
942:
/*
 * Hard-reset the adapter by pulsing the port reset CSR, then poll
 * (every 1ms, up to 45000 iterations) until it reaches the
 * DMA_UNAVAILABLE state.  A no-op if the adapter is already in
 * DMA_UNAVAILABLE.  Self-test is skipped unless the adapter was
 * halted.
 */
void
pdq_hwreset(
	pdq_t *pdq)
{
	const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
	pdq_state_t state;
	int cnt;

	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	if (state == PDQS_DMA_UNAVAILABLE)
		return;
	/* A halted adapter gets the full reset including self-test. */
	PDQ_CSR_WRITE(csrs, csr_port_data_a,
	    (state == PDQS_HALTED) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
	/* Pulse the reset line: assert, hold 100us, deassert. */
	PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
	PDQ_OS_USEC_DELAY(100);
	PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
	for (cnt = 45000;;cnt--) {
		PDQ_OS_USEC_DELAY(1000);
		state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
		if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
			break;
	}
	PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 45000 - cnt));
	PDQ_OS_USEC_DELAY(10000);
	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
	PDQ_ASSERT(cnt > 0);	/* the poll loop must not have timed out */
}
971:
972: /*
973: * The following routine brings the PDQ from whatever state it is
974: * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
975: */
976: pdq_state_t
977: pdq_stop(
978: pdq_t *pdq)
979: {
980: pdq_state_t state;
981: const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
982: int cnt, pass = 0, idx;
983: PDQ_OS_DATABUF_T **buffers;
984:
985: restart:
986: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
987: if (state != PDQS_DMA_UNAVAILABLE) {
988: pdq_hwreset(pdq);
989: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
990: PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
991: }
992: #if 0
993: switch (state) {
994: case PDQS_RING_MEMBER:
995: case PDQS_LINK_UNAVAILABLE:
996: case PDQS_LINK_AVAILABLE: {
997: PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
998: PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
999: pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1000: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1001: PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1002: /* FALLTHROUGH */
1003: }
1004: case PDQS_DMA_AVAILABLE: {
1005: PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1006: PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1007: pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1008: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1009: PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1010: /* FALLTHROUGH */
1011: }
1012: case PDQS_DMA_UNAVAILABLE: {
1013: break;
1014: }
1015: }
1016: #endif
1017: /*
1018: * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1019: * DMA_AVAILABLE.
1020: */
1021:
1022: /*
1023: * Obtain the hardware address and firmware revisions
1024: * (MLA = my long address which is FDDI speak for hardware address)
1025: */
1026: pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1027: pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1028: pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1029:
1030: if (pdq->pdq_type == PDQ_DEFPA) {
1031: /*
1032: * Disable interrupts and DMA.
1033: */
1034: PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1035: PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1036: }
1037:
1038: /*
1039: * Flush all the databuf queues.
1040: */
1041: pdq_flush_databuf_queue(&pdq->pdq_tx_info.tx_txq);
1042: pdq->pdq_flags &= ~PDQ_TXOK;
1043: buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1044: for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1045: if (buffers[idx] != NULL) {
1046: PDQ_OS_DATABUF_FREE(buffers[idx]);
1047: buffers[idx] = NULL;
1048: }
1049: }
1050: pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1051: buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1052: for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1053: if (buffers[idx] != NULL) {
1054: PDQ_OS_DATABUF_FREE(buffers[idx]);
1055: buffers[idx] = NULL;
1056: }
1057: }
1058: pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1059:
1060: /*
1061: * Reset the consumer indexes to 0.
1062: */
1063: pdq->pdq_cbp->pdqcb_receives = 0;
1064: pdq->pdq_cbp->pdqcb_transmits = 0;
1065: pdq->pdq_cbp->pdqcb_host_smt = 0;
1066: pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1067: pdq->pdq_cbp->pdqcb_command_response = 0;
1068: pdq->pdq_cbp->pdqcb_command_request = 0;
1069:
1070: /*
1071: * Reset the producer and completion indexes to 0.
1072: */
1073: pdq->pdq_command_info.ci_request_producer = 0;
1074: pdq->pdq_command_info.ci_response_producer = 0;
1075: pdq->pdq_command_info.ci_request_completion = 0;
1076: pdq->pdq_command_info.ci_response_completion = 0;
1077: pdq->pdq_unsolicited_info.ui_producer = 0;
1078: pdq->pdq_unsolicited_info.ui_completion = 0;
1079: pdq->pdq_rx_info.rx_producer = 0;
1080: pdq->pdq_rx_info.rx_completion = 0;
1081: pdq->pdq_tx_info.tx_producer = 0;
1082: pdq->pdq_tx_info.tx_completion = 0;
1083: pdq->pdq_host_smt_info.rx_producer = 0;
1084: pdq->pdq_host_smt_info.rx_completion = 0;
1085:
1086: pdq->pdq_command_info.ci_command_active = 0;
1087: pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1088: pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1089:
1090: /*
1091: * Allow the DEFPA to do DMA. Then program the physical
1092: * addresses of the consumer and descriptor blocks.
1093: */
1094: if (pdq->pdq_type == PDQ_DEFPA) {
1095: #ifdef PDQTEST
1096: PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1097: PDQ_PFI_MODE_DMA_ENABLE);
1098: #else
1099: PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1100: PDQ_PFI_MODE_DMA_ENABLE
1101: /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1102: #endif
1103: }
1104:
1105: /*
1106: * Make sure the unsolicited queue has events ...
1107: */
1108: pdq_process_unsolicited_events(pdq);
1109:
1110: if (pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1111: PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1112: else
1113: PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1114: PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1115: pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1116:
1117: PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1118: PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_OS_VA_TO_PA(pdq, pdq->pdq_cbp));
1119: pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1120:
1121: PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1122: PDQ_CSR_WRITE(csrs, csr_port_data_a,
1123: PDQ_OS_VA_TO_PA(pdq, pdq->pdq_dbp) | PDQ_DMA_INIT_LW_BSWAP_DATA);
1124: pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1125:
1126: for (cnt = 0; cnt < 1000; cnt++) {
1127: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1128: if (state == PDQS_HALTED) {
1129: if (pass > 0)
1130: return PDQS_HALTED;
1131: pass = 1;
1132: goto restart;
1133: }
1134: if (state == PDQS_DMA_AVAILABLE) {
1135: PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1136: break;
1137: }
1138: PDQ_OS_USEC_DELAY(1000);
1139: }
1140: PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1141:
1142: PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1143: PDQ_CSR_WRITE(csrs, csr_host_int_enable, 0) /* PDQ_HOST_INT_STATE_CHANGE
1144: |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1145: |PDQ_HOST_INT_UNSOL_ENABLE */;
1146:
1147: /*
1148: * Any other command but START should be valid.
1149: */
1150: pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1151: if (pdq->pdq_flags & PDQ_PRINTCHARS)
1152: pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1153: pdq_queue_commands(pdq);
1154:
1155: if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1156: /*
1157: * Now wait (up to 100ms) for the command(s) to finish.
1158: */
1159: for (cnt = 0; cnt < 1000; cnt++) {
1160: pdq_process_command_responses(pdq);
1161: if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1162: break;
1163: PDQ_OS_USEC_DELAY(1000);
1164: }
1165: state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1166: }
1167:
1168: return state;
1169: }
1170:
/*
 * Bring an initialized adapter on line.  Depending on the state the
 * adapter is in (asserted to be one of DMA_AVAILABLE, LINK_UNAVAILABLE
 * or LINK_AVAILABLE), enable interrupts, drain any pending receive /
 * host-SMT / unsolicited work, and queue the filter-set (and, from
 * DMA_AVAILABLE, the START) commands.
 */
void
pdq_run(
	pdq_t *pdq)
{
	const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
	pdq_state_t state;

	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
	PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
	PDQ_ASSERT(state != PDQS_RESET);
	PDQ_ASSERT(state != PDQS_HALTED);
	PDQ_ASSERT(state != PDQS_UPGRADE);
	PDQ_ASSERT(state != PDQS_RING_MEMBER);
	switch (state) {
	case PDQS_DMA_AVAILABLE: {
		/*
		 * The PDQ after being reset screws up some of its state.
		 * So we need to clear all the errors/interrupts so the real
		 * ones will get through.
		 */
		PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
		PDQ_CSR_WRITE(csrs, csr_host_int_enable, PDQ_HOST_INT_STATE_CHANGE|PDQ_HOST_INT_XMT_DATA_FLUSH
		    |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
		    |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_TX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE);
		/*
		 * Set the MAC and address filters and start up the PDQ.
		 */
		pdq_process_unsolicited_events(pdq);
		/* Prime the receive ring and publish its producer index. */
		pdq_process_received_data(pdq, &pdq->pdq_rx_info,
		    pdq->pdq_dbp->pdqdb_receives,
		    pdq->pdq_cbp->pdqcb_receives,
		    PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
		PDQ_DO_TYPE2_PRODUCER(pdq);
		if (pdq->pdq_flags & PDQ_PASS_SMT) {
			/* Same for the host SMT ring when SMT pass-through is on. */
			pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
			    pdq->pdq_dbp->pdqdb_host_smt,
			    pdq->pdq_cbp->pdqcb_host_smt,
			    PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
			PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			    pdq->pdq_host_smt_info.rx_producer
			    | (pdq->pdq_host_smt_info.rx_completion << 8));
		}
		/* From DMA_AVAILABLE a START command is needed as well. */
		pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		    | PDQ_BITMASK(PDQC_ADDR_FILTER_SET) | PDQ_BITMASK(PDQC_START);
		if (pdq->pdq_flags & PDQ_PRINTCHARS)
			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
		pdq_queue_commands(pdq);
		break;
	}
	case PDQS_LINK_UNAVAILABLE:
	case PDQS_LINK_AVAILABLE: {
		/* Already started: just refresh the filters. */
		pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
		    | PDQ_BITMASK(PDQC_ADDR_FILTER_SET);
		if (pdq->pdq_flags & PDQ_PRINTCHARS)
			pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
		if (pdq->pdq_flags & PDQ_PASS_SMT) {
			pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
			    pdq->pdq_dbp->pdqdb_host_smt,
			    pdq->pdq_cbp->pdqcb_host_smt,
			    PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
			PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
			    pdq->pdq_host_smt_info.rx_producer
			    | (pdq->pdq_host_smt_info.rx_completion << 8));
		}
		pdq_process_unsolicited_events(pdq);
		pdq_queue_commands(pdq);
		break;
	}
	case PDQS_RING_MEMBER: {
		/* Asserted unreachable above; falls into default. */
	}
	default:
		break;
	}
}
1245:
/*
 * Service all pending interrupt causes on the adapter.
 *
 * Loops while the port status register shows an interrupt pending,
 * dispatching to the receive / host-SMT / transmit / unsolicited /
 * command-response handlers as indicated.  Type-0 interrupts (state
 * change, fatal error, transmit flush) are handled inline; a halt or
 * fatal error restarts the adapter via pdq_stop()/pdq_run() and
 * returns immediately.
 *
 * Returns non-zero if any work was done (so shared-interrupt callers
 * can tell whether the interrupt was ours).
 */
int
pdq_interrupt(
	pdq_t *pdq)
{
	const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
	pdq_uint32_t data;
	int progress = 0;

	/*
	 * 0x18 written to the PFI status register presumably acks the
	 * PFI-level interrupt bits on the DEFPA -- TODO confirm against
	 * the DEFPA PFI register description.
	 */
	if (pdq->pdq_type == PDQ_DEFPA)
		PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);

	while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
		progress = 1;
		PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
		if (data & PDQ_PSTS_RCV_DATA_PENDING) {
			pdq_process_received_data(pdq, &pdq->pdq_rx_info,
			    pdq->pdq_dbp->pdqdb_receives,
			    pdq->pdq_cbp->pdqcb_receives,
			    PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
			PDQ_DO_TYPE2_PRODUCER(pdq);
		}
		if (data & PDQ_PSTS_HOST_SMT_PENDING) {
			pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
			    pdq->pdq_dbp->pdqdb_host_smt,
			    pdq->pdq_cbp->pdqcb_host_smt,
			    PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
			PDQ_DO_HOST_SMT_PRODUCER(pdq);
		}
		if (data & PDQ_PSTS_XMT_DATA_PENDING)
			pdq_process_transmitted_data(pdq);
		if (data & PDQ_PSTS_UNSOL_PENDING)
			pdq_process_unsolicited_events(pdq);
		if (data & PDQ_PSTS_CMD_RSP_PENDING)
			pdq_process_command_responses(pdq);
		if (data & PDQ_PSTS_TYPE_0_PENDING) {
			/* NB: "data" is reused here to hold the type-0 bits. */
			data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
			if (data & PDQ_HOST_INT_STATE_CHANGE) {
				pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
				printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
				if (state == PDQS_LINK_UNAVAILABLE) {
					pdq->pdq_flags &= ~PDQ_TXOK;
				} else if (state == PDQS_LINK_AVAILABLE) {
					pdq->pdq_flags |= PDQ_TXOK;
					pdq_os_restart_transmitter(pdq);
				} else if (state == PDQS_HALTED) {
					/* Log the halt reason, then reset and (if running) restart. */
					pdq_response_error_log_get_t log_entry;
					pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
					printf(": halt code = %d (%s)\n",
					    halt_code, pdq_halt_codes[halt_code]);
					if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
						PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
						    PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
						    data & PDQ_HOST_INT_FATAL_ERROR));
					}
					pdq_read_error_log(pdq, &log_entry);
					pdq_stop(pdq);
					if (pdq->pdq_flags & PDQ_RUNNING)
						pdq_run(pdq);
					return 1;
				}
				printf("\n");
				PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
			}
			if (data & PDQ_HOST_INT_FATAL_ERROR) {
				/* Fatal error: full reset and restart if we were running. */
				pdq_stop(pdq);
				if (pdq->pdq_flags & PDQ_RUNNING)
					pdq_run(pdq);
				return 1;
			}
			if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
				printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
				pdq->pdq_flags &= ~PDQ_TXOK;
				pdq_flush_transmitter(pdq);
				pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
				PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
			}
		}
		if (pdq->pdq_type == PDQ_DEFPA)
			PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
	}
	return progress;
}
1328:
/*
 * Allocate and initialize the OS-independent softc (pdq_t) for one
 * PDQ-based FDDI adapter.
 *
 * bus/csr_base locate the CSRs; name/unit/ctx are opaque OS-layer
 * identification; type selects the board variant (DEFPA/DEFEA/DEFAA...).
 *
 * Sets up the 8KB-aligned descriptor block, the consumer block, the
 * command, unsolicited-event, receive, host-SMT and transmit rings,
 * then uses pdq_stop() to bring the adapter to DMA_AVAILABLE.
 *
 * Returns the new pdq_t, or NULL if allocation fails or the adapter
 * cannot be brought to a usable state (everything allocated here is
 * freed on failure).
 */
pdq_t *
pdq_initialize(
	pdq_bus_t bus,
	pdq_bus_memaddr_t csr_base,
	const char *name,
	int unit,
	void *ctx,
	pdq_type_t type)
{
	pdq_t *pdq;
	pdq_state_t state;
	/* Enough contiguous memory to contain an aligned 8KB block. */
	const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
	pdq_uint8_t *p;
	int idx;

	/* Sanity-check that structure layouts match the hardware's view. */
	PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
	PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
	PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
	PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
	PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
	PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
	PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
	PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
	PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);

	pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
	if (pdq == NULL) {
		PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
		return NULL;
	}
	PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
	pdq->pdq_type = type;
	pdq->pdq_unit = unit;
	pdq->pdq_os_ctx = (void *) ctx;
	pdq->pdq_os_name = name;
	pdq->pdq_flags = PDQ_PRINTCHARS;
	/*
	 * Allocate the additional data structures required by the PDQ
	 * driver.  The descriptor block must be an 8KB region aligned on
	 * an 8KB boundary, so allocate (2 * 8KB - pagesize) bytes of
	 * contiguous memory to guarantee an aligned 8KB block exists
	 * somewhere within the allocation.  On machines with page sizes
	 * below 8KB this over-allocates, and the slack is reused for the
	 * unsolicited event buffers; with 8KB (or larger) pages there is
	 * no slack, so those buffers are allocated separately below.
	 */
	p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
	if (p != NULL) {
		pdq_physaddr_t physaddr = PDQ_OS_VA_TO_PA(pdq, p);
		/*
		 * Assert that we really got contiguous memory. This isn't really
		 * needed on systems that actually have physical contiguous allocation
		 * routines, but on those systems that don't ...
		 */
		for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
			if (PDQ_OS_VA_TO_PA(pdq, p + idx) - physaddr != idx)
				goto cleanup_and_return;
		}
		physaddr &= 0x1FFF;	/* offset within an 8KB boundary */
		if (physaddr) {
			/*
			 * Not 8KB aligned: the descriptor block starts at the
			 * first 8KB boundary inside the allocation and the
			 * leading slack holds the unsolicited events.
			 */
			pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
			pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - physaddr];
		} else {
			/* Already aligned: the events use the trailing slack. */
			pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
			pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
		}
	}
	if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
		/* No slack (8KB pages): allocate the event buffers separately. */
		pdq->pdq_unsolicited_info.ui_events =
		    (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
			PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
	}

	/*
	 * Make sure everything got allocated. If not, free what did
	 * get allocated and return.
	 */
	if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
	  cleanup_and_return:
		if (p /* pdq->pdq_dbp */ != NULL)
			PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
		if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
			PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
			    PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
		PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
		return NULL;
	}

	/* The consumer block, command pool, and rx buffer pointer arrays
	 * all live inside the descriptor block. */
	pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
	pdq->pdq_command_info.ci_bufstart = (pdq_uint8_t *) pdq->pdq_dbp->pdqdb_command_pool;
	pdq->pdq_rx_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_receive_buffers;

	pdq->pdq_host_smt_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_host_smt_buffers;

	PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp));
	PDQ_PRINTF(("    Receive Queue          = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_receives));
	PDQ_PRINTF(("    Transmit Queue         = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_transmits));
	PDQ_PRINTF(("    Host SMT Queue         = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_host_smt));
	PDQ_PRINTF(("    Command Response Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_responses));
	PDQ_PRINTF(("    Command Request Queue  = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_requests));
	PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));

	/*
	 * Zero out the descriptor block. Not really required but
	 * it pays to be neat. This will also zero out the consumer
	 * block, command pool, and buffer pointers for the receive
	 * host_smt rings.
	 */
	PDQ_OS_MEMZERO(pdq->pdq_dbp, sizeof(*pdq->pdq_dbp));

	/*
	 * Initialize the CSR references.
	 * the DEFAA (FutureBus+) skips a longword between registers
	 */
	pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
	if (pdq->pdq_type == PDQ_DEFPA)
		pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);

	PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_PTR_FMT "\n", pdq->pdq_csrs.csr_base));
	PDQ_PRINTF(("    Port Reset                = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
	PDQ_PRINTF(("    Host Data                 = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
	PDQ_PRINTF(("    Port Control              = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
	PDQ_PRINTF(("    Port Data A               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
	PDQ_PRINTF(("    Port Data B               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
	PDQ_PRINTF(("    Port Status               = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
	PDQ_PRINTF(("    Host Int Type 0           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
	PDQ_PRINTF(("    Host Int Enable           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
	PDQ_PRINTF(("    Type 2 Producer           = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
	PDQ_PRINTF(("    Command Response Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
	PDQ_PRINTF(("    Command Request Producer  = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
	PDQ_PRINTF(("    Host SMT Producer         = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
	PDQ_PRINTF(("    Unsolicited Producer      = " PDQ_OS_PTR_FMT " [0x%08x]\n",
	    pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));

	/*
	 * Initialize the command information block
	 */
	pdq->pdq_command_info.ci_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_command_info.ci_bufstart);
	/* Every command request descriptor points at the shared command
	 * buffer; each one is a complete (SOP+EOP) frame. */
	for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_requests)/sizeof(pdq->pdq_dbp->pdqdb_command_requests[0]); idx++) {
		pdq_txdesc_t *txd = &pdq->pdq_dbp->pdqdb_command_requests[idx];

		txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
		txd->txd_eop = txd->txd_sop = 1;
		txd->txd_pa_hi = 0;
	}
	for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_responses)/sizeof(pdq->pdq_dbp->pdqdb_command_responses[0]); idx++) {
		pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_command_responses[idx];

		rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
		rxd->rxd_sop = 1;
		rxd->rxd_seg_cnt = 0;
		rxd->rxd_seg_len_lo = 0;
	}

	/*
	 * Initialize the unsolicited event information block
	 */
	pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
	pdq->pdq_unsolicited_info.ui_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_unsolicited_info.ui_events);
	for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events)/sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events[0]); idx++) {
		pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_unsolicited_events[idx];
		/* Ring entries share event buffers modulo the buffer count. */
		pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];

		rxd->rxd_sop = 1;
		rxd->rxd_seg_cnt = 0;
		rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
		rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
		    - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
		rxd->rxd_pa_hi = 0;
	}
	/*
	 * Initialize the receive information blocks (normal and SMT).
	 */
	pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
	pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;

	pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
	pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;

	/*
	 * Initialize the transmit information block.
	 */
	pdq->pdq_tx_hdr[0] = PDQ_FDDI_PH0;
	pdq->pdq_tx_hdr[1] = PDQ_FDDI_PH1;
	pdq->pdq_tx_hdr[2] = PDQ_FDDI_PH2;
	pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
	/* Shared SOP descriptor carrying the 3-byte FDDI packet header. */
	pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = sizeof(pdq->pdq_tx_hdr);
	pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
	pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_tx_hdr);

	state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
	PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));

	/*
	 * Stop the PDQ if it is running and put it into a known state.
	 */
	state = pdq_stop(pdq);

	PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
	PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
	/*
	 * If the adapter is not the state we expect, then the initialization
	 * failed. Cleanup and exit.
	 */
#if defined(PDQVERBOSE)
	if (state == PDQS_HALTED) {
		pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
		printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
		if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
			PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
			    PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
			    PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
	}
#endif
	if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
		goto cleanup_and_return;

	PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
	    pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
	    pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
	    pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
	PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
	    pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
	    pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
	PDQ_PRINTF(("PDQ Chip Revision = "));
	switch (pdq->pdq_chip_rev) {
	case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
	case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
	case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
	default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));
	}
	PDQ_PRINTF(("\n"));

	return pdq;
}