Annotation of sys/dev/pci/if_tht.c, Revision 1.1.1.1
1.1 nbrk 1: /* $OpenBSD: if_tht.c,v 1.108 2007/07/22 03:54:15 dlg Exp $ */
2:
3: /*
4: * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5: *
6: * Permission to use, copy, modify, and distribute this software for any
7: * purpose with or without fee is hereby granted, provided that the above
8: * copyright notice and this permission notice appear in all copies.
9: *
10: * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11: * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12: * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13: * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14: * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15: * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16: * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17: */
18:
19: /*
20: * Driver for the Tehuti TN30xx multi port 10Gb Ethernet chipsets,
21: * see http://www.tehutinetworks.net/.
22: *
23: * This driver was made possible because Tehuti networks provided
24: * hardware and documentation. Thanks!
25: */
26:
27: #include "bpfilter.h"
28:
29: #include <sys/param.h>
30: #include <sys/systm.h>
31: #include <sys/sockio.h>
32: #include <sys/mbuf.h>
33: #include <sys/kernel.h>
34: #include <sys/socket.h>
35: #include <sys/malloc.h>
36: #include <sys/device.h>
37: #include <sys/proc.h>
38: #include <sys/queue.h>
39: #include <sys/rwlock.h>
40:
41: #include <machine/bus.h>
42:
43: #include <dev/pci/pcireg.h>
44: #include <dev/pci/pcivar.h>
45: #include <dev/pci/pcidevs.h>
46:
47: #include <net/if.h>
48: #include <net/if_dl.h>
49: #include <net/if_media.h>
50: #include <net/if_types.h>
51:
52: #if NBPFILTER > 0
53: #include <net/bpf.h>
54: #endif
55:
56: #ifdef INET
57: #include <netinet/in.h>
58: #include <netinet/if_ether.h>
59: #endif
60:
61: #ifdef THT_DEBUG
62: #define THT_D_FIFO (1<<0)
63: #define THT_D_TX (1<<1)
64: #define THT_D_RX (1<<2)
65: #define THT_D_INTR (1<<3)
66:
67: int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;
68:
69: #define DPRINTF(l, f...) do { if (thtdebug & (l)) printf(f); } while (0)
70: #else
71: #define DPRINTF(l, f...)
72: #endif
73:
74: /* registers */
75:
76: #define THT_PCI_BAR 0x10
77:
78: #define _Q(_q) ((_q) * 4)
79:
80: /* General Configuration */
81: #define THT_REG_END_SEL 0x5448 /* PCI Endian Select */
82: #define THT_REG_CLKPLL 0x5000
83: #define THT_REG_CLKPLL_PLLLK (1<<9) /* PLL is locked */
84: #define THT_REG_CLKPLL_RSTEND (1<<8) /* Reset ended */
85: #define THT_REG_CLKPLL_TXF_DIS (1<<3) /* TX Free disabled */
86: #define THT_REG_CLKPLL_VNT_STOP (1<<2) /* VENETO Stop */
87: #define THT_REG_CLKPLL_PLLRST (1<<1) /* PLL Reset */
88: #define THT_REG_CLKPLL_SFTRST (1<<0) /* Software Reset */
89: /* Descriptors and FIFO Registers */
90: #define THT_REG_TXT_CFG0(_q) (0x4040 + _Q(_q)) /* CFG0 TX Task queues */
91: #define THT_REG_RXF_CFG0(_q) (0x4050 + _Q(_q)) /* CFG0 RX Free queues */
92: #define THT_REG_RXD_CFG0(_q) (0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
93: #define THT_REG_TXF_CFG0(_q) (0x4070 + _Q(_q)) /* CFG0 TX Free queues */
94: #define THT_REG_TXT_CFG1(_q) (0x4000 + _Q(_q)) /* CFG1 TX Task queues */
95: #define THT_REG_RXF_CFG1(_q) (0x4010 + _Q(_q)) /* CFG1 RX Free queues */
96: #define THT_REG_RXD_CFG1(_q) (0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
97: #define THT_REG_TXF_CFG1(_q) (0x4030 + _Q(_q)) /* CFG1 TX Free queues */
98: #define THT_REG_TXT_RPTR(_q) (0x40c0 + _Q(_q)) /* TX Task read ptr */
99: #define THT_REG_RXF_RPTR(_q) (0x40d0 + _Q(_q)) /* RX Free read ptr */
100: #define THT_REG_RXD_RPTR(_q) (0x40e0 + _Q(_q)) /* RX DSC read ptr */
101: #define THT_REG_TXF_RPTR(_q) (0x40f0 + _Q(_q)) /* TX Free read ptr */
102: #define THT_REG_TXT_WPTR(_q) (0x4080 + _Q(_q)) /* TX Task write ptr */
103: #define THT_REG_RXF_WPTR(_q) (0x4090 + _Q(_q)) /* RX Free write ptr */
104: #define THT_REG_RXD_WPTR(_q) (0x40a0 + _Q(_q)) /* RX DSC write ptr */
105: #define THT_REG_TXF_WPTR(_q) (0x40b0 + _Q(_q)) /* TX Free write ptr */
106: #define THT_REG_HTB_ADDR 0x4100 /* HTB Addressing Mechanism enable */
107: #define THT_REG_HTB_ADDR_HI 0x4110 /* High HTB Address */
108: #define THT_REG_HTB_ST_TMR 0x3290 /* HTB Timer */
109: #define THT_REG_RDINTCM(_q) (0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
110: #define THT_REG_RDINTCM_PKT_TH(_c) ((_c)<<20) /* pkt count threshold */
111: #define THT_REG_RDINTCM_RXF_TH(_c) ((_c)<<16) /* rxf intr req thresh */
112: #define THT_REG_RDINTCM_COAL_RC (1<<15) /* coalescing timer recharge */
113: #define THT_REG_RDINTCM_COAL(_c) (_c) /* coalescing timer */
114: #define THT_REG_TDINTCM(_q) (0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
115: #define THT_REG_TDINTCM_PKT_TH(_c) ((_c)<<20) /* pkt count threshold */
116: #define THT_REG_TDINTCM_COAL_RC (1<<15) /* coalescing timer recharge */
117: #define THT_REG_TDINTCM_COAL(_c) (_c) /* coalescing timer */
118: /* 10G Ethernet MAC */
119: #define THT_REG_10G_REV 0x6000 /* Revision */
120: #define THT_REG_10G_SCR 0x6004 /* Scratch */
121: #define THT_REG_10G_CTL 0x6008 /* Control/Status */
122: #define THT_REG_10G_CTL_CMD_FRAME_EN (1<<13) /* cmd frame enable */
123: #define THT_REG_10G_CTL_SW_RESET (1<<12) /* sw reset */
124: #define THT_REG_10G_CTL_STATS_AUTO_CLR (1<<11) /* auto clear statistics */
125: #define THT_REG_10G_CTL_LOOPBACK (1<<10) /* enable loopback */
126: #define THT_REG_10G_CTL_TX_ADDR_INS (1<<9) /* set mac on tx */
127: #define THT_REG_10G_CTL_PAUSE_IGNORE (1<<8) /* ignore pause */
128: #define THT_REG_10G_CTL_PAUSE_FWD (1<<7) /* forward pause */
129: #define THT_REG_10G_CTL_CRC_FWD (1<<6) /* crc forward */
130: #define THT_REG_10G_CTL_PAD (1<<5) /* frame padding */
131: #define THT_REG_10G_CTL_PROMISC (1<<4) /* promiscuous mode */
132: #define THT_REG_10G_CTL_WAN_MODE (1<<3) /* WAN mode */
133: #define THT_REG_10G_CTL_RX_EN (1<<1) /* RX enable */
134: #define THT_REG_10G_CTL_TX_EN (1<<0) /* TX enable */
135: #define THT_REG_10G_FRM_LEN 0x6014 /* Frame Length */
136: #define THT_REG_10G_PAUSE 0x6018 /* Pause Quanta */
137: #define THT_REG_10G_RX_SEC 0x601c /* RX Section */
138: #define THT_REG_10G_TX_SEC 0x6020 /* TX Section */
139: #define THT_REG_10G_SEC_AVAIL(_t) (_t) /* section available thresh*/
140: #define THT_REG_10G_SEC_EMPTY(_t) ((_t)<<16) /* section empty avail */
141: #define THT_REG_10G_RFIFO_AEF 0x6024 /* RX FIFO Almost Empty/Full */
142: #define THT_REG_10G_TFIFO_AEF 0x6028 /* TX FIFO Almost Empty/Full */
143: #define THT_REG_10G_FIFO_AE(_t) (_t) /* almost empty */
144: #define THT_REG_10G_FIFO_AF(_t) ((_t)<<16) /* almost full */
145: #define THT_REG_10G_SM_STAT 0x6030 /* MDIO Status */
146: #define THT_REG_10G_SM_CMD 0x6034 /* MDIO Command */
147: #define THT_REG_10G_SM_DAT 0x6038 /* MDIO Data */
148: #define THT_REG_10G_SM_ADD 0x603c /* MDIO Address */
149: #define THT_REG_10G_STAT 0x6040 /* Status */
150: /* Statistic Counters */
151: /* XXX todo */
152: /* Status Registers */
153: #define THT_REG_MAC_LNK_STAT 0x0200 /* Link Status */
154: #define THT_REG_MAC_LNK_STAT_DIS (1<<4) /* Mac Stats read disable */
155: #define THT_REG_MAC_LNK_STAT_LINK (1<<2) /* Link State */
156: #define THT_REG_MAC_LNK_STAT_REM_FAULT (1<<1) /* Remote Fault */
157: #define THT_REG_MAC_LNK_STAT_LOC_FAULT (1<<0) /* Local Fault */
158: /* Interrupt Registers */
159: #define THT_REG_ISR 0x5100 /* Interrupt Status */
160: #define THT_REG_ISR_LINKCHG(_p) (1<<(27+(_p))) /* link changed */
161: #define THT_REG_ISR_GPIO (1<<26) /* GPIO */
162: #define THT_REG_ISR_RFRSH (1<<25) /* DDR Refresh */
163: #define THT_REG_ISR_SWI (1<<23) /* software interrupt */
164: #define THT_REG_ISR_RXF(_q) (1<<(19+(_q))) /* rx free fifo */
165: #define THT_REG_ISR_TXF(_q) (1<<(15+(_q))) /* tx free fifo */
166: #define THT_REG_ISR_RXD(_q) (1<<(11+(_q))) /* rx desc fifo */
167: #define THT_REG_ISR_TMR(_t) (1<<(6+(_t))) /* timer */
168: #define THT_REG_ISR_VNT (1<<5) /* optistrata */
169: #define THT_REG_ISR_RxFL (1<<4) /* RX Full */
170: #define THT_REG_ISR_TR (1<<2) /* table read */
171: #define THT_REG_ISR_PCIE_LNK_INT (1<<1) /* pcie link fail */
172: #define THT_REG_ISR_GPLE_CLR (1<<0) /* pcie timeout */
173: #define THT_FMT_ISR "\020" "\035LINKCHG1" "\034LINKCHG0" \
174: "\033GPIO" "\032RFRSH" "\030SWI" \
175: "\027RXF3" "\026RXF2" "\025RXF1" \
176: "\024RXF0" "\023TXF3" "\022TXF2" \
177: "\021TXF1" "\020TXF0" "\017RXD3" \
178: "\016RXD2" "\015RXD1" "\014RXD0" \
179: "\012TMR3" "\011TMR2" "\010TMR1" \
180: "\007TMR0" "\006VNT" "\005RxFL" \
181: "\003TR" "\002PCI_LNK_INT" \
182: "\001GPLE_CLR"
183: #define THT_REG_ISR_GTI 0x5080 /* GTI Interrupt Status */
184: #define THT_REG_IMR 0x5110 /* Interrupt Mask */
185: #define THT_REG_IMR_LINKCHG(_p) (1<<(27+(_p))) /* link changed */
186: #define THT_REG_IMR_GPIO (1<<26) /* GPIO */
187: #define THT_REG_IMR_RFRSH (1<<25) /* DDR Refresh */
188: #define THT_REG_IMR_SWI (1<<23) /* software interrupt */
189: #define THT_REG_IMR_RXF(_q) (1<<(19+(_q))) /* rx free fifo */
190: #define THT_REG_IMR_TXF(_q) (1<<(15+(_q))) /* tx free fifo */
191: #define THT_REG_IMR_RXD(_q) (1<<(11+(_q))) /* rx desc fifo */
192: #define THT_REG_IMR_TMR(_t) (1<<(6+(_t))) /* timer */
193: #define THT_REG_IMR_VNT (1<<5) /* optistrata */
194: #define THT_REG_IMR_RxFL (1<<4) /* RX Full */
195: #define THT_REG_IMR_TR (1<<2) /* table read */
196: #define THT_REG_IMR_PCIE_LNK_INT (1<<1) /* pcie link fail */
197: #define THT_REG_IMR_GPLE_CLR (1<<0) /* pcie timeout */
198: #define THT_REG_IMR_GTI 0x5090 /* GTI Interrupt Mask */
199: #define THT_REG_ISR_MSK 0x5140 /* ISR Masked */
200: /* Global Counters */
201: /* XXX todo */
202: /* DDR2 SDRAM Controller Registers */
203: /* XXX TBD */
204: /* EEPROM Registers */
205: /* XXX todo */
206: /* Init arbitration and status registers */
207: #define THT_REG_INIT_SEMAPHORE 0x5170 /* Init Semaphore */
208: #define THT_REG_INIT_STATUS 0x5180 /* Init Status */
209: /* PCI Credits Registers */
210: /* XXX todo */
211: /* TX Arbitration Registers */
212: #define THT_REG_TXTSK_PR(_q) (0x41b0 + _Q(_q)) /* TX Queue Priority */
213: /* RX Part Registers */
214: #define THT_REG_RX_FLT 0x1240 /* RX Filter Configuration */
215: #define THT_REG_RX_FLT_ATXER (1<<15) /* accept with xfer err */
216: #define THT_REG_RX_FLT_ATRM (1<<14) /* accept with term err */
217: #define THT_REG_RX_FLT_AFTSQ (1<<13) /* accept with fault seq */
218: #define THT_REG_RX_FLT_OSEN (1<<12) /* enable pkts */
219: #define THT_REG_RX_FLT_APHER (1<<11) /* accept with phy err */
220: #define THT_REG_RX_FLT_TXFC (1<<10) /* TX flow control */
221: #define THT_REG_RX_FLT_FDA (1<<8) /* filter direct address */
222: #define THT_REG_RX_FLT_AOF (1<<7) /* accept overflow frame */
223: #define THT_REG_RX_FLT_ACF (1<<6) /* accept control frame */
224: #define THT_REG_RX_FLT_ARUNT (1<<5) /* accept runt */
225: #define THT_REG_RX_FLT_ACRC (1<<4) /* accept crc error */
226: #define THT_REG_RX_FLT_AM (1<<3) /* accept multicast */
227: #define THT_REG_RX_FLT_AB (1<<2) /* accept broadcast */
228: #define THT_REG_RX_FLT_PRM_MASK 0x3 /* promiscuous mode */
229: #define THT_REG_RX_FLT_PRM_NORMAL 0x0 /* normal mode */
230: #define THT_REG_RX_FLT_PRM_ALL 0x1 /* pass all incoming frames */
231: #define THT_REG_RX_MAX_FRAME 0x12c0 /* Max Frame Size */
232: #define THT_REG_RX_UNC_MAC0 0x1250 /* MAC Address low word */
233: #define THT_REG_RX_UNC_MAC1 0x1260 /* MAC Address mid word */
234: #define THT_REG_RX_UNC_MAC2 0x1270 /* MAC Address high word */
235: #define THT_REG_RX_MAC_MCST0(_m) (0x1a80 + (_m)*8)
236: #define THT_REG_RX_MAC_MCST1(_m) (0x1a84 + (_m)*8)
237: #define THT_REG_RX_MAC_MCST_CNT 15
238: #define THT_REG_RX_MCST_HASH 0x1a00 /* imperfect multicast filter hash */
239: #define THT_REG_RX_MCST_HASH_SIZE (256 / NBBY)
240: /* OptiStrata Debug Registers */
241: #define THT_REG_VPC 0x2300 /* Program Counter */
242: #define THT_REG_VLI 0x2310 /* Last Interrupt */
243: #define THT_REG_VIC 0x2320 /* Interrupts Count */
244: #define THT_REG_VTMR 0x2330 /* Timer */
245: #define THT_REG_VGLB 0x2340 /* Global */
246: /* SW Reset Registers */
247: #define THT_REG_RST_PRT 0x7000 /* Reset Port */
248: #define THT_REG_RST_PRT_ACTIVE 0x1 /* port reset is active */
249: #define THT_REG_DIS_PRT 0x7010 /* Disable Port */
250: #define THT_REG_RST_QU_0 0x7020 /* Reset Queue 0 */
251: #define THT_REG_RST_QU_1 0x7028 /* Reset Queue 1 */
252: #define THT_REG_DIS_QU_0 0x7030 /* Disable Queue 0 */
253: #define THT_REG_DIS_QU_1 0x7038 /* Disable Queue 1 */
254:
255: #define THT_PORT_SIZE 0x8000
256: #define THT_PORT_REGION(_p) ((_p) * THT_PORT_SIZE)
257: #define THT_NQUEUES 4
258:
259: #define THT_FIFO_ALIGN 4096
260: #define THT_FIFO_SIZE_4k 0x0
261: #define THT_FIFO_SIZE_8k 0x1
262: #define THT_FIFO_SIZE_16k 0x2
263: #define THT_FIFO_SIZE_32k 0x3
264: #define THT_FIFO_SIZE(_r) (4096 * (1<<(_r)))
265: #define THT_FIFO_GAP 8 /* keep 8 bytes between ptrs */
266: #define THT_FIFO_PTR_MASK 0x00007ff8 /* rptr/wptr mask */
267:
268: #define THT_FIFO_DESC_LEN 208 /* a descriptor cant be bigger than this */
269:
270: #define THT_IMR_DOWN(_p) (THT_REG_IMR_LINKCHG(_p))
271: #define THT_IMR_UP(_p) (THT_REG_IMR_LINKCHG(_p) | \
272: THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
273: THT_REG_IMR_RXD(0))
274:
275: /* hardware structures (we're using the 64 bit variants) */
276:
277: /* physical buffer descriptor */
278: struct tht_pbd {
279: u_int32_t addr_lo;
280: u_int32_t addr_hi;
281: u_int32_t len;
282: } __packed;
283: #define THT_PBD_PKTLEN (64 * 1024)
284:
285: /* rx free fifo */
286: struct tht_rx_free {
287: u_int16_t bc; /* buffer count (0:4) */
288: u_int16_t type;
289:
290: u_int64_t uid;
291:
292: /* followed by a pdb list */
293: } __packed;
294: #define THT_RXF_TYPE 1
295: #define THT_RXF_1ST_PDB_LEN 128
296: #define THT_RXF_SGL_LEN ((THT_FIFO_DESC_LEN - \
297: sizeof(struct tht_rx_free)) / \
298: sizeof(struct tht_pbd))
299:
300: /* rx descriptor */
301: struct tht_rx_desc {
302: u_int32_t flags;
303: #define THT_RXD_FLAGS_BC(_f) ((_f) & 0x1f) /* buffer count */
304: #define THT_RXD_FLAGS_RXFQ(_f) (((_f)>>8) & 0x3) /* rxf queue id */
305: #define THT_RXD_FLAGS_TO (1<<15)
306: #define THT_RXD_FLAGS_TYPE(_f) (((_f)>>16) & 0xf) /* desc type */
307: #define THT_RXD_FLAGS_OVF (1<<21) /* overflow error */
308: #define THT_RXD_FLAGS_RUNT (1<<22) /* runt error */
309: #define THT_RXD_FLAGS_CRC (1<<23) /* crc error */
310: #define THT_RXD_FLAGS_UDPCS (1<<24) /* udp checksum error */
311: #define THT_RXD_FLAGS_TCPCS (1<<25) /* tcp checksum error */
312: #define THT_RXD_FLAGS_IPCS (1<<26) /* ip checksum error */
313: #define THT_RXD_FLAGS_PKT_ID 0x70000000
314: #define THT_RXD_FLAGS_PKT_ID_NONIP 0x00000000
315: #define THT_RXD_FLAGS_PKT_ID_TCP4 0x10000000
316: #define THT_RXD_FLAGS_PKT_ID_UDP4 0x20000000
317: #define THT_RXD_FLAGS_PKT_ID_IPV4 0x30000000
318: #define THT_RXD_FLAGS_PKT_ID_TCP6 0x50000000
319: #define THT_RXD_FLAGS_PKT_ID_UDP6 0x60000000
320: #define THT_RXD_FLAGS_PKT_ID_IPV6 0x70000000
321: #define THT_RXD_FLAGS_VTAG (1<<31)
322: u_int16_t len;
323: u_int16_t vlan;
324: #define THT_RXD_VLAN_ID(_v) ((_v) & 0xfff)
325: #define THT_RXD_VLAN_CFI (1<<12)
326: #define THT_RXD_VLAN_PRI(_v) ((_v) & 0x7) >> 13)
327:
328: u_int64_t uid;
329: } __packed;
330: #define THT_RXD_TYPE 2
331:
/* rx descriptor type 3: data chain instruction */
333: struct tht_rx_desc_dc {
334: /* preceded by tht_rx_desc */
335:
336: u_int16_t cd_offset;
337: u_int16_t flags;
338:
339: u_int8_t data[4];
340: } __packed;
341: #define THT_RXD_TYPE_DC 3
342:
343: /* rx descriptor type 4: rss (recv side scaling) information */
344: struct tht_rx_desc_rss {
345: /* preceded by tht_rx_desc */
346:
347: u_int8_t rss_hft;
348: u_int8_t rss_type;
349: u_int8_t rss_tcpu;
350: u_int8_t reserved;
351:
352: u_int32_t rss_hash;
353: } __packed;
354: #define THT_RXD_TYPE_RSS 4
355:
356: /* tx task fifo */
357: struct tht_tx_task {
358: u_int32_t flags;
359: #define THT_TXT_FLAGS_BC(_f) (_f) /* buffer count */
360: #define THT_TXT_FLAGS_UDPCS (1<<5) /* udp checksum */
361: #define THT_TXT_FLAGS_TCPCS (1<<6) /* tcp checksum */
362: #define THT_TXT_FLAGS_IPCS (1<<7) /* ip checksum */
363: #define THT_TXT_FLAGS_VTAG (1<<8) /* insert vlan tag */
364: #define THT_TXT_FLAGS_LGSND (1<<9) /* tcp large send enabled */
365: #define THT_TXT_FLAGS_FRAG (1<<10) /* ip fragmentation enabled */
366: #define THT_TXT_FLAGS_CFI (1<<12) /* canonical format indicator */
367: #define THT_TXT_FLAGS_PRIO(_f) ((_f)<<13) /* vlan priority */
368: #define THT_TXT_FLAGS_VLAN(_f) ((_f)<<20) /* vlan id */
369: u_int16_t mss_mtu;
370: u_int16_t len;
371:
372: u_int64_t uid;
373:
374: /* followed by a pbd list */
375: } __packed;
376: #define THT_TXT_TYPE (3<<16)
377: #define THT_TXT_SGL_LEN ((THT_FIFO_DESC_LEN - \
378: sizeof(struct tht_tx_task)) / \
379: sizeof(struct tht_pbd))
380:
381: /* tx free fifo */
382: struct tht_tx_free {
383: u_int32_t status;
384:
385: u_int64_t uid;
386:
387: u_int32_t pad;
388: } __packed;
389:
390: /* pci controller autoconf glue */
391:
392: struct thtc_softc {
393: struct device sc_dev;
394:
395: bus_dma_tag_t sc_dmat;
396:
397: bus_space_tag_t sc_memt;
398: bus_space_handle_t sc_memh;
399: bus_size_t sc_mems;
400: };
401:
402: int thtc_match(struct device *, void *, void *);
403: void thtc_attach(struct device *, struct device *, void *);
404: int thtc_print(void *, const char *);
405:
406: struct cfattach thtc_ca = {
407: sizeof(struct thtc_softc), thtc_match, thtc_attach
408: };
409:
410: struct cfdriver thtc_cd = {
411: NULL, "thtc", DV_DULL
412: };
413:
414: /* glue between the controller and the port */
415:
416: struct tht_attach_args {
417: int taa_port;
418:
419: struct pci_attach_args *taa_pa;
420: pci_intr_handle_t taa_ih;
421: };
422:
423: /* tht itself */
424:
425: struct tht_dmamem {
426: bus_dmamap_t tdm_map;
427: bus_dma_segment_t tdm_seg;
428: size_t tdm_size;
429: caddr_t tdm_kva;
430: };
431: #define THT_DMA_MAP(_tdm) ((_tdm)->tdm_map)
432: #define THT_DMA_DVA(_tdm) ((_tdm)->tdm_map->dm_segs[0].ds_addr)
433: #define THT_DMA_KVA(_tdm) ((void *)(_tdm)->tdm_kva)
434:
435: struct tht_fifo_desc {
436: bus_size_t tfd_cfg0;
437: bus_size_t tfd_cfg1;
438: bus_size_t tfd_rptr;
439: bus_size_t tfd_wptr;
440: u_int32_t tfd_size;
441: int tfd_write;
442: };
443: #define THT_FIFO_PRE_SYNC(_d) ((_d)->tfd_write ? \
444: BUS_DMASYNC_PREWRITE : \
445: BUS_DMASYNC_PREREAD)
446: #define THT_FIFO_POST_SYNC(_d) ((_d)->tfd_write ? \
447: BUS_DMASYNC_POSTWRITE : \
448: BUS_DMASYNC_POSTREAD)
449:
450: struct tht_fifo {
451: struct tht_fifo_desc *tf_desc;
452: struct tht_dmamem *tf_mem;
453: int tf_len;
454: int tf_rptr;
455: int tf_wptr;
456: int tf_ready;
457: };
458:
459: struct tht_pkt {
460: u_int64_t tp_id;
461:
462: bus_dmamap_t tp_dmap;
463: struct mbuf *tp_m;
464:
465: TAILQ_ENTRY(tht_pkt) tp_link;
466: };
467:
468: struct tht_pkt_list {
469: struct tht_pkt *tpl_pkts;
470: TAILQ_HEAD(, tht_pkt) tpl_free;
471: TAILQ_HEAD(, tht_pkt) tpl_used;
472: };
473:
474: struct tht_softc {
475: struct device sc_dev;
476: struct thtc_softc *sc_thtc;
477: int sc_port;
478:
479: void *sc_ih;
480:
481: bus_space_handle_t sc_memh;
482:
483: struct arpcom sc_ac;
484: struct ifmedia sc_media;
485:
486: u_int16_t sc_lladdr[3];
487:
488: struct tht_pkt_list sc_tx_list;
489: struct tht_pkt_list sc_rx_list;
490:
491: struct tht_fifo sc_txt;
492: struct tht_fifo sc_rxf;
493: struct tht_fifo sc_rxd;
494: struct tht_fifo sc_txf;
495:
496: u_int32_t sc_imr;
497:
498: struct rwlock sc_lock;
499: };
500:
501: int tht_match(struct device *, void *, void *);
502: void tht_attach(struct device *, struct device *, void *);
503: void tht_mountroot(void *);
504: int tht_intr(void *);
505:
506: struct cfattach tht_ca = {
507: sizeof(struct tht_softc), tht_match, tht_attach
508: };
509:
510: struct cfdriver tht_cd = {
511: NULL, "tht", DV_IFNET
512: };
513:
514: /* pkts */
515: int tht_pkt_alloc(struct tht_softc *,
516: struct tht_pkt_list *, int, int);
517: void tht_pkt_free(struct tht_softc *,
518: struct tht_pkt_list *);
519: void tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
520: struct tht_pkt *tht_pkt_get(struct tht_pkt_list *);
521: struct tht_pkt *tht_pkt_used(struct tht_pkt_list *);
522:
523: /* fifos */
524:
525: struct tht_fifo_desc tht_txt_desc = {
526: THT_REG_TXT_CFG0(0),
527: THT_REG_TXT_CFG1(0),
528: THT_REG_TXT_RPTR(0),
529: THT_REG_TXT_WPTR(0),
530: THT_FIFO_SIZE_16k,
531: 1
532: };
533:
534: struct tht_fifo_desc tht_rxf_desc = {
535: THT_REG_RXF_CFG0(0),
536: THT_REG_RXF_CFG1(0),
537: THT_REG_RXF_RPTR(0),
538: THT_REG_RXF_WPTR(0),
539: THT_FIFO_SIZE_16k,
540: 1
541: };
542:
543: struct tht_fifo_desc tht_rxd_desc = {
544: THT_REG_RXD_CFG0(0),
545: THT_REG_RXD_CFG1(0),
546: THT_REG_RXD_RPTR(0),
547: THT_REG_RXD_WPTR(0),
548: THT_FIFO_SIZE_16k,
549: 0
550: };
551:
552: struct tht_fifo_desc tht_txf_desc = {
553: THT_REG_TXF_CFG0(0),
554: THT_REG_TXF_CFG1(0),
555: THT_REG_TXF_RPTR(0),
556: THT_REG_TXF_WPTR(0),
557: THT_FIFO_SIZE_4k,
558: 0
559: };
560:
561: int tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
562: struct tht_fifo_desc *);
563: void tht_fifo_free(struct tht_softc *, struct tht_fifo *);
564:
565: size_t tht_fifo_readable(struct tht_softc *,
566: struct tht_fifo *);
567: size_t tht_fifo_writable(struct tht_softc *,
568: struct tht_fifo *);
569: void tht_fifo_pre(struct tht_softc *,
570: struct tht_fifo *);
571: void tht_fifo_read(struct tht_softc *, struct tht_fifo *,
572: void *, size_t);
573: void tht_fifo_write(struct tht_softc *, struct tht_fifo *,
574: void *, size_t);
575: void tht_fifo_write_dmap(struct tht_softc *,
576: struct tht_fifo *, bus_dmamap_t);
577: void tht_fifo_write_pad(struct tht_softc *,
578: struct tht_fifo *, int);
579: void tht_fifo_post(struct tht_softc *,
580: struct tht_fifo *);
581:
582: /* port operations */
583: void tht_lladdr_read(struct tht_softc *);
584: void tht_lladdr_write(struct tht_softc *);
585: int tht_sw_reset(struct tht_softc *);
586: int tht_fw_load(struct tht_softc *);
587: void tht_fw_tick(void *arg);
588: void tht_link_state(struct tht_softc *);
589:
590: /* interface operations */
591: int tht_ioctl(struct ifnet *, u_long, caddr_t);
592: void tht_watchdog(struct ifnet *);
593: void tht_start(struct ifnet *);
594: int tht_load_pkt(struct tht_softc *, struct tht_pkt *,
595: struct mbuf *);
596: void tht_txf(struct tht_softc *sc);
597:
598: void tht_rxf_fill(struct tht_softc *, int);
599: void tht_rxf_drain(struct tht_softc *);
600: void tht_rxd(struct tht_softc *);
601:
602: void tht_up(struct tht_softc *);
603: void tht_iff(struct tht_softc *);
604: void tht_down(struct tht_softc *);
605:
606: /* ifmedia operations */
607: int tht_media_change(struct ifnet *);
608: void tht_media_status(struct ifnet *, struct ifmediareq *);
609:
610: /* wrapper around dma memory */
611: struct tht_dmamem *tht_dmamem_alloc(struct tht_softc *, bus_size_t,
612: bus_size_t);
613: void tht_dmamem_free(struct tht_softc *,
614: struct tht_dmamem *);
615:
616: /* bus space operations */
617: u_int32_t tht_read(struct tht_softc *, bus_size_t);
618: void tht_write(struct tht_softc *, bus_size_t, u_int32_t);
619: void tht_write_region(struct tht_softc *, bus_size_t,
620: void *, size_t);
621: int tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
622: u_int32_t, int);
623: int tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
624: u_int32_t, int);
625:
626: #define tht_set(_s, _r, _b) tht_write((_s), (_r), \
627: tht_read((_s), (_r)) | (_b))
628: #define tht_clr(_s, _r, _b) tht_write((_s), (_r), \
629: tht_read((_s), (_r)) & ~(_b))
630: #define tht_wait_set(_s, _r, _b, _t) tht_wait_eq((_s), (_r), \
631: (_b), (_b), (_t))
632:
633:
634: /* misc */
635: #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
636: #define sizeofa(_a) (sizeof(_a) / sizeof((_a)[0]))
637: #define LWORDS(_b) (((_b) + 7) >> 3)
638:
639:
640: struct thtc_device {
641: pci_vendor_id_t td_vendor;
642: pci_vendor_id_t td_product;
643: u_int td_nports;
644: };
645:
646: const struct thtc_device *thtc_lookup(struct pci_attach_args *);
647:
648: static const struct thtc_device thtc_devices[] = {
649: { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3009, 1 },
650: { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3010, 1 },
651: { PCI_VENDOR_TEHUTI, PCI_PRODUCT_TEHUTI_TN3014, 2 }
652: };
653:
654: const struct thtc_device *
655: thtc_lookup(struct pci_attach_args *pa)
656: {
657: int i;
658: const struct thtc_device *td;
659:
660: for (i = 0; i < sizeofa(thtc_devices); i++) {
661: td = &thtc_devices[i];
662: if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
663: td->td_product == PCI_PRODUCT(pa->pa_id))
664: return (td);
665: }
666:
667: return (NULL);
668: }
669:
670: int
671: thtc_match(struct device *parent, void *match, void *aux)
672: {
673: struct pci_attach_args *pa = aux;
674:
675: if (thtc_lookup(pa) != NULL)
676: return (1);
677:
678: return (0);
679: }
680:
681: void
682: thtc_attach(struct device *parent, struct device *self, void *aux)
683: {
684: struct thtc_softc *sc = (struct thtc_softc *)self;
685: struct pci_attach_args *pa = aux;
686: pcireg_t memtype;
687: const struct thtc_device *td;
688: struct tht_attach_args taa;
689: int i;
690:
691: bzero(&taa, sizeof(taa));
692: td = thtc_lookup(pa);
693:
694: sc->sc_dmat = pa->pa_dmat;
695:
696: memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
697: if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
698: &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
699: printf(": unable to map host registers\n");
700: return;
701: }
702:
703: if (pci_intr_map(pa, &taa.taa_ih) != 0) {
704: printf(": unable to map interrupt\n");
705: goto unmap;
706: }
707: printf(": %s\n", pci_intr_string(pa->pa_pc, taa.taa_ih));
708:
709: taa.taa_pa = pa;
710: for (i = 0; i < td->td_nports; i++) {
711: taa.taa_port = i;
712:
713: config_found(self, &taa, thtc_print);
714: }
715:
716: return;
717:
718: unmap:
719: bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
720: sc->sc_mems = 0;
721: }
722:
723: int
724: thtc_print(void *aux, const char *pnp)
725: {
726: struct tht_attach_args *taa = aux;
727:
728: if (pnp != NULL)
729: printf("\"%s\" at %s", tht_cd.cd_name, pnp);
730:
731: printf(" port %d", taa->taa_port);
732:
733: return (UNCONF);
734: }
735:
/*
 * Ports are only ever attached by thtc(4), which has already matched
 * the PCI device, so every candidate is acceptable.
 */
int
tht_match(struct device *parent, void *match, void *aux)
{
	return (1);
}
741:
/*
 * Attach one port of the controller: carve the port's register
 * subregion out of the parent's mapping, reset the port, hook the
 * (shared) interrupt, read the MAC address, and plumb the ifnet.
 * Firmware loading is deferred to a mountroot hook (see
 * tht_mountroot) -- presumably the firmware image needs the root
 * filesystem to be available; TODO confirm.
 */
void
tht_attach(struct device *parent, struct device *self, void *aux)
{
	struct thtc_softc		*csc = (struct thtc_softc *)parent;
	struct tht_softc		*sc = (struct tht_softc *)self;
	struct tht_attach_args		*taa = aux;
	struct ifnet			*ifp;

	sc->sc_thtc = csc;
	sc->sc_port = taa->taa_port;
	/* start with only link-change interrupts; tht_up() widens this */
	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	rw_init(&sc->sc_lock, "thtioc");

	/* each port owns a THT_PORT_SIZE window of the shared BAR */
	if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
	    THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
	    &sc->sc_memh) != 0) {
		printf(": unable to map port registers\n");
		return;
	}

	if (tht_sw_reset(sc) != 0) {
		printf(": unable to reset port\n");
		/* bus_space(9) says we dont have to free subregions */
		return;
	}

	sc->sc_ih = pci_intr_establish(taa->taa_pa->pa_pc, taa->taa_ih,
	    IPL_NET, tht_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		/* bus_space(9) says we dont have to free subregions */
		return;
	}

	/* fetch the factory MAC from the chip into sc_lladdr */
	tht_lladdr_read(sc);
	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
	ifp->if_ioctl = tht_ioctl;
	ifp->if_start = tht_start;
	ifp->if_watchdog = tht_watchdog;
	/* rx buffers are single clusters, so cap the mtu accordingly */
	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 400);
	IFQ_SET_READY(&ifp->if_snd);

	/* no selectable media on 10G: advertise autoselect only */
	ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	/* finish bringup (firmware load) once the root fs is mounted */
	mountroothook_establish(tht_mountroot, sc);
}
803:
/*
 * Mountroot hook: load the chip firmware, then leave the port quiesced
 * with link-change interrupts enabled.  The tx task fifo is allocated
 * temporarily because the firmware is pushed through it, then freed
 * again; tht_up() re-allocates it when the interface is brought up.
 */
void
tht_mountroot(void *arg)
{
	struct tht_softc		*sc = arg;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		return;

	/* a failed load is reported but not fatal to attach */
	if (tht_fw_load(sc) != 0)
		printf("%s: firmware load failed\n", DEVNAME(sc));

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txt);

	/* report initial link state and arm the (down-state) interrupts */
	tht_link_state(sc);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);
}
822:
/*
 * Interrupt handler.  Reads the interrupt status register and
 * dispatches to the link/rx/tx service routines.  The IMR is
 * rewritten on every exit path -- NOTE(review): this looks like the
 * ISR read masks further interrupts and the IMR write re-arms them;
 * confirm against the chip documentation.
 */
int
tht_intr(void *arg)
{
	struct tht_softc		*sc = arg;
	struct ifnet			*ifp;
	u_int32_t			isr;

	isr = tht_read(sc, THT_REG_ISR);
	if (isr == 0x0) {
		/* not ours: re-arm and decline the interrupt */
		tht_write(sc, THT_REG_IMR, sc->sc_imr);
		return (0);
	}

	DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr, THT_FMT_ISR);

	if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
		tht_link_state(sc);

	ifp = &sc->sc_ac.ac_if;
	if (ifp->if_flags & IFF_RUNNING) {
		/* completed rx packets */
		if (ISSET(isr, THT_REG_ISR_RXD(0)))
			tht_rxd(sc);

		/* rx free fifo is running low: replenish buffers */
		if (ISSET(isr, THT_REG_ISR_RXF(0)))
			tht_rxf_fill(sc, 0);

		/* completed tx packets: reclaim their resources */
		if (ISSET(isr, THT_REG_ISR_TXF(0)))
			tht_txf(sc);

		/* tx reclaim may have freed room for queued packets */
		tht_start(ifp);
	}

	tht_write(sc, THT_REG_IMR, sc->sc_imr);
	return (1);
}
858:
/*
 * Interface ioctl handler.  Serialised against itself by sc_lock and
 * against the network stack by splnet().  Returns 0 on success or an
 * errno; ENETRESET from the sub-handlers is translated into a filter
 * reprogram (tht_iff) when the interface is running.
 */
int
tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct ifreq			*ifr = (struct ifreq *)addr;
	struct ifaddr			*ifa;
	int				error;
	int				s;

	rw_enter_write(&sc->sc_lock);
	s = splnet();

	/* let the generic ethernet code have first crack at the request */
	error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
	if (error > 0)
		goto err;

	switch (cmd) {
	case SIOCSIFADDR:
		ifa = (struct ifaddr *)addr;

#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_ac, ifa);
#endif

		/* setting an address implies bringing the interface up */
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				tht_iff(sc);	/* already up: just refilter */
			else
				tht_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				tht_down(sc);
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCADDMULTI:
		error = ether_addmulti(ifr, &sc->sc_ac);
		break;
	case SIOCDELMULTI:
		error = ether_delmulti(ifr, &sc->sc_ac);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	/* multicast/media changes request a filter reload via ENETRESET */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			tht_iff(sc);
		error = 0;
	}

err:
	splx(s);
	rw_exit_write(&sc->sc_lock);

	return (error);
}
934:
/*
 * Bring the interface up: allocate the packet lists and the four
 * hardware fifos, program the MAC/rx/interrupt-coalescing registers,
 * prime the rx free fifo, and finally widen the interrupt mask.
 * On any allocation failure everything acquired so far is unwound
 * via the goto chain at the bottom.
 */
void
tht_up(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		printf("%s: interface is already up\n", DEVNAME(sc));
		return;
	}

	if (tht_pkt_alloc(sc, &sc->sc_tx_list, 128, THT_TXT_SGL_LEN) != 0)
		return;
	if (tht_pkt_alloc(sc, &sc->sc_rx_list, 128, THT_RXF_SGL_LEN) != 0)
		goto free_tx_list;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		goto free_rx_list;
	if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
		goto free_txt;
	if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
		goto free_rxf;
	if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
		goto free_rxd;

	/* 10G MAC setup; thresholds follow the vendor-recommended values */
	tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
	tht_write(sc, THT_REG_10G_PAUSE, 0x96);
	tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0x80));
	tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0xe0));
	tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
	    THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
	    THT_REG_10G_CTL_PROMISC);

	tht_write(sc, THT_REG_VGLB, 0);

	tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);

	/* interrupt coalescing for the rx and tx descriptor queues */
	tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
	    THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
	    THT_REG_RDINTCM_COAL(0x20));
	tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
	    THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));

	/* push the (possibly overridden) lladdr back into the chip */
	bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
	tht_lladdr_write(sc);

	/* populate rxf fifo */
	tht_rxf_fill(sc, 1);

	/* program the rx filter */
	tht_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* enable interrupts */
	sc->sc_imr = THT_IMR_UP(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	return;

free_rxd:
	tht_fifo_free(sc, &sc->sc_rxd);
free_rxf:
	tht_fifo_free(sc, &sc->sc_rxf);
free_txt:
	tht_fifo_free(sc, &sc->sc_txt);

	tht_sw_reset(sc);

free_rx_list:
	tht_pkt_free(sc, &sc->sc_rx_list);
free_tx_list:
	tht_pkt_free(sc, &sc->sc_tx_list);
}
1014:
/*
 * Program the receive filter. The perfect multicast match registers
 * and the imperfect hash table are cleared first, then depending on
 * interface state: promiscuous mode accepts everything, configured
 * multicast ranges force all-multicast (hash fully set), otherwise
 * each multicast address is hashed (xor of its six bytes) into the
 * imperfect filter table.
 */
void
tht_iff(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct ether_multi		*enm;
	struct ether_multistep		step;
	u_int32_t			rxf;
	u_int8_t			imf[THT_REG_RX_MCST_HASH_SIZE];
	u_int8_t			hash;
	int				i;

	ifp->if_flags &= ~IFF_ALLMULTI;

	/* baseline filter: on, accept matched multicast and broadcast */
	rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
	for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
		tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
		tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
	}
	memset(imf, 0x00, sizeof(imf));

	if (ifp->if_flags & IFF_PROMISC)
		rxf |= THT_REG_RX_FLT_PRM_ALL;
	else if (sc->sc_ac.ac_multirangecnt > 0) {
		/* ranges can't be expressed in the hash; accept them all */
		ifp->if_flags |= IFF_ALLMULTI;
		memset(imf, 0xff, sizeof(imf));
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);

#if 0
		/* fill the perfect multicast filters */
		for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
			if (enm == NULL)
				break;

			tht_write(sc, THT_REG_RX_MAC_MCST0(i),
			    (enm->enm_addrlo[0] << 0) |
			    (enm->enm_addrlo[1] << 8) |
			    (enm->enm_addrlo[2] << 16) |
			    (enm->enm_addrlo[3] << 24));
			tht_write(sc, THT_REG_RX_MAC_MCST1(i),
			    (enm->enm_addrlo[4] << 0) |
			    (enm->enm_addrlo[5] << 8));

			ETHER_NEXT_MULTI(step, enm);
		}
#endif

		/* fill the imperfect multicast filter with what's left */
		while (enm != NULL) {
			hash = 0x00;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				hash ^= enm->enm_addrlo[i];
			setbit(imf, hash);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
	tht_write(sc, THT_REG_RX_FLT, rxf);
}
1076:
/*
 * Take the interface down: wait for in-flight transmits to drain,
 * mask the port interrupts, soft reset the hardware, then release the
 * fifos, any mbufs still on the rx free fifo, and the packet lists.
 */
void
tht_down(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		printf("%s: interface is already down\n", DEVNAME(sc));
		return;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE | IFF_ALLMULTI);

	/*
	 * sleep until the txt fifo is fully writable (nothing queued)
	 * and the txf fifo reports no outstanding completions
	 */
	while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
	    tht_fifo_readable(sc, &sc->sc_txf) > 0)
		tsleep(sc, 0, "thtdown", hz);

	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txf);
	tht_fifo_free(sc, &sc->sc_rxd);
	tht_fifo_free(sc, &sc->sc_rxf);
	tht_fifo_free(sc, &sc->sc_txt);

	/* free mbufs that were on the rxf fifo */
	tht_rxf_drain(sc);

	tht_pkt_free(sc, &sc->sc_rx_list);
	tht_pkt_free(sc, &sc->sc_tx_list);
}
1109:
/*
 * if_start handler: dequeue mbufs from the send queue, load each into
 * a tx packet's dma map, and write a tx task descriptor followed by
 * its buffer descriptors onto the txt fifo. Stops when the queue, the
 * packet list (sets IFF_OACTIVE) or the fifo space runs out.
 */
void
tht_start(struct ifnet *ifp)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct tht_pkt			*pkt;
	struct tht_tx_task		txt;
	u_int32_t			flags;
	struct mbuf			*m;
	int				bc;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifp->if_flags & IFF_OACTIVE)
		return;
	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
		return;

	bzero(&txt, sizeof(txt));

	tht_fifo_pre(sc, &sc->sc_txt);

	do {
		/* peek first; only dequeue once a packet slot is secured */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		pkt = tht_pkt_get(&sc->sc_tx_list);
		if (pkt == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (tht_load_pkt(sc, pkt, m) != 0) {
			m_freem(m);
			tht_pkt_put(&sc->sc_tx_list, pkt);
			ifp->if_oerrors++;
			break;
		}
		/* thou shalt not use m after this point, only pkt->tp_m */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
#endif

		/* byte count of the descriptor: task header + one pbd/seg */
		bc = sizeof(txt) +
		    sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;

		flags = THT_TXT_TYPE | THT_TXT_FLAGS_UDPCS |
		    THT_TXT_FLAGS_TCPCS | THT_TXT_FLAGS_IPCS | LWORDS(bc);
		txt.flags = htole32(flags);
		txt.len = htole16(pkt->tp_m->m_pkthdr.len);
		txt.uid = pkt->tp_id;

		DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
		    DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);

		tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
		tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
		tht_fifo_write_pad(sc, &sc->sc_txt, bc);

		bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
		    pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		ifp->if_opackets++;

	} while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);

	/* sync the fifo memory and bump the hardware write pointer */
	tht_fifo_post(sc, &sc->sc_txt);
}
1184:
/*
 * Load mbuf chain m into pkt's dma map. If the chain is too
 * fragmented for the map (EFBIG), its data is copied into a single
 * freshly allocated mbuf (cluster backed when it won't fit in a
 * header mbuf) and that is loaded instead, with the original chain
 * freed. On success pkt->tp_m owns the mbuf and 0 is returned;
 * on failure the caller keeps ownership of m and ENOBUFS is returned.
 *
 * NOTE(review): the defrag path assumes m_pkthdr.len fits in one
 * cluster (MCLBYTES) — presumably guaranteed by the interface MTU;
 * confirm against the attach code.
 */
int
tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap = pkt->tp_dmap;
	struct mbuf			*m0 = NULL;

	switch(bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
	case 0:
		pkt->tp_m = m;
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m0, M_DONTWAIT);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		/* flatten the chain into the single new mbuf */
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
			m_freem(m0);
			return (ENOBUFS);
		}

		m_freem(m);
		pkt->tp_m = m0;
		break;

	default:
		return (ENOBUFS);
	}

	return (0);
}
1225:
/*
 * Process the tx free fifo: for every completion descriptor the
 * hardware has posted, unload and sync the matching packet's dma map,
 * free its mbuf and return the packet slot to the free list. Clears
 * IFF_OACTIVE since slots have just been made available.
 */
void
tht_txf(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_tx_free		txf;
	struct tht_pkt			*pkt;

	if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
		return;

	tht_fifo_pre(sc, &sc->sc_txf);

	do {
		tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));

		DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);

		/* uid is the tp_id we stamped into the tx task descriptor */
		pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmap);

		m_freem(pkt->tp_m);

		tht_pkt_put(&sc->sc_tx_list, pkt);

	} while (sc->sc_txf.tf_ready >= sizeof(txf));

	ifp->if_flags &= ~IFF_OACTIVE;

	tht_fifo_post(sc, &sc->sc_txf);
}
1262:
/*
 * Populate the rx free fifo with cluster mbufs for the hardware to
 * receive into. For each free packet slot: allocate a cluster mbuf,
 * load it into the slot's dma map and write an rx free descriptor
 * (header plus one pbd per dma segment) onto the fifo. "wait"
 * selects blocking (interface bring-up) or non-blocking (interrupt
 * path) allocation. Stops when slots, mbufs or fifo space run out.
 */
void
tht_rxf_fill(struct tht_softc *sc, int wait)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_free		rxf;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	int				bc;

	if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
		return;

	tht_fifo_pre(sc, &sc->sc_rxf);

	for (;;) {
		if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
			goto done;

		MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto put_pkt;

		MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
		if (!ISSET(m->m_flags, M_EXT))
			goto free_m;

		/* keep the ip header aligned once the ethernet header is on */
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;

		dmap = pkt->tp_dmap;
		if (bus_dmamap_load_mbuf(dmat, dmap, m,
		    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
			goto free_m;

		pkt->tp_m = m;

		bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;

		rxf.bc = htole16(LWORDS(bc));
		rxf.type = htole16(THT_RXF_TYPE);
		rxf.uid = pkt->tp_id;

		tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
		tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
		tht_fifo_write_pad(sc, &sc->sc_rxf, bc);

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
			goto done;
	}

	/* partial-iteration unwind: undo the allocations of this pass */
free_m:
	m_freem(m);
put_pkt:
	tht_pkt_put(&sc->sc_rx_list, pkt);
done:
	tht_fifo_post(sc, &sc->sc_rxf);
}
1324:
1325: void
1326: tht_rxf_drain(struct tht_softc *sc)
1327: {
1328: bus_dma_tag_t dmat = sc->sc_thtc->sc_dmat;
1329: bus_dmamap_t dmap;
1330: struct tht_pkt *pkt;
1331:
1332: while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
1333: dmap = pkt->tp_dmap;
1334:
1335: bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1336: BUS_DMASYNC_POSTREAD);
1337: bus_dmamap_unload(dmat, dmap);
1338:
1339: m_freem(pkt->tp_m);
1340:
1341: tht_pkt_put(&sc->sc_rx_list, pkt);
1342: }
1343: }
1344:
/*
 * Process the rx done fifo: for every descriptor the hardware has
 * posted, unload the matching packet's dma map, hand the mbuf to the
 * network stack, and return the packet slot. Finishes by refilling
 * the rx free fifo.
 */
void
tht_rxd(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_desc		rxd;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	int				bc;
	u_int32_t			flags;

	if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
		return;

	tht_fifo_pre(sc, &sc->sc_rxd);

	do {
		tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));

		flags = letoh32(rxd.flags);
		/* descriptor byte count is in 8 byte words, minus header */
		bc = THT_RXD_FLAGS_BC(flags) * 8;
		bc -= sizeof(rxd);
		pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];

		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(dmat, dmap);

		m = pkt->tp_m;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = letoh16(rxd.len);

		/*
		 * NOTE(review): a clear CS flag is treated as "checksum
		 * verified ok" — presumably the bit signals a checksum
		 * failure; confirm against the chip documentation.
		 */
		if (!ISSET(flags, THT_RXD_FLAGS_IPCS))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if (!ISSET(flags, THT_RXD_FLAGS_TCPCS))
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		if (!ISSET(flags, THT_RXD_FLAGS_UDPCS))
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

		/* XXX process type 3 rx descriptors */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		ether_input_mbuf(ifp, m);

		tht_pkt_put(&sc->sc_rx_list, pkt);

		/* consume any descriptor payload beyond the fixed header */
		while (bc > 0) {
			/* static only to give the discarded words a sink */
			static u_int32_t pad;

			tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
			bc -= sizeof(pad);
		}

		ifp->if_ipackets++;

	} while (sc->sc_rxd.tf_ready >= sizeof(rxd));

	tht_fifo_post(sc, &sc->sc_rxd);

	/* put more pkts on the fifo */
	tht_rxf_fill(sc, 0);
}
1414:
/*
 * if_watchdog handler. Intentionally empty: transmit timeouts are not
 * acted upon here.
 */
void
tht_watchdog(struct ifnet *ifp)
{
	/* intentionally empty */
}
1420:
/*
 * ifmedia change callback. Media selection requests are accepted but
 * have no effect on the hardware.
 */
int
tht_media_change(struct ifnet *ifp)
{
	return (0);	/* nothing to reconfigure */
}
1427:
1428: void
1429: tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1430: {
1431: struct tht_softc *sc = ifp->if_softc;
1432:
1433: imr->ifm_active = IFM_ETHER | IFM_AUTO;
1434: imr->ifm_status = IFM_AVALID;
1435:
1436: tht_link_state(sc);
1437:
1438: if (LINK_STATE_IS_UP(ifp->if_link_state))
1439: imr->ifm_status |= IFM_ACTIVE;
1440: }
1441:
/*
 * Allocate the dma memory backing a hardware fifo and program its
 * base address and size into the chip via the descriptor's cfg
 * registers (low 32 bits or'ed with the size code, then high 32
 * bits). Returns 0 on success, 1 on allocation failure.
 */
int
tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
    struct tht_fifo_desc *tfd)
{
	u_int64_t			dva;

	tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
	tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
	if (tf->tf_mem == NULL)
		return (1);

	tf->tf_desc = tfd;
	tf->tf_rptr = tf->tf_wptr = 0;

	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));

	dva = THT_DMA_DVA(tf->tf_mem);
	tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
	tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));

	return (0);
}
1465:
/*
 * Tear down a hardware fifo: sync the dma memory for the cpu and
 * release it.
 */
void
tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
	tht_dmamem_free(sc, tf->tf_mem);
}
1473:
/*
 * Refresh the fifo's write pointer from the hardware and return how
 * many bytes are available to read (tf_ready). tf_ready is computed
 * signed so that a wrapped write pointer is corrected by adding the
 * fifo length.
 */
size_t
tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
	tf->tf_wptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
	if (tf->tf_ready < 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}
1488:
/*
 * Refresh the fifo's read pointer from the hardware and return how
 * many bytes may be written (tf_ready). Note <= 0 wraps to a full
 * fifo length: pointers being equal means empty, never full.
 */
size_t
tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
	tf->tf_rptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
	if (tf->tf_ready <= 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}
1503:
/*
 * Sync the fifo dma memory for cpu access; call before a batch of
 * tht_fifo_read/tht_fifo_write operations, paired with
 * tht_fifo_post().
 */
void
tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
}
1510:
/*
 * Copy buflen bytes out of the fifo ring at the current read pointer
 * into buf, handling wrap-around at the end of the ring, and advance
 * the read pointer and tf_ready accordingly. The caller must have
 * checked tf_ready and bracketed the call with tht_fifo_pre/post.
 */
void
tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	/* bytes remaining before the end of the ring */
	len = tf->tf_len - tf->tf_rptr;

	if (len < buflen) {
		/* copy the tail, then wrap to the start of the ring */
		memcpy(desc, fifo + tf->tf_rptr, len);

		buflen -= len;
		desc += len;

		tf->tf_rptr = 0;
	}

	memcpy(desc, fifo + tf->tf_rptr, buflen);
	tf->tf_rptr += buflen;

	DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}
1538:
/*
 * Copy buflen bytes from buf into the fifo ring at the current write
 * pointer, handling wrap-around at the end of the ring, and advance
 * the write pointer and tf_ready. The caller must have checked
 * tf_ready and bracketed the call with tht_fifo_pre/post.
 */
void
tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	/* bytes remaining before the end of the ring */
	len = tf->tf_len - tf->tf_wptr;

	if (len < buflen) {
		/* fill the tail, then wrap to the start of the ring */
		memcpy(fifo + tf->tf_wptr, desc, len);

		buflen -= len;
		desc += len;

		tf->tf_wptr = 0;
	}

	memcpy(fifo + tf->tf_wptr, desc, buflen);
	tf->tf_wptr += buflen;
	/* a write ending exactly at tf_len wraps the pointer to 0 */
	tf->tf_wptr %= tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}
1567:
/*
 * Write one little-endian physical buffer descriptor (address and
 * length) onto the fifo for every segment of the dma map, in segment
 * order.
 */
void
tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
    bus_dmamap_t dmap)
{
	struct tht_pbd			pbd;
	u_int64_t			dva;
	int				i;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		dva = dmap->dm_segs[i].ds_addr;

		pbd.addr_lo = htole32(dva);
		pbd.addr_hi = htole32(dva >> 32);
		pbd.len = htole32(dmap->dm_segs[i].ds_len);

		tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
	}
}
1586:
1587: void
1588: tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
1589: {
1590: const static u_int32_t pad = 0x0;
1591:
1592: /* this assumes you'll only ever be writing multiples of 4 bytes */
1593: if (bc % 8)
1594: tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
1595: }
1596:
/*
 * Finish a batch of fifo operations: sync the dma memory for the
 * device and publish the updated pointer to the hardware — the write
 * pointer for fifos the driver writes (tfd_write), the read pointer
 * for fifos it reads.
 */
void
tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
	if (tf->tf_desc->tfd_write)
		tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
	else
		tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);

	DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
	    tf->tf_wptr, tf->tf_rptr);
}
1610:
/*
 * Unicast MAC address registers, ordered so that index i of this
 * table corresponds to sc_lladdr[i] in tht_lladdr_read/write.
 */
const static bus_size_t tht_mac_regs[3] = {
    THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
};
1614:
/*
 * Read the link layer address out of the unicast mac registers into
 * sc_lladdr, one big-endian 16 bit word per register.
 */
void
tht_lladdr_read(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < sizeofa(tht_mac_regs); i++)
		sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
}
1623:
/*
 * Program sc_lladdr into the unicast mac registers, one big-endian
 * 16 bit word per register; the inverse of tht_lladdr_read().
 */
void
tht_lladdr_write(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < sizeofa(tht_mac_regs); i++)
		tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
}
1632:
/* set/clear the single-bit soft reset registers used below */
#define tht_swrst_set(_s, _r) tht_write((_s), (_r), 0x1)
#define tht_swrst_clr(_s, _r) tht_write((_s), (_r), 0x0)
/*
 * Soft reset the port. Returns 0 on success, 1 if the port/queue
 * disable step never completed within its timeout.
 */
int
tht_sw_reset(struct tht_softc *sc)
{
	int				i;

	/* this follows SW Reset process in 8.8 of the doco */

	/* 1. disable rx */
	tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	/* 2. initiate port disable */
	tht_swrst_set(sc, THT_REG_DIS_PRT);

	/* 3. initiate queue disable */
	tht_swrst_set(sc, THT_REG_DIS_QU_0);
	tht_swrst_set(sc, THT_REG_DIS_QU_1);

	/* 4. wait for successful finish of previous tasks */
	if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
		return (1);

	/* 5. Reset interrupt registers */
	tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
	tht_read(sc, THT_REG_ISR); /* 5.b */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
		tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
	}

	/* 6. initiate queue reset */
	tht_swrst_set(sc, THT_REG_RST_QU_0);
	tht_swrst_set(sc, THT_REG_RST_QU_1);

	/* 7. initiate port reset */
	tht_swrst_set(sc, THT_REG_RST_PRT);

	/* 8. clear txt/rxf/rxd/txf read and write ptrs */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_TXT_RPTR(i), 0);
		tht_write(sc, THT_REG_RXF_RPTR(i), 0);
		tht_write(sc, THT_REG_RXD_RPTR(i), 0);
		tht_write(sc, THT_REG_TXF_RPTR(i), 0);

		tht_write(sc, THT_REG_TXT_WPTR(i), 0);
		tht_write(sc, THT_REG_RXF_WPTR(i), 0);
		tht_write(sc, THT_REG_RXD_WPTR(i), 0);
		tht_write(sc, THT_REG_TXF_WPTR(i), 0);
	}

	/* 9. unset port disable */
	tht_swrst_clr(sc, THT_REG_DIS_PRT);

	/* 10. unset queue disable */
	tht_swrst_clr(sc, THT_REG_DIS_QU_0);
	tht_swrst_clr(sc, THT_REG_DIS_QU_1);

	/* 11. unset queue reset */
	tht_swrst_clr(sc, THT_REG_RST_QU_0);
	tht_swrst_clr(sc, THT_REG_RST_QU_1);

	/* 12. unset port reset */
	tht_swrst_clr(sc, THT_REG_RST_PRT);

	/* 13. enable rx */
	tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	return (0);
}
1703:
1704: int
1705: tht_fw_load(struct tht_softc *sc)
1706: {
1707: struct timeout ticker;
1708: volatile int ok = 1;
1709: u_int8_t *fw, *buf;
1710: size_t fwlen, wrlen;
1711: int error = 1;
1712:
1713: if (loadfirmware("tht", &fw, &fwlen) != 0)
1714: return (1);
1715:
1716: if ((fwlen % 8) != 0)
1717: goto err;
1718:
1719: buf = fw;
1720: while (fwlen > 0) {
1721: while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
1722: if (tsleep(sc, PCATCH, "thtfw", 1) == EINTR)
1723: goto err;
1724: }
1725:
1726: wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fwlen);
1727: tht_fifo_pre(sc, &sc->sc_txt);
1728: tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
1729: tht_fifo_post(sc, &sc->sc_txt);
1730:
1731: fwlen -= wrlen;
1732: buf += wrlen;
1733: }
1734:
1735: timeout_set(&ticker, tht_fw_tick, &ticker);
1736: timeout_add(&ticker, 2*hz);
1737: while (ok) {
1738: if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
1739: error = 0;
1740: break;
1741: }
1742:
1743: if (tsleep(sc, PCATCH, "thtinit", 1) == EINTR)
1744: goto err;
1745: }
1746: timeout_del(&ticker);
1747:
1748: tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);
1749:
1750: err:
1751: free(fw, M_DEVBUF);
1752: return (error);
1753: }
1754:
/*
 * Timeout callback for tht_fw_load(): clear the flag the firmware
 * loader is spinning on so it stops waiting.
 */
void
tht_fw_tick(void *arg)
{
	volatile int			*flag = arg;

	*flag = 0;
}
1762:
/*
 * Read the MAC link status register and, if the link state changed,
 * record it in the ifnet and notify the stack.
 */
void
tht_link_state(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	int				link_state = LINK_STATE_DOWN;

	if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
		link_state = LINK_STATE_UP;

	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}
}
1777:
/*
 * Read a 32 bit chip register, with a read barrier beforehand so the
 * access is not reordered with earlier bus operations.
 */
u_int32_t
tht_read(struct tht_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
}
1785:
/*
 * Write a 32 bit chip register, with a write barrier afterwards so
 * the store reaches the device before later bus operations.
 */
void
tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1793:
/*
 * Write a buffer of len bytes into chip register space starting at r
 * as raw (unswapped) 32 bit words, followed by a write barrier.
 */
void
tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
	    buf, len);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}
1802:
1803: int
1804: tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1805: int timeout)
1806: {
1807: while ((tht_read(sc, r) & m) != v) {
1808: if (timeout == 0)
1809: return (0);
1810:
1811: delay(1000);
1812: timeout--;
1813: }
1814:
1815: return (1);
1816: }
1817:
1818: int
1819: tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1820: int timeout)
1821: {
1822: while ((tht_read(sc, r) & m) == v) {
1823: if (timeout == 0)
1824: return (0);
1825:
1826: delay(1000);
1827: timeout--;
1828: }
1829:
1830: return (1);
1831: }
1832:
/*
 * Allocate a zeroed, single segment chunk of dma-able memory of the
 * given size and alignment, mapped into kernel virtual space and
 * loaded into a dma map. Returns NULL on failure, unwinding whatever
 * was acquired via the goto ladder. Sleeps (M_WAITOK/BUS_DMA_WAITOK)
 * so it must not be called from interrupt context.
 */
struct tht_dmamem *
tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_dmamem		*tdm;
	int				nsegs;

	tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK);
	bzero(tdm, sizeof(struct tht_dmamem));
	tdm->tdm_size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
	    BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
	    BUS_DMA_WAITOK) != 0)
		goto free;

	if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(tdm->tdm_kva, size);

	return (tdm);

unmap:
	bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF);

	return (NULL);
}
1875:
/*
 * Release dma memory allocated by tht_dmamem_alloc(), undoing each
 * step in reverse order.
 */
void
tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;

	bus_dmamap_unload(dmat, tdm->tdm_map);
	bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF);
}
1887:
/*
 * Allocate a list of npkts packet slots, each with an id and a dma
 * map of up to nsegs segments, and put them all on the free queue.
 * On dmamap creation failure everything allocated so far is released
 * (the failing slot is not yet on either queue, so tht_pkt_free only
 * destroys the maps of completed slots). Returns 0 on success, 1 on
 * failure. Sleeps, so not for interrupt context.
 */
int
tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
    int nsegs)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;
	int				i;

	tpl->tpl_pkts = malloc(sizeof(struct tht_pkt) * npkts, M_DEVBUF,
	    M_WAITOK);
	bzero(tpl->tpl_pkts, sizeof(struct tht_pkt) * npkts);

	TAILQ_INIT(&tpl->tpl_free);
	TAILQ_INIT(&tpl->tpl_used);
	for (i = 0; i < npkts; i++) {
		pkt = &tpl->tpl_pkts[i];

		pkt->tp_id = i;
		if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
		    THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->tp_dmap) != 0) {
			tht_pkt_free(sc, tpl);
			return (1);
		}

		TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
	}

	return (0);
}
1918:
/*
 * Destroy the dma maps of all packets on the free queue and release
 * the slot array. Callers must have emptied the used queue first
 * (e.g. via tht_rxf_drain or tht_txf).
 */
void
tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;

	while ((pkt = tht_pkt_get(tpl)) != NULL)
		bus_dmamap_destroy(dmat, pkt->tp_dmap);
	free(tpl->tpl_pkts, M_DEVBUF);
	tpl->tpl_pkts = NULL;
}
1930:
/*
 * Return a packet slot from the used queue to the free queue.
 */
void
tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
{
	TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
	TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
}
1937:
1938: struct tht_pkt *
1939: tht_pkt_get(struct tht_pkt_list *tpl)
1940: {
1941: struct tht_pkt *pkt;
1942:
1943: pkt = TAILQ_FIRST(&tpl->tpl_free);
1944: if (pkt != NULL) {
1945: TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
1946: TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
1947:
1948: }
1949:
1950: return (pkt);
1951: }
1952:
/*
 * Peek at the first packet on the used queue without removing it;
 * NULL when the queue is empty.
 */
struct tht_pkt *
tht_pkt_used(struct tht_pkt_list *tpl)
{
	return (TAILQ_FIRST(&tpl->tpl_used));
}
/* (CVSweb annotation trailer artifact) */