1 |
dpavlin |
1 |
/* |
2 |
dpavlin |
7 |
* Cisco router simulation platform. |
3 |
dpavlin |
1 |
* Copyright (C) 2005,2006 Christophe Fillot. All rights reserved. |
4 |
|
|
* |
5 |
|
|
* Serial Interfaces (Mueslix). |
6 |
|
|
* |
7 |
|
|
* Note: "debug serial mueslix" gives more technical info. |
8 |
|
|
* |
9 |
|
|
* Chip mode: Cisco models 36xx and 72xx don't seem to use the same microcode, |
10 |
|
|
* so there are code variants to make things work properly. |
11 |
|
|
* |
12 |
|
|
* Chip mode 0 => 3600 |
13 |
|
|
* Chip mode 1 => 7200 |
14 |
|
|
* |
15 |
|
|
* 2 points noticed until now: |
16 |
|
|
* - RX/TX ring wrapping checks are done differently, |
17 |
|
|
* - TX packet sizes are not specified in the same way. |
18 |
|
|
* |
19 |
|
|
* Test methodology: |
20 |
|
|
* - Connect two virtual routers together ; |
21 |
|
|
* - Do pings by sending 10 packets by 10 packets. If this stops working, |
22 |
|
|
* count the number of transmitted packets and check with RX/TX rings |
23 |
|
|
* sizes. This is problably a ring wrapping problem. |
24 |
|
|
* - Do multiple pings with various sizes (padding checks); |
25 |
|
|
* - Check if CDP is working, with various hostname sizes. Since CDP |
26 |
|
|
* contains a checksum, it is a good way to determine if packets are |
27 |
|
|
* sent/received correctly. |
28 |
|
|
* - Do a Telnet from both virtual router to the other one, and do a |
29 |
|
|
* "sh run". |
30 |
|
|
*/ |
31 |
|
|
|
32 |
|
|
#include <stdio.h> |
33 |
|
|
#include <stdlib.h> |
34 |
|
|
#include <string.h> |
35 |
|
|
#include <unistd.h> |
36 |
|
|
#include <errno.h> |
37 |
|
|
#include <assert.h> |
38 |
|
|
|
39 |
dpavlin |
7 |
#include "cpu.h" |
40 |
|
|
#include "vm.h" |
41 |
dpavlin |
1 |
#include "dynamips.h" |
42 |
|
|
#include "memory.h" |
43 |
|
|
#include "device.h" |
44 |
|
|
#include "net.h" |
45 |
|
|
#include "net_io.h" |
46 |
|
|
#include "ptask.h" |
47 |
|
|
#include "dev_mueslix.h" |
48 |
|
|
|
49 |
|
|
/* Debugging flags */ |
50 |
|
|
#define DEBUG_ACCESS 0 |
51 |
|
|
#define DEBUG_UNKNOWN 0 |
52 |
|
|
#define DEBUG_PCI_REGS 0 |
53 |
|
|
#define DEBUG_TRANSMIT 0 |
54 |
|
|
#define DEBUG_RECEIVE 0 |
55 |
|
|
|
56 |
|
|
/* Mueslix PCI vendor/product codes */ |
57 |
|
|
#define MUESLIX_PCI_VENDOR_ID 0x1137 |
58 |
|
|
#define MUESLIX_PCI_PRODUCT_ID 0x0001 |
59 |
|
|
|
60 |
|
|
/* Number of channels (4 interfaces) */ |
61 |
|
|
#define MUESLIX_NR_CHANNELS 4 |
62 |
|
|
#define MUESLIX_CHANNEL_LEN 0x100 |
63 |
|
|
|
64 |
|
|
/* RX/TX status for a channel */ |
65 |
|
|
#define MUESLIX_CHANNEL_STATUS_RX 0x01 |
66 |
|
|
#define MUESLIX_CHANNEL_STATUS_TX 0x02 |
67 |
|
|
|
68 |
|
|
/* RX/TX enable masks (XXX check if bit position is correct) */ |
69 |
|
|
#define MUESLIX_TX_ENABLE 0x01 |
70 |
|
|
#define MUESLIX_RX_ENABLE 0x02 |
71 |
|
|
|
72 |
|
|
/* RX/TX IRQ masks */ |
73 |
|
|
#define MUESLIX_TX_IRQ 0x01 |
74 |
|
|
#define MUESLIX_RX_IRQ 0x10 |
75 |
|
|
|
76 |
|
|
/* Addresses of ports */ |
77 |
|
|
#define MUESLIX_CHANNEL0_OFFSET 0x100 |
78 |
|
|
#define MUESLIX_CHANNEL1_OFFSET 0x200 |
79 |
|
|
#define MUESLIX_CHANNEL2_OFFSET 0x300 |
80 |
|
|
#define MUESLIX_CHANNEL3_OFFSET 0x400 |
81 |
|
|
|
82 |
|
|
/* TPU Registers */ |
83 |
|
|
#define MUESLIX_TPU_CMD_OFFSET 0x2c24 |
84 |
|
|
#define MUESLIX_TPU_CMD_RSP_OFFSET 0x2c2c |
85 |
|
|
|
86 |
|
|
/* General and channels registers */ |
87 |
|
|
#define MUESLIX_GEN_CHAN_LEN 0x500 |
88 |
|
|
|
89 |
|
|
/* TPU microcode */ |
90 |
|
|
#define MUESLIX_UCODE_OFFSET 0x2000 |
91 |
|
|
#define MUESLIX_UCODE_LEN 0x800 |
92 |
|
|
|
93 |
|
|
/* TPU Xmem and YMem */ |
94 |
|
|
#define MUESLIX_XMEM_OFFSET 0x2a00 |
95 |
|
|
#define MUESLIX_YMEM_OFFSET 0x2b00 |
96 |
|
|
#define MUESLIX_XYMEM_LEN 0x100 |
97 |
|
|
|
98 |
|
|
/* Maximum packet size */ |
99 |
dpavlin |
8 |
#define MUESLIX_MAX_PKT_SIZE 18000 |
100 |
dpavlin |
1 |
|
101 |
|
|
/* Send up to 16 packets in a TX ring scan pass */ |
102 |
|
|
#define MUESLIX_TXRING_PASS_COUNT 16 |
103 |
|
|
|
104 |
|
|
/* RX descriptors */ |
105 |
|
|
#define MUESLIX_RXDESC_OWN 0x80000000 /* Ownership */ |
106 |
|
|
#define MUESLIX_RXDESC_FS 0x40000000 /* First Segment */ |
107 |
|
|
#define MUESLIX_RXDESC_LS 0x20000000 /* Last Segment */ |
108 |
|
|
#define MUESLIX_RXDESC_OVERRUN 0x10000000 /* Overrun */ |
109 |
|
|
#define MUESLIX_RXDESC_IGNORED 0x08000000 /* Ignored */ |
110 |
|
|
#define MUESLIX_RXDESC_ABORT 0x04000000 /* Abort */ |
111 |
|
|
#define MUESLIX_RXDESC_CRC 0x02000000 /* CRC error */ |
112 |
dpavlin |
8 |
#define MUESLIX_RXDESC_LEN_MASK 0xffff |
113 |
dpavlin |
1 |
|
114 |
|
|
/* TX descriptors */ |
115 |
|
|
#define MUESLIX_TXDESC_OWN 0x80000000 /* Ownership */ |
116 |
|
|
#define MUESLIX_TXDESC_FS 0x40000000 /* First Segment */ |
117 |
|
|
#define MUESLIX_TXDESC_LS 0x20000000 /* Last Segment */ |
118 |
|
|
#define MUESLIX_TXDESC_SUB 0x00100000 /* Length substractor ? */ |
119 |
|
|
#define MUESLIX_TXDESC_SUB_LEN 0x03000000 /* Length substrator ? */ |
120 |
|
|
#define MUESLIX_TXDESC_SUB_SHIFT 24 |
121 |
|
|
#define MUESLIX_TXDESC_PAD 0x00c00000 /* Sort of padding info ? */ |
122 |
|
|
#define MUESLIX_TXDESC_PAD_SHIFT 22 |
123 |
|
|
|
124 |
dpavlin |
8 |
#define MUESLIX_TXDESC_LEN_MASK 0xffff |
125 |
dpavlin |
1 |
|
126 |
|
|
/* RX Descriptor.
 * rdes[0]: status flags (OWN/FS/LS/...) + buffer length in the low bits
 *          (MUESLIX_RXDESC_LEN_MASK), rdes[1]: buffer physical address. */
struct rx_desc {
   m_uint32_t rdes[2];
};
130 |
|
|
|
131 |
|
|
/* TX Descriptor.
 * tdes[0]: status flags (OWN/FS/LS/...) + segment length in the low bits
 *          (MUESLIX_TXDESC_LEN_MASK), tdes[1]: buffer physical address. */
struct tx_desc {
   m_uint32_t tdes[2];
};
135 |
|
|
|
136 |
|
|
/* Forward declaration of Mueslix data */ |
137 |
|
|
typedef struct mueslix_data mueslix_data_t; |
138 |
|
|
|
139 |
|
|
/* Mueslix channel (one serial interface) */
struct mueslix_channel {
   /* Channel ID (0..MUESLIX_NR_CHANNELS-1) */
   u_int id;

   /* Channel status (0=disabled, set by TPU commands) */
   u_int status;

   /* NetIO descriptor (NULL when no NIO is bound) */
   netio_desc_t *nio;

   /* TX ring scanners task id */
   ptask_id_t tx_tid;

   /* physical addresses for start and end of RX/TX rings */
   m_uint32_t rx_start,rx_end,tx_start,tx_end;

   /* physical addresses of current RX and TX descriptors */
   m_uint32_t rx_current,tx_current;

   /* Parent mueslix structure */
   mueslix_data_t *parent;
};
162 |
|
|
|
163 |
|
|
/* Mueslix Data (one chip = up to 4 channels) */
struct mueslix_data {
   /* Device name (used for logging) */
   char *name;

   /* Lock protecting registers and ring state */
   pthread_mutex_t lock;

   /* IRQ status and mask */
   m_uint32_t irq_status,irq_mask;
   /* pacing counter used by dev_mueslix_update_irq_status() */
   u_int irq_clearing_count;

   /* TPU options */
   m_uint32_t tpu_options;

   /* Virtual machine */
   vm_instance_t *vm;

   /* Virtual device */
   struct vdevice *dev;

   /* PCI device information */
   struct pci_device *pci_dev;

   /* Chip mode:
    *
    * 0=increment ring pointers before check + direct TX size,
    * 1=increment ring pointers after check + "complex" TX size.
    */
   int chip_mode;

   /* Channels */
   struct mueslix_channel channel[MUESLIX_NR_CHANNELS];
   /* RX/TX enable bits, 2 per channel (see dev_mueslix_is_rx_tx_enabled) */
   m_uint32_t channel_enable_mask;

   /* TPU microcode */
   u_char ucode[MUESLIX_UCODE_LEN];

   /* TPU Xmem and Ymem */
   u_char xmem[MUESLIX_XYMEM_LEN];
   u_char ymem[MUESLIX_XYMEM_LEN];
};
204 |
|
|
|
205 |
|
|
/* Offsets of the 4 channel register banks within the device window */
static m_uint32_t channel_offset[MUESLIX_NR_CHANNELS] = {
   MUESLIX_CHANNEL0_OFFSET, MUESLIX_CHANNEL1_OFFSET,
   MUESLIX_CHANNEL2_OFFSET, MUESLIX_CHANNEL3_OFFSET,
};
210 |
|
|
|
211 |
dpavlin |
8 |
/* Lock/Unlock primitives */ |
212 |
|
|
#define MUESLIX_LOCK(d) pthread_mutex_lock(&(d)->lock) |
213 |
|
|
#define MUESLIX_UNLOCK(d) pthread_mutex_unlock(&(d)->lock) |
214 |
|
|
|
215 |
dpavlin |
1 |
/* Log a Mueslix message */ |
216 |
|
|
#define MUESLIX_LOG(d,msg...) vm_log((d)->vm,(d)->name,msg) |
217 |
|
|
|
218 |
|
|
/* Returns TRUE if RX/TX is enabled for a channel */ |
219 |
|
|
static inline int dev_mueslix_is_rx_tx_enabled(struct mueslix_data *d,u_int id) |
220 |
|
|
{ |
221 |
|
|
/* 2 bits for RX/TX, 4 channels max */ |
222 |
|
|
return((d->channel_enable_mask >> (id << 1)) & 0x03); |
223 |
|
|
} |
224 |
|
|
|
225 |
dpavlin |
8 |
/* Update IRQ status.
 * Raises the PCI IRQ as long as any unmasked status bit is set. When no
 * bit is pending, the IRQ line is only cleared every 3rd call via
 * irq_clearing_count -- NOTE(review): presumably a pacing hack so the
 * guest reliably observes the interrupt; confirm before changing. */
static inline void dev_mueslix_update_irq_status(struct mueslix_data *d)
{
   if (d->irq_status & d->irq_mask)
      pci_dev_trigger_irq(d->vm,d->pci_dev);
   else {
      if (++d->irq_clearing_count == 3) {
         pci_dev_clear_irq(d->vm,d->pci_dev);
         d->irq_clearing_count = 0;
      }
   }
}
237 |
|
|
|
238 |
dpavlin |
1 |
/*
 * Access to channel registers.
 *
 * Handles reads/writes to a per-channel register bank; `offset' is
 * relative to the channel base. Unknown offsets are silently ignored.
 */
void dev_mueslix_chan_access(cpu_gen_t *cpu,struct mueslix_channel *channel,
                             m_uint32_t offset,u_int op_size,u_int op_type,
                             m_uint64_t *data)
{
   switch(offset) {
      case 0x60: /* signals ? */
         /* all-ones only when a NIO is attached to the channel */
         if ((op_type == MTS_READ) && (channel->nio != NULL))
            *data = 0xFFFFFFFF;
         break;

      case 0x64: /* port status - cable type and probably other things */
         if (op_type == MTS_READ)
            *data = 0x7B;
         break;

      case 0x90: /* has influence on clock rate */
         if (op_type == MTS_READ)
            *data = 0x11111111;
         break;

      case 0x80: /* TX start */
         /* writing also resets the TX scan position to the ring start */
         if (op_type == MTS_WRITE)
            channel->tx_start = channel->tx_current = *data;
         else
            *data = channel->tx_start;
         break;

      case 0x84: /* TX end */
         if (op_type == MTS_WRITE)
            channel->tx_end = *data;
         else
            *data = channel->tx_end;
         break;

      case 0x88: /* RX start */
         /* writing also resets the RX scan position to the ring start */
         if (op_type == MTS_WRITE)
            channel->rx_start = channel->rx_current = *data;
         else
            *data = channel->rx_start;
         break;

      case 0x8c: /* RX end */
         if (op_type == MTS_WRITE)
            channel->rx_end = *data;
         else
            *data = channel->rx_end;
         break;
   }
}
290 |
|
|
|
291 |
|
|
/* Handle TPU commands for chip mode 0 (3600) */ |
292 |
|
|
static void tpu_cm0_handle_cmd(struct mueslix_data *d,u_int cmd) |
293 |
|
|
{ |
294 |
|
|
struct mueslix_channel *channel; |
295 |
|
|
u_int opcode,channel_id; |
296 |
|
|
|
297 |
|
|
opcode = (cmd >> 12) & 0xFF; |
298 |
|
|
channel_id = cmd & 0x03; |
299 |
|
|
channel = &d->channel[channel_id]; |
300 |
|
|
|
301 |
|
|
switch(opcode) { |
302 |
|
|
case 0x10: |
303 |
|
|
MUESLIX_LOG(d,"channel %u disabled\n",channel_id); |
304 |
|
|
channel->status = 0; |
305 |
|
|
break; |
306 |
|
|
case 0x00: |
307 |
|
|
MUESLIX_LOG(d,"channel %u enabled\n",channel_id); |
308 |
|
|
channel->status = 1; |
309 |
|
|
break; |
310 |
|
|
default: |
311 |
|
|
MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd); |
312 |
|
|
} |
313 |
|
|
} |
314 |
|
|
|
315 |
|
|
/* Handle TPU commands for chip mode 1 (7200) */ |
316 |
|
|
static void tpu_cm1_handle_cmd(struct mueslix_data *d,u_int cmd) |
317 |
|
|
{ |
318 |
|
|
struct mueslix_channel *channel; |
319 |
|
|
u_int opcode,channel_id; |
320 |
|
|
|
321 |
|
|
opcode = (cmd >> 12) & 0xFF; |
322 |
|
|
channel_id = cmd & 0x03; |
323 |
|
|
channel = &d->channel[channel_id]; |
324 |
|
|
|
325 |
|
|
switch(opcode) { |
326 |
|
|
case 0x50: |
327 |
|
|
case 0x30: |
328 |
|
|
MUESLIX_LOG(d,"channel %u disabled\n",channel_id); |
329 |
|
|
channel->status = 0; |
330 |
|
|
break; |
331 |
|
|
case 0x00: |
332 |
|
|
MUESLIX_LOG(d,"channel %u enabled\n",channel_id); |
333 |
|
|
channel->status = 1; |
334 |
|
|
break; |
335 |
|
|
default: |
336 |
|
|
MUESLIX_LOG(d,"unknown command 0x%5x\n",cmd); |
337 |
|
|
} |
338 |
|
|
} |
339 |
|
|
|
340 |
|
|
/*
 * dev_mueslix_access()
 *
 * Handler for the memory-mapped register window. Returns a host pointer
 * for direct access to the microcode/XMem/YMem regions; for register
 * accesses the result is passed through *data and NULL is returned.
 */
void *dev_mueslix_access(cpu_gen_t *cpu,struct vdevice *dev,m_uint32_t offset,
                         u_int op_size,u_int op_type,m_uint64_t *data)
{
   struct mueslix_data *d = dev->priv_data;
   int i;

#if DEBUG_ACCESS >= 2
   if (op_type == MTS_READ) {
      cpu_log(cpu,d->name,"read access to offset=0x%x, pc=0x%llx, size=%u\n",
              offset,cpu_get_pc(cpu),op_size);
   } else {
      cpu_log(cpu,d->name,"write access to offset=0x%x, pc=0x%llx, "
              "val=0x%llx, size=%u\n",offset,cpu_get_pc(cpu),*data,op_size);
   }
#endif

   /* Returns 0 if we don't know the offset */
   if (op_type == MTS_READ)
      *data = 0x00000000;

   /* Handle microcode access */
   if ((offset >= MUESLIX_UCODE_OFFSET) &&
       (offset < (MUESLIX_UCODE_OFFSET + MUESLIX_UCODE_LEN)))
      return(d->ucode + offset - MUESLIX_UCODE_OFFSET);

   /* Handle TPU XMem access */
   if ((offset >= MUESLIX_XMEM_OFFSET) &&
       (offset < (MUESLIX_XMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->xmem + offset - MUESLIX_XMEM_OFFSET);

   /* Handle TPU YMem access */
   if ((offset >= MUESLIX_YMEM_OFFSET) &&
       (offset < (MUESLIX_YMEM_OFFSET + MUESLIX_XYMEM_LEN)))
      return(d->ymem + offset - MUESLIX_YMEM_OFFSET);

   /* Handle channel access (delegated under the device lock) */
   for(i=0;i<MUESLIX_NR_CHANNELS;i++)
      if ((offset >= channel_offset[i]) &&
          (offset < (channel_offset[i] + MUESLIX_CHANNEL_LEN)))
      {
         MUESLIX_LOCK(d);
         dev_mueslix_chan_access(cpu,&d->channel[i],
                                 offset - channel_offset[i],
                                 op_size,op_type,data);
         MUESLIX_UNLOCK(d);
         return NULL;
      }

   MUESLIX_LOCK(d);

   /* Generic case */
   switch(offset) {
      /* this reg is accessed when an interrupt occurs */
      case 0x0:
         if (op_type == MTS_READ) {
            *data = d->irq_status;
         } else {
            /* writing 1 bits acknowledges (clears) those IRQ sources */
            d->irq_status &= ~(*data);
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* Maybe interrupt mask */
      case 0x10:
         if (op_type == MTS_READ) {
            *data = d->irq_mask;
         } else {
            d->irq_mask = *data;
            dev_mueslix_update_irq_status(d);
         }
         break;

      /* per-channel RX/TX enable bits */
      case 0x14:
         if (op_type == MTS_READ)
            *data = d->channel_enable_mask;
         else {
#if DEBUG_ACCESS
            cpu_log(cpu,d->name,
                    "channel_enable_mask = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
#endif
            d->channel_enable_mask = *data;
         }
         break;

      case 0x18:
         if (op_type == MTS_READ)
            *data = 0x7F7F7F7F;
         break;

      case 0x48:
         if (op_type == MTS_READ)
            *data = 0x00000000;
         break;

      case 0x7c:
         if (op_type == MTS_READ)
            *data = 0x492;
         break;

      /* TPU options (read/write) */
      case 0x2c00:
         if (op_type == MTS_READ)
            *data = d->tpu_options;
         else
            d->tpu_options = *data;
         break;

      /* cmd reg */
      case MUESLIX_TPU_CMD_OFFSET:
#if DEBUG_ACCESS
         if (op_type == MTS_WRITE) {
            cpu_log(cpu,d->name,"cmd_reg = 0x%5.5llx at pc=0x%llx\n",
                    *data,cpu_get_pc(cpu));
         }
#endif
         /* command decoding differs between microcode variants */
         switch(d->chip_mode) {
            case 0: /* 3600 */
               tpu_cm0_handle_cmd(d,*data);
               break;
            case 1: /* 7200 */
               tpu_cm1_handle_cmd(d,*data);
               break;
         }
         break;

      /*
       * cmd_rsp reg, it seems that 0xFFFF means OK
       * (seen on a "sh contr se1/0" with "debug serial mueslix" enabled).
       */
      case MUESLIX_TPU_CMD_RSP_OFFSET:
         if (op_type == MTS_READ)
            *data = 0xFFFF;
         break;

#if DEBUG_UNKNOWN
      default:
         if (op_type == MTS_READ) {
            cpu_log(cpu,d->name,
                    "read from unknown addr 0x%x, pc=0x%llx (size=%u)\n",
                    offset,cpu_get_pc(cpu),op_size);
         } else {
            cpu_log(cpu,d->name,
                    "write to unknown addr 0x%x, value=0x%llx, "
                    "pc=0x%llx (size=%u)\n",
                    offset,*data,cpu_get_pc(cpu),op_size);
         }
#endif
   }

   MUESLIX_UNLOCK(d);
   return NULL;
}
495 |
|
|
|
496 |
|
|
/* |
497 |
|
|
* Get the address of the next RX descriptor. |
498 |
|
|
*/ |
499 |
|
|
static m_uint32_t rxdesc_get_next(struct mueslix_channel *channel, |
500 |
|
|
m_uint32_t rxd_addr) |
501 |
|
|
{ |
502 |
|
|
m_uint32_t nrxd_addr; |
503 |
|
|
|
504 |
|
|
switch(channel->parent->chip_mode) { |
505 |
|
|
case 0: |
506 |
|
|
nrxd_addr = rxd_addr + sizeof(struct rx_desc); |
507 |
|
|
if (nrxd_addr == channel->rx_end) |
508 |
|
|
nrxd_addr = channel->rx_start; |
509 |
|
|
break; |
510 |
|
|
|
511 |
|
|
case 1: |
512 |
|
|
default: |
513 |
|
|
if (rxd_addr == channel->rx_end) |
514 |
|
|
nrxd_addr = channel->rx_start; |
515 |
|
|
else |
516 |
|
|
nrxd_addr = rxd_addr + sizeof(struct rx_desc); |
517 |
|
|
break; |
518 |
|
|
} |
519 |
|
|
|
520 |
|
|
return(nrxd_addr); |
521 |
|
|
} |
522 |
|
|
|
523 |
|
|
/* Read an RX descriptor from guest memory and convert it to host order */
static void rxdesc_read(struct mueslix_data *d,m_uint32_t rxd_addr,
                        struct rx_desc *rxd)
{
#if DEBUG_RECEIVE
   MUESLIX_LOG(d,"reading RX descriptor at address 0x%x\n",rxd_addr);
#endif

   /* get the next descriptor from VM physical RAM */
   physmem_copy_from_vm(d->vm,rxd,rxd_addr,sizeof(struct rx_desc));

   /* byte-swapping */
   rxd->rdes[0] = vmtoh32(rxd->rdes[0]);
   rxd->rdes[1] = vmtoh32(rxd->rdes[1]);
}
538 |
|
|
|
539 |
|
|
/*
 * Try to acquire the specified RX descriptor. Returns TRUE (non-zero,
 * i.e. the OWN bit) if the guest has handed the descriptor to the device.
 * It assumes that the byte-swapping is done.
 */
static inline int rxdesc_acquire(m_uint32_t rdes0)
{
   return(rdes0 & MUESLIX_RXDESC_OWN);
}
547 |
|
|
|
548 |
|
|
/* Put a packet in buffer of a descriptor */ |
549 |
|
|
static ssize_t rxdesc_put_pkt(struct mueslix_data *d,struct rx_desc *rxd, |
550 |
|
|
u_char **pkt,ssize_t *pkt_len) |
551 |
|
|
{ |
552 |
|
|
ssize_t len,cp_len; |
553 |
|
|
|
554 |
|
|
len = rxd->rdes[0] & MUESLIX_RXDESC_LEN_MASK; |
555 |
|
|
|
556 |
|
|
/* compute the data length to copy */ |
557 |
|
|
cp_len = m_min(len,*pkt_len); |
558 |
|
|
|
559 |
|
|
#if DEBUG_RECEIVE |
560 |
|
|
MUESLIX_LOG(d,"copying %d bytes at 0x%x\n",cp_len,rxd->rdes[1]); |
561 |
|
|
#endif |
562 |
|
|
|
563 |
|
|
/* copy packet data to the VM physical RAM */ |
564 |
|
|
physmem_copy_to_vm(d->vm,*pkt,rxd->rdes[1],cp_len); |
565 |
|
|
|
566 |
|
|
*pkt += cp_len; |
567 |
|
|
*pkt_len -= cp_len; |
568 |
|
|
return(cp_len); |
569 |
|
|
} |
570 |
|
|
|
571 |
|
|
/*
 * Put a packet in the RX ring of the Mueslix specified channel.
 *
 * Spreads the packet across as many RX descriptors as needed, writes
 * back their status words, then raises the RX IRQ for the channel.
 * Callers hold the device lock (see dev_mueslix_handle_rxring).
 */
static void dev_mueslix_receive_pkt(struct mueslix_channel *channel,
                                    u_char *pkt,ssize_t pkt_len)
{
   struct mueslix_data *d = channel->parent;
   m_uint32_t rx_start,rxdn_addr,rxdn_rdes0;
   struct rx_desc rxd0,rxdn,*rxdc;
   ssize_t cp_len,tot_len = pkt_len;
   u_char *pkt_ptr = pkt;
   int i;

   /* ring not programmed, channel disabled or no NIO bound: drop */
   if ((channel->rx_start == 0) || (channel->status == 0) ||
       (channel->nio == NULL))
      return;

   /* Don't make anything if RX is not enabled for this channel */
   if (!(dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE))
      return;

   /* Truncate the packet if it is too big */
   pkt_len = m_min(pkt_len,MUESLIX_MAX_PKT_SIZE);

   /* Copy the current rxring descriptor */
   rxdesc_read(d,channel->rx_current,&rxd0);

   /* We must have the first descriptor... */
   if (!rxdesc_acquire(rxd0.rdes[0]))
      return;

   /* Remember the first RX descriptor address */
   rx_start = channel->rx_current;

   for(i=0,rxdc=&rxd0;tot_len>0;i++)
   {
      /* Put data into the descriptor buffers */
      cp_len = rxdesc_put_pkt(d,rxdc,&pkt_ptr,&tot_len);

      /* Get address of the next descriptor */
      rxdn_addr = rxdesc_get_next(channel,channel->rx_current);

      /* We have finished if the complete packet has been stored */
      if (tot_len == 0) {
         /* mark Last Segment and record the stored length */
         rxdc->rdes[0] = MUESLIX_RXDESC_LS;
         rxdc->rdes[0] |= cp_len;

         /* the first descriptor status is written back only at the end */
         if (i != 0)
            physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

         channel->rx_current = rxdn_addr;
         break;
      }

#if DEBUG_RECEIVE
      MUESLIX_LOG(d,"trying to acquire new descriptor at 0x%x\n",rxdn_addr);
#endif

      /* Get status of the next descriptor to see if we can acquire it */
      rxdn_rdes0 = physmem_copy_u32_from_vm(d->vm,rxdn_addr);

      /* no free descriptor left: flag an overrun and terminate the frame */
      if (!rxdesc_acquire(rxdn_rdes0))
         rxdc->rdes[0] = MUESLIX_RXDESC_LS | MUESLIX_RXDESC_OVERRUN;
      else
         rxdc->rdes[0] = 0x00000000; /* ok, no special flag */

      rxdc->rdes[0] |= cp_len;

      /* Update the new status (only if we are not on the first desc) */
      if (i != 0)
         physmem_copy_u32_to_vm(d->vm,channel->rx_current,rxdc->rdes[0]);

      /* Update the RX pointer */
      channel->rx_current = rxdn_addr;

      if (rxdc->rdes[0] & MUESLIX_RXDESC_LS)
         break;

      /* Read the next descriptor from VM physical RAM */
      rxdesc_read(d,rxdn_addr,&rxdn);
      rxdc = &rxdn;
   }

   /* Update the first RX descriptor */
   rxd0.rdes[0] |= MUESLIX_RXDESC_FS;
   physmem_copy_u32_to_vm(d->vm,rx_start,rxd0.rdes[0]);

   /* Indicate that we have a frame ready (XXX something to do ?) */

   /* Generate IRQ on CPU */
   d->irq_status |= MUESLIX_RX_IRQ << channel->id;
   dev_mueslix_update_irq_status(d);
}
664 |
|
|
|
665 |
|
|
/* Handle the Mueslix RX ring of the specified channel.
 * NetIO RX callback: takes the device lock and forwards the packet to
 * dev_mueslix_receive_pkt() when RX is enabled. Always returns TRUE. */
static int dev_mueslix_handle_rxring(netio_desc_t *nio,
                                     u_char *pkt,ssize_t pkt_len,
                                     struct mueslix_channel *channel)
{
   struct mueslix_data *d = channel->parent;

#if DEBUG_RECEIVE
   MUESLIX_LOG(d,"channel %u: receiving a packet of %d bytes\n",
               channel->id,pkt_len);
   mem_dump(log_file,pkt,pkt_len);
#endif

   MUESLIX_LOCK(d);
   if (dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_RX_ENABLE)
      dev_mueslix_receive_pkt(channel,pkt,pkt_len);
   MUESLIX_UNLOCK(d);
   return(TRUE);
}
684 |
|
|
|
685 |
|
|
/* Read a TX descriptor from guest memory and convert it to host order */
static void txdesc_read(struct mueslix_data *d,m_uint32_t txd_addr,
                        struct tx_desc *txd)
{
   /* get the next descriptor from VM physical RAM */
   physmem_copy_from_vm(d->vm,txd,txd_addr,sizeof(struct tx_desc));

   /* byte-swapping */
   txd->tdes[0] = vmtoh32(txd->tdes[0]);
   txd->tdes[1] = vmtoh32(txd->tdes[1]);
}
696 |
|
|
|
697 |
|
|
/* Set the address of the next TX descriptor */ |
698 |
|
|
static void txdesc_set_next(struct mueslix_channel *channel) |
699 |
|
|
{ |
700 |
|
|
switch(channel->parent->chip_mode) { |
701 |
|
|
case 0: |
702 |
|
|
channel->tx_current += sizeof(struct tx_desc); |
703 |
|
|
|
704 |
|
|
if (channel->tx_current == channel->tx_end) |
705 |
|
|
channel->tx_current = channel->tx_start; |
706 |
|
|
break; |
707 |
|
|
|
708 |
|
|
case 1: |
709 |
|
|
default: |
710 |
|
|
if (channel->tx_current == channel->tx_end) |
711 |
|
|
channel->tx_current = channel->tx_start; |
712 |
|
|
else |
713 |
|
|
channel->tx_current += sizeof(struct tx_desc); |
714 |
|
|
} |
715 |
|
|
} |
716 |
|
|
|
717 |
|
|
/* Handle the TX ring of a specific channel (single packet).
 *
 * Gathers one frame (possibly spread over several descriptors) from
 * guest memory into a local buffer, sends it on the NIO, releases the
 * descriptors (clears OWN) and raises the TX IRQ. Returns TRUE if a
 * packet was processed, FALSE when the ring is empty or the channel is
 * not ready. Callers hold the device lock (see dev_mueslix_handle_txring).
 */
static int dev_mueslix_handle_txring_single(struct mueslix_channel *channel)
{
   struct mueslix_data *d = channel->parent;
   u_char pkt[MUESLIX_MAX_PKT_SIZE],*pkt_ptr;
   m_uint32_t tx_start,clen,sub_len,tot_len,pad;
   struct tx_desc txd0,ctxd,*ptxd;
   int done = FALSE;

   /* ring not programmed or channel disabled: nothing to do */
   if ((channel->tx_start == 0) || (channel->status == 0))
      return(FALSE);

   /* Copy the current txring descriptor */
   tx_start = channel->tx_current;
   ptxd = &txd0;
   txdesc_read(d,channel->tx_current,ptxd);

   /* If we don't own the descriptor, we cannot transmit */
   if (!(txd0.tdes[0] & MUESLIX_TXDESC_OWN))
      return(FALSE);

#if DEBUG_TRANSMIT
   MUESLIX_LOG(d,"mueslix_handle_txring: 1st desc: "
               "tdes[0]=0x%x, tdes[1]=0x%x\n",
               ptxd->tdes[0],ptxd->tdes[1]);
#endif

   pkt_ptr = pkt;
   tot_len = 0;

   do {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"mueslix_handle_txring: loop: "
                  "tdes[0]=0x%x, tdes[1]=0x%x\n",
                  ptxd->tdes[0],ptxd->tdes[1]);
#endif

      /* a chained descriptor we don't own: frame incomplete, abort */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_OWN)) {
         MUESLIX_LOG(d,"mueslix_handle_txring: descriptor not owned!\n");
         return(FALSE);
      }

      /* segment length encoding depends on the microcode variant */
      switch(channel->parent->chip_mode) {
         case 0: /* 3600: direct byte count */
            clen = ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK;
            break;

         case 1: /* 7200: count of 32-bit words, minus an optional
                    "subtractor" encoded in the descriptor flags */
         default:
            clen = (ptxd->tdes[0] & MUESLIX_TXDESC_LEN_MASK) << 2;

            if (ptxd->tdes[0] & MUESLIX_TXDESC_SUB) {
               sub_len = ptxd->tdes[0] & MUESLIX_TXDESC_SUB_LEN;
               sub_len >>= MUESLIX_TXDESC_SUB_SHIFT;
               clen -= sub_len;
            }
      }

      /* Be sure that we have length not null */
      if (clen != 0) {
         //printf("pkt_ptr = %p, ptxd->tdes[1] = 0x%x, clen = %d\n",
         //pkt_ptr, ptxd->tdes[1], clen);
         physmem_copy_from_vm(d->vm,pkt_ptr,ptxd->tdes[1],clen);
      }

      pkt_ptr += clen;
      tot_len += clen;

      /* Clear the OWN bit if this is not the first descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_FS))
         physmem_copy_u32_to_vm(d->vm,channel->tx_current,0);

      /* Go to the next descriptor */
      txdesc_set_next(channel);

      /* Copy the next txring descriptor */
      if (!(ptxd->tdes[0] & MUESLIX_TXDESC_LS)) {
         txdesc_read(d,channel->tx_current,&ctxd);
         ptxd = &ctxd;
      } else
         done = TRUE;
   }while(!done);

   if (tot_len != 0) {
#if DEBUG_TRANSMIT
      MUESLIX_LOG(d,"sending packet of %u bytes (flags=0x%4.4x)\n",
                  tot_len,txd0.tdes[0]);
      mem_dump(log_file,pkt,tot_len);
#endif

      /* padding adjustment -- NOTE(review): exact semantics unclear,
         see MUESLIX_TXDESC_PAD ("sort of padding info ?") */
      pad = ptxd->tdes[0] & MUESLIX_TXDESC_PAD;
      pad >>= MUESLIX_TXDESC_PAD_SHIFT;
      tot_len += (pad - 1) & 0x03;

      /* send it on wire */
      netio_send(channel->nio,pkt,tot_len);
   }

   /* Clear the OWN flag of the first descriptor */
   physmem_copy_u32_to_vm(d->vm,tx_start,0);

   /* Interrupt on completion ? */
   d->irq_status |= MUESLIX_TX_IRQ << channel->id;
   dev_mueslix_update_irq_status(d);
   return(TRUE);
}
823 |
|
|
|
824 |
|
|
/* Handle the TX ring of a specific channel */ |
825 |
|
|
static int dev_mueslix_handle_txring(struct mueslix_channel *channel) |
826 |
|
|
{ |
827 |
dpavlin |
8 |
struct mueslix_data *d = channel->parent; |
828 |
|
|
int res,i; |
829 |
dpavlin |
1 |
|
830 |
dpavlin |
8 |
if (!dev_mueslix_is_rx_tx_enabled(d,channel->id) & MUESLIX_TX_ENABLE) |
831 |
|
|
return(FALSE); |
832 |
|
|
|
833 |
|
|
for(i=0;i<MUESLIX_TXRING_PASS_COUNT;i++) { |
834 |
|
|
MUESLIX_LOCK(d); |
835 |
|
|
res = dev_mueslix_handle_txring_single(channel); |
836 |
|
|
MUESLIX_UNLOCK(d); |
837 |
|
|
|
838 |
|
|
if (!res) |
839 |
dpavlin |
1 |
break; |
840 |
dpavlin |
8 |
} |
841 |
dpavlin |
1 |
|
842 |
|
|
return(TRUE); |
843 |
|
|
} |
844 |
|
|
|
845 |
|
|
/* pci_mueslix_read() */ |
846 |
dpavlin |
7 |
static m_uint32_t pci_mueslix_read(cpu_gen_t *cpu,struct pci_device *dev, |
847 |
dpavlin |
1 |
int reg) |
848 |
|
|
{ |
849 |
|
|
struct mueslix_data *d = dev->priv_data; |
850 |
|
|
|
851 |
|
|
switch(reg) { |
852 |
|
|
case 0x08: /* Rev ID */ |
853 |
|
|
return(0x2800001); |
854 |
|
|
case PCI_REG_BAR0: |
855 |
|
|
return(d->dev->phys_addr); |
856 |
|
|
default: |
857 |
|
|
return(0); |
858 |
|
|
} |
859 |
|
|
} |
860 |
|
|
|
861 |
|
|
/* pci_mueslix_write() */ |
862 |
dpavlin |
7 |
static void pci_mueslix_write(cpu_gen_t *cpu,struct pci_device *dev, |
863 |
dpavlin |
1 |
int reg,m_uint32_t value) |
864 |
|
|
{ |
865 |
|
|
struct mueslix_data *d = dev->priv_data; |
866 |
|
|
|
867 |
|
|
switch(reg) { |
868 |
|
|
case PCI_REG_BAR0: |
869 |
|
|
vm_map_device(cpu->vm,d->dev,(m_uint64_t)value); |
870 |
|
|
MUESLIX_LOG(d,"registers are mapped at 0x%x\n",value); |
871 |
|
|
break; |
872 |
|
|
} |
873 |
|
|
} |
874 |
|
|
|
875 |
|
|
/* Initialize a Mueslix chip */ |
876 |
|
|
struct mueslix_data * |
877 |
|
|
dev_mueslix_init(vm_instance_t *vm,char *name,int chip_mode, |
878 |
|
|
struct pci_bus *pci_bus,int pci_device,int irq) |
879 |
|
|
{ |
880 |
|
|
struct pci_device *pci_dev; |
881 |
|
|
struct mueslix_data *d; |
882 |
|
|
struct vdevice *dev; |
883 |
|
|
int i; |
884 |
|
|
|
885 |
|
|
/* Allocate the private data structure for Mueslix chip */ |
886 |
|
|
if (!(d = malloc(sizeof(*d)))) { |
887 |
|
|
fprintf(stderr,"%s (Mueslix): out of memory\n",name); |
888 |
|
|
return NULL; |
889 |
|
|
} |
890 |
|
|
|
891 |
|
|
memset(d,0,sizeof(*d)); |
892 |
dpavlin |
8 |
pthread_mutex_init(&d->lock,NULL); |
893 |
dpavlin |
1 |
d->chip_mode = chip_mode; |
894 |
|
|
|
895 |
|
|
for(i=0;i<MUESLIX_NR_CHANNELS;i++) |
896 |
|
|
d->channel[i].id = i; |
897 |
|
|
|
898 |
|
|
/* Add as PCI device */ |
899 |
|
|
pci_dev = pci_dev_add(pci_bus,name, |
900 |
|
|
MUESLIX_PCI_VENDOR_ID,MUESLIX_PCI_PRODUCT_ID, |
901 |
|
|
pci_device,0,irq, |
902 |
|
|
d,NULL,pci_mueslix_read,pci_mueslix_write); |
903 |
|
|
|
904 |
|
|
if (!pci_dev) { |
905 |
|
|
fprintf(stderr,"%s (Mueslix): unable to create PCI device.\n",name); |
906 |
|
|
return NULL; |
907 |
|
|
} |
908 |
|
|
|
909 |
|
|
/* Create the device itself */ |
910 |
|
|
if (!(dev = dev_create(name))) { |
911 |
|
|
fprintf(stderr,"%s (Mueslix): unable to create device.\n",name); |
912 |
|
|
return NULL; |
913 |
|
|
} |
914 |
|
|
|
915 |
|
|
d->name = name; |
916 |
|
|
d->pci_dev = pci_dev; |
917 |
|
|
d->vm = vm; |
918 |
|
|
|
919 |
|
|
dev->phys_addr = 0; |
920 |
|
|
dev->phys_len = 0x4000; |
921 |
|
|
dev->handler = dev_mueslix_access; |
922 |
|
|
dev->priv_data = d; |
923 |
|
|
|
924 |
|
|
/* Store device info */ |
925 |
|
|
dev->priv_data = d; |
926 |
|
|
d->dev = dev; |
927 |
|
|
return(d); |
928 |
|
|
} |
929 |
|
|
|
930 |
|
|
/* Remove a Mueslix device */ |
931 |
|
|
void dev_mueslix_remove(struct mueslix_data *d) |
932 |
|
|
{ |
933 |
|
|
if (d != NULL) { |
934 |
|
|
pci_dev_remove(d->pci_dev); |
935 |
|
|
vm_unbind_device(d->vm,d->dev); |
936 |
|
|
cpu_group_rebuild_mts(d->vm->cpu_group); |
937 |
|
|
free(d->dev); |
938 |
|
|
free(d); |
939 |
|
|
} |
940 |
|
|
} |
941 |
|
|
|
942 |
|
|
/* Bind a NIO to a Mueslix channel.
 * Starts the periodic TX ring scanner task and registers the RX handler
 * with the NetIO RX listener.
 * Returns 0 on success, -1 on bad channel id or already-bound channel. */
int dev_mueslix_set_nio(struct mueslix_data *d,u_int channel_id,
                        netio_desc_t *nio)
{
   struct mueslix_channel *channel;

   if (channel_id >= MUESLIX_NR_CHANNELS)
      return(-1);

   channel = &d->channel[channel_id];

   /* check that a NIO is not already bound */
   if (channel->nio != NULL)
      return(-1);

   /* define the new NIO */
   channel->nio = nio;
   channel->parent = d;
   channel->tx_tid = ptask_add((ptask_callback)dev_mueslix_handle_txring,
                               channel,NULL);
   netio_rxl_add(nio,(netio_rx_handler_t)dev_mueslix_handle_rxring,
                 channel,NULL);
   return(0);
}
966 |
|
|
|
967 |
|
|
/* Unbind a NIO from a Mueslix channel.
 * Stops the TX scanner task and removes the RX handler.
 * Returns 0 (also when no NIO was bound), -1 on bad channel id. */
int dev_mueslix_unset_nio(struct mueslix_data *d,u_int channel_id)
{
   struct mueslix_channel *channel;

   if (channel_id >= MUESLIX_NR_CHANNELS)
      return(-1);

   channel = &d->channel[channel_id];

   if (channel->nio) {
      ptask_remove(channel->tx_tid);
      netio_rxl_remove(channel->nio);
      channel->nio = NULL;
   }
   return(0);
}