/**********************************************************************
* iph5526.c: IP/SCSI driver for the Interphase 5526 PCI Fibre Channel
*            Card.
* Copyright (C) 1999 Vineet M Abraham <vmabraham@hotmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*********************************************************************/
/**********************************************************************
02.12.99 Support multiple cards.
03.15.99 Added Fabric support.
04.04.99 Added N_Port support.
04.15.99 Added SCSI support.
06.18.99 Added ABTS Protocol.
06.24.99 Fixed data corruption when multiple XFER_RDYs are received.
07.07.99 Can be loaded as part of the Kernel. Changed semaphores. Added
         more checks before invalidating SEST entries.
07.08.99 Added Broadcast IP stuff and fixed a unicast timeout bug.
***********************************************************************/
/* TODO:
	R_T_TOV set to 15msec in Loop topology. Need to be 100 msec.
	Fix ADISC Tx before completing FLOGI.
*/
static const char *version =
	"iph5526.c:v1.0 07.08.99 Vineet Abraham (vmabraham@hotmail.com)\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include <linux/fcdevice.h>	/* had the declarations for init_fcdev among
				   others + includes if_fcdevice.h */

#include "../../scsi/scsi.h"
#include <scsi/scsi_host.h>
#include "../../fc4/fcp.h"

#include <asm/system.h>

/* driver specific header files */
#include "tach_structs.h"
#include "iph5526_ip.h"
#include "iph5526_scsi.h"
#include "iph5526_novram.c"
#define RUN_AT(x) (jiffies + (x))

#define DEBUG_5526_0 0
#define DEBUG_5526_1 0
#define DEBUG_5526_2 0

#if DEBUG_5526_0
#define DPRINTK(format, a...) {printk("%s: ", fi->name); \
			printk(format, ##a); \
			printk("\n");}
#define ENTER(x)	{printk("%s: ", fi->name); \
			printk("iph5526.c : entering %s()\n", x);}
#define LEAVE(x)	{printk("%s: ", fi->name); \
			printk("iph5526.c : leaving %s()\n",x);}
#else
#define DPRINTK(format, a...) {}
#define ENTER(x) {}
#define LEAVE(x) {}
#endif

#if DEBUG_5526_1
#define DPRINTK1(format, a...) {printk("%s: ", fi->name); \
			printk(format, ##a); \
			printk("\n");}
#else
#define DPRINTK1(format, a...) {}
#endif

#if DEBUG_5526_2
#define DPRINTK2(format, a...) {printk("%s: ", fi->name); \
			printk(format, ##a); \
			printk("\n");}
#else
#define DPRINTK2(format, a...) {}
#endif

#define T_MSG(format, a...) {printk("%s: ", fi->name); \
			printk(format, ##a);\
			printk("\n");}

#define ALIGNED_SFS_ADDR(addr) ((((unsigned long)(addr) + (SFS_BUFFER_SIZE - 1)) & ~(SFS_BUFFER_SIZE - 1)) - (unsigned long)(addr))
#define ALIGNED_ADDR(addr, len) ((((unsigned long)(addr) + (len - 1)) & ~(len - 1)) - (unsigned long)(addr))
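
/* ALIGNED_SFS_ADDR(addr) evaluates to the number of bytes that must be
 * added to addr to round it up to the next SFS_BUFFER_SIZE boundary
 * (0 if it is already aligned); ALIGNED_ADDR() does the same for an
 * arbitrary power-of-two length. As a purely illustrative example: with
 * a 64-byte buffer size, an address ending in 0x70 would yield an
 * offset of 0x30. The real SFS_BUFFER_SIZE value comes from the driver
 * headers.
 */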
static struct pci_device_id iph5526_pci_tbl[] = {
	{ PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, PCI_ANY_ID, PCI_ANY_ID, },
	{ }			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, iph5526_pci_tbl);

MODULE_LICENSE("GPL");
#define MAX_FC_CARDS 2
static struct fc_info *fc[MAX_FC_CARDS+1];
static unsigned int pci_irq_line;
static struct {
	unsigned short vendor_id;
	unsigned short device_id;
	char *name;
} clone_list[] __initdata = {
	{PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_5526, "Interphase Fibre Channel HBA"},
	{PCI_VENDOR_ID_INTERPHASE, PCI_DEVICE_ID_INTERPHASE_55x6, "Interphase Fibre Channel HBA"},
};
static irqreturn_t tachyon_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void tachyon_interrupt_handler(int irq, void *dev_id, struct pt_regs *regs);

static int initialize_register_pointers(struct fc_info *fi);
void clean_up_memory(struct fc_info *fi);

static int tachyon_init(struct fc_info *fi);
static int build_queues(struct fc_info *fi);
static void build_tachyon_header(struct fc_info *fi, u_int my_id, u_int r_ctl, u_int d_id, u_int type, u_char seq_id, u_char df_ctl, u_short ox_id, u_short rx_id, char *data);
static int get_free_header(struct fc_info *fi);
static void build_EDB(struct fc_info *fi, char *data, u_short flags, u_short len);
static int get_free_EDB(struct fc_info *fi);
static void build_ODB(struct fc_info *fi, u_char seq_id, u_int d_id, u_int len, u_int cntl, u_short mtu, u_short ox_id, u_short rx_id, int NW_header, int int_required, u_int frame_class);
static void write_to_tachyon_registers(struct fc_info *fi);
static void reset_latch(struct fc_info *fi);
static void reset_tachyon(struct fc_info *fi, u_int value);
static void take_tachyon_offline(struct fc_info *fi);
static void read_novram(struct fc_info *fi);
static void reset_ichip(struct fc_info *fi);
static void update_OCQ_indx(struct fc_info *fi);
static void update_IMQ_indx(struct fc_info *fi, int count);
static void update_SFSBQ_indx(struct fc_info *fi);
static void update_MFSBQ_indx(struct fc_info *fi, int count);
static void update_tachyon_header_indx(struct fc_info *fi);
static void update_EDB_indx(struct fc_info *fi);
static void handle_FM_interrupt(struct fc_info *fi);
static void handle_MFS_interrupt(struct fc_info *fi);
static void handle_OOO_interrupt(struct fc_info *fi);
static void handle_SFS_interrupt(struct fc_info *fi);
static void handle_OCI_interrupt(struct fc_info *fi);
static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi);
static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi);
static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi);
static void handle_Unknown_Frame_interrupt(struct fc_info *fi);
static void handle_Busied_Frame_interrupt(struct fc_info *fi);
static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi);
static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi);
static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi);
static void completion_message_handler(struct fc_info *fi, u_int imq_int_type);
static void fill_login_frame(struct fc_info *fi, u_int logi);

static int tx_exchange(struct fc_info *fi, char *data, u_int len, u_int r_ctl, u_int type, u_int d_id, u_int mtu, int int_required, u_short ox_id, u_int frame_class);
static int tx_sequence(struct fc_info *fi, char *data, u_int len, u_int mtu, u_int d_id, u_short ox_id, u_short rx_id, u_char seq_id, int NW_flag, int int_required, u_int frame_class);
static int validate_login(struct fc_info *fi, u_int *base_ptr);
static void add_to_address_cache(struct fc_info *fi, u_int *base_ptr);
static void remove_from_address_cache(struct fc_info *fi, u_int *data, u_int cmnd_code);
static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr);
static int sid_logged_in(struct fc_info *fi, u_int s_id);
static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data);
static int display_cache(struct fc_info *fi);

static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id);
static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id);
static void tx_prli(struct fc_info *fi, u_int command_code, u_int d_id, u_short received_ox_id);
static void tx_logo(struct fc_info *fi, u_int d_id, u_short received_ox_id);
static void tx_adisc(struct fc_info *fi, u_int cmnd_code, u_int d_id, u_short received_ox_id);
static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code);
static u_int plogi_ok(struct fc_info *fi, u_int *buff_addr, int size);
static void tx_acc(struct fc_info *fi, u_int d_id, u_short received_ox_id);
static void tx_name_server_req(struct fc_info *fi, u_int req);
static void rscn_handler(struct fc_info *fi, u_int node_id);
static void tx_scr(struct fc_info *fi);
static void scr_timer(unsigned long data);
static void explore_fabric(struct fc_info *fi, u_int *buff_addr);
static void perform_adisc(struct fc_info *fi);
static void local_port_discovery(struct fc_info *fi);
static void add_to_ox_id_list(struct fc_info *fi, u_int transaction_id, u_int cmnd_code);
static u_int remove_from_ox_id_list(struct fc_info *fi, u_short received_ox_id);
static void add_display_cache_timer(struct fc_info *fi);

static void nos_ols_timer(unsigned long data);
static void loop_timer(unsigned long data);
static void fabric_explore_timer(unsigned long data);
static void port_discovery_timer(unsigned long data);
static void display_cache_timer(unsigned long data);

static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni);
static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target);
static void update_FCP_CMND_indx(struct fc_info *fi);
static int get_free_SDB(struct fc_info *fi);
static void update_SDB_indx(struct fc_info *fi);
static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action);
static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id);
static int abort_exchange(struct fc_info *fi, u_short ox_id);
static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id);
static int get_scsi_oxid(struct fc_info *fi);
static void update_scsi_oxid(struct fc_info *fi);

static Scsi_Host_Template driver_template = IPH5526_SCSI_FC;

static void iph5526_timeout(struct net_device *dev);

static int iph5526_probe_pci(struct net_device *dev);
int __init iph5526_probe(struct net_device *dev)
{
	if (iph5526_probe_pci(dev) == 0)
		return 0;
	return -ENODEV;
}
static int __init iph5526_probe_pci(struct net_device *dev)
{
	struct fc_info *fi = dev->priv;

	dev->base_addr = fi->base_addr;

	if (dev->priv == NULL)
		return -ENODEV;

	/* Assign our MAC address.
	 */
	dev->dev_addr[0] = (fi->g.my_port_name_high & 0x0000FF00) >> 8;
	dev->dev_addr[1] = fi->g.my_port_name_high;
	dev->dev_addr[2] = (fi->g.my_port_name_low & 0xFF000000) >> 24;
	dev->dev_addr[3] = (fi->g.my_port_name_low & 0x00FF0000) >> 16;
	dev->dev_addr[4] = (fi->g.my_port_name_low & 0x0000FF00) >> 8;
	dev->dev_addr[5] = fi->g.my_port_name_low;
	return 0;
}
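
/* Note on the MAC assignment above: the six dev_addr bytes are simply the
 * low six bytes of the adapter's 64-bit Port_Name (WWN) -- the low two
 * bytes of my_port_name_high followed by all four bytes of
 * my_port_name_low.
 */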
static int __init fcdev_init(struct net_device *dev)
{
	SET_MODULE_OWNER(dev);
	dev->open = iph5526_open;
	dev->stop = iph5526_close;
	dev->hard_start_xmit = iph5526_send_packet;
	dev->get_stats = iph5526_get_stats;
	dev->set_multicast_list = NULL;
	dev->change_mtu = iph5526_change_mtu;
	dev->tx_timeout = iph5526_timeout;
	dev->watchdog_timeo = 5*HZ;
	return 0;
}
/* initialize tachyon and take it OnLine */
static int tachyon_init(struct fc_info *fi)
{
	ENTER("tachyon_init");
	if (build_queues(fi) == 0) {
		T_MSG("build_queues() failed");
		return 0;
	}
	/* Retrieve your port/node name.
	 */
	read_novram(fi);
	reset_tachyon(fi, SOFTWARE_RESET);
	LEAVE("tachyon_init");
	return 1;
}
/* Build the 4 Qs - IMQ, OCQ, MFSBQ, SFSBQ */
/* Lots of dma_pages needed as Tachyon DMAs almost everything into
 * host memory.
 */
static int build_queues(struct fc_info *fi)
{
	int i, j;
	u_char *addr;
	ENTER("build_queues");
	/* Initializing Queue Variables.
	 */
	fi->q.ptr_host_ocq_cons_indx = NULL;
	fi->q.ptr_host_hpcq_cons_indx = NULL;
	fi->q.ptr_host_imq_prod_indx = NULL;

	fi->q.ptr_ocq_base = NULL;
	fi->q.ocq_prod_indx = 0;

	fi->q.ptr_imq_base = NULL;
	fi->q.imq_cons_indx = 0;
	fi->q.imq_prod_indx = 0;

	fi->q.ptr_mfsbq_base = NULL;
	fi->q.mfsbq_prod_indx = 0;
	fi->q.mfsbq_cons_indx = 0;
	fi->q.mfsbuff_len = 0;
	fi->q.mfsbuff_end = 0;
	fi->g.mfs_buffer_count = 0;

	fi->q.ptr_sfsbq_base = NULL;
	fi->q.sfsbq_prod_indx = 0;
	fi->q.sfsbq_cons_indx = 0;
	fi->q.sfsbuff_len = 0;
	fi->q.sfsbuff_end = 0;

	fi->q.fcp_cmnd_indx = 0;

	fi->q.ptr_edb_base = NULL;
	fi->q.edb_buffer_indx = 0;
	fi->q.ptr_tachyon_header_base = NULL;
	fi->q.tachyon_header_indx = 0;
	fi->node_info_list = NULL;
	fi->ox_id_list = NULL;
	fi->g.loop_up = FALSE;
	fi->g.ptp_up = FALSE;
	fi->g.link_up = FALSE;
	fi->g.fabric_present = FALSE;
	fi->g.n_port_try = FALSE;
	fi->g.dont_init = FALSE;
	fi->g.nport_timer_set = FALSE;
	fi->g.lport_timer_set = FALSE;
	fi->g.no_of_targets = 0;
	fi->g.perform_adisc = FALSE;

	if ( (fi->q.ptr_ocq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
		T_MSG("failed to get OCQ page");
		return 0;
	}
	/* set up the OCQ structures */
	for (i = 0; i < OCQ_LENGTH; i++)
		fi->q.ptr_odb[i] = fi->q.ptr_ocq_base + NO_OF_ENTRIES*i;

	if ( (fi->q.ptr_imq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
		T_MSG("failed to get IMQ page");
		return 0;
	}
	for (i = 0; i < IMQ_LENGTH; i++)
		fi->q.ptr_imqe[i] = fi->q.ptr_imq_base + NO_OF_ENTRIES*i;

	if ( (fi->q.ptr_mfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
		T_MSG("failed to get MFSBQ page");
		return 0;
	}
	memset((char *)fi->q.ptr_mfsbq_base, 0, MFSBQ_LENGTH * 32);
	/* Allocate one huge chunk of memory... helps while reassembling
	 * frames.
	 */
	if ( (addr = (u_char *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
		T_MSG("failed to get MFSBQ page");
		return 0;
	}
	/* fill in addresses of empty buffers */
	for (i = 0; i < MFSBQ_LENGTH; i++) {
		for (j = 0; j < NO_OF_ENTRIES; j++) {
			*(fi->q.ptr_mfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
			addr += MFS_BUFFER_SIZE;
		}
	}

	/* The number of entries in each MFS buffer is 8. There are 8
	 * MFS buffers. That leaves us with 4096-256 bytes. We use them
	 * as temporary space for ELS frames. This is done to make sure that
	 * the addresses are aligned.
	 */
	fi->g.els_buffer[0] = fi->q.ptr_mfsbq_base + MFSBQ_LENGTH*NO_OF_ENTRIES;
	for (i = 1; i < MAX_PENDING_FRAMES; i++)
		fi->g.els_buffer[i] = fi->g.els_buffer[i-1] + 64;

	if ( (fi->q.ptr_sfsbq_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
		T_MSG("failed to get SFSBQ page");
		return 0;
	}
	memset((char *)fi->q.ptr_sfsbq_base, 0, SFSBQ_LENGTH * 32);
	/* fill in addresses of empty buffers */
	for (i = 0; i < SFSBQ_LENGTH; i++)
		for (j = 0; j < NO_OF_ENTRIES; j++){
			addr = kmalloc(SFS_BUFFER_SIZE*2, GFP_KERNEL);
			if (addr == NULL){
				T_MSG("ptr_sfs_buffer : memory not allocated");
				return 0;
			}
			else {
				int offset = ALIGNED_SFS_ADDR(addr);
				memset((char *)addr, 0, SFS_BUFFER_SIZE);
				fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j] = (u_int *)addr;
				addr += offset;
				*(fi->q.ptr_sfsbq_base + i*NO_OF_ENTRIES + j) = htonl(virt_to_bus(addr));
			}
		}

	/* The number of entries in each SFS buffer is 8. There are 8
	 * MFS buffers. That leaves us with 4096-256 bytes. We use them
	 * as temporary space for ARP frames. This is done in order to
	 * support HW_Types of 0x1 and 0x6.
	 */
	fi->g.arp_buffer = (char *)fi->q.ptr_sfsbq_base + SFSBQ_LENGTH*NO_OF_ENTRIES*4;

	if ((fi->q.ptr_edb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5) ) == 0) {
		T_MSG("failed to get EDB page");
		return 0;
	}
	for (i = 0; i < EDB_LEN; i++)
		fi->q.ptr_edb[i] = fi->q.ptr_edb_base + 2*i;

	/* OX_IDs range from 0x0 - 0x4FFF.
	 */
	if ((fi->q.ptr_sest_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
		T_MSG("failed to get SEST page");
		return 0;
	}
	for (i = 0; i < SEST_LENGTH; i++)
		fi->q.ptr_sest[i] = fi->q.ptr_sest_base + NO_OF_ENTRIES*i;

	if ((fi->q.ptr_sdb_base = (u_int *)__get_free_pages(GFP_KERNEL, 5)) == 0) {
		T_MSG("failed to get SDB page");
		return 0;
	}
	for (i = 0 ; i < NO_OF_SDB_ENTRIES; i++)
		fi->q.ptr_sdb_slot[i] = fi->q.ptr_sdb_base + (SDB_SIZE/4)*i;

	if ((fi->q.ptr_fcp_cmnd_base = (u_int *)__get_free_pages(GFP_KERNEL, 0)) == 0) {
		T_MSG("failed to get FCP_CMND page");
		return 0;
	}
	for (i = 0; i < NO_OF_FCP_CMNDS; i++)
		fi->q.ptr_fcp_cmnd[i] = fi->q.ptr_fcp_cmnd_base + NO_OF_ENTRIES*i;

	/* Allocate space for Tachyon Header as well...
	 */
	if ((fi->q.ptr_tachyon_header_base = (u_int *)__get_free_pages(GFP_KERNEL, 0) ) == 0) {
		T_MSG("failed to get tachyon_header page");
		return 0;
	}
	for (i = 0; i < NO_OF_TACH_HEADERS; i++)
		fi->q.ptr_tachyon_header[i] = fi->q.ptr_tachyon_header_base + 16*i;

	/* Allocate memory for indices.
	 * Indices should be aligned on 32 byte boundaries.
	 */
	fi->q.host_ocq_cons_indx = kmalloc(2*32, GFP_KERNEL);
	if (fi->q.host_ocq_cons_indx == NULL){
		T_MSG("fi->q.host_ocq_cons_indx : memory not allocated");
		return 0;
	}
	fi->q.ptr_host_ocq_cons_indx = fi->q.host_ocq_cons_indx;
	if ((u_long)(fi->q.host_ocq_cons_indx) % 32)
		fi->q.host_ocq_cons_indx++;

	fi->q.host_hpcq_cons_indx = kmalloc(2*32, GFP_KERNEL);
	if (fi->q.host_hpcq_cons_indx == NULL){
		T_MSG("fi->q.host_hpcq_cons_indx : memory not allocated");
		return 0;
	}
	fi->q.ptr_host_hpcq_cons_indx = fi->q.host_hpcq_cons_indx;
	if ((u_long)(fi->q.host_hpcq_cons_indx) % 32)
		fi->q.host_hpcq_cons_indx++;

	fi->q.host_imq_prod_indx = kmalloc(2*32, GFP_KERNEL);
	if (fi->q.host_imq_prod_indx == NULL){
		T_MSG("fi->q.host_imq_prod_indx : memory not allocated");
		return 0;
	}
	fi->q.ptr_host_imq_prod_indx = fi->q.host_imq_prod_indx;
	if ((u_long)(fi->q.host_imq_prod_indx) % 32)
		fi->q.host_imq_prod_indx++;

	LEAVE("build_queues");
	return 1;
}
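
/* A note on the layout build_queues() sets up: the OCQ, IMQ, SFSBQ and
 * MFSBQ descriptor rings each live in their own DMA-able page, while the
 * SEST, SDB and EDB tables get larger (order-5) allocations. All of the
 * addresses handed to Tachyon go through virt_to_bus(), and the
 * consumer/producer index words are padded out to 32-byte alignment
 * because the chip DMAs them directly.
 */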
static void write_to_tachyon_registers(struct fc_info *fi)
{
	u_int bus_addr, bus_indx_addr, i;

	ENTER("write_to_tachyon_registers");

	/* Clear Queues each time Tachyon is reset */
	memset((char *)fi->q.ptr_ocq_base, 0, OCQ_LENGTH * 32);
	memset((char *)fi->q.ptr_imq_base, 0, IMQ_LENGTH * 32);
	memset((char *)fi->q.ptr_edb_base, 0, EDB_LEN * 8);
	memset((char *)fi->q.ptr_sest_base, 0, SEST_LENGTH * 32);
	memset((char *)fi->q.ptr_sdb_base, 0, NO_OF_SDB_ENTRIES * SDB_SIZE);
	memset((char *)fi->q.ptr_tachyon_header_base, 0xFF, NO_OF_TACH_HEADERS * TACH_HEADER_SIZE);
	for (i = 0; i < SEST_LENGTH; i++)
		fi->q.free_scsi_oxid[i] = OXID_AVAILABLE;
	for (i = 0; i < NO_OF_SDB_ENTRIES; i++)
		fi->q.sdb_slot_status[i] = SDB_FREE;

	take_tachyon_offline(fi);
	writel(readl(fi->t_r.ptr_tach_config_reg) | SCSI_ENABLE | WRITE_STREAM_SIZE | READ_STREAM_SIZE | PARITY_EVEN | OOO_REASSEMBLY_DISABLE, fi->t_r.ptr_tach_config_reg);

	/* Write OCQ registers */
	fi->q.ocq_prod_indx = 0;
	*(fi->q.host_ocq_cons_indx) = 0;

	/* The Tachyon needs to be passed the "real" address */
	bus_addr = virt_to_bus(fi->q.ptr_ocq_base);
	writel(bus_addr, fi->t_r.ptr_ocq_base_reg);
	writel(OCQ_LENGTH - 1, fi->t_r.ptr_ocq_len_reg);
	bus_indx_addr = virt_to_bus(fi->q.host_ocq_cons_indx);
	writel(bus_indx_addr, fi->t_r.ptr_ocq_cons_indx_reg);

	/* Write IMQ registers */
	fi->q.imq_cons_indx = 0;
	*(fi->q.host_imq_prod_indx) = 0;
	bus_addr = virt_to_bus(fi->q.ptr_imq_base);
	writel(bus_addr, fi->t_r.ptr_imq_base_reg);
	writel(IMQ_LENGTH - 1, fi->t_r.ptr_imq_len_reg);
	bus_indx_addr = virt_to_bus(fi->q.host_imq_prod_indx);
	writel(bus_indx_addr, fi->t_r.ptr_imq_prod_indx_reg);

	/* Write MFSBQ registers */
	fi->q.mfsbq_prod_indx = MFSBQ_LENGTH - 1;
	fi->q.mfsbuff_end = MFS_BUFFER_SIZE - 1;
	fi->q.mfsbq_cons_indx = 0;
	bus_addr = virt_to_bus(fi->q.ptr_mfsbq_base);
	writel(bus_addr, fi->t_r.ptr_mfsbq_base_reg);
	writel(MFSBQ_LENGTH - 1, fi->t_r.ptr_mfsbq_len_reg);
	writel(fi->q.mfsbuff_end, fi->t_r.ptr_mfsbuff_len_reg);
	/* Do this last as tachyon will prefetch the
	 * first entry as soon as we write to it.
	 */
	writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);

	/* Write SFSBQ registers */
	fi->q.sfsbq_prod_indx = SFSBQ_LENGTH - 1;
	fi->q.sfsbuff_end = SFS_BUFFER_SIZE - 1;
	fi->q.sfsbq_cons_indx = 0;
	bus_addr = virt_to_bus(fi->q.ptr_sfsbq_base);
	writel(bus_addr, fi->t_r.ptr_sfsbq_base_reg);
	writel(SFSBQ_LENGTH - 1, fi->t_r.ptr_sfsbq_len_reg);
	writel(fi->q.sfsbuff_end, fi->t_r.ptr_sfsbuff_len_reg);
	/* Do this last as tachyon will prefetch the first
	 * entry as soon as we write to it.
	 */
	writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);

	/* Write SEST registers */
	bus_addr = virt_to_bus(fi->q.ptr_sest_base);
	writel(bus_addr, fi->t_r.ptr_sest_base_reg);
	writel(SEST_LENGTH - 1, fi->t_r.ptr_sest_len_reg);
	/* the last 2 bits _should_ be 1 */
	writel(SEST_BUFFER_SIZE - 1, fi->t_r.ptr_scsibuff_len_reg);

	/* write AL_TIME & E_D_TOV into the registers */
	writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
	/* Tell Tachyon to pick a Soft Assigned AL_PA */
	writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);

	/* Read the WWN from EEPROM . But, for now we assign it here. */
	writel(WORLD_WIDE_NAME_LOW, fi->t_r.ptr_fm_wwn_low_reg);
	writel(WORLD_WIDE_NAME_HIGH, fi->t_r.ptr_fm_wwn_hi_reg);

	DPRINTK1("TACHYON initializing as L_Port...\n");
	writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);

	LEAVE("write_to_tachyon_registers");
}
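
/* The register programming above follows a simple producer/consumer
 * protocol: for the outbound (OCQ) and inbound (IMQ) rings the host owns
 * one index and Tachyon owns the other, with the chip-owned index DMAed
 * into the 32-byte aligned host_*_indx words so the interrupt handler can
 * poll it without touching the chip. The MFSBQ/SFSBQ producer indices are
 * written last on purpose -- as the comments note, Tachyon prefetches the
 * first entry as soon as the register is written.
 */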
static irqreturn_t tachyon_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct Scsi_Host *host = dev_id;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	struct fc_info *fi = hostdata->fi;
	u_long flags;
	spin_lock_irqsave(&fi->fc_lock, flags);
	tachyon_interrupt_handler(irq, dev_id, regs);
	spin_unlock_irqrestore(&fi->fc_lock, flags);
	return IRQ_HANDLED;
}
static void tachyon_interrupt_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct Scsi_Host *host = dev_id;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	struct fc_info *fi = hostdata->fi;
	u_int *ptr_imq_entry;
	u_int imq_int_type, current_IMQ_index = 0, prev_IMQ_index;
	int index, no_of_entries = 0;

	ENTER("tachyon_interrupt");
	if (fi->q.host_imq_prod_indx != NULL) {
		current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
	}
	else {
		/* _Should not_ happen */
		T_MSG("IMQ_indx NULL. DISABLING INTERRUPTS!!!\n");
		writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
	}

	if (current_IMQ_index > fi->q.imq_cons_indx)
		no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
	else
	if (current_IMQ_index < fi->q.imq_cons_indx)
		no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);

	if (no_of_entries == 0) {
		u_int ichip_status;
		ichip_status = readl(fi->i_r.ptr_ichip_hw_status_reg);
		if (ichip_status & 0x20) {
			/* Should _never_ happen. Might require a hard reset */
			T_MSG("Too bad... PCI Bus Error. Resetting (i)chip");
			reset_ichip(fi);
			T_MSG("DISABLING INTERRUPTS!!!\n");
			writel(0x0, fi->i_r.ptr_ichip_hw_control_reg);
		}
	}

	prev_IMQ_index = current_IMQ_index;
	for (index = 0; index < no_of_entries; index++) {
		ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
		imq_int_type = ntohl(*ptr_imq_entry);

		completion_message_handler(fi, imq_int_type);
		if ((fi->g.link_up == FALSE) && ((imq_int_type == MFS_BUF_WARN) || (imq_int_type == SFS_BUF_WARN) || (imq_int_type == IMQ_BUF_WARN)))
			break;
		update_IMQ_indx(fi, 1);

		/* Check for more entries */
		current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));
		if (current_IMQ_index != prev_IMQ_index) {
			no_of_entries++;
			prev_IMQ_index = current_IMQ_index;
		}
	} /*end of for loop*/

	LEAVE("tachyon_interrupt");
}
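
/* The entry count above is just circular-buffer arithmetic: the IMQ
 * producer index (written by Tachyon into host memory) is compared with
 * the host's consumer index, and a smaller producer value simply means
 * the ring has wrapped, so IMQ_LENGTH is added back in. Re-reading the
 * producer index at the bottom of the loop lets entries that arrive while
 * the handler is running be drained in the same pass.
 */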
static void handle_SFS_BUF_WARN_interrupt(struct fc_info *fi)
{
	int i;
	ENTER("handle_SFS_BUF_WARN_interrupt");
	if (fi->g.link_up == FALSE) {
		reset_tachyon(fi, SOFTWARE_RESET);
		return;
	}
	/* Free up all but one entry in the Q.
	 */
	for (i = 0; i < ((SFSBQ_LENGTH - 1) * NO_OF_ENTRIES); i++) {
		handle_SFS_interrupt(fi);
		update_IMQ_indx(fi, 1);
	}
	LEAVE("handle_SFS_BUF_WARN_interrupt");
}
/* Untested_Code_Begin */
static void handle_MFS_BUF_WARN_interrupt(struct fc_info *fi)
{
	int i;
	ENTER("handle_MFS_BUF_WARN_interrupt");
	if (fi->g.link_up == FALSE) {
		reset_tachyon(fi, SOFTWARE_RESET);
		return;
	}
	/* FIXME: freeing up 8 entries.
	 */
	for (i = 0; i < NO_OF_ENTRIES; i++) {
		handle_MFS_interrupt(fi);
		update_IMQ_indx(fi, 1);
	}
	LEAVE("handle_MFS_BUF_WARN_interrupt");
}
/*Untested_Code_End */
static void handle_IMQ_BUF_WARN_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	u_int imq_int_type, current_IMQ_index = 0, temp_imq_cons_indx;
	int index, no_of_entries = 0;

	ENTER("handle_IMQ_BUF_WARN_interrupt");
	if (fi->g.link_up == FALSE) {
		reset_tachyon(fi, SOFTWARE_RESET);
		return;
	}
	current_IMQ_index = ntohl(*(fi->q.host_imq_prod_indx));

	if (current_IMQ_index > fi->q.imq_cons_indx)
		no_of_entries = current_IMQ_index - fi->q.imq_cons_indx;
	else
	if (current_IMQ_index < fi->q.imq_cons_indx)
		no_of_entries = IMQ_LENGTH - (fi->q.imq_cons_indx - current_IMQ_index);
	/* We don't want to look at the same IMQ entry again.
	 */
	temp_imq_cons_indx = fi->q.imq_cons_indx + 1;
	if (no_of_entries != 0)
		no_of_entries -= 1;
	for (index = 0; index < no_of_entries; index++) {
		ptr_imq_entry = fi->q.ptr_imqe[temp_imq_cons_indx];
		imq_int_type = ntohl(*ptr_imq_entry);
		if (imq_int_type != IMQ_BUF_WARN)
			completion_message_handler(fi, imq_int_type);
		temp_imq_cons_indx++;
		if (temp_imq_cons_indx == IMQ_LENGTH)
			temp_imq_cons_indx = 0;
	} /*end of for loop*/
	if (no_of_entries != 0)
		update_IMQ_indx(fi, no_of_entries);
	LEAVE("handle_IMQ_BUF_WARN_interrupt");
}
static void completion_message_handler(struct fc_info *fi, u_int imq_int_type)
{
	switch(imq_int_type) {
	case OUTBOUND_COMPLETION:
		DPRINTK("OUTBOUND_COMPLETION message received");
		break;
	case OUTBOUND_COMPLETION_I:
		DPRINTK("OUTBOUND_COMPLETION_I message received");
		handle_OCI_interrupt(fi);
		break;
	case OUT_HI_PRI_COMPLETION:
		DPRINTK("OUT_HI_PRI_COMPLETION message received");
		break;
	case OUT_HI_PRI_COMPLETION_I:
		DPRINTK("OUT_HI_PRI_COMPLETION_I message received");
		break;
	case INBOUND_MFS_COMPLETION:
		DPRINTK("INBOUND_MFS_COMPLETION message received");
		handle_MFS_interrupt(fi);
		break;
	case INBOUND_OOO_COMPLETION:
		DPRINTK("INBOUND_OOO_COMPLETION message received");
		handle_OOO_interrupt(fi);
		break;
	case INBOUND_SFS_COMPLETION:
		DPRINTK("INBOUND_SFS_COMPLETION message received");
		handle_SFS_interrupt(fi);
		break;
	case INBOUND_UNKNOWN_FRAME_I:
		DPRINTK("INBOUND_UNKNOWN_FRAME message received");
		handle_Unknown_Frame_interrupt(fi);
		break;
	case INBOUND_BUSIED_FRAME:
		DPRINTK("INBOUND_BUSIED_FRAME message received");
		handle_Busied_Frame_interrupt(fi);
		break;
	case FRAME_MGR_INTERRUPT:
		DPRINTK("FRAME_MGR_INTERRUPT message received");
		handle_FM_interrupt(fi);
		break;
	case READ_STATUS:
		DPRINTK("READ_STATUS message received");
		break;
	case SFS_BUF_WARN:
		DPRINTK("SFS_BUF_WARN message received");
		handle_SFS_BUF_WARN_interrupt(fi);
		break;
	case MFS_BUF_WARN:
		DPRINTK("MFS_BUF_WARN message received");
		handle_MFS_BUF_WARN_interrupt(fi);
		break;
	case IMQ_BUF_WARN:
		DPRINTK("IMQ_BUF_WARN message received");
		handle_IMQ_BUF_WARN_interrupt(fi);
		break;
	case INBOUND_C1_TIMEOUT:
		DPRINTK("INBOUND_C1_TIMEOUT message received");
		break;
	case BAD_SCSI_FRAME:
		DPRINTK("BAD_SCSI_FRAME message received");
		handle_Bad_SCSI_Frame_interrupt(fi);
		break;
	case INB_SCSI_STATUS_COMPLETION:
		DPRINTK("INB_SCSI_STATUS_COMPL message received");
		handle_Inbound_SCSI_Status_interrupt(fi);
		break;
	case INBOUND_SCSI_COMMAND:
		DPRINTK("INBOUND_SCSI_COMMAND message received");
		handle_Inbound_SCSI_Command_interrupt(fi);
		break;
	case INBOUND_SCSI_DATA_COMPLETION:
		DPRINTK("INBOUND_SCSI_DATA message received");
		/* Only for targets */
		break;
	default:
		T_MSG("DEFAULT message received, type = %x", imq_int_type);
	}
}
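
/* Every IMQ entry begins with a 32-bit message-type word; the switch
 * above simply fans each completion message out to the matching
 * handle_*_interrupt() routine. The BUF_WARN variants exist so the driver
 * can hand fresh buffers back to Tachyon before the corresponding queue
 * runs completely dry.
 */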
static void handle_OCI_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	u_long transaction_id = 0;
	unsigned short status, seq_count, transmitted_ox_id;
	struct Scsi_Host *host = fi->host;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	Scsi_Cmnd *Cmnd;
	u_int tag;

	ENTER("handle_OCI_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	transaction_id = ntohl(*(ptr_imq_entry + 1));
	status = ntohl(*(ptr_imq_entry + 2)) >> 16;
	seq_count = ntohl(*(ptr_imq_entry + 3));
	DPRINTK("transaction_id= %x", (u_int)transaction_id);
	tag = transaction_id & 0xFFFF0000;
	transmitted_ox_id = transaction_id;

	/* The INT could be either due to TIME_OUT | BAD_ALPA.
	 * But we check only for TimeOuts. Bad AL_PA will be
	 * caught by the FM_interrupt handler.
	 */
	if ((status == OCM_TIMEOUT_OR_BAD_ALPA) && (!fi->g.port_discovery) && (!fi->g.perform_adisc)){
		DPRINTK("Frame TimeOut on OX_ID = %x", (u_int)transaction_id);

		/* Is it a SCSI frame that is timing out ? Not a very good check...
		 */
		if ((transmitted_ox_id <= MAX_SCSI_OXID) && ((tag == FC_SCSI_BAD_TARGET) || (tag < 0x00FF0000))) {
			/* If it is a Bad AL_PA, we report it as BAD_TARGET.
			 * Else, we allow the command to time-out. A Link
			 * re-initialization could be taking place.
			 */
			if (tag == FC_SCSI_BAD_TARGET) {
				Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
				hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
				if (Cmnd != NULL) {
					Cmnd->result = DID_BAD_TARGET << 16;
					(*Cmnd->scsi_done) (Cmnd);
				}
				else
					T_MSG("NULL Command out of handler!");
			} /* if Bad Target */
			else {
				u_char missing_target = tag >> 16;
				struct fc_node_info *q = fi->node_info_list;
				/* A Node that we thought was logged in has gone
				 * away. We are the optimistic kind and we keep
				 * hoping that our dear little Target will come back
				 * to us. For now we log him out.
				 */
				DPRINTK2("Missing Target = %d", missing_target);
				while (q != NULL) {
					if (q->target_id == missing_target) {
						T_MSG("Target %d Logged out", q->target_id);
						q->login = LOGIN_ATTEMPTED;
						if (fi->num_nodes > 0)
							fi->num_nodes--;
						tx_logi(fi, ELS_PLOGI, q->d_id);
						break;
					}
					else
						q = q->next;
				}
			}
		} /* End of SCSI frame timing out. */
		else {
			/* An IP frame was transmitted to a Bad AL_PA. Free up
			 * the skb used.
			 */
			dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
			netif_wake_queue(fi->dev);
		} /* End of IP frame timing out. */
	} /* End of frame timing out. */
	else {
		/* Frame was transmitted successfully. Check if it was an ELS
		 * frame or an IP frame or a Bad_Target_Notification frame (in
		 * case of a ptp_link). Ugly!
		 */
		if ((status == 0) && (seq_count == 0)) {
			u_int tag = transaction_id & 0xFFFF0000;
			/* Continue with port discovery after an ELS is successfully
			 * transmitted. (status == 0).
			 */
			DPRINTK("tag = %x", tag);
			switch(tag) {
			case ELS_FLOGI:
				/* Letz use the Name Server instead */
				fi->g.explore_fabric = TRUE;
				fi->g.port_discovery = FALSE;
				fi->g.alpa_list_index = MAX_NODES;
				add_to_ox_id_list(fi, transaction_id, tag);
				break;
			case ELS_PLOGI:
				if (fi->g.fabric_present && (fi->g.name_server == FALSE))
					add_to_ox_id_list(fi, transaction_id, ELS_NS_PLOGI);
				else
					add_to_ox_id_list(fi, transaction_id, tag);
				break;
			case FC_SCSI_BAD_TARGET:
				Cmnd = hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID];
				hostdata->cmnd_handler[transmitted_ox_id & MAX_SCSI_XID] = NULL;
				if (Cmnd != NULL) {
					Cmnd->result = DID_BAD_TARGET << 16;
					(*Cmnd->scsi_done) (Cmnd);
				}
				else
					T_MSG("NULL Command out of handler!");
				break;
			default:
				add_to_ox_id_list(fi, transaction_id, tag);
			}

			if (fi->g.alpa_list_index >= MAX_NODES) {
				if (fi->g.port_discovery == TRUE) {
					fi->g.port_discovery = FALSE;
					add_display_cache_timer(fi);
				}
				fi->g.alpa_list_index = MAX_NODES;
			}
			if (fi->g.port_discovery == TRUE)
				local_port_discovery(fi);
		}
		else {
			/* An IP frame has been successfully transmitted.
			 * Free the skb that was used for this IP frame.
			 */
			if ((status == 0) && (seq_count > 1)) {
				dev_kfree_skb_irq((struct sk_buff *)(bus_to_virt(transaction_id)));
				netif_wake_queue(fi->dev);
			}
		}
	}
	LEAVE("handle_OCI_interrupt");
}
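
/* handle_OCI_interrupt() relies on how the driver encodes the OCQ
 * transaction ID when a frame is queued: for SCSI exchanges the low 16
 * bits carry the OX_ID and the high 16 bits a tag (command code,
 * FC_SCSI_BAD_TARGET or a missing-target number), while for IP frames
 * the ID is the bus address of the skb itself, which is why a timed-out
 * or completed IP frame can be freed with bus_to_virt() directly.
 */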
/* Right now we discard OOO frames */
static void handle_OOO_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	int queue_indx, offset, payload_size;
	int no_of_buffers = 1; /* header is in a separate buffer */
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
	/* Calculate total number of buffers */
	no_of_buffers += payload_size / MFS_BUFFER_SIZE;
	if (payload_size % MFS_BUFFER_SIZE)
		no_of_buffers++;

	/* provide Tachyon with another set of buffers */
	fi->g.mfs_buffer_count += no_of_buffers;
	if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
		int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
		fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
		update_MFSBQ_indx(fi, count);
	}
}
static void handle_MFS_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry, *buff_addr;
	u_int type_of_frame, s_id;
	int queue_indx, offset, payload_size, starting_indx, starting_offset;
	u_short received_ox_id;
	int no_of_buffers = 1; /* header is in a separate buffer */
	struct sk_buff *skb;
	int wrap_around = FALSE, no_of_wrap_buffs = NO_OF_ENTRIES - 1;
	ENTER("handle_MFS_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	DPRINTK("queue_indx = %d, offset = %d\n", queue_indx, offset);
	payload_size = ntohl(*(ptr_imq_entry + 2)) - TACHYON_HEADER_LEN;
	DPRINTK("payload_size = %d", payload_size);
	/* Calculate total number of buffers */
	no_of_buffers += payload_size / MFS_BUFFER_SIZE;
	if (payload_size % MFS_BUFFER_SIZE)
		no_of_buffers++;
	DPRINTK("no_of_buffers = %d", no_of_buffers);

	if ((no_of_buffers - 1) <= offset) {
		starting_offset = offset - (no_of_buffers - 1);
		starting_indx = queue_indx;
	}
	else {
		int temp = no_of_buffers - (offset + 1);
		int no_of_queues = temp / NO_OF_ENTRIES;
		starting_offset = temp % NO_OF_ENTRIES;
		if (starting_offset != 0) {
			no_of_wrap_buffs = starting_offset - 1; //exclude header
			starting_offset = NO_OF_ENTRIES - starting_offset;
			no_of_queues++;
		}
		starting_indx = queue_indx - no_of_queues;
		if (starting_indx < 0) {
			no_of_wrap_buffs -= (starting_indx + 1) * NO_OF_ENTRIES;
			starting_indx = MFSBQ_LENGTH + starting_indx;
			wrap_around = TRUE;
		}
	}

	DPRINTK("starting_indx = %d, starting offset = %d no_of_wrap_buffs = %d\n", starting_indx, starting_offset, no_of_wrap_buffs);
	/* Get Tachyon Header from first buffer */
	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base + starting_indx*NO_OF_ENTRIES + starting_offset)));

	/* extract Type of Frame */
	type_of_frame = (u_int)ntohl(*(buff_addr + 4)) & 0xFF000000;
	s_id = (u_int)ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
	received_ox_id = ntohl(*(buff_addr + 6)) >> 16;
	buff_addr += MFS_BUFFER_SIZE/4;
	DPRINTK("type_of_frame = %x, s_id = %x, ox_id = %x", type_of_frame, s_id, received_ox_id);

	switch(type_of_frame) {
	case TYPE_LLC_SNAP:
		skb = dev_alloc_skb(payload_size);
		if (skb == NULL) {
			printk(KERN_NOTICE "%s: In handle_MFS_interrupt() Memory squeeze, dropping packet.\n", fi->name);
			fi->fc_stats.rx_dropped++;
			fi->g.mfs_buffer_count += no_of_buffers;
			if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
				int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
				fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
				update_MFSBQ_indx(fi, count);
			}
			return;
		}
		if (wrap_around) {
			int wrap_size = no_of_wrap_buffs * MFS_BUFFER_SIZE;
			int tail_size = payload_size - wrap_size;
			DPRINTK("wrap_size = %d, tail_size = %d\n", wrap_size, tail_size);
			if (no_of_wrap_buffs)
				memcpy(skb_put(skb, wrap_size), buff_addr, wrap_size);
			buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base)));
			memcpy(skb_put(skb, tail_size), buff_addr, tail_size);
		}
		else
			memcpy(skb_put(skb, payload_size), buff_addr, payload_size);
		rx_net_mfs_packet(fi, skb);
		break;
	default:
		T_MSG("Unknown Frame Type received. Type = %x", type_of_frame);
	}

	/* provide Tachyon with another set of buffers */
	fi->g.mfs_buffer_count += no_of_buffers;
	if (fi->g.mfs_buffer_count >= NO_OF_ENTRIES) {
		int count = fi->g.mfs_buffer_count / NO_OF_ENTRIES;
		fi->g.mfs_buffer_count -= NO_OF_ENTRIES * count;
		update_MFSBQ_indx(fi, count);
	}
	LEAVE("handle_MFS_interrupt");
}
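
/* The index arithmetic above walks backwards from the MFSBQ entry that
 * signalled the completion to the buffer holding the Tachyon header:
 * payload_size is rounded up to a buffer count, and if that count reaches
 * back past entry 0 of the MFSBQ the starting index wraps to the end of
 * the ring (wrap_around), in which case the payload has to be copied into
 * the skb in two pieces.
 */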
static void handle_Unknown_Frame_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	int queue_indx, offset;
	ENTER("handle_Unknown_Frame_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	/* We discard the "unknown" frame */
	/* provide Tachyon with another set of buffers */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_Unknown_Frame_interrupt");
}
static void handle_Busied_Frame_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	int queue_indx, offset;
	ENTER("handle_Busied_Frame_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	/* We discard the "busied" frame */
	/* provide Tachyon with another set of buffers */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_Busied_Frame_interrupt");
}
static void handle_Bad_SCSI_Frame_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry, *buff_addr, *tach_header, *ptr_edb;
	u_int s_id, rctl, frame_class, burst_len, transfered_len, len = 0;
	int queue_indx, offset, payload_size, i;
	u_short ox_id, rx_id, x_id, mtu = 512;
	u_char target_id = 0xFF;

	ENTER("handle_Bad_SCSI_Frame_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	payload_size = ntohl(*(ptr_imq_entry + 2));

	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));

	rctl = ntohl(*(buff_addr + 2)) & 0xFF000000;
	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
	ox_id = ntohl(*(buff_addr + 6)) >> 16;
	rx_id = ntohl(*(buff_addr + 6));
	x_id = ox_id & MAX_SCSI_XID;

	/* Any frame that comes in with OX_ID that matches an OX_ID
	 * that has been allocated for SCSI, will be called a Bad
	 * SCSI frame if the Exchange is not valid any more.
	 *
	 * We will also get a Bad SCSI frame interrupt if we receive
	 * a XFER_RDY with offset != 0. Tachyon washes its hands off
	 * this Exchange. We have to take care of ourselves. Grrr...
	 */
	if (rctl == DATA_DESCRIPTOR) {
		struct fc_node_info *q = fi->node_info_list;
		while (q != NULL) {
			if (q->d_id == s_id) {
				target_id = q->target_id;
				break;
			}
			else
				q = q->next;
		}
		frame_class = target_id;
		transfered_len = ntohl(*(buff_addr + 8));
		burst_len = ntohl(*(buff_addr + 9));

		build_ODB(fi, fi->g.seq_id, s_id, burst_len, 0, mtu, ox_id, rx_id, 0, 0, frame_class << 16);
		/* Update the SEQ_ID and Relative Offset in the
		 * Tachyon Header Structure.
		 */
		tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
		*(tach_header + 5) = htonl(fi->g.seq_id << 24);
		*(tach_header + 7) = htonl(transfered_len);
		fi->g.odb.hdr_addr = *(fi->q.ptr_sest[x_id] + 5);

		/* Invalidate the EDBs used
		 */
		ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));

		for (i = 0; i < EDB_LEN; i++)
			if (fi->q.ptr_edb[i] == ptr_edb)
				break;
		ptr_edb--;

		if (i < EDB_LEN) {
			int j = i;
			do {
				ptr_edb += 2;
				len += (htonl(*ptr_edb) & 0xFFFF);
				j = i;
				fi->q.free_edb_list[i++] = EDB_FREE;
				if (i == EDB_LEN) {
					i = 0;
					ptr_edb = fi->q.ptr_edb_base - 1;
				}
			} while (len < transfered_len);
			if (len > transfered_len) {
				fi->q.free_edb_list[j] = EDB_BUSY;
			}
		}
		else {
			T_MSG("EDB not found while freeing");
			if (offset == (NO_OF_ENTRIES - 1))
				update_SFSBQ_indx(fi);
			return;
		}

		/* Update the EDB pointer in the ODB.
		 */
		fi->g.odb.edb_addr = htonl(virt_to_bus(ptr_edb));
		memcpy(fi->q.ptr_odb[fi->q.ocq_prod_indx], &(fi->g.odb), sizeof(ODB));
		/* Update the EDB pointer in the SEST entry. We might need
		 * this if get another XFER_RDY for the same Exchange.
		 */
		*(fi->q.ptr_sest[x_id] + 7) = htonl(virt_to_bus(ptr_edb));

		update_OCQ_indx(fi);
		if (fi->g.seq_id == MAX_SEQ_ID)
			fi->g.seq_id = 0;
		else
			fi->g.seq_id++;
	}
	else {
		/* Could be a BA_ACC or a BA_RJT.
		 */
		if (rctl == RCTL_BASIC_ACC) {
			u_int bls_type = remove_from_ox_id_list(fi, ox_id);
			DPRINTK1("BA_ACC received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
			if (bls_type == RCTL_BASIC_ABTS) {
				u_int STE_bit;
				/* Invalidate resources for that Exchange.
				 */
				STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
				if (STE_bit & SEST_V) {
					*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
					invalidate_SEST_entry(fi, ox_id);
				}
			}
		}
		else
		if (rctl == RCTL_BASIC_RJT) {
			u_int bls_type = remove_from_ox_id_list(fi, ox_id);
			DPRINTK1("BA_RJT received from S_ID 0x%x with OX_ID = %x in response to %x", s_id, ox_id, bls_type);
			if (bls_type == RCTL_BASIC_ABTS) {
				u_int STE_bit;
				/* Invalidate resources for that Exchange.
				 */
				STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
				if (STE_bit & SEST_V) {
					*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
					invalidate_SEST_entry(fi, ox_id);
				}
			}
		}
		else
			DPRINTK1("Frame with R_CTL = %x received from S_ID 0x%x with OX_ID %x", rctl, s_id, ox_id);
	}

	/* Else, discard the "Bad" SCSI frame.
	 */

	/* provide Tachyon with another set of buffers
	 */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_Bad_SCSI_Frame_interrupt");
}
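
/* A "bad SCSI frame" here is mostly an XFER_RDY with a non-zero relative
 * offset: Tachyon refuses to continue the exchange on its own, so the
 * code above rebuilds an ODB by hand -- reusing the exchange's Tachyon
 * header, advancing the sequence ID and relative offset, and skipping the
 * EDBs that were already transmitted -- and queues it on the OCQ itself.
 * BA_ACC/BA_RJT responses to an earlier ABTS are also funnelled through
 * this interrupt and simply tear down the SEST entry.
 */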
static void handle_Inbound_SCSI_Status_interrupt(struct fc_info *fi)
{
	struct Scsi_Host *host = fi->host;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	u_int *ptr_imq_entry, *buff_addr, *ptr_rsp_info, *ptr_sense_info = NULL;
	int queue_indx, offset, payload_size;
	u_short received_ox_id, x_id;
	Scsi_Cmnd *Cmnd;
	u_int fcp_status, fcp_rsp_info_len = 0, fcp_sense_info_len = 0, s_id;
	ENTER("handle_SCSI_status_interrupt");

	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));
	payload_size = ntohl(*(ptr_imq_entry + 2));
	received_ox_id = ntohl(*(buff_addr + 6)) >> 16;

	buff_addr = bus_to_virt(ntohl(*(fi->q.ptr_sfsbq_base + queue_indx*NO_OF_ENTRIES + offset)));

	fcp_status = ntohl(*(buff_addr + 10));
	ptr_rsp_info = buff_addr + 14;
	if (fcp_status & FCP_STATUS_RSP_LEN)
		fcp_rsp_info_len = ntohl(*(buff_addr + 13));

	if (fcp_status & FCP_STATUS_SENSE_LEN) {
		ptr_sense_info = ptr_rsp_info + fcp_rsp_info_len / 4;
		fcp_sense_info_len = ntohl(*(buff_addr + 12));
		DPRINTK("sense_info = %x", (u_int)ntohl(*ptr_sense_info));
	}
	DPRINTK("fcp_status = %x, fcp_rsp_len = %x", fcp_status, fcp_rsp_info_len);
	x_id = received_ox_id & MAX_SCSI_XID;
	Cmnd = hostdata->cmnd_handler[x_id];
	hostdata->cmnd_handler[x_id] = NULL;
	if (Cmnd != NULL) {
		memset(Cmnd->sense_buffer, 0, sizeof(Cmnd->sense_buffer));
		/* Check if there is a Sense field */
		if (fcp_status & FCP_STATUS_SENSE_LEN) {
			int size = sizeof(Cmnd->sense_buffer);
			if (fcp_sense_info_len < size)
				size = fcp_sense_info_len;
			memcpy(Cmnd->sense_buffer, (char *)ptr_sense_info, size);
		}
		Cmnd->result = fcp_status & FCP_STATUS_MASK;
		(*Cmnd->scsi_done) (Cmnd);
	}
	else
		T_MSG("NULL Command out of handler!");

	invalidate_SEST_entry(fi, received_ox_id);
	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
	fi->q.free_scsi_oxid[x_id] = OXID_AVAILABLE;

	/* provide Tachyon with another set of buffers */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_SCSI_status_interrupt");
}
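
/* The magic offsets above index into the received FCP_RSP frame as an
 * array of 32-bit words: word 10 holds the FCP status flags, word 12 the
 * sense length, word 13 the response-info length, and the response/sense
 * data start at word 14. Only as much sense data as fits in
 * Cmnd->sense_buffer is copied back to the SCSI midlayer.
 */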
static void invalidate_SEST_entry(struct fc_info *fi, u_short received_ox_id)
{
	u_short x_id = received_ox_id & MAX_SCSI_XID;
	/* Invalidate SEST entry if it is an OutBound SEST Entry
	 */
	if (!(received_ox_id & SCSI_READ_BIT)) {
		int i;
		u_int *ptr_tach_header, *ptr_edb;
		u_short temp_ox_id = NOT_SCSI_XID;

		*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);

		/* Invalidate the Tachyon Header structure
		 */
		ptr_tach_header = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 5)));
		for (i = 0; i < NO_OF_TACH_HEADERS; i++)
			if (fi->q.ptr_tachyon_header[i] == ptr_tach_header)
				break;
		if (i < NO_OF_TACH_HEADERS)
			memset(ptr_tach_header, 0xFF, 32);
		else
			T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");

		/* Invalidate the EDB used
		 */
		ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
		for (i = 0; i < EDB_LEN; i++)
			if (fi->q.ptr_edb[i] == ptr_edb)
				break;
		ptr_edb--;

		if (i < EDB_LEN) {
			do {
				ptr_edb += 2;
				fi->q.free_edb_list[i++] = EDB_FREE;
				if (i == EDB_LEN) {
					i = 0;
					ptr_edb = fi->q.ptr_edb_base - 1;
				}
			} while ((htonl(*ptr_edb) & 0x80000000) != 0x80000000);
		}
		else
			T_MSG("EDB not found while freeing in invalidate_SEST_entry()");

		/* Search for its other header structure and destroy it!
		 */
		if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
			ptr_tach_header += 16;
		else
			ptr_tach_header = fi->q.ptr_tachyon_header_base;
		while (temp_ox_id != x_id) {
			temp_ox_id = ntohl(*(ptr_tach_header + 6)) >> 16;
			if (temp_ox_id == x_id) {
				/* Paranoid checking...
				 */
				for (i = 0; i < NO_OF_TACH_HEADERS; i++)
					if (fi->q.ptr_tachyon_header[i] == ptr_tach_header)
						break;
				if (i < NO_OF_TACH_HEADERS)
					memset(ptr_tach_header, 0xFF, 32);
				else
					T_MSG("Tachyon Header not found while freeing in invalidate_SEST_entry()");
				break;
			}
			else {
				if ((ptr_tach_header + 16) < (fi->q.ptr_tachyon_header_base + (MY_PAGE_SIZE/4)))
					ptr_tach_header += 16;
				else
					ptr_tach_header = fi->q.ptr_tachyon_header_base;
			}
		}
	}
	else {
		u_short sdb_table_indx;
		/* An Inbound Command has completed or needs to be Aborted.
		 * Clear up the SDB buffers.
		 */
		sdb_table_indx = *(fi->q.ptr_sest[x_id] + 5);
		fi->q.sdb_slot_status[sdb_table_indx] = SDB_FREE;
	}
}
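
/* Outbound SCSI exchanges end up with a second Tachyon header structure
 * in the header pool carrying the same OX_ID; after wiping the header
 * referenced by the SEST entry, the loop above walks the pool (16 words
 * per header, wrapping at the end of the page) until it finds and wipes
 * that companion header as well.
 */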
static void handle_Inbound_SCSI_Command_interrupt(struct fc_info *fi)
{
	u_int *ptr_imq_entry;
	int queue_indx, offset;
	ENTER("handle_Inbound_SCSI_Command_interrupt");
	ptr_imq_entry = fi->q.ptr_imqe[fi->q.imq_cons_indx];
	offset = ntohl(*(ptr_imq_entry + 1)) & 0x00000007;
	queue_indx = ntohl(*(ptr_imq_entry + 1)) & 0xFFFF0000;
	queue_indx = queue_indx >> 16;
	/* We discard the SCSI frame as we shouldn't be receiving
	 * a SCSI Command in the first place
	 */
	/* provide Tachyon with another set of buffers */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_Inbound_SCSI_Command_interrupt");
}
1431 static void handle_SFS_interrupt(struct fc_info
*fi
)
1433 u_int
*ptr_imq_entry
, *buff_addr
;
1434 u_int class_of_frame
, type_of_frame
, s_id
, els_type
= 0, rctl
;
1435 int queue_indx
, offset
, payload_size
, login_state
;
1436 u_short received_ox_id
, fs_cmnd_code
;
1437 ENTER("handle_SFS_interrupt");
1438 ptr_imq_entry
= fi
->q
.ptr_imqe
[fi
->q
.imq_cons_indx
];
1439 offset
= ntohl(*(ptr_imq_entry
+ 1)) & 0x00000007;
1440 queue_indx
= ntohl(*(ptr_imq_entry
+ 1)) & 0xFFFF0000;
1441 queue_indx
= queue_indx
>> 16;
1442 DPRINTK("queue_indx = %d, offset = %d\n", queue_indx
, offset
);
1443 payload_size
= ntohl(*(ptr_imq_entry
+ 2));
1444 DPRINTK("payload_size = %d", payload_size
);
1446 buff_addr
= bus_to_virt(ntohl(*(fi
->q
.ptr_sfsbq_base
+ queue_indx
*NO_OF_ENTRIES
+ offset
)));
1448 /* extract Type of Frame */
1449 type_of_frame
= ntohl(*(buff_addr
+ 4)) & 0xFF000000;
1450 s_id
= ntohl(*(buff_addr
+ 3)) & 0x00FFFFFF;
1451 received_ox_id
= ntohl(*(buff_addr
+ 6)) >> 16;
1452 switch(type_of_frame
) {
1454 rctl
= ntohl(*(buff_addr
+ 2)) & 0xFF000000;
1456 case RCTL_BASIC_ABTS
:
1457 /* As an Initiator, we should never be receiving
1460 DPRINTK1("ABTS received from S_ID 0x%x with OX_ID = %x", s_id
, received_ox_id
);
1465 class_of_frame
= ntohl(*(buff_addr
+ 8));
1466 login_state
= sid_logged_in(fi
, s_id
);
1467 switch(class_of_frame
& 0xFF000000) {
1469 if (s_id
!= fi
->g
.my_id
) {
1471 DPRINTK1("PLOGI received from D_ID 0x%x with 0X_ID = %x", s_id
, received_ox_id
);
1472 if ((ret_code
= plogi_ok(fi
, buff_addr
, payload_size
)) == 0){
1473 tx_logi_acc(fi
, ELS_ACC
, s_id
, received_ox_id
);
1474 add_to_address_cache(fi
, buff_addr
);
1477 u_short cmnd_code
= ret_code
>> 16;
1478 u_short expln_code
= ret_code
;
1479 tx_ls_rjt(fi
, s_id
, received_ox_id
, cmnd_code
, expln_code
);
1484 els_type
= remove_from_ox_id_list(fi
, received_ox_id
);
1485 DPRINTK1("ELS_ACC received from D_ID 0x%x in response to ELS %x", s_id
, els_type
);
1488 add_to_address_cache(fi
, buff_addr
);
1489 tx_prli(fi
, ELS_PRLI
, s_id
, OX_ID_FIRST_SEQUENCE
);
1492 add_to_address_cache(fi
, buff_addr
);
1493 fi
->g
.my_id
= ntohl(*(buff_addr
+ 2)) & 0x00FFFFFF;
1494 fi
->g
.fabric_present
= TRUE
;
1495 fi
->g
.my_ddaa
= fi
->g
.my_id
& 0xFFFF00;
1496 /* Login to the Name Server
1498 tx_logi(fi
, ELS_PLOGI
, DIRECTORY_SERVER
);
1501 fi
->g
.name_server
= TRUE
;
1502 add_to_address_cache(fi
, buff_addr
);
1503 tx_name_server_req(fi
, FCS_RFC_4
);
1505 /* Some devices have a delay before
1506 * registering with the Name Server
1509 tx_name_server_req(fi
, FCS_GP_ID4
);
1512 mark_scsi_sid(fi
, buff_addr
, ADD_ENTRY
);
1515 if (!(validate_login(fi
, buff_addr
)))
1516 tx_logo(fi
, s_id
, OX_ID_FIRST_SEQUENCE
);
1521 DPRINTK1("ELS_PDISC received from D_ID 0x%x", s_id
);
1522 tx_logo(fi
, s_id
, received_ox_id
);
1525 DPRINTK1("ELS_ADISC received from D_ID 0x%x", s_id
);
1526 if (node_logged_in_prev(fi
, buff_addr
))
1527 tx_adisc(fi
, ELS_ACC
, s_id
, received_ox_id
);
1529 tx_logo(fi
, s_id
, received_ox_id
);
1532 DPRINTK1("ELS_PRLI received from D_ID 0x%x", s_id
);
1533 if ((login_state
== NODE_LOGGED_IN
) || (login_state
== NODE_PROCESS_LOGGED_IN
)) {
1534 tx_prli(fi
, ELS_ACC
, s_id
, received_ox_id
);
1535 mark_scsi_sid(fi
, buff_addr
, ADD_ENTRY
);
1538 tx_logo(fi
, s_id
, received_ox_id
);
1541 DPRINTK1("ELS_PRLO received from D_ID 0x%x", s_id
);
1542 if ((login_state
== NODE_LOGGED_OUT
) || (login_state
== NODE_NOT_PRESENT
))
1543 tx_logo(fi
, s_id
, received_ox_id
);
1545 if (login_state
== NODE_LOGGED_IN
)
1547 tx_ls_rjt(fi
, s_id
, received_ox_id
, CMND_NOT_SUPP
, NO_EXPLN
);
1549 if (login_state
== NODE_PROCESS_LOGGED_IN
) {
1550 tx_prli(fi
, ELS_ACC
, s_id
, received_ox_id
);
1551 mark_scsi_sid(fi
, buff_addr
, DELETE_ENTRY
);
1555 els_type
= remove_from_ox_id_list(fi
, received_ox_id
);
1556 DPRINTK1("ELS_LS_RJT received from D_ID 0x%x in response to %x", s_id
, els_type
);
1557 /* We should be chking the reason code.
1561 tx_logi(fi
, ELS_PLOGI
, s_id
);
1566 els_type
= remove_from_ox_id_list(fi
, received_ox_id
);
1567 DPRINTK1("ELS_LOGO received from D_ID 0x%x in response to %x", s_id
, els_type
);
1568 remove_from_address_cache(fi
, buff_addr
, ELS_LOGO
);
1569 tx_acc(fi
, s_id
, received_ox_id
);
1570 if (els_type
== ELS_ADISC
)
1571 tx_logi(fi
, ELS_PLOGI
, s_id
);
1574 DPRINTK1("ELS_RSCN received from D_ID 0x%x", s_id
);
1575 tx_acc(fi
, s_id
, received_ox_id
);
1576 remove_from_address_cache(fi
, buff_addr
, ELS_RSCN
);
1579 /* We do not support FARP.
1580 So, silently discard it */
1581 DPRINTK1("ELS_FARP_REQ received from D_ID 0x%x", s_id
);
1584 DPRINTK1("ELS_ABTX received from D_ID 0x%x", s_id
);
1585 if ((login_state
== NODE_LOGGED_IN
) || (login_state
== NODE_PROCESS_LOGGED_IN
))
1586 tx_ls_rjt(fi
, s_id
, received_ox_id
, CMND_NOT_SUPP
, NO_EXPLN
);
1588 tx_logo(fi
, s_id
, received_ox_id
);
1591 DPRINTK1("ELS_FLOGI received from D_ID 0x%x", s_id
);
1592 if (fi
->g
.ptp_up
== TRUE
) {
1593 /* The node could have come up as an N_Port
1594 * in a Loop! So,try initializing as an NL_port
1596 take_tachyon_offline(fi
);
			/* write AL_TIME & E_D_TOV into the registers */
			writel(TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
			writel(LOOP_INIT_SOFT_ADDRESS, fi->t_r.ptr_fm_config_reg);
			DPRINTK1("FLOGI received, TACHYON initializing as L_Port...\n");
			writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
			break;
		case ELS_ABTX:
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_ADVC:
			DPRINTK1("ELS_ADVC received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_ECHO:
			DPRINTK1("ELS_ECHO received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_ESTC:
			DPRINTK1("ELS_ESTC received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_ESTS:
			DPRINTK1("ELS_ESTS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RCS:
			DPRINTK1("ELS_RCS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RES:
			DPRINTK1("ELS_RES received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RLS:
			DPRINTK1("ELS_RLS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RRQ:
			DPRINTK1("ELS_RRQ received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RSS:
			DPRINTK1("ELS_RSS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RTV:
			DPRINTK1("ELS_RTV received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RSI:
			DPRINTK1("ELS_RSI received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_TEST:
			/* No reply sequence */
			DPRINTK1("ELS_TEST received from D_ID 0x%x", s_id);
			break;
		case ELS_RNC:
			DPRINTK1("ELS_RNC received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_RVCS:
			DPRINTK1("ELS_RVCS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_TPLS:
			DPRINTK1("ELS_TPLS received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_GAID:
			DPRINTK1("ELS_GAID received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_FACT:
			DPRINTK1("ELS_FACT received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_FAN:
			/* Hmmm... You don't support FAN ??? */
			DPRINTK1("ELS_FAN received from D_ID 0x%x", s_id);
			tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			break;
		case ELS_FDACT:
			DPRINTK1("ELS_FDACT received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_NACT:
			DPRINTK1("ELS_NACT received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_NDACT:
			DPRINTK1("ELS_NDACT received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_QoSR:
			DPRINTK1("ELS_QoSR received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		case ELS_FDISC:
			DPRINTK1("ELS_FDISC received from D_ID 0x%x", s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		default:
			DPRINTK1("ELS Frame %x received from D_ID 0x%x", class_of_frame, s_id);
			if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN))
				tx_ls_rjt(fi, s_id, received_ox_id, CMND_NOT_SUPP, NO_EXPLN);
			else
				tx_logo(fi, s_id, received_ox_id);
			break;
		}
		break;
	case TYPE_FC_SERVICES:
		fs_cmnd_code = (ntohl(*(buff_addr + 10)) & 0xFFFF0000) >> 16;
		switch (fs_cmnd_code) {
		case FCS_ACC:
			els_type = remove_from_ox_id_list(fi, received_ox_id);
			DPRINTK1("FCS_ACC received from D_ID 0x%x in response to %x", s_id, els_type);
			if (els_type == FCS_GP_ID4)
				explore_fabric(fi, buff_addr);
			break;
		case FCS_REJECT:
			DPRINTK1("FCS_REJECT received from D_ID 0x%x in response to %x", s_id, els_type);
			break;
		}
		break;
	case TYPE_LLC_SNAP:
		rx_net_packet(fi, (u_char *)buff_addr, payload_size);
		break;
	default:
		T_MSG("Frame Type %x received from %x", type_of_frame, s_id);
		break;
	}

	/* provide Tachyon with another set of buffers */
	if (offset == (NO_OF_ENTRIES - 1))
		update_SFSBQ_indx(fi);
	LEAVE("handle_SFS_interrupt");
}
1802 static void handle_FM_interrupt(struct fc_info
*fi
)
1805 u_int tachyon_status
;
1807 ENTER("handle_FM_interrupt");
1808 fm_status
= readl(fi
->t_r
.ptr_fm_status_reg
);
1809 tachyon_status
= readl(fi
->t_r
.ptr_tach_status_reg
);
1810 DPRINTK("FM_status = %x, Tachyon_status = %x", fm_status
, tachyon_status
);
1811 if (fm_status
& LINK_DOWN
) {
1812 T_MSG("Fibre Channel Link DOWN");
1813 fm_status
= readl(fi
->t_r
.ptr_fm_status_reg
);
1815 del_timer(&fi
->explore_timer
);
1816 del_timer(&fi
->nport_timer
);
1817 del_timer(&fi
->lport_timer
);
1818 del_timer(&fi
->display_cache_timer
);
1819 fi
->g
.link_up
= FALSE
;
1820 if (fi
->g
.ptp_up
== TRUE
)
1821 fi
->g
.n_port_try
= FALSE
;
1822 fi
->g
.ptp_up
= FALSE
;
1823 fi
->g
.port_discovery
= FALSE
;
1824 fi
->g
.explore_fabric
= FALSE
;
1825 fi
->g
.perform_adisc
= FALSE
;
1827 /* Logout will all nodes */
1828 if (fi
->node_info_list
) {
1829 struct fc_node_info
*temp_list
= fi
->node_info_list
;
1831 temp_list
->login
= LOGIN_ATTEMPTED
;
1832 temp_list
= temp_list
->next
;
1837 if ((fi
->g
.n_port_try
== FALSE
) && (fi
->g
.dont_init
== FALSE
)){
1838 take_tachyon_offline(fi
);
1839 /* write AL_TIME & E_D_TOV into the registers */
1840 writel(TOV_VALUES
, fi
->t_r
.ptr_fm_tov_reg
);
1842 if ((fi
->g
.fabric_present
== TRUE
) && (fi
->g
.loop_up
== TRUE
)) {
1843 u_int al_pa
= fi
->g
.my_id
& 0xFF;
1844 writel((al_pa
<< 24) | LOOP_INIT_FABRIC_ADDRESS
| LOOP_INIT_PREVIOUS_ADDRESS
, fi
->t_r
.ptr_fm_config_reg
);
1847 if (fi
->g
.loop_up
== TRUE
) {
1848 u_int al_pa
= fi
->g
.my_id
& 0xFF;
1849 writel((al_pa
<< 24) | LOOP_INIT_PREVIOUS_ADDRESS
, fi
->t_r
.ptr_fm_config_reg
);
1852 writel(LOOP_INIT_SOFT_ADDRESS
, fi
->t_r
.ptr_fm_config_reg
);
1853 fi
->g
.loop_up
= FALSE
;
1854 DPRINTK1("In LDWN TACHYON initializing as L_Port...\n");
1855 writel(INITIALIZE
, fi
->t_r
.ptr_fm_control_reg
);
1859 if (fm_status
& NON_PARTICIPATING
) {
1860 T_MSG("Did not acquire an AL_PA. I am not participating");
1863 if ((fm_status
& LINK_UP
) && ((fm_status
& LINK_DOWN
) == 0)) {
1864 T_MSG("Fibre Channel Link UP");
1865 if ((fm_status
& NON_PARTICIPATING
) != TRUE
) {
1866 fi
->g
.link_up
= TRUE
;
1867 if (tachyon_status
& OSM_FROZEN
) {
1868 reset_tachyon(fi
, ERROR_RELEASE
);
1869 reset_tachyon(fi
, OCQ_RESET
);
1871 init_timer(&fi
->explore_timer
);
1872 init_timer(&fi
->nport_timer
);
1873 init_timer(&fi
->lport_timer
);
1874 init_timer(&fi
->display_cache_timer
);
1875 if ((fm_status
& OLD_PORT
) == 0) {
1876 fi
->g
.loop_up
= TRUE
;
1877 fi
->g
.ptp_up
= FALSE
;
1878 fi
->g
.my_id
= readl(fi
->t_r
.ptr_fm_config_reg
) >> 24;
1879 DPRINTK1("My AL_PA = %x", fi
->g
.my_id
);
1880 fi
->g
.port_discovery
= TRUE
;
1881 fi
->g
.explore_fabric
= FALSE
;
1884 if (((fm_status
& 0xF0) == OLD_PORT
) && ((fm_status
& 0x0F) == PORT_STATE_ACTIVE
)) {
1885 fi
->g
.loop_up
= FALSE
;
1887 /* In a point-to-point configuration, we expect to be
1888 * connected to an F_Port. This driver does not yet support
1889 * a configuration where it is connected to another N_Port
1892 fi
->g
.explore_fabric
= TRUE
;
1893 fi
->g
.port_discovery
= FALSE
;
1894 if (fi
->g
.n_port_try
== FALSE
) {
1895 take_tachyon_offline(fi
);
1896 /* write R_T_TOV & E_D_TOV into the registers */
1897 writel(PTP_TOV_VALUES
, fi
->t_r
.ptr_fm_tov_reg
);
1898 writel(BB_CREDIT
| NPORT
, fi
->t_r
.ptr_fm_config_reg
);
1899 fi
->g
.n_port_try
= TRUE
;
1900 DPRINTK1("In LUP TACHYON initializing as N_Port...\n");
1901 writel(INITIALIZE
, fi
->t_r
.ptr_fm_control_reg
);
1904 fi
->g
.ptp_up
= TRUE
;
1905 tx_logi(fi
, ELS_FLOGI
, F_PORT
);
1908 fi
->g
.my_ddaa
= 0x0;
1909 fi
->g
.fabric_present
= FALSE
;
1910 /* We havn't sent out any Name Server Reqs */
1911 fi
->g
.name_server
= FALSE
;
1912 fi
->g
.alpa_list_index
= 0;
1913 fi
->g
.ox_id
= NOT_SCSI_XID
;
1914 fi
->g
.my_mtu
= TACH_FRAME_SIZE
;
1916 /* Implicitly LOGO with all logged-in nodes.
1918 if (fi
->node_info_list
) {
1919 struct fc_node_info
*temp_list
= fi
->node_info_list
;
1921 temp_list
->login
= LOGIN_ATTEMPTED
;
1922 temp_list
= temp_list
->next
;
1925 fi
->g
.perform_adisc
= TRUE
;
1926 //fi->g.perform_adisc = FALSE;
1927 fi
->g
.port_discovery
= FALSE
;
1928 tx_logi(fi
, ELS_FLOGI
, F_PORT
);
1931 /* If Link coming up for the _first_ time or no nodes
1932 * were logged in before...
1934 fi
->g
.scsi_oxid
= 0;
1935 fi
->g
.seq_id
= 0x00;
1936 fi
->g
.perform_adisc
= FALSE
;
1939 /* reset OX_ID table */
1940 while (fi
->ox_id_list
) {
1941 struct ox_id_els_map
*temp
= fi
->ox_id_list
;
1942 fi
->ox_id_list
= fi
->ox_id_list
->next
;
1945 fi
->ox_id_list
= NULL
;
1946 } /* End of if partipating */
1949 if (fm_status
& ELASTIC_STORE_ERROR
) {
1950 /* Too much junk on the Link
1952 /* Trying to clear it up by Txing PLOGI to urself */
1953 if (fi
->g
.link_up
== TRUE
)
1954 tx_logi(fi
, ELS_PLOGI
, fi
->g
.my_id
);
1957 if (fm_status
& LOOP_UP
) {
1958 if (tachyon_status
& OSM_FROZEN
) {
1959 reset_tachyon(fi
, ERROR_RELEASE
);
1960 reset_tachyon(fi
, OCQ_RESET
);
1964 if (fm_status
& NOS_OLS_RECEIVED
){
1965 if (fi
->g
.nport_timer_set
== FALSE
) {
1966 DPRINTK("NOS/OLS Received");
1967 DPRINTK("FM_status = %x", fm_status
);
1968 fi
->nport_timer
.function
= nos_ols_timer
;
1969 fi
->nport_timer
.data
= (unsigned long)fi
;
1970 fi
->nport_timer
.expires
= RUN_AT((3*HZ
)/100); /* 30 msec */
1971 init_timer(&fi
->nport_timer
);
1972 add_timer(&fi
->nport_timer
);
1973 fi
->g
.nport_timer_set
= TRUE
;
1977 if (((fm_status
& 0xF0) == OLD_PORT
) && (((fm_status
& 0x0F) == PORT_STATE_LF1
) || ((fm_status
& 0x0F) == PORT_STATE_LF2
))) {
1978 DPRINTK1("Link Fail-I in OLD-PORT.");
1979 take_tachyon_offline(fi
);
1980 reset_tachyon(fi
, SOFTWARE_RESET
);
1983 if (fm_status
& LOOP_STATE_TIMEOUT
){
1984 if ((fm_status
& 0xF0) == ARBITRATING
)
1985 DPRINTK1("ED_TOV timesout.In ARBITRATING state...");
1986 if ((fm_status
& 0xF0) == ARB_WON
)
1987 DPRINTK1("ED_TOV timesout.In ARBITRATION WON state...");
1988 if ((fm_status
& 0xF0) == OPEN
)
1989 DPRINTK1("ED_TOV timesout.In OPEN state...");
1990 if ((fm_status
& 0xF0) == OPENED
)
1991 DPRINTK1("ED_TOV timesout.In OPENED state...");
1992 if ((fm_status
& 0xF0) == TX_CLS
)
1993 DPRINTK1("ED_TOV timesout.In XMITTED CLOSE state...");
1994 if ((fm_status
& 0xF0) == RX_CLS
)
1995 DPRINTK1("ED_TOV timesout.In RECEIVED CLOSE state...");
1996 if ((fm_status
& 0xF0) == INITIALIZING
)
1997 DPRINTK1("ED_TOV timesout.In INITIALIZING state...");
1998 DPRINTK1("Initializing Loop...");
1999 writel(INITIALIZE
, fi
->t_r
.ptr_fm_control_reg
);
2002 if ((fm_status
& BAD_ALPA
) && (fi
->g
.loop_up
== TRUE
)) {
2003 u_char bad_alpa
= (readl(fi
->t_r
.ptr_fm_rx_al_pa_reg
) & 0xFF00) >> 8;
2004 if (tachyon_status
& OSM_FROZEN
) {
2005 reset_tachyon(fi
, ERROR_RELEASE
);
2006 reset_tachyon(fi
, OCQ_RESET
);
2009 tx_logi(fi
, ELS_PLOGI
, fi
->g
.my_id
);
2011 if (!fi
->g
.port_discovery
&& !fi
->g
.perform_adisc
) {
2012 if (bad_alpa
!= 0xFE)
2013 DPRINTK("Bad AL_PA = %x", bad_alpa
);
2016 if ((fi
->g
.perform_adisc
== TRUE
) && (bad_alpa
== 0x00)) {
2017 DPRINTK1("Performing ADISC...");
2018 fi
->g
.fabric_present
= FALSE
;
2024 if (fm_status
& LIPF_RECEIVED
){
2025 DPRINTK("LIP(F8) Received");
2028 if (fm_status
& LINK_FAILURE
) {
2029 if (fm_status
& LOSS_OF_SIGNAL
)
2030 DPRINTK1("Detected Loss of Signal.");
2031 if (fm_status
& OUT_OF_SYNC
)
2032 DPRINTK1("Detected Loss of Synchronization.");
2035 if (fm_status
& TRANSMIT_PARITY_ERROR
) {
2036 /* Bad! Should not happen. Solution-> Hard Reset.
2038 T_MSG("Parity Error. Perform Hard Reset!");
2041 if (fi
->g
.alpa_list_index
>= MAX_NODES
){
2042 if (fi
->g
.port_discovery
== TRUE
) {
2043 fi
->g
.port_discovery
= FALSE
;
2044 add_display_cache_timer(fi
);
2046 fi
->g
.alpa_list_index
= MAX_NODES
;
2049 if (fi
->g
.port_discovery
== TRUE
)
2050 local_port_discovery(fi
);
2052 LEAVE("handle_FM_interrupt");
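/* local_port_discovery(): on a loop, steps through alpa_list[] and sends a
 * PLOGI to each AL_PA that is not already logged in (skipping our own AL_PA),
 * sending an FLOGI to the F_Port first when no fabric has been seen yet.
 */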
2056 static void local_port_discovery(struct fc_info
*fi
)
2058 if (fi
->g
.loop_up
== TRUE
) {
2059 /* If this is not here, some of the Bad AL_PAs are missed.
2062 if ((fi
->g
.alpa_list_index
== 0) && (fi
->g
.fabric_present
== FALSE
)){
2063 tx_logi(fi
, ELS_FLOGI
, F_PORT
);
2066 int login_state
= sid_logged_in(fi
, fi
->g
.my_ddaa
| alpa_list
[fi
->g
.alpa_list_index
]);
2067 while ((fi
->g
.alpa_list_index
== 0) || ((fi
->g
.alpa_list_index
< MAX_NODES
) && ((login_state
== NODE_LOGGED_IN
) || (login_state
== NODE_PROCESS_LOGGED_IN
) || (alpa_list
[fi
->g
.alpa_list_index
] == (fi
->g
.my_id
& 0xFF)))))
2068 fi
->g
.alpa_list_index
++;
2069 if (fi
->g
.alpa_list_index
< MAX_NODES
)
2070 tx_logi(fi
, ELS_PLOGI
, alpa_list
[fi
->g
.alpa_list_index
]);
2072 fi
->g
.alpa_list_index
++;
2073 if (fi
->g
.alpa_list_index
>= MAX_NODES
){
2074 if (fi
->g
.port_discovery
== TRUE
) {
2075 fi
->g
.port_discovery
= FALSE
;
2076 add_display_cache_timer(fi
);
2078 fi
->g
.alpa_list_index
= MAX_NODES
;
static void nos_ols_timer(unsigned long data)
{
	struct fc_info *fi = (struct fc_info *)data;
	u_int fm_status;

	fm_status = readl(fi->t_r.ptr_fm_status_reg);
	DPRINTK1("FM_status in timer= %x", fm_status);
	fi->g.nport_timer_set = FALSE;
	del_timer(&fi->nport_timer);
	if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
		return;
	if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_ACTIVE) || ((fm_status & 0x0F) == PORT_STATE_OFFLINE))) {
		DPRINTK1("In OLD-PORT after E_D_TOV.");
		take_tachyon_offline(fi);
		/* write R_T_TOV & E_D_TOV into the registers */
		writel(PTP_TOV_VALUES, fi->t_r.ptr_fm_tov_reg);
		writel(BB_CREDIT | NPORT, fi->t_r.ptr_fm_config_reg);
		fi->g.n_port_try = TRUE;
		DPRINTK1("In timer, TACHYON initializing as N_Port...\n");
		writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
	}
	else if ((fi->g.lport_timer_set == FALSE) && ((fm_status & 0xF0) == LOOP_FAIL)) {
		DPRINTK1("Loop Fail after E_D_TOV.");
		fi->lport_timer.function = loop_timer;
		fi->lport_timer.data = (unsigned long)fi;
		fi->lport_timer.expires = RUN_AT((8*HZ)/100);
		init_timer(&fi->lport_timer);
		add_timer(&fi->lport_timer);
		fi->g.lport_timer_set = TRUE;
		take_tachyon_offline(fi);
		reset_tachyon(fi, SOFTWARE_RESET);
	}
	else if (((fm_status & 0xF0) == OLD_PORT) && (((fm_status & 0x0F) == PORT_STATE_LF1) || ((fm_status & 0x0F) == PORT_STATE_LF2))) {
		DPRINTK1("Link Fail-II in OLD-PORT.");
		take_tachyon_offline(fi);
		reset_tachyon(fi, SOFTWARE_RESET);
	}
}

static void loop_timer(unsigned long data)
{
	struct fc_info *fi = (struct fc_info *)data;

	fi->g.lport_timer_set = FALSE;
	del_timer(&fi->lport_timer);
	if ((fi->g.ptp_up == TRUE) || (fi->g.loop_up == TRUE))
		return;
}

static void add_display_cache_timer(struct fc_info *fi)
{
	fi->display_cache_timer.function = display_cache_timer;
	fi->display_cache_timer.data = (unsigned long)fi;
	fi->display_cache_timer.expires = RUN_AT(fi->num_nodes * HZ);
	init_timer(&fi->display_cache_timer);
	add_timer(&fi->display_cache_timer);
}

static void display_cache_timer(unsigned long data)
{
	struct fc_info *fi = (struct fc_info *)data;
	del_timer(&fi->display_cache_timer);
	display_cache(fi);
}
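/* reset_tachyon() writes the requested reset value to the Tachyon control
 * register (clearing the OCQ producer index for an OCQ_RESET) and, for
 * everything other than a software reset, polls the Tachyon status register
 * until the OCQ_RESET_STATUS / SCSI_FREEZE_STATUS bits have cleared,
 * complaining via T_MSG() if the reset never completes.
 */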
2149 static void reset_tachyon(struct fc_info
*fi
, u_int value
)
2151 u_int tachyon_status
, reset_done
= OCQ_RESET_STATUS
| SCSI_FREEZE_STATUS
;
2152 int not_done
= 1, i
= 0;
2153 writel(value
, fi
->t_r
.ptr_tach_control_reg
);
2154 if (value
== OCQ_RESET
)
2155 fi
->q
.ocq_prod_indx
= 0;
2156 tachyon_status
= readl(fi
->t_r
.ptr_tach_status_reg
);
2158 /* Software resets are immediately done, whereas other aren't. It
2159 about 30 clocks to do the reset */
2160 if (value
!= SOFTWARE_RESET
) {
2163 T_MSG("Reset was unsuccessful! Tachyon Status = %x", tachyon_status
);
2166 tachyon_status
= readl(fi
->t_r
.ptr_tach_status_reg
);
2167 if ((tachyon_status
& reset_done
) == 0)
2172 write_to_tachyon_registers(fi
);
static void take_tachyon_offline(struct fc_info *fi)
{
	u_int fm_status = readl(fi->t_r.ptr_fm_status_reg);

	/* The first two conditions will never be true (the Manual and
	 * the errata say this), but the current implementation checks
	 * them anyway.
	 */
	//if ((fm_status & 0xF0) == LOOP_FAIL) {
	if (fm_status == LOOP_FAIL) {
		// workaround as in P. 89
		writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
		if (fi->g.loop_up == TRUE)
			writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
		else
			writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
		writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
	}

	//if ((fm_status & LOOP_UP) == LOOP_UP) {
	if (fm_status == LOOP_UP)
		writel(SOFTWARE_RESET, fi->t_r.ptr_tach_control_reg);
	else
		writel(OFFLINE, fi->t_r.ptr_fm_control_reg);
}
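/* read_novram() points the NOVRAM access registers at the (i)chip hardware
 * control/status registers, runs iph5526_nr_do_init(), checks fi->clone_id
 * against PCI_VENDOR_ID_INTERPHASE, and then assembles the 64-bit node and
 * port names from consecutive 16-bit NOVRAM data words.
 */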
2205 static void read_novram(struct fc_info
*fi
)
2208 fi
->n_r
.ptr_novram_hw_control_reg
= fi
->i_r
.ptr_ichip_hw_control_reg
;
2209 fi
->n_r
.ptr_novram_hw_status_reg
= fi
->i_r
.ptr_ichip_hw_status_reg
;
2210 iph5526_nr_do_init(fi
);
2211 if (fi
->clone_id
== PCI_VENDOR_ID_INTERPHASE
)
2214 fi
->g
.my_node_name_high
= (fi
->n_r
.data
[off
] << 16) | fi
->n_r
.data
[off
+1];
2215 fi
->g
.my_node_name_low
= (fi
->n_r
.data
[off
+2] << 16) | fi
->n_r
.data
[off
+3];
2216 fi
->g
.my_port_name_high
= (fi
->n_r
.data
[off
+4] << 16) | fi
->n_r
.data
[off
+5];
2217 fi
->g
.my_port_name_low
= (fi
->n_r
.data
[off
+6] << 16) | fi
->n_r
.data
[off
+7];
2218 DPRINTK("node_name = %x %x", fi
->g
.my_node_name_high
, fi
->g
.my_node_name_low
);
2219 DPRINTK("port_name = %x %x", fi
->g
.my_port_name_high
, fi
->g
.my_port_name_low
);
static void reset_ichip(struct fc_info *fi)
{
	writel(ICHIP_HCR_RESET, fi->i_r.ptr_ichip_hw_control_reg);
	/* wait for chip to get reset */
	/* de-assert reset */
	writel(ICHIP_HCR_DERESET, fi->i_r.ptr_ichip_hw_control_reg);

	/* enable INT lines on the (i)chip */
	writel(ICHIP_HCR_ENABLE_INTA, fi->i_r.ptr_ichip_hw_control_reg);
	/* enable byte swap */
	writel(ICHIP_HAMR_BYTE_SWAP_ADDR_TR, fi->i_r.ptr_ichip_hw_addr_mask_reg);
}
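/* The tx_* ELS helpers that follow all share one pattern: build the payload
 * in the matching fi->g template structure, copy it into
 * fi->g.els_buffer[fi->g.e_i], hand it to tx_exchange(), and then advance
 * the e_i index, wrapping at MAX_PENDING_FRAMES.
 */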
static void tx_logi(struct fc_info *fi, u_int logi, u_int d_id)
{
	int int_required = 1;
	u_short ox_id = OX_ID_FIRST_SEQUENCE;
	u_int r_ctl = RCTL_ELS_UCTL;
	u_int type = TYPE_ELS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
	u_int my_mtu = fi->g.my_mtu;

	/* We don't want to be interrupted for our own logi.
	 * It screws up the port discovery process.
	 */
	if (d_id == fi->g.my_id)
		int_required = 0;
	fill_login_frame(fi, logi);
	fi->g.type_of_frame = FC_ELS;
	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, ox_id, logi);
	fi->g.e_i++;
	if (fi->g.e_i == MAX_PENDING_FRAMES)
		fi->g.e_i = 0;
}

static void tx_logi_acc(struct fc_info *fi, u_int logi, u_int d_id, u_short received_ox_id)
{
	int int_required = 0;
	u_int r_ctl = RCTL_ELS_SCTL;
	u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
	u_int my_mtu = fi->g.my_mtu;

	ENTER("tx_logi_acc");
	fill_login_frame(fi, logi);
	fi->g.type_of_frame = FC_ELS;
	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.login, sizeof(LOGIN));
	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), sizeof(LOGIN), r_ctl, type, d_id, my_mtu, int_required, received_ox_id, logi);
	fi->g.e_i++;
	if (fi->g.e_i == MAX_PENDING_FRAMES)
		fi->g.e_i = 0;
	LEAVE("tx_logi_acc");
}
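/* tx_prli() below sends either a PRLI or its ACC (depending on command_code),
 * advertising the FCP type code, an established image pair, and
 * INITIATOR_FUNC | READ_XFER_RDY_DISABLED as the service parameters.
 */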
2279 static void tx_prli(struct fc_info
*fi
, u_int command_code
, u_int d_id
, u_short received_ox_id
)
2281 int int_required
= 1;
2282 u_int r_ctl
= RCTL_ELS_UCTL
;
2283 u_int type
= TYPE_ELS
| SEQUENCE_INITIATIVE
| FIRST_SEQUENCE
;
2284 u_int my_mtu
= fi
->g
.my_mtu
;
2286 if (command_code
== ELS_PRLI
)
2287 fi
->g
.prli
.cmnd_code
= htons((ELS_PRLI
| PAGE_LEN
) >> 16);
2289 fi
->g
.prli
.cmnd_code
= htons((ELS_ACC
| PAGE_LEN
) >> 16);
2291 type
= TYPE_ELS
| EXCHANGE_RESPONDER
| LAST_SEQUENCE
;
2292 r_ctl
= RCTL_ELS_SCTL
;
2294 fi
->g
.prli
.payload_length
= htons(PRLI_LEN
);
2295 fi
->g
.prli
.type_code
= htons(FCP_TYPE_CODE
);
2296 fi
->g
.prli
.est_image_pair
= htons(IMAGE_PAIR
);
2297 fi
->g
.prli
.responder_pa
= 0;
2298 fi
->g
.prli
.originator_pa
= 0;
2299 fi
->g
.prli
.service_params
= htonl(INITIATOR_FUNC
| READ_XFER_RDY_DISABLED
);
2300 fi
->g
.type_of_frame
= FC_ELS
;
2301 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.prli
, sizeof(PRLI
));
2302 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]), sizeof(PRLI
), r_ctl
, type
, d_id
, my_mtu
, int_required
, received_ox_id
, command_code
);
2304 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
2310 static void tx_logo(struct fc_info
*fi
, u_int d_id
, u_short received_ox_id
)
2312 int int_required
= 1;
2313 u_int r_ctl
= RCTL_ELS_UCTL
;
2314 u_int type
= TYPE_ELS
| EXCHANGE_RESPONDER
| SEQUENCE_RESPONDER
| FIRST_SEQUENCE
| END_SEQUENCE
| SEQUENCE_INITIATIVE
;
2315 int size
= sizeof(LOGO
);
2317 u_int my_mtu
= fi
->g
.my_mtu
;
2319 fi
->g
.logo
.logo_cmnd
= htonl(ELS_LOGO
);
2320 fi
->g
.logo
.reserved
= 0;
2321 memcpy(fc_id
, &(fi
->g
.my_id
), 3);
2322 fi
->g
.logo
.n_port_id_0
= fc_id
[0];
2323 fi
->g
.logo
.n_port_id_1
= fc_id
[1];
2324 fi
->g
.logo
.n_port_id_2
= fc_id
[2];
2325 fi
->g
.logo
.port_name_up
= htonl(N_PORT_NAME_HIGH
);
2326 fi
->g
.logo
.port_name_low
= htonl(N_PORT_NAME_LOW
);
2327 fi
->g
.type_of_frame
= FC_ELS
;
2328 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.logo
, sizeof(LOGO
));
2329 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, received_ox_id
, ELS_LOGO
);
2331 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
2336 static void tx_adisc(struct fc_info
*fi
, u_int cmnd_code
, u_int d_id
, u_short received_ox_id
)
2338 int int_required
= 0;
2339 u_int r_ctl
= RCTL_ELS_SCTL
;
2340 u_int type
= TYPE_ELS
| EXCHANGE_RESPONDER
| SEQUENCE_RESPONDER
| FIRST_SEQUENCE
| END_SEQUENCE
;
2341 int size
= sizeof(ADISC
);
2342 u_int my_mtu
= fi
->g
.my_mtu
;
2343 fi
->g
.adisc
.ls_cmnd_code
= htonl(cmnd_code
);
2344 fi
->g
.adisc
.hard_address
= htonl(0);
2345 fi
->g
.adisc
.port_name_high
= htonl(N_PORT_NAME_HIGH
);
2346 fi
->g
.adisc
.port_name_low
= htonl(N_PORT_NAME_LOW
);
2347 fi
->g
.adisc
.node_name_high
= htonl(NODE_NAME_HIGH
);
2348 fi
->g
.adisc
.node_name_low
= htonl(NODE_NAME_LOW
);
2349 fi
->g
.adisc
.n_port_id
= htonl(fi
->g
.my_id
);
2350 if (cmnd_code
== ELS_ADISC
) {
2352 r_ctl
= RCTL_ELS_UCTL
;
2353 type
= TYPE_ELS
| SEQUENCE_INITIATIVE
| FIRST_SEQUENCE
;
2355 fi
->g
.type_of_frame
= FC_ELS
;
2356 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.adisc
, size
);
2357 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, received_ox_id
, cmnd_code
);
2359 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
static void tx_ls_rjt(struct fc_info *fi, u_int d_id, u_short received_ox_id, u_short reason_code, u_short expln_code)
{
	int int_required = 0;
	u_int r_ctl = RCTL_ELS_SCTL;
	u_int type = TYPE_ELS | EXCHANGE_RESPONDER | LAST_SEQUENCE;
	int size = sizeof(LS_RJT);
	u_int my_mtu = fi->g.my_mtu;

	fi->g.ls_rjt.cmnd_code = htonl(ELS_LS_RJT);
	fi->g.ls_rjt.reason_code = htonl((reason_code << 16) | expln_code);
	fi->g.type_of_frame = FC_ELS;
	memcpy(fi->g.els_buffer[fi->g.e_i], &fi->g.ls_rjt, size);
	tx_exchange(fi, (char *)(fi->g.els_buffer[fi->g.e_i]), size, r_ctl, type, d_id, my_mtu, int_required, received_ox_id, ELS_LS_RJT);
	fi->g.e_i++;
	if (fi->g.e_i == MAX_PENDING_FRAMES)
		fi->g.e_i = 0;
}

static void tx_abts(struct fc_info *fi, u_int d_id, u_short ox_id)
{
	int int_required = 1;
	u_int r_ctl = RCTL_BASIC_ABTS;
	u_int type = TYPE_BLS | SEQUENCE_INITIATIVE | FIRST_SEQUENCE;
	int size = 0;
	u_int my_mtu = fi->g.my_mtu;

	fi->g.type_of_frame = FC_BLS;
	tx_exchange(fi, NULL, size, r_ctl, type, d_id, my_mtu, int_required, ox_id, RCTL_BASIC_ABTS);
}
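/* plogi_ok() sanity-checks a received PLOGI payload: Class 3 must be marked
 * valid, and the receive data field size, payload length, concurrent
 * sequence count and open sequence count must be non-degenerate; on failure
 * it appears to build a (reason_code << 16) | explanation value of the kind
 * consumed by tx_ls_rjt().
 */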
2395 static u_int
plogi_ok(struct fc_info
*fi
, u_int
*buff_addr
, int size
)
2398 u_short mtu
= ntohl(*(buff_addr
+ 10)) & 0x00000FFF;
2399 u_short class3
= ntohl(*(buff_addr
+ 25)) >> 16;
2400 u_short class3_conc_seq
= ntohl(*(buff_addr
+ 27)) >> 16;
2401 u_short open_seq
= ntohl(*(buff_addr
+ 28)) >> 16;
2402 DPRINTK1("mtu = %x class3 = %x conc_seq = %x open_seq = %x", mtu
, class3
, class3_conc_seq
, open_seq
);
2403 size
-= TACHYON_HEADER_LEN
;
2404 if (!(class3
& 0x8000)) {
2405 DPRINTK1("Received PLOGI with class3 = %x", class3
);
2406 ret_code
= (LOGICAL_ERR
<< 16) | NO_EXPLN
;
2410 DPRINTK1("Received PLOGI with MTU set to %x", mtu
);
2411 ret_code
= (LOGICAL_ERR
<< 16) | RECV_FIELD_SIZE
;
2414 if (size
!= PLOGI_LEN
) {
2415 DPRINTK1("Received PLOGI of size %x", size
);
2416 ret_code
= (LOGICAL_ERR
<< 16) | INV_PAYLOAD_LEN
;
2419 if (class3_conc_seq
== 0) {
2420 DPRINTK1("Received PLOGI with conc_seq == 0");
2421 ret_code
= (LOGICAL_ERR
<< 16) | CONC_SEQ
;
2424 if (open_seq
== 0) {
2425 DPRINTK1("Received PLOGI with open_seq == 0");
2426 ret_code
= (LOGICAL_ERR
<< 16) | NO_EXPLN
;
2430 /* Could potentially check for more fields, but might end up
2431 not talking to most of the devices. ;-) */
2432 /* Things that could get checked are:
2433 common_features = 0x8800
2434 total_concurrent_seq = at least 1
2439 static void tx_acc(struct fc_info
*fi
, u_int d_id
, u_short received_ox_id
)
2441 int int_required
= 0;
2442 u_int r_ctl
= RCTL_ELS_SCTL
;
2443 u_int type
= TYPE_ELS
| EXCHANGE_RESPONDER
| LAST_SEQUENCE
;
2444 int size
= sizeof(ACC
);
2445 u_int my_mtu
= fi
->g
.my_mtu
;
2447 fi
->g
.acc
.cmnd_code
= htonl(ELS_ACC
);
2448 fi
->g
.type_of_frame
= FC_ELS
;
2449 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.acc
, size
);
2450 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, received_ox_id
, ELS_ACC
);
2452 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
2458 static void tx_name_server_req(struct fc_info
*fi
, u_int req
)
2460 int int_required
= 1, i
, size
= 0;
2461 u_short ox_id
= OX_ID_FIRST_SEQUENCE
;
2462 u_int type
= TYPE_FC_SERVICES
| SEQUENCE_INITIATIVE
| FIRST_SEQUENCE
;
2463 u_int r_ctl
= FC4_DEVICE_DATA
| UNSOLICITED_CONTROL
;
2464 u_int my_mtu
= fi
->g
.my_mtu
, d_id
= DIRECTORY_SERVER
;
2466 ENTER("tx_name_server_req");
2467 /* Fill up CT_Header */
2468 ct_hdr
.rev_in_id
= htonl(FC_CT_REV
);
2469 ct_hdr
.fs_type
= DIRECTORY_SERVER_APP
;
2470 ct_hdr
.fs_subtype
= NAME_SERVICE
;
2473 ct_hdr
.cmnd_resp_code
= htons(req
>> 16);
2474 ct_hdr
.max_res_size
= 0;
2476 ct_hdr
.reason_code
= 0;
2477 ct_hdr
.expln_code
= 0;
2478 ct_hdr
.vendor_unique
= 0;
2480 fi
->g
.type_of_frame
= FC_ELS
;
2483 memcpy(&(fi
->g
.rfc_4
.ct_hdr
), &ct_hdr
, sizeof(CT_HDR
));
2484 fi
->g
.rfc_4
.s_id
= htonl(fi
->g
.my_id
);
2485 for (i
= 0; i
< 32; i
++)
2486 fi
->g
.rfc_4
.bit_map
[i
] = 0;
2487 /* We support IP & SCSI */
2488 fi
->g
.rfc_4
.bit_map
[2] = 0x01;
2489 fi
->g
.rfc_4
.bit_map
[3] = 0x20;
2490 size
= sizeof(RFC_4
);
2491 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.rfc_4
, size
);
2492 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, req
);
2495 memcpy(&(fi
->g
.gp_id4
.ct_hdr
), &ct_hdr
, sizeof(CT_HDR
));
2496 fi
->g
.gp_id4
.port_type
= htonl(PORT_TYPE_NX_PORTS
);
2497 size
= sizeof(GP_ID4
);
2498 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.gp_id4
, size
);
2499 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, req
);
2503 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
2505 LEAVE("tx_name_server_req");
2508 static void tx_scr(struct fc_info
*fi
)
2510 int int_required
= 1, size
= sizeof(SCR
);
2511 u_short ox_id
= OX_ID_FIRST_SEQUENCE
;
2512 u_int type
= TYPE_ELS
| SEQUENCE_INITIATIVE
| FIRST_SEQUENCE
;
2513 u_int r_ctl
= RCTL_ELS_UCTL
;
2514 u_int my_mtu
= fi
->g
.my_mtu
, d_id
= FABRIC_CONTROLLER
;
2516 fi
->g
.scr
.cmnd_code
= htonl(ELS_SCR
);
2517 fi
->g
.scr
.reg_function
= htonl(FULL_REGISTRATION
);
2518 fi
->g
.type_of_frame
= FC_ELS
;
2519 memcpy(fi
->g
.els_buffer
[fi
->g
.e_i
], &fi
->g
.scr
, size
);
2520 tx_exchange(fi
, (char *)(fi
->g
.els_buffer
[fi
->g
.e_i
]),size
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, ELS_SCR
);
2522 if (fi
->g
.e_i
== MAX_PENDING_FRAMES
)
static void perform_adisc(struct fc_info *fi)
{
	int count = 0;
	/* Will be set to TRUE when timer expires in a PLDA environment. */
	fi->g.port_discovery = FALSE;

	if (fi->node_info_list) {
		struct fc_node_info *temp_list = fi->node_info_list;
		while (temp_list) {
			/* Tx ADISC to all non-fabric based nodes. */
			if ((temp_list->d_id & 0xFF0000) != 0xFF0000)
				tx_adisc(fi, ELS_ADISC, temp_list->d_id, OX_ID_FIRST_SEQUENCE);
			temp_list = temp_list->next;
			count++;
		}
	}
	/* Perform Port Discovery after timer expires.
	 * We are giving time for the ADISCed nodes to respond
	 * so that we don't have to perform PLOGI to those whose
	 * logins are _still_ valid.
	 */
	fi->explore_timer.function = port_discovery_timer;
	fi->explore_timer.data = (unsigned long)fi;
	fi->explore_timer.expires = RUN_AT((count * 3 * HZ) / 100);
	init_timer(&fi->explore_timer);
	add_timer(&fi->explore_timer);
}
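/* explore_fabric() walks the payload of the Name Server's GP_ID4 accept,
 * issuing a PLOGI to each D_ID it has never seen and an ADISC to ones whose
 * login has lapsed, until the 0x80 control code marks the last entry; it
 * then starts explore_timer (fabric_explore_timer) to give those targets
 * time to respond before local port discovery resumes.
 */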
2559 static void explore_fabric(struct fc_info
*fi
, u_int
*buff_addr
)
2561 u_int
*addr
= buff_addr
+ 12; /* index into payload */
2562 u_char control_code
;
2565 ENTER("explore_fabric");
2566 DPRINTK1("entering explore_fabric");
2568 /*fi->g.perform_adisc = TRUE;
2569 fi->g.explore_fabric = TRUE;
2570 perform_adisc(fi);*/
2573 d_id
= ntohl(*addr
) & 0x00FFFFFF;
2574 if (d_id
!= fi
->g
.my_id
) {
2575 if (sid_logged_in(fi
, d_id
) == NODE_NOT_PRESENT
)
2576 tx_logi(fi
, ELS_PLOGI
, d_id
);
2578 if (sid_logged_in(fi
, d_id
) == NODE_LOGGED_OUT
)
2579 tx_adisc(fi
, ELS_ADISC
, d_id
, OX_ID_FIRST_SEQUENCE
);
2582 control_code
= (ntohl(*addr
) & 0xFF000000) >> 24;
2584 DPRINTK1("cc = %x, d_id = %x", control_code
, d_id
);
2585 } while (control_code
!= 0x80);
2587 fi
->explore_timer
.function
= fabric_explore_timer
;
2588 fi
->explore_timer
.data
= (unsigned long)fi
;
2589 /* We give 30 msec for each device to respond and then send out
2590 * our SCSI enquiries.
2592 fi
->explore_timer
.expires
= RUN_AT((count
*3*HZ
)/100);
2593 init_timer(&fi
->explore_timer
);
2594 add_timer(&fi
->explore_timer
);
2596 DPRINTK1("leaving explore_fabric");
2597 LEAVE("explore_fabric");
static void fabric_explore_timer(unsigned long data)
{
	struct fc_info *fi = (struct fc_info *)data;
	del_timer(&fi->explore_timer);

	if ((fi->g.loop_up == TRUE) && (fi->g.ptp_up == FALSE)) {
		/* Initiate Local Port Discovery on the Local Loop. */
		fi->g.port_discovery = TRUE;
		fi->g.alpa_list_index = 1;
		local_port_discovery(fi);
	}
	fi->g.explore_fabric = FALSE;
}

static void port_discovery_timer(unsigned long data)
{
	struct fc_info *fi = (struct fc_info *)data;
	del_timer(&fi->explore_timer);

	if ((fi->g.loop_up == TRUE) && (fi->g.explore_fabric != TRUE)) {
		fi->g.port_discovery = TRUE;
		fi->g.alpa_list_index = 1;
		local_port_discovery(fi);
	}
	fi->g.perform_adisc = FALSE;
}
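/* add_to_ox_id_list()/remove_from_ox_id_list() below maintain a small
 * kmalloc'd list mapping the OX_ID of each outstanding ELS/FC-Services
 * exchange to the command that was sent, so an incoming ACC or reject can be
 * matched back to the original request (e.g. FCS_GP_ID4 handling above).
 */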
2630 static void add_to_ox_id_list(struct fc_info
*fi
, u_int transaction_id
, u_int cmnd_code
)
2632 struct ox_id_els_map
*p
, *q
= fi
->ox_id_list
, *r
= NULL
;
2633 int size
= sizeof(struct ox_id_els_map
);
2638 p
= (struct ox_id_els_map
*)kmalloc(size
, GFP_ATOMIC
);
2640 T_MSG("kmalloc failed in add_to_ox_id_list()");
2643 p
->ox_id
= transaction_id
;
2646 if (fi
->ox_id_list
== NULL
)
2653 static u_int
remove_from_ox_id_list(struct fc_info
*fi
, u_short received_ox_id
)
2655 struct ox_id_els_map
*p
= fi
->ox_id_list
, *q
= fi
->ox_id_list
;
2658 if (q
->ox_id
== received_ox_id
) {
2660 if (q
== fi
->ox_id_list
)
2661 fi
->ox_id_list
= fi
->ox_id_list
->next
;
2663 if (q
->next
== NULL
)
2676 DPRINTK2("Could not find ox_id %x in ox_id_els_map", received_ox_id
);
2680 static void build_tachyon_header(struct fc_info
*fi
, u_int my_id
, u_int r_ctl
, u_int d_id
, u_int type
, u_char seq_id
, u_char df_ctl
, u_short ox_id
, u_short rx_id
, char *data
)
2682 u_char alpa
= d_id
& 0x0000FF;
2683 u_int dest_ddaa
= d_id
&0xFFFF00;
2685 ENTER("build_tachyon_header");
2686 DPRINTK("d_id = %x, my_ddaa = %x", d_id
, fi
->g
.my_ddaa
);
2687 /* Does it have to go to/thru a Fabric? */
2688 if ((dest_ddaa
!= 0) && ((d_id
== F_PORT
) || (fi
->g
.fabric_present
&& (dest_ddaa
!= fi
->g
.my_ddaa
))))
2690 fi
->g
.tach_header
.resv
= 0x00000000;
2691 fi
->g
.tach_header
.sof_and_eof
= SOFI3
| EOFN
;
2692 fi
->g
.tach_header
.dest_alpa
= alpa
;
2693 /* Set LCr properly to have enuff credit */
2694 if (alpa
== REPLICATE
)
2695 fi
->g
.tach_header
.lcr_and_time_stamp
= htons(0xC00);/* LCr=3 */
2697 fi
->g
.tach_header
.lcr_and_time_stamp
= 0;
2698 fi
->g
.tach_header
.r_ctl_and_d_id
= htonl(r_ctl
| d_id
);
2699 fi
->g
.tach_header
.vc_id_and_s_id
= htonl(my_id
);
2700 fi
->g
.tach_header
.type_and_f_cntl
= htonl(type
);
2701 fi
->g
.tach_header
.seq_id
= seq_id
;
2702 fi
->g
.tach_header
.df_cntl
= df_ctl
;
2703 fi
->g
.tach_header
.seq_cnt
= 0;
2704 fi
->g
.tach_header
.ox_id
= htons(ox_id
);
2705 fi
->g
.tach_header
.rx_id
= htons(rx_id
);
2706 fi
->g
.tach_header
.ro
= 0;
2708 /* We use the Seq_Count to keep track of IP frames in the
2709 * OCI_interrupt handler. Initial Seq_Count of IP frames is 1.
2711 if (fi
->g
.type_of_frame
== FC_BROADCAST
)
2712 fi
->g
.tach_header
.seq_cnt
= htons(0x1);
2714 fi
->g
.tach_header
.seq_cnt
= htons(0x2);
2715 fi
->g
.tach_header
.nw_header
.d_naa
= htons(0x1000);
2716 fi
->g
.tach_header
.nw_header
.s_naa
= htons(0x1000);
2717 memcpy(&(fi
->g
.tach_header
.nw_header
.dest_high
), data
, 2);
2718 memcpy(&(fi
->g
.tach_header
.nw_header
.dest_low
), data
+ 2, 4);
2719 memcpy(&(fi
->g
.tach_header
.nw_header
.source_high
), data
+ 6, 2);
2720 memcpy(&(fi
->g
.tach_header
.nw_header
.source_low
), data
+ 8, 4);
2722 LEAVE("build_tachyon_header");
2725 static void build_EDB(struct fc_info
*fi
, char *data
, u_short flags
, u_short len
)
2727 fi
->g
.edb
.buf_addr
= ntohl((u_int
)virt_to_bus(data
));
2728 fi
->g
.edb
.ehf
= ntohs(flags
);
2730 len
+= (4 - (len
% 4));
2731 fi
->g
.edb
.buf_len
= ntohs(len
);
2734 static void build_ODB(struct fc_info
*fi
, u_char seq_id
, u_int d_id
, u_int len
, u_int cntl
, u_short mtu
, u_short ox_id
, u_short rx_id
, int NW_header
, int int_required
, u_int frame_class
)
2736 fi
->g
.odb
.seq_d_id
= htonl(seq_id
<< 24 | d_id
);
2737 fi
->g
.odb
.tot_len
= len
;
2739 fi
->g
.odb
.tot_len
+= NW_HEADER_LEN
;
2740 if (fi
->g
.odb
.tot_len
% 4)
2741 fi
->g
.odb
.tot_len
+= (4 - (fi
->g
.odb
.tot_len
% 4));
2742 fi
->g
.odb
.tot_len
= htonl(fi
->g
.odb
.tot_len
);
2743 switch(int_required
) {
2744 case NO_COMP_AND_INT
:
2745 fi
->g
.odb
.cntl
= htons(ODB_CLASS_3
| ODB_EE_CREDIT
| ODB_NO_INT
| ODB_NO_COMP
| cntl
);
2747 case INT_AND_COMP_REQ
:
2748 fi
->g
.odb
.cntl
= htons(ODB_CLASS_3
| ODB_EE_CREDIT
| cntl
);
2750 case NO_INT_COMP_REQ
:
2751 fi
->g
.odb
.cntl
= htons(ODB_CLASS_3
| ODB_EE_CREDIT
| ODB_NO_INT
| cntl
);
2754 fi
->g
.odb
.rx_id
= htons(rx_id
);
2755 fi
->g
.odb
.cs_enable
= 0;
2756 fi
->g
.odb
.cs_seed
= htons(1);
2758 fi
->g
.odb
.hdr_addr
= htonl(virt_to_bus(fi
->q
.ptr_tachyon_header
[fi
->q
.tachyon_header_indx
]));
2759 fi
->g
.odb
.frame_len
= htons(mtu
);
2762 /* The pointer to the sk_buff is in here. Freed up when the
2763 * OCI_interrupt is received.
2765 fi
->g
.odb
.trans_id
= htonl(frame_class
);
2766 fi
->g
.odb
.hdr_len
= TACHYON_HEADER_LEN
+ NW_HEADER_LEN
;
2769 /* helps in tracking transmitted OX_IDs */
2770 fi
->g
.odb
.trans_id
= htonl((frame_class
& 0xFFFF0000) | ox_id
);
2771 fi
->g
.odb
.hdr_len
= TACHYON_HEADER_LEN
;
2773 fi
->g
.odb
.hdr_len
= htons(fi
->g
.odb
.hdr_len
);
2775 fi
->g
.odb
.edb_addr
= htonl(virt_to_bus(fi
->q
.ptr_edb
[fi
->q
.edb_buffer_indx
]));
2778 static void fill_login_frame(struct fc_info
*fi
, u_int logi
)
2781 fi
->g
.login
.ls_cmnd_code
= htonl(logi
);
2782 fi
->g
.login
.fc_ph_version
= htons(PH_VERSION
);
2784 fi
->g
.login
.buff_to_buff_credit
= htons(LOOP_BB_CREDIT
);
2787 fi
->g
.login
.buff_to_buff_credit
= htons(PT2PT_BB_CREDIT
);
2788 if ((logi
!= ELS_FLOGI
) || (logi
== ELS_ACC
))
2789 fi
->g
.login
.common_features
= htons(PLOGI_C_F
);
2791 if (logi
== ELS_FLOGI
)
2792 fi
->g
.login
.common_features
= htons(FLOGI_C_F
);
2793 fi
->g
.login
.recv_data_field_size
= htons(TACH_FRAME_SIZE
);
2794 fi
->g
.login
.n_port_total_conc_seq
= htons(CONCURRENT_SEQUENCES
);
2795 fi
->g
.login
.rel_off_by_info_cat
= htons(RO_INFO_CATEGORY
);
2796 fi
->g
.login
.ED_TOV
= htonl(E_D_TOV
);
2797 fi
->g
.login
.n_port_name_high
= htonl(N_PORT_NAME_HIGH
);
2798 fi
->g
.login
.n_port_name_low
= htonl(N_PORT_NAME_LOW
);
2799 fi
->g
.login
.node_name_high
= htonl(NODE_NAME_HIGH
);
2800 fi
->g
.login
.node_name_low
= htonl(NODE_NAME_LOW
);
2802 /* Fill Class 1 parameters */
2803 fi
->g
.login
.c_of_s
[0].service_options
= htons(0);
2804 fi
->g
.login
.c_of_s
[0].initiator_ctl
= htons(0);
2805 fi
->g
.login
.c_of_s
[0].recipient_ctl
= htons(0);
2806 fi
->g
.login
.c_of_s
[0].recv_data_field_size
= htons(0);
2807 fi
->g
.login
.c_of_s
[0].concurrent_sequences
= htons(0);
2808 fi
->g
.login
.c_of_s
[0].n_port_end_to_end_credit
= htons(0);
2809 fi
->g
.login
.c_of_s
[0].open_seq_per_exchange
= htons(0);
2810 fi
->g
.login
.c_of_s
[0].resv
= htons(0);
2812 /* Fill Class 2 parameters */
2813 fi
->g
.login
.c_of_s
[1].service_options
= htons(0);
2814 fi
->g
.login
.c_of_s
[1].initiator_ctl
= htons(0);
2815 fi
->g
.login
.c_of_s
[1].recipient_ctl
= htons(0);
2816 fi
->g
.login
.c_of_s
[1].recv_data_field_size
= htons(0);
2817 fi
->g
.login
.c_of_s
[1].concurrent_sequences
= htons(0);
2818 fi
->g
.login
.c_of_s
[1].n_port_end_to_end_credit
= htons(0);
2819 fi
->g
.login
.c_of_s
[1].open_seq_per_exchange
= htons(0);
2820 fi
->g
.login
.c_of_s
[1].resv
= htons(0);
2822 /* Fill Class 3 parameters */
2823 if (logi
== ELS_FLOGI
)
2824 fi
->g
.login
.c_of_s
[2].service_options
= htons(SERVICE_VALID
| SEQUENCE_DELIVERY
);
2826 fi
->g
.login
.c_of_s
[2].service_options
= htons(SERVICE_VALID
);
2827 fi
->g
.login
.c_of_s
[2].initiator_ctl
= htons(0);
2828 fi
->g
.login
.c_of_s
[2].recipient_ctl
= htons(0);
2829 fi
->g
.login
.c_of_s
[2].recv_data_field_size
= htons(TACH_FRAME_SIZE
);
2830 fi
->g
.login
.c_of_s
[2].concurrent_sequences
= htons(CLASS3_CONCURRENT_SEQUENCE
);
2831 fi
->g
.login
.c_of_s
[2].n_port_end_to_end_credit
= htons(0);
2832 fi
->g
.login
.c_of_s
[2].open_seq_per_exchange
= htons(CLASS3_OPEN_SEQUENCE
);
2833 fi
->g
.login
.c_of_s
[2].resv
= htons(0);
2835 for(i
= 0; i
< 4; i
++) {
2836 fi
->g
.login
.resv
[i
] = 0;
2837 fi
->g
.login
.vendor_version_level
[i
] = 0;
/* clear the Interrupt Latch on the (i)chip, so that you can receive
 * Interrupts from Tachyon in future
 */
static void reset_latch(struct fc_info *fi)
{
	writel(readl(fi->i_r.ptr_ichip_hw_status_reg) | ICHIP_HSR_INT_LATCH, fi->i_r.ptr_ichip_hw_status_reg);
}

static void update_OCQ_indx(struct fc_info *fi)
{
	fi->q.ocq_prod_indx++;
	if (fi->q.ocq_prod_indx == OCQ_LENGTH)
		fi->q.ocq_prod_indx = 0;
	writel(fi->q.ocq_prod_indx, fi->t_r.ptr_ocq_prod_indx_reg);
}

static void update_IMQ_indx(struct fc_info *fi, int count)
{
	fi->q.imq_cons_indx += count;
	if (fi->q.imq_cons_indx >= IMQ_LENGTH)
		fi->q.imq_cons_indx -= IMQ_LENGTH;
	writel(fi->q.imq_cons_indx, fi->t_r.ptr_imq_cons_indx_reg);
}

static void update_SFSBQ_indx(struct fc_info *fi)
{
	fi->q.sfsbq_prod_indx++;
	if (fi->q.sfsbq_prod_indx == SFSBQ_LENGTH)
		fi->q.sfsbq_prod_indx = 0;
	writel(fi->q.sfsbq_prod_indx, fi->t_r.ptr_sfsbq_prod_reg);
}

static void update_MFSBQ_indx(struct fc_info *fi, int count)
{
	fi->q.mfsbq_prod_indx += count;
	if (fi->q.mfsbq_prod_indx >= MFSBQ_LENGTH)
		fi->q.mfsbq_prod_indx -= MFSBQ_LENGTH;
	writel(fi->q.mfsbq_prod_indx, fi->t_r.ptr_mfsbq_prod_reg);
}

static void update_tachyon_header_indx(struct fc_info *fi)
{
	fi->q.tachyon_header_indx++;
	if (fi->q.tachyon_header_indx == NO_OF_TACH_HEADERS)
		fi->q.tachyon_header_indx = 0;
}

static void update_EDB_indx(struct fc_info *fi)
{
	fi->q.edb_buffer_indx++;
	if (fi->q.edb_buffer_indx == EDB_LEN)
		fi->q.edb_buffer_indx = 0;
}
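/* All of the update_*_indx() helpers above implement the same ring-queue
 * step: advance the index, wrap it at the queue length and, for the queues
 * the hardware consumes, publish the new value with writel() to the matching
 * Tachyon index register. An equivalent generic sketch (illustrative only,
 * not part of the driver):
 *
 *	indx = (indx + count) % queue_length;
 *	writel(indx, queue_index_register);
 */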
static int iph5526_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

static int iph5526_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

static void iph5526_timeout(struct net_device *dev)
{
	struct fc_info *fi = dev->priv;
	printk(KERN_WARNING "%s: timed out on send.\n", dev->name);
	fi->fc_stats.tx_dropped++;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static int iph5526_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct fc_info *fi = dev->priv;
	int status = 0;
	u_short type = 0;
	unsigned long flags;
	struct fcllc *fcllc;

	ENTER("iph5526_send_packet");

	netif_stop_queue(dev);
	/* Strip off the pseudo header. */
	skb->data = skb->data + 2*FC_ALEN;
	skb->len = skb->len - 2*FC_ALEN;
	fcllc = (struct fcllc *)skb->data;
	type = ntohs(fcllc->ethertype);

	spin_lock_irqsave(&fi->fc_lock, flags);
	switch (type) {
	case ETH_P_IP:
		status = tx_ip_packet(skb, skb->len, fi);
		break;
	case ETH_P_ARP:
		status = tx_arp_packet(skb->data, skb->len, fi);
		break;
	default:
		T_MSG("WARNING!!! Received Unknown Packet Type... Discarding...");
		fi->fc_stats.rx_dropped++;
		break;
	}
	spin_unlock_irqrestore(&fi->fc_lock, flags);

	if (status) {
		fi->fc_stats.tx_bytes += skb->len;
		fi->fc_stats.tx_packets++;
	}
	else
		fi->fc_stats.tx_dropped++;
	dev->trans_start = jiffies;
	/* We free up the IP buffers in the OCI_interrupt handler.
	 * status == 0 implies that the frame was not transmitted. So the
	 * skb is freed here.
	 */
	if ((type == ETH_P_ARP) || (status == 0))
		dev_kfree_skb(skb);
	netif_wake_queue(dev);
	LEAVE("iph5526_send_packet");
	return 0;
}

static int iph5526_change_mtu(struct net_device *dev, int mtu)
{
	return 0;
}
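/* tx_ip_packet() below resolves the destination from the FC pseudo header
 * via look_up_cache(): when the node's login is complete the frame goes out
 * through tx_exchange(); a broadcast is turned into unicasts to the nodes on
 * the local loop; and when the node is not logged in, a PLOGI is sent
 * instead and the IP frame itself is dropped (see the FIXME in the code).
 */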
2974 static int tx_ip_packet(struct sk_buff
*skb
, unsigned long len
, struct fc_info
*fi
)
2977 int int_required
= 1;
2978 u_int r_ctl
= FC4_DEVICE_DATA
| UNSOLICITED_DATA
;
2979 u_int type
= TYPE_LLC_SNAP
;
2980 u_short ox_id
= OX_ID_FIRST_SEQUENCE
;
2982 struct fc_node_info
*q
;
2984 ENTER("tx_ip_packet");
2985 q
= look_up_cache(fi
, skb
->data
- 2*FC_ALEN
);
2988 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id
);
2990 if (q
->login
== LOGIN_COMPLETED
){
2991 fi
->g
.type_of_frame
= FC_IP
;
2992 return tx_exchange(fi
, skb
->data
, len
, r_ctl
, type
, d_id
, mtu
, int_required
, ox_id
, virt_to_bus(skb
));
2995 if (q
->d_id
== BROADCAST
) {
2996 struct fc_node_info
*p
= fi
->node_info_list
;
2997 int return_value
= FALSE
;
2998 fi
->g
.type_of_frame
= FC_BROADCAST
;
2999 /* Do unicast to local nodes.
3004 if ((d_id
& 0xFFFF00) == fi
->g
.my_ddaa
)
3005 return_value
|= tx_exchange(fi
, skb
->data
, len
, r_ctl
, type
, d_id
, fi
->g
.my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3009 return return_value
;
3012 if (q
->login
!= LOGIN_COMPLETED
) {
3013 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id
);
3014 /* FIXME: we are dumping the frame here */
3015 tx_logi(fi
, ELS_PLOGI
, d_id
);
3018 DPRINTK2("Look-Up Cache Failed");
3019 LEAVE("tx_ip_packet");
3023 static int tx_arp_packet(char *data
, unsigned long len
, struct fc_info
*fi
)
3025 u_int opcode
= data
[ARP_OPCODE_0
];
3027 int int_required
= 0, return_value
= FALSE
;
3028 u_int r_ctl
= FC4_DEVICE_DATA
| UNSOLICITED_DATA
;
3029 u_int type
= TYPE_LLC_SNAP
;
3030 u_short ox_id
= OX_ID_FIRST_SEQUENCE
;
3031 u_int my_mtu
= fi
->g
.my_mtu
;
3032 ENTER("tx_arp_packet");
3034 opcode
= opcode
<< 8 | data
[ARP_OPCODE_1
];
3035 fi
->g
.type_of_frame
= FC_IP
;
3037 if (opcode
== ARPOP_REQUEST
) {
3038 struct fc_node_info
*q
= fi
->node_info_list
;
3040 return_value
|= tx_exchange(fi
, data
, len
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3041 /* Some devices support HW_TYPE 0x01 */
3042 memcpy(fi
->g
.arp_buffer
, data
- 2*FC_ALEN
, len
+ 2*FC_ALEN
);
3043 fi
->g
.arp_buffer
[9 + 2*FC_ALEN
] = 0x01;
3044 return_value
|= tx_exchange(fi
, (char *)(fi
->g
.arp_buffer
+ 2*FC_ALEN
), len
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3046 /* Do unicast to local nodes.
3049 fi
->g
.type_of_frame
= FC_BROADCAST
;
3051 if ((d_id
& 0xFFFF00) == fi
->g
.my_ddaa
) {
3052 return_value
|= tx_exchange(fi
, data
, len
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3053 // Some devices support HW_TYPE 0x01
3054 memcpy(fi
->g
.arp_buffer
, data
- 2*FC_ALEN
, len
+ 2*FC_ALEN
);
3055 fi
->g
.arp_buffer
[9 + 2*FC_ALEN
] = 0x01;
3056 return_value
|= tx_exchange(fi
, (char *)(fi
->g
.arp_buffer
+ 2*FC_ALEN
), len
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3060 return return_value
;
3063 if (opcode
== ARPOP_REPLY
) {
3064 struct fc_node_info
*q
; u_int mtu
;
3065 DPRINTK("We are sending out an ARP reply");
3066 q
= look_up_cache(fi
, data
- 2*FC_ALEN
);
3069 DPRINTK("Look-Up Cache Succeeded for d_id = %x", d_id
);
3071 if (q
->login
== LOGIN_COMPLETED
){
3072 tx_exchange(fi
, data
, len
, r_ctl
, type
, d_id
, mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3073 /* Some devices support HW_TYPE 0x01 */
3074 memcpy(fi
->g
.arp_buffer
, data
- 2*FC_ALEN
, len
+ 2*FC_ALEN
);
3075 fi
->g
.arp_buffer
[9 + 2*FC_ALEN
] = 0x01;
3076 return tx_exchange(fi
, (char *)(fi
->g
.arp_buffer
+ 2*FC_ALEN
), len
, r_ctl
, type
, d_id
, my_mtu
, int_required
, ox_id
, TYPE_LLC_SNAP
);
3079 DPRINTK1("Node not logged in... Txing PLOGI to %x", d_id
);
3080 tx_logi(fi
, ELS_PLOGI
, d_id
); /* FIXME: we are dumping the frame here */
3083 DPRINTK2("Look-Up Cache Failed");
3086 T_MSG("Warning!!! Invalid Opcode in ARP Packet!");
3088 LEAVE("tx_arp_packet");
3093 static void rx_net_packet(struct fc_info
*fi
, u_char
*buff_addr
, int payload_size
)
3095 struct net_device
*dev
= fi
->dev
;
3096 struct sk_buff
*skb
;
3099 ENTER("rx_net_packet");
3100 skb_size
= payload_size
- TACHYON_HEADER_LEN
;
3101 DPRINTK("skb_size = %d", skb_size
);
3102 fi
->fc_stats
.rx_bytes
+= skb_size
- 2;
3103 skb
= dev_alloc_skb(skb_size
);
3105 printk(KERN_NOTICE
"%s: In rx_net_packet() Memory squeeze, dropping packet.\n", dev
->name
);
3106 fi
->fc_stats
.rx_dropped
++;
3109 /* Skip over the Tachyon Frame Header.
3111 buff_addr
+= TACHYON_HEADER_LEN
;
3113 memcpy(fch
.daddr
, buff_addr
+ 2, FC_ALEN
);
3114 memcpy(fch
.saddr
, buff_addr
+ 10, FC_ALEN
);
3116 memcpy(buff_addr
, fch
.daddr
, FC_ALEN
);
3117 memcpy(buff_addr
+ 6, fch
.saddr
, FC_ALEN
);
3118 skb_reserve(skb
, 2);
3119 memcpy(skb_put(skb
, skb_size
- 2), buff_addr
, skb_size
- 2);
3121 skb
->protocol
= fc_type_trans(skb
, dev
);
3122 DPRINTK("protocol = %x", skb
->protocol
);
3124 /* Hmmm... to accept HW Type 0x01 as well...
3126 if (skb
->protocol
== ntohs(ETH_P_ARP
))
3127 skb
->data
[1] = 0x06;
3129 dev
->last_rx
= jiffies
;
3130 fi
->fc_stats
.rx_packets
++;
3131 LEAVE("rx_net_packet");
3135 static void rx_net_mfs_packet(struct fc_info
*fi
, struct sk_buff
*skb
)
3137 struct net_device
*dev
= fi
->dev
;
3139 ENTER("rx_net_mfs_packet");
3140 /* Construct your Hard Header */
3141 memcpy(fch
.daddr
, skb
->data
+ 2, FC_ALEN
);
3142 memcpy(fch
.saddr
, skb
->data
+ 10, FC_ALEN
);
3144 memcpy(skb
->data
, fch
.daddr
, FC_ALEN
);
3145 memcpy(skb
->data
+ 6, fch
.saddr
, FC_ALEN
);
3147 skb
->protocol
= fc_type_trans(skb
, dev
);
3148 DPRINTK("protocol = %x", skb
->protocol
);
3150 dev
->last_rx
= jiffies
;
3151 LEAVE("rx_net_mfs_packet");
3154 static int tx_exchange(struct fc_info
*fi
, char *data
, u_int len
, u_int r_ctl
, u_int type
, u_int d_id
, u_int mtu
, int int_required
, u_short tx_ox_id
, u_int frame_class
)
3157 int NW_flag
= 0, h_size
, return_value
;
3158 u_short rx_id
= RX_ID_FIRST_SEQUENCE
;
3159 u_int tachyon_status
;
3160 u_int my_id
= fi
->g
.my_id
;
3161 ENTER("tx_exchange");
3163 tachyon_status
= readl(fi
->t_r
.ptr_tach_status_reg
);
3164 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status
, len
, mtu
);
3165 if (tachyon_status
& OSM_FROZEN
) {
3166 reset_tachyon(fi
, ERROR_RELEASE
);
3167 reset_tachyon(fi
, OCQ_RESET
);
3168 DPRINTK("Tachyon Status = %x len = %d MTU = %d", tachyon_status
, len
, mtu
);
3170 if (tx_ox_id
== OX_ID_FIRST_SEQUENCE
) {
3171 switch(fi
->g
.type_of_frame
) {
3173 tx_ox_id
= fi
->g
.scsi_oxid
| SCSI_READ_BIT
;
3176 tx_ox_id
= fi
->g
.scsi_oxid
;
3179 tx_ox_id
= fi
->g
.ox_id
;
3184 switch(fi
->g
.type_of_frame
) {
3186 rx_id
= fi
->g
.scsi_oxid
| SCSI_READ_BIT
;
3189 rx_id
= fi
->g
.scsi_oxid
;
3192 rx_id
= RX_ID_FIRST_SEQUENCE
;
3195 rx_id
= fi
->g
.ox_id
;
3200 if (type
== TYPE_LLC_SNAP
) {
3203 /* Multi Frame Sequence ? If yes, set RO bit */
3205 type
|= RELATIVE_OFF_PRESENT
;
3206 build_tachyon_header(fi
, my_id
, r_ctl
, d_id
, type
, fi
->g
.seq_id
, df_ctl
, tx_ox_id
, rx_id
, data
- 2*FC_ALEN
);
3210 /* Multi Frame Sequence ? If yes, set RO bit */
3212 type
|= RELATIVE_OFF_PRESENT
;
3213 build_tachyon_header(fi
, my_id
, r_ctl
, d_id
, type
, fi
->g
.seq_id
, df_ctl
, tx_ox_id
, rx_id
, NULL
);
3216 /* Get free Tachyon Headers and EDBs */
3217 if (get_free_header(fi
) || get_free_EDB(fi
))
3220 if ((type
& 0xFF000000) == TYPE_LLC_SNAP
) {
3221 h_size
= TACHYON_HEADER_LEN
+ NW_HEADER_LEN
;
3222 memcpy(fi
->q
.ptr_tachyon_header
[fi
->q
.tachyon_header_indx
], &(fi
->g
.tach_header
), h_size
);
3225 memcpy(fi
->q
.ptr_tachyon_header
[fi
->q
.tachyon_header_indx
], &(fi
->g
.tach_header
), TACHYON_HEADER_LEN
);
3227 return_value
= tx_sequence(fi
, data
, len
, mtu
, d_id
, tx_ox_id
, rx_id
, fi
->g
.seq_id
, NW_flag
, int_required
, frame_class
);
3229 switch(fi
->g
.type_of_frame
) {
3232 update_scsi_oxid(fi
);
3238 if (fi
->g
.ox_id
== 0xFFFF)
3239 fi
->g
.ox_id
= NOT_SCSI_XID
;
3243 if (fi
->g
.seq_id
== MAX_SEQ_ID
)
3247 LEAVE("tx_exchange");
3248 return return_value
;
3251 static int tx_sequence(struct fc_info
*fi
, char *data
, u_int len
, u_int mtu
, u_int d_id
, u_short ox_id
, u_short rx_id
, u_char seq_id
, int NW_flag
, int int_required
, u_int frame_class
)
3255 ENTER("tx_sequence");
3256 build_EDB(fi
, data
, EDB_END
, len
);
3257 memcpy(fi
->q
.ptr_edb
[fi
->q
.edb_buffer_indx
], &(fi
->g
.edb
), sizeof(EDB
));
3258 build_ODB(fi
, seq_id
, d_id
, len
, cntl
, mtu
, ox_id
, rx_id
, NW_flag
, int_required
, frame_class
);
3259 memcpy(fi
->q
.ptr_odb
[fi
->q
.ocq_prod_indx
], &(fi
->g
.odb
), sizeof(ODB
));
3260 if (fi
->g
.link_up
!= TRUE
) {
3261 DPRINTK2("Fibre Channel Link not up. Dropping Exchange!");
3262 return_value
= FALSE
;
3265 /* To be on the safe side, a check should be included
3266 * at this point to check if we are overrunning
3269 update_OCQ_indx(fi
);
3270 return_value
= TRUE
;
3272 update_EDB_indx(fi
);
3273 update_tachyon_header_indx(fi
);
3274 LEAVE("tx_sequence");
3275 return return_value
;
3278 static int get_free_header(struct fc_info
*fi
)
3281 u_int
*tach_header
, initial_indx
= fi
->q
.tachyon_header_indx
;
3282 /* Check if the header is in use.
3283 * We could have an outstanding command.
3284 * We should find a free slot as we can queue a
3285 * maximum of 32 SCSI commands only.
3287 tach_header
= fi
->q
.ptr_tachyon_header
[fi
->q
.tachyon_header_indx
];
3288 temp_ox_id
= ntohl(*(tach_header
+ 6)) >> 16;
3289 /* We care about the SCSI writes only. Those are the wicked ones
3290 * that need an additional set of buffers.
3292 while(temp_ox_id
<= MAX_SCSI_XID
) {
3293 update_tachyon_header_indx(fi
);
3294 if (fi
->q
.tachyon_header_indx
== initial_indx
) {
3295 /* Should never happen.
3297 T_MSG("No free Tachyon headers available");
3298 reset_tachyon(fi
, SOFTWARE_RESET
);
3301 tach_header
= fi
->q
.ptr_tachyon_header
[fi
->q
.tachyon_header_indx
];
3302 temp_ox_id
= ntohl(*(tach_header
+ 6)) >> 16;
3307 static int get_free_EDB(struct fc_info
*fi
)
3309 unsigned int initial_indx
= fi
->q
.edb_buffer_indx
;
3310 /* Check if the EDB is in use.
3311 * We could have an outstanding SCSI Write command.
3312 * We should find a free slot as we can queue a
3313 * maximum of 32 SCSI commands only.
3315 while (fi
->q
.free_edb_list
[fi
->q
.edb_buffer_indx
] != EDB_FREE
) {
3316 update_EDB_indx(fi
);
3317 if (fi
->q
.edb_buffer_indx
== initial_indx
) {
3318 T_MSG("No free EDB buffers avaliable")
3319 reset_tachyon(fi
, SOFTWARE_RESET
);
3326 static int validate_login(struct fc_info
*fi
, u_int
*base_ptr
)
3328 struct fc_node_info
*q
= fi
->node_info_list
;
3329 char n_port_name
[PORT_NAME_LEN
];
3330 char node_name
[NODE_NAME_LEN
];
3332 ENTER("validate_login");
3333 /*index to Port Name in the payload. We need the 8 byte Port Name */
3334 memcpy(n_port_name
, base_ptr
+ 10, PORT_NAME_LEN
);
3335 memcpy(node_name
, base_ptr
+ 12, NODE_NAME_LEN
);
3336 s_id
= ntohl(*(base_ptr
+ 3)) & 0x00FFFFFF;
3338 /* check if Fibre Channel IDs have changed */
3340 if (memcmp(n_port_name
, q
->hw_addr
, PORT_NAME_LEN
) == 0) {
3341 if ((s_id
!= q
->d_id
) || (memcmp(node_name
, q
->node_name
, NODE_NAME_LEN
) != 0)) {
3342 DPRINTK1("Fibre Channel ID of Node has changed. Txing LOGO.");
3345 q
->login
= LOGIN_COMPLETED
;
3353 DPRINTK1("Port Name does not match. Txing LOGO.");
3354 LEAVE("validate_login");
3358 static void add_to_address_cache(struct fc_info
*fi
, u_int
*base_ptr
)
3360 int size
= sizeof(struct fc_node_info
);
3361 struct fc_node_info
*p
, *q
= fi
->node_info_list
, *r
= NULL
;
3362 char n_port_name
[PORT_NAME_LEN
];
3364 ENTER("add_to_address_cache");
3365 /*index to Port Name in the payload. We need the 8 byte Port Name */
3366 memcpy(n_port_name
, base_ptr
+ 13, PORT_NAME_LEN
);
3367 s_id
= ntohl(*(base_ptr
+ 3)) & 0x00FFFFFF;
3369 /* check if info already exists */
3371 if (memcmp(n_port_name
, q
->hw_addr
, PORT_NAME_LEN
) == 0) {
3372 if (s_id
!= q
->d_id
) {
3373 memcpy(&(q
->c_of_s
[0]), base_ptr
+ 17, 3 * sizeof(CLASS_OF_SERVICE
));
3374 q
->mtu
= ntohl(*(base_ptr
+ 10)) & 0x00000FFF;
3376 memcpy(q
->node_name
, base_ptr
+ 15, NODE_NAME_LEN
);
3378 q
->login
= LOGIN_COMPLETED
;
3389 p
= (struct fc_node_info
*)kmalloc(size
, GFP_ATOMIC
);
3391 T_MSG("kmalloc failed in add_to_address_cache()");
3394 memcpy(&(p
->c_of_s
[0]), base_ptr
+ 17, 3 * sizeof(CLASS_OF_SERVICE
));
3395 p
->mtu
= ntohl(*(base_ptr
+ 10)) & 0x00000FFF;
3397 memcpy(p
->hw_addr
, base_ptr
+ 13, PORT_NAME_LEN
);
3398 memcpy(p
->node_name
, base_ptr
+ 15, NODE_NAME_LEN
);
3399 p
->login
= LOGIN_COMPLETED
;
3401 p
->target_id
= 0xFF;
3403 if (fi
->node_info_list
== NULL
)
3404 fi
->node_info_list
= p
;
3411 LEAVE("add_to_address_cache");
3415 static void remove_from_address_cache(struct fc_info
*fi
, u_int
*base_ptr
, u_int cmnd_code
)
3417 struct fc_node_info
*q
= fi
->node_info_list
;
3419 ENTER("remove_from_address_cache");
3420 s_id
= ntohl(*(base_ptr
+ 3)) & 0x00FFFFFF;
3423 /* check if info exists */
3425 if (s_id
== q
->d_id
) {
3426 if (q
->login
== LOGIN_COMPLETED
)
3427 q
->login
= LOGIN_ATTEMPTED
;
3428 if (fi
->num_nodes
> 0)
3437 DPRINTK1("ELS_LOGO received from node 0x%x which is not logged-in", s_id
);
3441 int payload_len
= ntohl(*(base_ptr
+ 8)) & 0xFF;
3443 u_char address_format
;
3444 u_short received_ox_id
= ntohl(*(base_ptr
+ 6)) >> 16;
3445 u_int node_id
, mask
, *page_ptr
= base_ptr
+ 9;
3446 if ((payload_len
< 4) || (payload_len
> 256)) {
3447 DPRINTK1("RSCN with invalid payload length received");
3448 tx_ls_rjt(fi
, s_id
, received_ox_id
, LOGICAL_ERR
, RECV_FIELD_SIZE
);
3451 /* Page_size includes the Command Code */
3452 no_of_pages
= (payload_len
/ 4) - 1;
3453 for (i
= 0; i
< no_of_pages
; i
++) {
3454 address_format
= ntohl(*page_ptr
) >> 24;
3455 node_id
= ntohl(*page_ptr
) & 0x00FFFFFF;
3456 switch(address_format
) {
3457 case PORT_ADDRESS_FORMAT
:
3458 rscn_handler(fi
, node_id
);
3460 case AREA_ADDRESS_FORMAT
:
3461 case DOMAIN_ADDRESS_FORMAT
:
3462 if (address_format
== AREA_ADDRESS_FORMAT
)
3467 if ((q
->d_id
& mask
) == (node_id
& mask
))
3468 rscn_handler(fi
, q
->d_id
);
3471 /* There might be some new nodes to be
3472 * discovered. But, some of the earlier
3473 * requests as a result of the RSCN might be
3474 * in progress. We don't want to duplicate that
3475 * effort. So letz call SCR after a lag.
3477 fi
->explore_timer
.function
= scr_timer
;
3478 fi
->explore_timer
.data
= (unsigned long)fi
;
3479 fi
->explore_timer
.expires
= RUN_AT((no_of_pages
*3*HZ
)/100);
3480 init_timer(&fi
->explore_timer
);
3481 add_timer(&fi
->explore_timer
);
3484 T_MSG("RSCN with invalid address format received");
3485 tx_ls_rjt(fi
, s_id
, received_ox_id
, LOGICAL_ERR
, NO_EXPLN
);
3488 } /* end of for loop */
3489 } /* end of case RSCN: */
3495 LEAVE("remove_from_address_cache");
static void rscn_handler(struct fc_info *fi, u_int node_id)
	struct fc_node_info *q = fi->node_info_list;
	int login_state = sid_logged_in(fi, node_id);
	if ((login_state == NODE_LOGGED_IN) || (login_state == NODE_PROCESS_LOGGED_IN)) {
		if (q->d_id == node_id) {
			q->login = LOGIN_ATTEMPTED;
			if (fi->num_nodes > 0)

	if (login_state == NODE_LOGGED_OUT)
		tx_adisc(fi, ELS_ADISC, node_id, OX_ID_FIRST_SEQUENCE);

	if (login_state == NODE_LOGGED_OUT)
		tx_logi(fi, ELS_PLOGI, node_id);
static void scr_timer(unsigned long data)
	struct fc_info *fi = (struct fc_info *)data;
	del_timer(&fi->explore_timer);
	tx_name_server_req(fi, FCS_GP_ID4);
static int sid_logged_in(struct fc_info *fi, u_int s_id)
	struct fc_node_info *temp = fi->node_info_list;
	if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
		if (temp->scsi != FALSE)
			return NODE_PROCESS_LOGGED_IN;
		return NODE_LOGGED_IN;
	if ((temp->d_id == s_id) && (temp->login != LOGIN_COMPLETED))
		return NODE_LOGGED_OUT;
	return NODE_NOT_PRESENT;
static void mark_scsi_sid(struct fc_info *fi, u_int *buff_addr, u_char action)
	struct fc_node_info *temp = fi->node_info_list;
	u_int service_params;
	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
	service_params = ntohl(*(buff_addr + 12)) & 0x000000F0;
	if ((temp->d_id == s_id) && (temp->login == LOGIN_COMPLETED)) {
		if (action == DELETE_ENTRY) {

	/* Check if it is a SCSI Target */
	if (!(service_params & TARGET_FUNC)) {
		temp->scsi = INITIATOR;

	temp->scsi = TARGET;
	/* This helps to maintain the target_id no matter what your
	 * Fibre Channel ID is.
	 */
	if (temp->target_id == 0xFF) {
		if (fi->g.no_of_targets <= MAX_SCSI_TARGETS)
			temp->target_id = fi->g.no_of_targets++;
		T_MSG("MAX TARGETS reached!");
	DPRINTK1("Target_id %d already present", temp->target_id);
static int node_logged_in_prev(struct fc_info *fi, u_int *buff_addr)
	struct fc_node_info *temp;
	u_char *data = (u_char *)buff_addr;
	char node_name[NODE_NAME_LEN];
	s_id = ntohl(*(buff_addr + 3)) & 0x00FFFFFF;
	memcpy(node_name, buff_addr + 12, NODE_NAME_LEN);
	/* point to port_name in the ADISC payload */
	/* point to last 6 bytes of port_name */
	temp = look_up_cache(fi, data);
	if ((temp->d_id == s_id) && (memcmp(node_name, temp->node_name, NODE_NAME_LEN) == 0)) {
		temp->login = LOGIN_COMPLETED;
static struct fc_node_info *look_up_cache(struct fc_info *fi, char *data)
	struct fc_node_info *temp_list = fi->node_info_list, *q;
	u_char n_port_name[FC_ALEN], temp_addr[FC_ALEN];
	ENTER("look_up_cache");
	memcpy(n_port_name, data, FC_ALEN);
	if (memcmp(n_port_name, &(temp_list->hw_addr[2]), FC_ALEN) == 0)
		temp_list = temp_list->next;

	temp_addr[0] = temp_addr[1] = temp_addr[2] = 0xFF;
	temp_addr[3] = temp_addr[4] = temp_addr[5] = 0xFF;
	if (memcmp(n_port_name, temp_addr, FC_ALEN) == 0) {
		q = (struct fc_node_info *)kmalloc(sizeof(struct fc_node_info), GFP_ATOMIC);
			T_MSG("kmalloc failed in look_up_cache()");
		q->d_id = BROADCAST;
	LEAVE("look_up_cache");
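/* look_up_cache() matches on the low six bytes of the cached Port Name
 * (hw_addr[2..7]); an all-0xFF lookup address is the broadcast case and
 * gets a freshly kmalloc()ed entry with d_id set to BROADCAST,
 * presumably so that broadcast IP frames still resolve to a node entry.
 */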
static int display_cache(struct fc_info *fi)
	struct fc_node_info *q = fi->node_info_list;
	struct ox_id_els_map *temp_ox_id_list = fi->ox_id_list;

	printk("\nFibre Channel Node Information for %s\n", fi->name);
	printk("My FC_ID = %x, My WWN = %x %x, ", fi->g.my_id, fi->g.my_node_name_high, fi->g.my_node_name_low);
	if (fi->g.ptp_up == TRUE)
		printk("Port_Type = N_Port\n");
	if (fi->g.loop_up == TRUE)
		printk("Port_Type = L_Port\n");

	for (j = 0; j < PORT_NAME_LEN; j++)
		printk("%x ", q->hw_addr[j]);
	printk("FC_ID = %x, ", q->d_id);
	if (q->login == LOGIN_COMPLETED)
	if (q->scsi == TARGET)
		printk("Target_ID = %d ", q->target_id);

	printk("OX_ID -> ELS Map\n");
	while (temp_ox_id_list) {
		printk("ox_id = %x, ELS = %x\n", temp_ox_id_list->ox_id, temp_ox_id_list->els);
		temp_ox_id_list = temp_ox_id_list->next;
static struct net_device_stats *iph5526_get_stats(struct net_device *dev)
	struct fc_info *fi = dev->priv;
	return (struct net_device_stats *) &fi->fc_stats;
/* SCSI stuff starts here */

int iph5526_detect(Scsi_Host_Template *tmpt)
	struct Scsi_Host *host = NULL;
	struct iph5526_hostdata *hostdata;
	struct fc_info *fi = NULL;
	int no_of_hosts = 0, i, j, count = 0;
	u_int pci_maddr = 0;
	struct pci_dev *pdev = NULL;
	unsigned long timeout;

	tmpt->proc_name = "iph5526";

	for (i = 0; i <= MAX_FC_CARDS; i++)

	for (i = 0; clone_list[i].vendor_id != 0; i++)
		while ((pdev = pci_find_device(clone_list[i].vendor_id, clone_list[i].device_id, pdev))) {
			unsigned short pci_command;
			if (pci_enable_device(pdev))
			if (count < MAX_FC_CARDS) {
				fc[count] = kmalloc(sizeof(struct fc_info), GFP_ATOMIC);
				if (fc[count] == NULL) {
					printk("iph5526.c: Unable to register card # %d\n", count + 1);
				memset(fc[count], 0, sizeof(struct fc_info));
				printk("iph5526.c: Maximum Number of cards reached.\n");
			sprintf(fi->name, "fc%d", count);

			host = scsi_register(tmpt, sizeof(struct iph5526_hostdata));
			hostdata = (struct iph5526_hostdata *)host->hostdata;
			memset(hostdata, 0, sizeof(struct iph5526_hostdata));
			for (j = 0; j < MAX_SCSI_TARGETS; j++)
				hostdata->tag_ages[j] = jiffies;
			//host->max_id = MAX_SCSI_TARGETS;
			host->this_id = tmpt->this_id;

			pci_maddr = pci_resource_start(pdev, 0);
			if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
				printk("iph5526.c : Cannot find proper PCI device base address.\n");
				scsi_unregister(host);
			DPRINTK("pci_maddr = %x", pci_maddr);
			pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

			pci_irq_line = pdev->irq;
			printk("iph5526.c: PCI BIOS reports %s at i/o %#x, irq %d.\n", clone_list[i].name, pci_maddr, pci_irq_line);
			fi->g.mem_base = ioremap(pci_maddr & PAGE_MASK, 1024);

			/* We use Memory Mapped IO. The initial space contains the
			 * PCI Configuration registers followed by the (i) chip
			 * registers followed by the Tachyon registers.
			 */
			/* Thatz where (i)chip maps Tachyon Address Space.
			 */
			fi->g.tachyon_base = (u_long)fi->g.mem_base + TACHYON_OFFSET + (pci_maddr & ~PAGE_MASK);
			DPRINTK("fi->g.tachyon_base = %x", (u_int)fi->g.tachyon_base);
			if (fi->g.mem_base == NULL) {
				printk("iph5526.c : ioremap failed!!!\n");
				scsi_unregister(host);
			DPRINTK("IRQ1 = %d\n", pci_irq_line);

			fi->base_addr = (long) pdev;

			/* Found it, get IRQ.
			 */
			irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
				printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
				scsi_unregister(host);
			host->irq = fi->irq = pci_irq_line;
			fi->clone_id = clone_list[i].vendor_id;

			if (!initialize_register_pointers(fi) || !tachyon_init(fi)) {
				printk("iph5526.c: TACHYON initialization failed for card # %d!!!\n", count + 1);
				free_irq(host->irq, host);
				scsi_unregister(host);
				clean_up_memory(fi);
			DPRINTK1("Fibre Channel card initialized");

			/* Wait for the Link to come up and the login process
			 */
			for (timeout = jiffies + 10*HZ; time_before(jiffies, timeout) && ((fi->g.link_up == FALSE) || (fi->g.port_discovery == TRUE) || (fi->g.explore_fabric == TRUE) || (fi->g.perform_adisc == TRUE));)

	DPRINTK1("no_of_hosts = %d", no_of_hosts);

	/* This is to make sure that the ACC to the PRLI comes in
	 * for the last ALPA.
	 */
	mdelay(1000); /* Ugly! Let the Gods forgive me */

	DPRINTK1("leaving iph5526_detect\n");
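/* Note on the mapping arithmetic in iph5526_detect() above: only 1K of
 * BAR0 is ioremap()ed, and per the comment in the code that window holds
 * the PCI configuration registers, then the (i)chip registers, then the
 * Tachyon register file, so tachyon_base = mem_base + TACHYON_OFFSET
 * plus the sub-page offset of the BAR. The 10*HZ for-loop is a plain
 * busy-wait for link-up, port discovery, fabric exploration and ADISC to
 * settle before the mid-layer starts sending commands.
 */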
int iph5526_biosparam(struct scsi_device *sdev, struct block_device *n,
		sector_t capacity, int ip[])
	int size = capacity;
	ip[2] = size / (ip[0] * ip[1]);
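/* Geometry note for iph5526_biosparam() above: by SCSI host-template
 * convention ip[0] is heads, ip[1] is sectors per track and ip[2] is
 * cylinders, so cylinders = capacity / (heads * sectors). Assuming the
 * usual 64-head, 32-sector translation, a 2097152-sector (1 GB) disk
 * reports 2097152 / (64 * 32) = 1024 cylinders.
 */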
int iph5526_queuecommand(Scsi_Cmnd *Cmnd, void (*done) (Scsi_Cmnd *))
	int int_required = 0;
	u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
	u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
	u_int frame_class = Cmnd->device->id;
	u_short ox_id = OX_ID_FIRST_SEQUENCE;
	struct Scsi_Host *host = Cmnd->device->host;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	struct fc_info *fi = hostdata->fi;
	struct fc_node_info *q;

	ENTER("iph5526_queuecommand");

	spin_lock_irqsave(&fi->fc_lock, flags);
	Cmnd->scsi_done = done;

	if (Cmnd->device->tagged_supported) {
		case SIMPLE_QUEUE_TAG:
			hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
		case HEAD_OF_QUEUE_TAG:
			hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_HEAD_OF_Q;
		case ORDERED_QUEUE_TAG:
			hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
			if ((jiffies - hostdata->tag_ages[Cmnd->device->id]) > (5 * HZ)) {
				hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_ORDERED;
				hostdata->tag_ages[Cmnd->device->id] = jiffies;
			hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_SIMPLE;
		hostdata->cmnd.fcp_cntl = FCP_CNTL_QTYPE_UNTAGGED;

	hostdata->cmnd.fcp_addr[3] = 0;
	hostdata->cmnd.fcp_addr[2] = 0;
	hostdata->cmnd.fcp_addr[1] = 0;
	hostdata->cmnd.fcp_addr[0] = htons(Cmnd->device->lun);

	memcpy(&hostdata->cmnd.fcp_cdb, Cmnd->cmnd, Cmnd->cmd_len);
	hostdata->cmnd.fcp_data_len = htonl(Cmnd->request_bufflen);

	/* Get an unused OX_ID. We could have pending commands.
	 */
	if (get_scsi_oxid(fi)) {
		spin_unlock_irqrestore(&fi->fc_lock, flags);
	fi->q.free_scsi_oxid[fi->g.scsi_oxid] = OXID_INUSE;

	/* Maintain a handler so that we can associate the done() function
	 * on completion of the SCSI command.
	 */
	hostdata->cmnd_handler[fi->g.scsi_oxid] = Cmnd;

	switch (Cmnd->cmnd[0]) {
		fi->g.type_of_frame = FC_SCSI_WRITE;
		hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_WRITE | hostdata->cmnd.fcp_cntl);
		fi->g.type_of_frame = FC_SCSI_READ;
		hostdata->cmnd.fcp_cntl = htonl(FCP_CNTL_READ | hostdata->cmnd.fcp_cntl);
	memcpy(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx], &(hostdata->cmnd), sizeof(fcp_cmd));

	q = resolve_target(fi, Cmnd->device->id);
		u_int bad_id = fi->g.my_ddaa | 0xFE;
		/* We transmit to a non-existent AL_PA so that the "done"
		 * function can be called while receiving the interrupt
		 * due to a Timeout for a bad AL_PA. In a PTP configuration,
		 * the int_required field is set, since there is no notion
		 * of AL_PAs. This approach sucks, but works alright!
		 */
		if (fi->g.ptp_up == TRUE)
		tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
		spin_unlock_irqrestore(&fi->fc_lock, flags);
		DPRINTK1("Target ID %x not present", Cmnd->target);

	if (q->login == LOGIN_COMPLETED) {
		if (add_to_sest(fi, Cmnd, q)) {
			DPRINTK1("add_to_sest() failed.");
			spin_unlock_irqrestore(&fi->fc_lock, flags);
		tx_exchange(fi, (char *)(fi->q.ptr_fcp_cmnd[fi->q.fcp_cmnd_indx]), sizeof(fcp_cmd), r_ctl, type, q->d_id, q->mtu, int_required, ox_id, frame_class << 16);
		update_FCP_CMND_indx(fi);

	spin_unlock_irqrestore(&fi->fc_lock, flags);
	/* If q != NULL, then we have a SCSI Target.
	 * If q->login != LOGIN_COMPLETED, then that device could be
	 * offline temporarily. So we let the command time out.
	 */
	LEAVE("iph5526_queuecommand");
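/* Data path recap for iph5526_queuecommand() above: the FCP_CMND is
 * staged in hostdata->cmnd (queue-tag type, LUN in fcp_addr[0], CDB and
 * data length), copied into the DMA-able slot
 * fi->q.ptr_fcp_cmnd[fcp_cmnd_indx] and handed to tx_exchange() under
 * the OX_ID reserved by get_scsi_oxid(). The Scsi_Cmnd itself is parked
 * in hostdata->cmnd_handler[scsi_oxid] so that the completion path can
 * find it and call the saved scsi_done().
 */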
int iph5526_abort(Scsi_Cmnd *Cmnd)
	struct Scsi_Host *host = Cmnd->device->host;
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	struct fc_info *fi = hostdata->fi;
	struct fc_node_info *q;
	u_int r_ctl = FC4_DEVICE_DATA | UNSOLICITED_COMMAND;
	u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
	u_short ox_id = OX_ID_FIRST_SEQUENCE;
	int int_required = 1, i, abort_status = FALSE;

	ENTER("iph5526_abort");

	spin_lock_irqsave(&fi->fc_lock, flags);

	q = resolve_target(fi, Cmnd->device->id);
		u_int bad_id = fi->g.my_ddaa | 0xFE;
		/* This should not happen as we should always be able to
		 * resolve a target id. But, just in case...
		 * We transmit to a non-existent AL_PA so that the done
		 * function can be called while receiving the interrupt
		 */
		DPRINTK1("Unresolved Target ID!");
		tx_exchange(fi, (char *)(&(hostdata->cmnd)), sizeof(fcp_cmd), r_ctl, type, bad_id, fi->g.my_mtu, int_required, ox_id, FC_SCSI_BAD_TARGET);
		DPRINTK1("Target ID %x not present", Cmnd->target);
		spin_unlock_irqrestore(&fi->fc_lock, flags);

	/* If q != NULL, then we have a SCSI Target. If
	 * q->login != LOGIN_COMPLETED, then that device could
	 * be offline temporarily. So we let the command time out.
	 */

	/* Get the OX_ID for the Command to be aborted.
	 */
	for (i = 0; i <= MAX_SCSI_XID; i++) {
		if (hostdata->cmnd_handler[i] == Cmnd) {
			hostdata->cmnd_handler[i] = NULL;

	if (i > MAX_SCSI_XID) {
		T_MSG("Command could not be resolved to OX_ID");
		spin_unlock_irqrestore(&fi->fc_lock, flags);

	switch (Cmnd->cmnd[0]) {
		ox_id |= SCSI_READ_BIT;
	abort_status = abort_exchange(fi, ox_id);

	if ((q->login == LOGIN_COMPLETED) && (abort_status == TRUE)) {
		/* Then, transmit an ABTS to the target. The rest
		 * is done when the BA_ACC is received for the ABTS.
		 */
		tx_abts(fi, q->d_id, ox_id);

		/* Invalidate resources for that Exchange.
		 */
		x_id = ox_id & MAX_SCSI_XID;
		STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
		if (STE_bit & SEST_V) {
			*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
			invalidate_SEST_entry(fi, ox_id);

	LEAVE("iph5526_abort");
	spin_unlock_irqrestore(&fi->fc_lock, flags);
static int abort_exchange(struct fc_info *fi, u_short ox_id)
	volatile u_int flush_SEST, STE_bit;
	x_id = ox_id & MAX_SCSI_XID;
	DPRINTK1("Aborting Exchange %x", ox_id);

	STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
	/* Is the Exchange still active?
	 */
	if (STE_bit & SEST_V) {
		if (ox_id & SCSI_READ_BIT) {
			/* If the Exchange to be aborted is Inbound,
			 * Flush the SEST Entry from Tachyon's Cache.
			 */
			*(fi->q.ptr_sest[x_id]) &= htonl(SEST_INV);
			flush_tachyon_cache(fi, ox_id);
			flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
			while ((flush_SEST & 0x80000000) != 0)
				flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
			STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
			while ((STE_bit & 0x80000000) != 0)
				STE_bit = ntohl(*fi->q.ptr_sest[x_id]);
			flush_SEST = readl(fi->t_r.ptr_tach_flush_oxid_reg);
			invalidate_SEST_entry(fi, ox_id);

			/* For In-Order Reassembly, the following is done:
			 * First, write zero as the buffer length in the EDB.
			 */
			ptr_edb = bus_to_virt(ntohl(*(fi->q.ptr_sest[x_id] + 7)));
			for (i = 0; i < EDB_LEN; i++)
				if (fi->q.ptr_edb[i] == ptr_edb)
			*ptr_edb = *ptr_edb & 0x0000FFFF;
			T_MSG("EDB not found while clearing in abort_exchange()");
		DPRINTK1("Exchange %x invalidated", ox_id);
		DPRINTK1("SEST Entry for exchange %x not valid", ox_id);
static void flush_tachyon_cache(struct fc_info *fi, u_short ox_id)
	volatile u_int tachyon_status;
	if (fi->g.loop_up == TRUE) {
		writel(HOST_CONTROL, fi->t_r.ptr_fm_control_reg);
		/* Make sure that the Inbound FIFO is empty.
		 */
		do {
			tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
		} while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
		/* Ok. Go ahead and flushhhhhhhhh!
		 */
		writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
		writel(EXIT_HOST_CONTROL, fi->t_r.ptr_fm_control_reg);

	if (fi->g.ptp_up == TRUE) {
		take_tachyon_offline(fi);
		/* Make sure that the Inbound FIFO is empty.
		 */
		do {
			tachyon_status = readl(fi->t_r.ptr_tach_status_reg);
		} while ((tachyon_status & RECEIVE_FIFO_EMPTY) == 0);
		writel(0x80000000 | ox_id, fi->t_r.ptr_tach_flush_oxid_reg);
		/* Write the Initialize command to the FM Control reg.
		 */
		fi->g.n_port_try = TRUE;
		DPRINTK1("In abort_exchange, TACHYON initializing as N_Port...\n");
		writel(INITIALIZE, fi->t_r.ptr_fm_control_reg);
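/* The flush above is only the kick-off: flush_tachyon_cache() writes
 * 0x80000000 | ox_id into the flush-OX_ID register, and the caller
 * (abort_exchange() above) then polls bit 31 of that register and of
 * the SEST word until they clear before invalidating the entry -
 * presumably the chip's indication that its cached copy of the SEST
 * state has been written back.
 */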
static struct fc_node_info *resolve_target(struct fc_info *fi, u_char target)
	struct fc_node_info *temp = fi->node_info_list;
	if (temp->target_id == target) {
		if ((temp->scsi == TARGET) && (temp->login == LOGIN_COMPLETED))
		if (temp->login != LOGIN_COMPLETED) {
			/* The Target is not currently logged in.
			 * It could be a Target on the Local Loop or
			 * on a Remote Loop connected through a switch.
			 * In either case, we will know whenever the Target
			 * comes On-Line again. We let the command
			 * time out so that it gets retried.
			 */
			T_MSG("Target %d not logged in.", temp->target_id);
			tx_logi(fi, ELS_PLOGI, temp->d_id);

	if (temp->scsi != TARGET) {
		/* For some reason, we did not get a response to
		 * PRLI. Letz try it again...
		 */
		DPRINTK1("Node not PRLIied. Txing PRLI...");
		tx_prli(fi, ELS_PRLI, temp->d_id, OX_ID_FIRST_SEQUENCE);
static int add_to_sest(struct fc_info *fi, Scsi_Cmnd *Cmnd, struct fc_node_info *ni)
	/* we have at least 1 buffer, the terminator */
	int no_of_sdb_buffers = 1, i;
	int no_of_edb_buffers = 0;
	u_int *req_buffer = (u_int *)Cmnd->request_buffer;
	u_int *ptr_sdb = NULL;
	struct scatterlist *sl1, *sl2 = NULL;

	switch (fi->g.type_of_frame) {
		fi->g.inb_sest_entry.flags_and_byte_offset = htonl(INB_SEST_VED);
		fi->g.inb_sest_entry.byte_count = 0;
		fi->g.inb_sest_entry.no_of_recvd_frames = 0;
		fi->g.inb_sest_entry.no_of_expected_frames = 0;
		fi->g.inb_sest_entry.last_fctl = 0;

		no_of_sg = Cmnd->use_sg;
		sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
		for (i = 0; i < no_of_sg; i++) {
			no_of_sdb_buffers += sl1->length / SEST_BUFFER_SIZE;
			if (sl1->length % SEST_BUFFER_SIZE)
				no_of_sdb_buffers++;
		no_of_sdb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
		if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
			no_of_sdb_buffers++;

		/* We are working with the premise that at the max we would
		 * get a scatter-gather buffer containing 63 buffers
		 * of size 1024 bytes each. Is it a _bad_ assumption?
		 */
		if (no_of_sdb_buffers > 512) {
			T_MSG("Number of SDB buffers needed = %d", no_of_sdb_buffers);
			T_MSG("Disable Scatter-Gather!!!");

		/* Store it in the sdb_table so that we can retrieve it and
		 * free up the memory when the Read Command completes.
		 */
		if (get_free_SDB(fi))
		ptr_sdb = fi->q.ptr_sdb_slot[fi->q.sdb_indx];
		fi->q.sdb_slot_status[fi->q.sdb_indx] = SDB_BUSY;
		fi->g.inb_sest_entry.sdb_address = htonl(virt_to_bus(ptr_sdb));

		for (i = 0; i < no_of_sg; i++) {
			char *addr_ptr = sl2->address;
			count = sl2->length / SEST_BUFFER_SIZE;
			if (sl2->length % SEST_BUFFER_SIZE)
			for (j = 0; j < count; j++) {
				*(ptr_sdb) = htonl(virt_to_bus(addr_ptr));
				addr_ptr += SEST_BUFFER_SIZE;
		for (i = 0; i < no_of_sdb_buffers - 1; i++) {
			*(ptr_sdb) = htonl(virt_to_bus(req_buffer));
			req_buffer += SEST_BUFFER_SIZE/4;
		*(ptr_sdb) = htonl(0x1); /* Terminator */

		/* The scratch pad is used to hold the index into the SDB.
		 */
		fi->g.inb_sest_entry.scratch_pad = fi->q.sdb_indx;
		fi->g.inb_sest_entry.expected_ro = 0;
		fi->g.inb_sest_entry.buffer_index = 0;
		fi->g.inb_sest_entry.buffer_offset = 0;
		memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.inb_sest_entry, sizeof(INB_SEST_ENTRY));

		fi->g.outb_sest_entry.flags_and_did = htonl(OUTB_SEST_VED | ni->d_id);
		fi->g.outb_sest_entry.max_frame_len = htons(ni->mtu << 4);
		fi->g.outb_sest_entry.cntl = htons(ODB_CLASS_3 | ODB_EE_CREDIT | ODB_NO_INT | ODB_NO_COMP);
		fi->g.outb_sest_entry.total_seq_length = INV_SEQ_LEN;
		fi->g.outb_sest_entry.link = htons(OUTB_SEST_LINK);
		fi->g.outb_sest_entry.transaction_id = htonl(fi->g.scsi_oxid);
		fi->g.outb_sest_entry.seq_id = fi->g.seq_id;
		fi->g.outb_sest_entry.reserved = 0x0;
		fi->g.outb_sest_entry.header_length = htons(TACHYON_HEADER_LEN);

		u_short rx_id = RX_ID_FIRST_SEQUENCE;
		u_int r_ctl = FC4_DEVICE_DATA | SOLICITED_DATA;
		u_int type = TYPE_FCP | SEQUENCE_INITIATIVE;
		/* Multi Frame Sequence ? If yes, set RO bit.
		 */
		if (Cmnd->request_bufflen > ni->mtu)
			type |= RELATIVE_OFF_PRESENT;
		build_tachyon_header(fi, fi->g.my_id, r_ctl, ni->d_id, type, fi->g.seq_id, df_ctl, fi->g.scsi_oxid, rx_id, NULL);
		if (get_free_header(fi) || get_free_EDB(fi))
		memcpy(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx], &(fi->g.tach_header), TACHYON_HEADER_LEN);
		fi->g.outb_sest_entry.header_address = htonl(virt_to_bus(fi->q.ptr_tachyon_header[fi->q.tachyon_header_indx]));
		update_tachyon_header_indx(fi);

		no_of_sg = Cmnd->use_sg;
		sl1 = sl2 = (struct scatterlist *)Cmnd->request_buffer;
		for (i = 0; i < no_of_sg; i++) {
			no_of_edb_buffers += sl1->length / SEST_BUFFER_SIZE;
			if (sl1->length % SEST_BUFFER_SIZE)
				no_of_edb_buffers++;
		no_of_edb_buffers += Cmnd->request_bufflen / SEST_BUFFER_SIZE;
		if (Cmnd->request_bufflen % SEST_BUFFER_SIZE)
			no_of_edb_buffers++;

		/* We need "no_of_edb_buffers" _contiguous_ EDBs
		 * that are FREE. Check for that first.
		 */
		for (i = 0; i < no_of_edb_buffers; i++) {
			if ((fi->q.edb_buffer_indx + no_of_edb_buffers) >= EDB_LEN)
				fi->q.edb_buffer_indx = 0;
			if (fi->q.free_edb_list[fi->q.edb_buffer_indx + i] != EDB_FREE) {
				for (j = 0; j < i; j++)
					update_EDB_indx(fi);
				if (get_free_EDB(fi))

		/* We got enuff FREE EDBs.
		 */
		fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
		sl1 = (struct scatterlist *)Cmnd->request_buffer;
		for (i = 0; i < no_of_sg; i++) {
			count = sl1->length / SEST_BUFFER_SIZE;
			for (j = 0; j < count; j++) {
				build_EDB(fi, (char *)sl1->address, 0, SEST_BUFFER_SIZE);
				memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
				/* Mark this EDB as being in use */
				fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
				/* We have already made sure that we have enuff
				 * free EDBs that are contiguous. So this is
				 * safe.
				 */
				update_EDB_indx(fi);
				sl1->address += SEST_BUFFER_SIZE;
			/* Just in case itz not a multiple of
			 * SEST_BUFFER_SIZE bytes.
			 */
			if (sl1->length % SEST_BUFFER_SIZE) {
				build_EDB(fi, (char *)sl1->address, 0, sl1->length % SEST_BUFFER_SIZE);
				memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
				fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
				update_EDB_indx(fi);

		/* The last EDB is special. It needs the "end bit" to
		 * be set.
		 */
		*(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | ntohs(EDB_END);

		fi->g.outb_sest_entry.edb_address = htonl(virt_to_bus(fi->q.ptr_edb[fi->q.edb_buffer_indx]));
		count = Cmnd->request_bufflen / SEST_BUFFER_SIZE;
		for (j = 0; j < count; j++) {
			build_EDB(fi, (char *)req_buffer, 0, SEST_BUFFER_SIZE);
			memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
			/* Mark this EDB as being in use */
			fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
			/* We have already made sure that we have enuff
			 * free EDBs that are contiguous. So this is
			 * safe.
			 */
			update_EDB_indx(fi);
			req_buffer += SEST_BUFFER_SIZE;
		/* Just in case itz not a multiple of
		 * SEST_BUFFER_SIZE bytes.
		 */
		if (Cmnd->request_bufflen % SEST_BUFFER_SIZE) {
			build_EDB(fi, (char *)req_buffer, EDB_END, Cmnd->request_bufflen % SEST_BUFFER_SIZE);
			memcpy(fi->q.ptr_edb[fi->q.edb_buffer_indx], &(fi->g.edb), sizeof(EDB));
			fi->q.free_edb_list[fi->q.edb_buffer_indx] = EDB_BUSY;
			update_EDB_indx(fi);

		/* Mark the last EDB as the "end edb".
		 */
		*(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) = *(fi->q.ptr_edb[fi->q.edb_buffer_indx - 1] + 1) | htons(EDB_END);

	/* Finally we have something to send!
	 */
	memcpy(fi->q.ptr_sest[fi->g.scsi_oxid], &fi->g.outb_sest_entry, sizeof(OUTB_SEST_ENTRY));
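/* Worked example for the SDB sizing in add_to_sest() above (read path,
 * no scatter-gather): with a SEST_BUFFER_SIZE of, say, 1024, a
 * request_bufflen of 4096 needs 4096 / 1024 = 4 data pointers plus the
 * terminator slot that no_of_sdb_buffers starts at, i.e. 5 SDB entries;
 * each data entry is a 32-bit bus address stored big-endian with
 * htonl(virt_to_bus(...)), and the list is closed with the htonl(0x1)
 * terminator.
 */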
static void update_FCP_CMND_indx(struct fc_info *fi)
	fi->q.fcp_cmnd_indx++;
	if (fi->q.fcp_cmnd_indx == NO_OF_FCP_CMNDS)
		fi->q.fcp_cmnd_indx = 0;

static int get_scsi_oxid(struct fc_info *fi)
	u_short initial_oxid = fi->g.scsi_oxid;
	/* Check if the OX_ID is in use.
	 * We could have an outstanding SCSI command.
	 */
	while (fi->q.free_scsi_oxid[fi->g.scsi_oxid] != OXID_AVAILABLE) {
		update_scsi_oxid(fi);
		if (fi->g.scsi_oxid == initial_oxid) {
			T_MSG("No free OX_IDs available")
			reset_tachyon(fi, SOFTWARE_RESET);

static void update_scsi_oxid(struct fc_info *fi)
	if (fi->g.scsi_oxid == (MAX_SCSI_XID + 1))
		fi->g.scsi_oxid = 0;

static int get_free_SDB(struct fc_info *fi)
	unsigned int initial_indx = fi->q.sdb_indx;
	/* Check if the SDB is in use.
	 * We could have an outstanding SCSI Read command.
	 * We should find a free slot as we can queue a
	 * maximum of 32 SCSI commands only.
	 */
	while (fi->q.sdb_slot_status[fi->q.sdb_indx] != SDB_FREE) {
		update_SDB_indx(fi);
		if (fi->q.sdb_indx == initial_indx) {
			T_MSG("No free SDB buffers available")
			reset_tachyon(fi, SOFTWARE_RESET);

static void update_SDB_indx(struct fc_info *fi)
	if (fi->q.sdb_indx == NO_OF_SDB_ENTRIES)
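/* get_scsi_oxid() and get_free_SDB() above share the same ring-allocator
 * idiom: remember the starting index, walk forward (wrapping through the
 * update_*_indx() helpers) until a free slot turns up, and if the scan
 * gets back to the starting index treat it as resource exhaustion and
 * fall back to reset_tachyon(fi, SOFTWARE_RESET).
 */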
int iph5526_release(struct Scsi_Host *host)
	struct iph5526_hostdata *hostdata = (struct iph5526_hostdata *)host->hostdata;
	struct fc_info *fi = hostdata->fi;
	free_irq(host->irq, host);
	iounmap(fi->g.mem_base);

const char *iph5526_info(struct Scsi_Host *host)
	static char buf[80];
	sprintf(buf, "Interphase 5526 Fibre Channel PCI SCSI Adapter using IRQ %d\n", host->irq);
#define NAMELEN		8	/* # of chars for storing dev->name */

static struct net_device *dev_fc[MAX_FC_CARDS];

static int bad;			/* 0xbad = bad sig or no reset ack */
static int scsi_registered;

static int __init iph5526_init(void)
	driver_template.module = THIS_MODULE;
	scsi_register_host(&driver_template);
	if (driver_template.present)
		scsi_registered = TRUE;
	printk("iph5526: SCSI registration failed!!!\n");
	scsi_registered = FALSE;
	scsi_unregister_host(&driver_template);

	while (fc[i] != NULL) {
		struct net_device *dev = alloc_fcdev(0);
			printk("iph5526.c: init_fcdev failed for card #%d\n", i + 1);
		iph5526_probe_pci(dev);
		err = register_netdev(dev);
			printk("iph5526.c: init_fcdev failed for card #%d\n", i + 1);

static void __exit iph5526_exit(void)
	while (fc[i] != NULL) {
		struct net_device *dev = fc[i]->dev;
		void *priv = dev->priv;
		fc[i]->g.dont_init = TRUE;
		take_tachyon_offline(fc[i]);
		unregister_netdev(dev);
		clean_up_memory(fc[i]);

	if (scsi_registered == TRUE)
		scsi_unregister_host(&driver_template);

module_init(iph5526_init);
module_exit(iph5526_exit);
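/* Bring-up order as wired above: iph5526_init() registers the SCSI host
 * template first, which makes the mid-layer call iph5526_detect() and
 * populate fc[]; only then is a net_device allocated with alloc_fcdev()
 * and registered per detected card, so the IP side attaches to fc_info
 * structures the SCSI probe has already initialized.
 */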
void clean_up_memory(struct fc_info *fi)
	ENTER("clean_up_memory");
	if (fi->q.ptr_mfsbq_base)
		free_pages((u_long)bus_to_virt(ntohl(*(fi->q.ptr_mfsbq_base))), 5);
	DPRINTK("after kfree2");
	for (i = 0; i < SFSBQ_LENGTH; i++)
		for (j = 0; j < NO_OF_ENTRIES; j++)
			if (fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j])
				kfree(fi->q.ptr_sfs_buffers[i*NO_OF_ENTRIES + j]);
	DPRINTK("after kfree1");
	if (fi->q.ptr_ocq_base)
		free_page((u_long)fi->q.ptr_ocq_base);
	if (fi->q.ptr_imq_base)
		free_page((u_long)fi->q.ptr_imq_base);
	if (fi->q.ptr_mfsbq_base)
		free_page((u_long)fi->q.ptr_mfsbq_base);
	if (fi->q.ptr_sfsbq_base)
		free_page((u_long)fi->q.ptr_sfsbq_base);
	if (fi->q.ptr_edb_base)
		free_pages((u_long)fi->q.ptr_edb_base, 5);
	if (fi->q.ptr_sest_base)
		free_pages((u_long)fi->q.ptr_sest_base, 5);
	if (fi->q.ptr_tachyon_header_base)
		free_page((u_long)fi->q.ptr_tachyon_header_base);
	if (fi->q.ptr_sdb_base)
		free_pages((u_long)fi->q.ptr_sdb_base, 5);
	if (fi->q.ptr_fcp_cmnd_base)
		free_page((u_long)fi->q.ptr_fcp_cmnd_base);
	DPRINTK("after free_pages");
	if (fi->q.ptr_host_ocq_cons_indx)
		kfree(fi->q.ptr_host_ocq_cons_indx);
	if (fi->q.ptr_host_hpcq_cons_indx)
		kfree(fi->q.ptr_host_hpcq_cons_indx);
	if (fi->q.ptr_host_imq_prod_indx)
		kfree(fi->q.ptr_host_imq_prod_indx);
	DPRINTK("after kfree3");
	while (fi->node_info_list) {
		struct fc_node_info *temp_list = fi->node_info_list;
		fi->node_info_list = fi->node_info_list->next;
	while (fi->ox_id_list) {
		struct ox_id_els_map *temp = fi->ox_id_list;
		fi->ox_id_list = fi->ox_id_list->next;
	LEAVE("clean_up_memory");
static int initialize_register_pointers(struct fc_info *fi)
	ENTER("initialize_register_pointers");
	if (fi->g.tachyon_base == 0)

	fi->i_r.ptr_ichip_hw_control_reg = ICHIP_HW_CONTROL_REG_OFF + fi->g.tachyon_base;
	fi->i_r.ptr_ichip_hw_status_reg = ICHIP_HW_STATUS_REG_OFF + fi->g.tachyon_base;
	fi->i_r.ptr_ichip_hw_addr_mask_reg = ICHIP_HW_ADDR_MASK_REG_OFF + fi->g.tachyon_base;
	fi->t_r.ptr_ocq_base_reg = OCQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_ocq_len_reg = OCQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_ocq_prod_indx_reg = OCQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_ocq_cons_indx_reg = OCQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_imq_base_reg = IMQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_imq_len_reg = IMQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_imq_cons_indx_reg = IMQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_imq_prod_indx_reg = IMQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_mfsbq_base_reg = MFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_mfsbq_len_reg = MFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_mfsbq_prod_reg = MFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_mfsbq_cons_reg = MFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_mfsbuff_len_reg = MFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sfsbq_base_reg = SFSBQ_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sfsbq_len_reg = SFSBQ_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sfsbq_prod_reg = SFSBQ_PRODUCER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sfsbq_cons_reg = SFSBQ_CONSUMER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sfsbuff_len_reg = SFS_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sest_base_reg = SEST_BASE_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_sest_len_reg = SEST_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_scsibuff_len_reg = SCSI_LENGTH_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_tach_config_reg = TACHYON_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_tach_control_reg = TACHYON_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_tach_status_reg = TACHYON_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_tach_flush_oxid_reg = TACHYON_FLUSH_SEST_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_config_reg = FMGR_CONFIG_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_control_reg = FMGR_CONTROL_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_status_reg = FMGR_STATUS_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_tov_reg = FMGR_TIMER_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_wwn_hi_reg = FMGR_WWN_HI_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_wwn_low_reg = FMGR_WWN_LO_REGISTER_OFFSET + fi->g.tachyon_base;
	fi->t_r.ptr_fm_rx_al_pa_reg = FMGR_RCVD_ALPA_REGISTER_OFFSET + fi->g.tachyon_base;

	LEAVE("initialize_register_pointers");
 * compile-command: "gcc -DKERNEL -Wall -O6 -fomit-frame-pointer -I/usr/src/linux/net/tcp -c iph5526.c"
 * version-control: t
 * kept-new-versions: 5