/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * Filesystem request handling methods
 */
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
static struct frame *
getframe(struct aoetgt *t, int tag)
{
	struct frame *f, *e;

	f = t->frames;
	e = f + t->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}
/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static u32
newtag(struct aoetgt *t)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++t->lasttag & 0x7fff) << 16;
}
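/*
 * Illustration (not in the original source): a tag returned by newtag()
 * is laid out as
 *
 *	bit  31      always 0, so userland tools keep their own tagspace
 *	bits 30..16  ++t->lasttag & 0x7fff, a per-target sequence number
 *	bits 15..0   jiffies & 0xffff, the xmit tick that tsince()
 *	             subtracts from the current tick to estimate RTT
 */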
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(t);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
static void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}
static void
ifrotate(struct aoetgt *t)
{
	t->ifp++;
	if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
		t->ifp = t->ifs;
	if (t->ifp->nd == NULL) {
		printk(KERN_INFO "aoe: no interface to rotate to\n");
		BUG();
	}
}
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}
/* freeframe is where we do our load balancing so it's a little hairy. */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e, *rf;
	struct aoetgt **t;
	struct sk_buff *skb;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	t = d->tgt;	/* last used target */
	t++;
	if (t >= &d->targets[NTARGETS] || !*t)
		t = d->targets;
	for (;;) {
		if ((*t)->nout < (*t)->maxout
		&& t != d->htgt
		&& (f = (*t)->frames) != NULL) {
			rf = NULL;
			e = f + (*t)->nframes;
			for (; f < e; f++) {
				if (f->tag != FREETAG)
					continue;
				skb = f->skb;
				if (!skb
				&& !(f->skb = skb = new_skb(ETH_ZLEN)))
					continue;
				if (atomic_read(&skb_shinfo(skb)->dataref)
					!= 1) {
					if (!rf)
						rf = f;
					continue;
				}
gotone:				skb_shinfo(skb)->nr_frags = skb->data_len = 0;
				skb_trim(skb, 0);
				d->tgt = t;
				ifrotate(*t);
				return f;
			}
			/* Work can be done, but the network layer is
			   holding our precious packets.  Try to grab
			   one from the pool. */
			f = rf;
			if (f == NULL) {	/* more paranoia */
				printk(KERN_ERR
					"aoe: freeframe: %s.\n",
					"unexpected null rf");
				d->flags |= DEVFL_KICKME;
				return NULL;
			}
			skb = skb_pool_get(d);
			if (skb) {
				skb_pool_put(d, f->skb);
				f->skb = skb;
				goto gotone;
			}

			if ((*t)->nout == 0)
				d->flags |= DEVFL_KICKME;
		}
		if (t == d->tgt)	/* we've looped and found nada */
			break;
		t++;
		if (t >= &d->targets[NTARGETS] || !*t)
			t = d->targets;
	}
	return NULL;
}
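/*
 * Illustrative note (not original text): skb_shinfo(skb)->dataref == 1
 * means this driver holds the only reference to the skb's data.  Each
 * transmit clones the skb, so dataref stays elevated until the network
 * layer frees its clone; only then may the frame be reused in place.
 */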
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct bio_vec *bv;
	struct aoetgt *t;
	struct sk_buff *skb;
	ulong bcnt;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	f = freeframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	buf = d->inprocess;
	bv = buf->bv;
	bcnt = t->ifp->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->bv_resid)
		bcnt = buf->bv_resid;
	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bv_off += bcnt;
	buf->bv_resid -= bcnt;
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv = ++bv;
		buf->bv_resid = bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
		buf->bv_off = bv->bv_offset;
	}

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb)
		__skb_queue_tail(&d->sendq, skb);
	return 1;
}
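/*
 * Worked example (illustrative): with bcnt = 8192, ah->scnt = bcnt >> 9
 * asks for 16 sectors of 512 bytes.  WIN_READ (0x20) becomes a write
 * command when writebit (0x10) is OR-ed in, and an LBA48 "ext" opcode
 * when extbit (0x04) survives; for LBA28 targets the 0xe0 written into
 * lba3 sets the LBA flag plus the two obsolete 0xa0 bits of the ATA
 * device/head register.
 */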
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);
}
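/*
 * Usage sketch (illustrative; aoecmd_cfg() below follows this shape):
 * a caller that cannot sleep builds the packets under its own locking
 * and transmits them afterwards:
 *
 *	struct sk_buff_head queue;
 *
 *	__skb_queue_head_init(&queue);
 *	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
 *	aoenet_xmit(&queue);
 */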
static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	ifrotate(t);
	n = newtag(t);
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		h->src, h->dst, t->nout);
	aoechr_error(buf);

	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	switch (ah->cmdstat) {
	default:
		break;
	case WIN_READ:
	case WIN_READ_EXT:
	case WIN_WRITE:
	case WIN_WRITE_EXT:
		/* put lba and scnt back */
		put_lba(ah, f->lba);
		n = f->bcnt;
		if (n > DEFAULTBCNT)
			n = DEFAULTBCNT;
		ah->scnt = n >> 9;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), n);
			skb->len = sizeof *h + sizeof *ah + n;
			skb->data_len = n;
		}
	}
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	__skb_queue_tail(&d->sendq, skb);
}
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}
static struct aoeif *
addif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p;

	p = getif(t, NULL);
	if (!p)
		return NULL;
	p->nd = nd;
	p->maxbcnt = DEFAULTBCNT;
	p->lost = 0;
	p->lostjumbo = 0;
	return p;
}
static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	ulong n;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
}
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *e, *nf;
	struct sk_buff *skb;
	struct aoetgt *ht = *d->htgt;

	f = ht->frames;
	e = f + ht->nframes;
	for (; f < e; f++) {
		if (f->tag == FREETAG)
			continue;
		nf = freeframe(d);
		if (!nf)
			return 0;
		skb = nf->skb;
		*nf = *f;
		f->skb = skb;
		f->tag = FREETAG;
		nf->waited = 0;
		ht->nout--;
		(*d->tgt)->nout++;
		resend(d, *d->tgt, nf);
	}
	/* he's clean, he's useless.  take away his interfaces */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;
	return 1;
}
static inline unsigned char
ata_scnt(unsigned char *packet) {
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;

	h = (struct aoe_hdr *) packet;
	ah = (struct aoe_atahdr *) (h+1);
	return ah->scnt;
}
static void
rexmit_timer(ulong vp)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f, *e;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++) {
		t = *tt;
		f = t->frames;
		e = f + t->nframes;
		for (; f < e; f++) {
			if (f->tag == FREETAG
			|| tsince(f->tag) < timeout)
				continue;
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) {
				/* waited too long.  device failure. */
				aoedev_downdev(d);
				break;
			}

			if (n > HELPWAIT /* see if another target can help */
			&& (tt != d->targets || d->targets[1]))
				d->htgt = tt;

			if (t->nout == t->maxout) {
				if (t->maxout > 1)
					t->maxout--;
				t->lastwadj = jiffies;
			}

			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}

			if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
			&& ifp && ++ifp->lostjumbo > (t->nframes << 1)
			&& ifp->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO
					"aoe: e%ld.%d: "
					"too many lost jumbo on "
					"%s:%pm - "
					"falling back to %d frames.\n",
					d->aoemajor, d->aoeminor,
					ifp->nd->name, t->addr,
					DEFAULTBCNT);
				ifp->maxbcnt = 0;
			}
			resend(d, t, f);
		}

		/* window check */
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!skb_queue_empty(&d->sendq)) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	if (d->flags & DEVFL_KICKME || d->htgt) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(&queue);
}
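/*
 * Worked example (illustrative): with d->rttavg at 100 ticks, the
 * timeout above becomes 100 + (100 >> 1) = 150 ticks, so a frame is
 * retransmitted once it has waited ~150% of the smoothed round-trip
 * estimate.
 */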
/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct buf *buf;
loop:
	if (d->htgt && !sthtith(d))
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		d->inprocess = buf;
	}
	if (aoecmd_ata_rw(d))
		goto loop;
}
/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
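/*
 * Illustrative note (not original text): aoeblk_gdalloc() and the
 * i_mutex-protected i_size_write() above may sleep, so interrupt-time
 * code (e.g. ataid_complete() below) only sets DEVFL_GDALLOC or
 * DEVFL_NEWSIZE and calls schedule_work(); the deferred work then runs
 * here in process context.
 */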
static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}
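/*
 * Worked example (illustrative): the IDENTIFY DEVICE payload is 256
 * little-endian 16-bit words, so word N starts at byte offset N << 1,
 * hence get_unaligned_le16(&id[83 << 1]) above.  A 1 TB LBA48 target
 * reporting 1953525168 sectors gets the synthesized geometry
 * cylinders = 1953525168 / (255 * 63) = 121601.
 */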
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
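/*
 * Worked example (illustrative): with gain g = .25 the update is
 * rttavg += (rtt - rttavg) >> 2.  From rttavg = 800 ticks, a sample of
 * 400 moves the average to 800 + (400 - 800) / 4 = 700, so a single
 * outlier shifts the estimate by only a quarter of its error.
 */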
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}
static void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector)
{
	unsigned long n_sect = bio->bi_size >> 9;
	const int rw = bio_data_dir(bio);
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(disk, sector);

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_add(cpu, part, sectors[rw], n_sect);
	part_stat_add(cpu, part, io_ticks, duration);

	part_stat_unlock();
}
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct aoetgt *t;
	struct aoeif *ifp;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb_mac_header(skb);
	aoemajor = get_unaligned_be16(&hin->major);
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = get_unaligned_be32(&hin->tag);
	t = gettgt(d, hin->src);
	if (t == NULL) {
		printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n",
			d->aoemajor, d->aoeminor, hin->src);
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = getframe(t, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&hin->major),
			hin->minor,
			get_unaligned_be32(&hin->tag),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
			d->htgt = NULL;
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: %s. skb->len=%d need=%ld\n",
					"runt data size in read", skb->len, n);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			ifp = getif(t, skb->dev);
			if (ifp) {
				ifp->lost = 0;
				if (n > DEFAULTBCNT)
					ifp->lostjumbo = 0;
			}
			if (f->bcnt -= n) {
				f->lba += n >> 9;
				f->bufaddr += n;
				resend(d, t, f);
				goto xmit;
			}
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid. skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, t, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				get_unaligned_be16(&hin->major),
				hin->minor);
		}
	}

	if (buf && --buf->nframesout == 0 && buf->resid == 0) {
		diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
		bio_endio(buf->bio, n);
		mempool_free(buf, d->bufpool);
	}

	f->buf = NULL;
	f->tag = FREETAG;
	t->nout--;

	aoecmd_work(d);
xmit:
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);
}
void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = freeframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;
	struct frame *f, *e;

	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kcalloc(1, sizeof *t, GFP_ATOMIC);
	f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
	if (!t || !f) {
		kfree(f);
		kfree(t);
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	t->nframes = nframes;
	t->frames = f;
	e = f + nframes;
	for (; f < e; f++)
		f->tag = FREETAG;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	t->maxout = t->nframes;
	return *tt = t;
}
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	struct aoeif *ifp;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	u16 n;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_sysminor_m(sysminor);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (!t) {
		t = addtgt(d, h->src, n);
		if (!t) {
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	ifp = getif(t, skb->dev);
	if (!ifp) {
		ifp = addif(t, skb->dev);
		if (!ifp) {
			printk(KERN_INFO
				"aoe: device addif failure; "
				"too many interfaces?\n");
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	if (ifp->maxbcnt) {
		n = ifp->nd->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != ifp->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%d: setting %d%s%s:%pm\n",
				d->aoemajor, d->aoeminor, n,
				" byte data frames on ", ifp->nd->name,
				t->addr);
			ifp->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->fw_ver = be16_to_cpu(ch->fwver);

	sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	if (sl) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}
void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct aoeif *p, *e;

	d->mintimer = MINTIMER;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		(*t)->maxout = (*t)->nframes;
		p = (*t)->ifs;
		e = p + NAOEIFS;
		for (; p < e; p++) {
			p->lostjumbo = 0;
			p->lost = 0;
			p->maxbcnt = DEFAULTBCNT;
		}
	}
}