/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * Filesystem request handling methods
 */
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <linux/unaligned.h>
#include <linux/uio.h>

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */
static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");
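
/* Example usage (illustrative; assumes the module is built as aoe.ko):
 *
 *	modprobe aoe aoe_deadsecs=300 aoe_maxout=128
 *
 * Because both parameters are registered with mode 0644, they can also be
 * changed at runtime under /sys/module/aoe/parameters/.
 */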
/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion.  It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
/* mutex lock used for synchronization while thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};

static struct iocq_ktio *iocq;

static struct page *empty_page;
static struct sk_buff *
	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
		skb_reserve(skb, MAX_HEADER);
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
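
/* Frame lookup helpers: getframe_deferred() and getframe() search for an
 * outstanding frame by tag (getframe() hashes the tag into d->factive[])
 * and take it off its list before returning it.
 */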
getframe_deferred(struct aoedev *d, u32 tag)
	struct list_head *head, *pos, *nx;

	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
getframe(struct aoedev *d, u32 tag)
	struct list_head *head, *pos, *nx;

	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
newtag(struct aoedev *d)
	n = jiffies & 0xffff;
	return n | (++d->lasttag & 0x7fff) << 16;
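
/* aoehdr_atainit() stamps the AoE header with the source and destination
 * MAC addresses, the device's major/minor shelf address, and a fresh tag
 * from newtag(), returning that tag so the caller can track the frame.
 */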
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->tag = cpu_to_be32(host_tag);
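
/* put_lba() stores the LBA one byte per ah->lbaN field, shifting the
 * least-significant byte out first.
 */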
put_lba(struct aoe_atahdr *ah, sector_t lba)
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
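
/* ifrotate() advances the target's current interface pointer through
 * t->ifs[], wrapping back to the start at NAOEIFS or the first unused slot.
 */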
static struct aoeif *
ifrotate(struct aoetgt *t)
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
	__skb_queue_tail(&d->skbpool, skb);

static struct sk_buff *
skb_pool_get(struct aoedev *d)
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
aoe_freetframe(struct frame *f)
	memset(&f->iter, 0, sizeof(f->iter));
	list_add(&f->head, &t->ffree);
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		f = list_entry(pos, struct frame, head);
		f->skb = skb = new_skb(ETH_ZLEN);
bail:			aoe_freetframe(f);
	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		skb_pool_put(d, f->skb);
	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
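
/* newframe() picks the next target in round-robin order, preferring
 * untainted targets with transmit slots free, and allocates a frame for it
 * via newtframe(); when nothing is usable it sets DEVFL_KICKME for later.
 */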
static struct frame *
newframe(struct aoedev *d)
	struct aoetgt *t, **tt;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		if (tt >= &d->targets[d->ntargets] || !*tt)
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
		d->flags |= DEVFL_KICKME;
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
	__bio_for_each_segment(bv, bio, iter, iter)
		skb_fill_page_desc(skb, frag++, bv.bv_page,
				   bv.bv_offset, bv.bv_len);
fhash(struct frame *f)
	struct aoedev *d = f->t->d;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
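
/* ata_rw_frameinit() builds the AoE/ATA command for the I/O described by
 * f->iter: the sector count and LBA come from the iterator, and for writes
 * the bio pages are attached to the skb as fragments by skb_fillup().
 */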
ata_rw_frameinit(struct frame *f)
	struct aoe_atahdr *ah;
	char writebit, extbit;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(t->d, t, h);

	/* set up ata header */
	ah->scnt = f->iter.bi_size >> 9;
	put_lba(ah, f->iter.bi_sector);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->buf->bio, f->iter);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->iter.bi_size;
		skb->data_len = f->iter.bi_size;
		skb->truesize += f->iter.bi_size;
	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	dev_hold(t->ifp->nd);
	skb->dev = t->ifp->nd;
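
/* aoecmd_ata_rw() carves one frame's worth of work off the current buf,
 * initializes and transmits it, and reports whether more work remains;
 * aoecmd_work() simply loops on it.
 */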
aoecmd_ata_rw(struct aoedev *d)
	struct sk_buff_head queue;

	/* initialize the headers & frame */
	f->iter.bi_size = min_t(unsigned long,
		d->maxbcnt ?: DEFAULTBCNT,
	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
	if (!buf->iter.bi_size)

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	skb = skb_clone(f->skb, GFP_ATOMIC);
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		dev_put(f->t->ifp->nd);
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
	struct aoe_cfghdr *ch;
	struct net_device *ifp;

	for_each_netdev_rcu(&init_net, ifp) {
		if (!is_aoe_netif(ifp)) {
		skb = new_skb(sizeof *h + sizeof *ch);
			printk(KERN_INFO "aoe: skb alloc failure\n");
		skb_put(skb, sizeof *h + sizeof *ch);
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);
		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->major = cpu_to_be16(aoemajor);
resend(struct aoedev *d, struct frame *f)
	struct sk_buff_head queue;

	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
	h = (struct aoe_hdr *) skb_mac_header(skb);
	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			h->src, h->dst, t->nout);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	dev_hold(t->ifp->nd);
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	f->sent = ktime_get();
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
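
/* tsince_hr() reports microseconds since f->sent using ktime; tsince()
 * derives a coarser estimate from the 16-bit send tick carried in the tag.
 */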
tsince_hr(struct frame *f)
	u64 delta = ktime_to_ns(ktime_sub(ktime_get(), f->sent));

	/* delta is normally under 4.2 seconds, avoid 64-bit division */
	if (likely(delta <= UINT_MAX))
		return (u32)delta / NSEC_PER_USEC;

	/* avoid overflow after 71 minutes */
	if (delta > ((u64)INT_MAX * NSEC_PER_USEC))

	return div_u64(delta, NSEC_PER_USEC);

	n = jiffies & 0xffff;
	return jiffies_to_usecs(n + 1);
static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
ejectif(struct aoetgt *t, struct aoeif *ifp)
	struct net_device *nd;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
static struct frame *
reassign_frame(struct frame *f)
	nf = newframe(f->t->d);
	nf->waited_total = f->waited_total;
probe(struct aoetgt *t)
	struct sk_buff_head queue;

		pr_err("%s %pm for e%ld.%d: %s\n",
			"aoe: cannot probe remote address",
			(long) d->aoemajor, d->aoeminor,
			"no frame available");
	f->flags |= FFL_PROBE;
	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	skb->len += f->iter.bi_size;
	skb->data_len = f->iter.bi_size;
	skb->truesize += f->iter.bi_size;
	skb = skb_clone(f->skb, GFP_ATOMIC);
		f->sent = ktime_get();
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		dev_put(f->t->ifp->nd);
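
/* rto() derives the retransmit timeout from the smoothed round-trip
 * average and deviation kept in d->rttavg and d->rttdev (cf. the
 * Jacobson/Karels scheme referenced in calc_rttavg()).
 */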
rto(struct aoedev *d)
	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
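
/* rexmit_deferred() drains the device's deferred retransmit list: normal
 * frames are reassigned to a healthier target when possible, probes are
 * throttled according to the target's taint, and frames that cannot go out
 * yet leave DEVFL_KICKME set so the work is retried later.
 */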
rexmit_deferred(struct aoedev *d)
	struct list_head *pos, *nx, *head;

	count_targets(d, &untainted);
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (!(f->flags & FFL_PROBE)) {
			nf = reassign_frame(f);
				if (t->nout_probes == 0
			list_replace(&f->head, &nf->head);
		} else if (untainted < 1) {
			/* don't probe w/o other untainted aoetgts */
		} else if (tsince_hr(f) < t->taint * rto(d)) {
			/* reprobe slowly when taint is high */
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
		if (t->nout >= t->maxout)
		if (f->flags & FFL_PROBE)
		since = tsince_hr(f);
		f->waited_total += since;
/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
scorn(struct aoetgt *t)
	t->taint += t->taint * 2;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
count_targets(struct aoedev *d, int *untainted)
	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
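
/* rexmit_timer() runs once per TIMERTICK: frames that have waited longer
 * than the current timeout are collected, their targets are scored, and the
 * frames are moved to d->rexmitq for rexmit_deferred() to resend.
 */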
rexmit_timer(struct timer_list *timer)
	struct list_head *head, *pos, *nx;
	register long timeout;
	int utgts;	/* number of aoetgt descriptors (not slots) */

	d = from_timer(d, timer, timer);
	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);

	/* process expired frames */
	while (!list_empty(&flist)) {
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 */
			list_splice(&flist, &d->factive[0]);
		n = f->waited + since;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t); /* avoid this target */
		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
		if (f->flags & FFL_PROBE) {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
		list_move_tail(pos, &d->rexmitq);
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		blk_mq_run_hw_queues(d->blkq, true);
	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
	memset(buf, 0, sizeof(*buf));
	buf->iter = bio->bi_iter;
nextbuf(struct aoedev *d)
	struct request_queue *q;

		return NULL; /* initializing */
		rq = list_first_entry_or_null(&d->rq_list, struct request,
		list_del_init(&rq->queuelist);
		blk_mq_start_request(rq);
		d->ip.nxbio = rq->bio;

		req = blk_mq_rq_to_pdu(rq);
		__rq_for_each_bio(bio, rq)
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
	bufinit(buf, rq, bio);
	return d->ip.buf = buf;
/* enters with d->lock held */
aoecmd_work(struct aoedev *d)
	while (aoecmd_ata_rw(d))
/* this function performs work that has been deferred until sleeping is OK
 */
aoecmd_sleepwork(struct work_struct *work)
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
	if (d->flags & DEVFL_NEWSIZE) {
		set_capacity_and_notify(d->gd, d->ssize);

		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
ata_ident_fixstring(u16 *id, int ns)
		*id++ = s >> 8 | s << 8;
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);

	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		d->flags |= DEVFL_NEWSIZE;
		d->flags |= DEVFL_GDALLOC;
	queue_work(aoe_wq, &d->work);
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	n -= d->rttdev >> RTTDSCALE;

	if (!t || t->maxout >= t->nframes)
	if (t->maxout < t->ssthresh)
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->next_cwnd = t->maxout;
static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
	struct aoetgt **t, **e;

	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
	__bio_for_each_segment(bv, bio, iter, iter) {
		char *p = bvec_kmap_local(&bv);
		skb_copy_bits(skb, soff, p, bv.bv_len);
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
	struct request_queue *q;
	blk_status_t err = BLK_STS_OK;

		bok = !fastfail && !bio->bi_status;
			err = BLK_STS_IOERR;
	} while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));

	__blk_mq_end_request(rq, err);

	/* cf. https://lore.kernel.org/lkml/20061031071040.GS14055@kernel.dk/ */
	blk_mq_run_hw_queues(q, true);
aoe_end_buf(struct aoedev *d, struct buf *buf)
	struct request *rq = buf->rq;
	struct aoe_req *req = blk_mq_rq_to_pdu(rq);

	if (buf == d->ip.buf)
	mempool_free(buf, d->bufpool);
	if (--req->nr_bios == 0)
		aoe_end_request(d, rq, 0);
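
/* ktiocomplete() handles one AoE response in ktio context: it checks the
 * ATA status bits, copies read data back into the bio via bvcpy(), feeds
 * identify data to ataid_complete(), and completes the buf when its last
 * outstanding frame returns.
 */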
ktiocomplete(struct frame *f)
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct sk_buff *skb;

	if (f->flags & FFL_PROBE)
	if (!skb)		/* just fail the buf. */

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
			buf->bio->bi_status = BLK_STS_IOERR;

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
			pr_err("%s e%ld.%d.  skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
			buf->bio->bi_status = BLK_STS_IOERR;
		if (n > f->iter.bi_size) {
			pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
				"aoe: too-large data size in read from",
				(long) d->aoemajor, d->aoeminor,
				n, f->iter.bi_size);
			buf->bio->bi_status = BLK_STS_IOERR;
		bvcpy(skb, f->buf->bio, f->iter, n);
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		spin_unlock_irq(&d->lock);
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d.  skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
		if (skb_linearize(skb))
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			be16_to_cpu(get_unaligned(&hin->major)),

	spin_lock_irq(&d->lock);
	    && t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
	struct list_head *pos;

	for (i = 0; ; ++i) {
		if (list_empty(&iocq[id].head))
		pos = iocq[id].head.next;
		f = list_entry(pos, struct frame, head);
		spin_unlock_irq(&iocq[id].lock);

		/* Figure out if extra threads are required. */
		actual_id = f->t->d->aoeminor % ncpus;
		if (!kts[actual_id].active) {
			mutex_lock(&ktio_spawn_lock);
			if (!kts[actual_id].active
				&& aoe_ktstart(&kts[actual_id]) == 0)
				kts[actual_id].active = 1;
			mutex_unlock(&ktio_spawn_lock);
		spin_lock_irq(&iocq[id].lock);
	DECLARE_WAITQUEUE(wait, current);

	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
		spin_lock_irq(k->lock);
		more = k->fn(k->id);
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(k->lock);
			remove_wait_queue(k->waitq, &wait);
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
aoe_ktstop(struct ktstate *k)
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
aoe_ktstart(struct ktstate *k)
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, "%s", k->name);
	if (task == NULL || IS_ERR(task))
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
/* pass it off to kthreads for processing */
ktcomplete(struct frame *f, struct sk_buff *skb)
	id = f->t->d->aoeminor % ncpus;
	spin_lock_irqsave(&iocq[id].lock, flags);
	if (!kts[id].active) {
		spin_unlock_irqrestore(&iocq[id].lock, flags);
		/* The thread with id has not been spawned yet,
		 * so delegate the work to the main thread and
		 * try spawning a new thread.
		 */
		spin_lock_irqsave(&iocq[id].lock, flags);
	list_add_tail(&f->head, &iocq[id].head);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	wake_up(&ktiowq[id]);
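
/* aoecmd_ata_rsp() runs in network receive context: it matches the response
 * tag to an outstanding frame, updates the RTT estimate, and hands the frame
 * and skb to ktcomplete() for the ktio threads.
 */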
aoecmd_ata_rsp(struct sk_buff *skb)
	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
		calc_rttavg(d, f->t, tsince_hr(f));
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
		f = getframe_deferred(d, n);
			calc_rttavg(d, NULL, tsince_hr(f));
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				get_unaligned_be16(&h->major),
				get_unaligned_be32(&h->tag),

	spin_unlock_irqrestore(&d->lock, flags);
	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
aoecmd_ata_id(struct aoedev *d)
	struct aoe_atahdr *ah;
	struct sk_buff *skb;

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	f->waited_total = 0;

	/* set up ata header */
	ah->cmdstat = ATA_CMD_ID_ATA;

	dev_hold(t->ifp->nd);
	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
		f->sent = ktime_get();
		dev_put(t->ifp->nd);
static struct aoetgt **
grow_targets(struct aoedev *d)
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	return &d->targets[oldn];
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
	struct aoetgt *t, **tt, **te;

	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		tt = grow_targets(d);
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	t->nframes = nframes;
	memcpy(t->addr, addr, sizeof t->addr);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	pr_info("aoe: cannot allocate memory to add target\n");
setdbcnt(struct aoedev *d)
	struct aoetgt **t, **e;

	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
	struct aoeif *p, *e;

	for (; p < e; p++) {
			break;	/* end of the valid interfaces */
			p->bcnt = bcnt;	/* we're updating */
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
	t->minbcnt = minbcnt;
aoecmd_cfg_rsp(struct sk_buff *skb)
	struct aoe_cfghdr *ch;
	ulong flags, aoemajor;
	struct sk_buff_head queue;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
		pr_info("aoe: device allocation failure\n");

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
		t = addtgt(d, h->src, n);
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	spin_unlock_irqrestore(&d->lock, flags);
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
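
/* aoecmd_wreset() and aoecmd_cleanslate() reset a target's congestion
 * window and the device's RTT estimates to their initial values.
 */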
aoecmd_wreset(struct aoetgt *t)
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
aoecmd_cleanslate(struct aoedev *d)
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;

	te = t + d->ntargets;
	for (; t < te && *t; t++)
++)
1631 aoe_failbuf(struct aoedev
*d
, struct buf
*buf
)
1635 buf
->iter
.bi_size
= 0;
1636 buf
->bio
->bi_status
= BLK_STS_IOERR
;
1637 if (buf
->nframesout
== 0)
1638 aoe_end_buf(d
, buf
);
1642 aoe_flush_iocq(void)
1646 for (i
= 0; i
< ncpus
; i
++) {
1648 aoe_flush_iocq_by_index(i
);
1653 aoe_flush_iocq_by_index(int id
)
1658 struct list_head
*pos
;
1659 struct sk_buff
*skb
;
1662 spin_lock_irqsave(&iocq
[id
].lock
, flags
);
1663 list_splice_init(&iocq
[id
].head
, &flist
);
1664 spin_unlock_irqrestore(&iocq
[id
].lock
, flags
);
1665 while (!list_empty(&flist
)) {
1668 f
= list_entry(pos
, struct frame
, head
);
1671 spin_lock_irqsave(&d
->lock
, flags
);
1673 f
->buf
->nframesout
--;
1674 aoe_failbuf(d
, f
->buf
);
1677 spin_unlock_irqrestore(&d
->lock
, flags
);
1690 /* get_zeroed_page returns page with ref count 1 */
1691 p
= (void *) get_zeroed_page(GFP_KERNEL
);
1694 empty_page
= virt_to_page(p
);
1696 ncpus
= num_online_cpus();
1698 iocq
= kcalloc(ncpus
, sizeof(struct iocq_ktio
), GFP_KERNEL
);
1702 kts
= kcalloc(ncpus
, sizeof(struct ktstate
), GFP_KERNEL
);
1708 ktiowq
= kcalloc(ncpus
, sizeof(wait_queue_head_t
), GFP_KERNEL
);
1714 for (i
= 0; i
< ncpus
; i
++) {
1715 INIT_LIST_HEAD(&iocq
[i
].head
);
1716 spin_lock_init(&iocq
[i
].lock
);
1717 init_waitqueue_head(&ktiowq
[i
]);
1718 snprintf(kts
[i
].name
, sizeof(kts
[i
].name
), "aoe_ktio%d", i
);
1720 kts
[i
].waitq
= &ktiowq
[i
];
1721 kts
[i
].lock
= &iocq
[i
].lock
;
1726 if (aoe_ktstart(&kts
[0])) {
1747 for (i
= 0; i
< ncpus
; i
++)
1749 aoe_ktstop(&kts
[i
]);
1753 /* Free up the iocq and thread speicific configuration
1754 * allocated during startup.
1760 free_page((unsigned long) page_address(empty_page
));