/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

static unsigned int write_cluster_size = 64;
module_param(write_cluster_size, uint, S_IRUGO);
MODULE_PARM_DESC(write_cluster_size,
                 "Number of pages used for contiguous writes.");

#define CLUSTER_SIZE (write_cluster_size * PAGE_SIZE)

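/*
 * Writes smaller than CLUSTER_SIZE are handled as a read-modify-write
 * sequence: the affected cluster(s) are first read into a per-request
 * bounce buffer of 2 * write_cluster_size pages (a request may span two
 * adjacent clusters) and then written back with the request's data
 * merged in.
 */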
void __scm_free_rq_cluster(struct scm_request *scmrq)
{
        int i;

        if (!scmrq->cluster.buf)
                return;

        for (i = 0; i < 2 * write_cluster_size; i++)
                free_page((unsigned long) scmrq->cluster.buf[i]);

        kfree(scmrq->cluster.buf);
}

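/* Allocate the page array and the bounce pages used for cluster requests. */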
int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
        int i;

        scmrq->cluster.buf = kzalloc(sizeof(void *) * 2 * write_cluster_size,
                                     GFP_KERNEL);
        if (!scmrq->cluster.buf)
                return -ENOMEM;

        for (i = 0; i < 2 * write_cluster_size; i++) {
                scmrq->cluster.buf[i] = (void *) get_zeroed_page(GFP_DMA);
                if (!scmrq->cluster.buf[i])
                        return -ENOMEM;
        }
        INIT_LIST_HEAD(&scmrq->cluster.list);
        return 0;
}

void scm_request_cluster_init(struct scm_request *scmrq)
{
        scmrq->cluster.state = CLUSTER_NONE;
}

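/*
 * Convert the sector ranges of both requests to byte offsets and check
 * whether the cluster index ranges they touch overlap.
 */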
static bool clusters_intersect(struct scm_request *A, struct scm_request *B)
{
        unsigned long firstA, lastA, firstB, lastB;

        firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE;
        lastA = (((u64) blk_rq_pos(A->request) << 9) +
                    blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE;

        firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE;
        lastB = (((u64) blk_rq_pos(B->request) << 9) +
                    blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE;

        return (firstB <= lastA && firstA <= lastB);
}

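/*
 * Try to reserve the clusters touched by this request.  The reservation
 * fails if another queued request touches the same cluster and at least
 * one of the two is a write.
 */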
bool scm_reserve_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_request *iter;

        if (write_cluster_size == 0)
                return true;

        spin_lock(&bdev->lock);
        list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
                if (clusters_intersect(scmrq, iter) &&
                    (rq_data_dir(scmrq->request) == WRITE ||
                     rq_data_dir(iter->request) == WRITE)) {
                        spin_unlock(&bdev->lock);
                        return false;
                }
        }
        list_add(&scmrq->cluster.list, &bdev->cluster_list);
        spin_unlock(&bdev->lock);

        return true;
}

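/* Drop the cluster reservation once the request is finished. */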
void scm_release_cluster(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        if (write_cluster_size == 0)
                return;

        spin_lock_irqsave(&bdev->lock, flags);
        list_del(&scmrq->cluster.list);
        spin_unlock_irqrestore(&bdev->lock, flags);
}

void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
{
        INIT_LIST_HEAD(&bdev->cluster_list);
        blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
}

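/*
 * Set up the msb and aidaws for the current cluster state: in
 * CLUSTER_NONE/CLUSTER_READ the surrounding cluster(s) are read into
 * the bounce buffer; in CLUSTER_WRITE they are written back with the
 * request's pages patched into the indirect address list.
 */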
static void scm_prepare_cluster_request(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        struct scm_device *scmdev = bdev->gendisk->private_data;
        struct request *req = scmrq->request;
        struct aidaw *aidaw = scmrq->aidaw;
        struct msb *msb = &scmrq->aob->msb[0];
        struct req_iterator iter;
        struct bio_vec bv;
        int i = 0;
        u64 addr;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                scmrq->cluster.state = CLUSTER_READ;
                /* fall through */
        case CLUSTER_READ:
                scmrq->aob->request.msb_count = 1;
                msb->bs = MSB_BS_4K;
                msb->oc = MSB_OC_READ;
                msb->flags = MSB_FLAG_IDA;
                msb->data_addr = (u64) aidaw;
                msb->blk_count = write_cluster_size;

                addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
                msb->scm_addr = round_down(addr, CLUSTER_SIZE);

                if (msb->scm_addr !=
                    round_down(addr + (u64) blk_rq_bytes(req) - 1,
                               CLUSTER_SIZE))
                        msb->blk_count = 2 * write_cluster_size;

                for (i = 0; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }

                break;
        case CLUSTER_WRITE:
                msb->oc = MSB_OC_WRITE;

                for (addr = msb->scm_addr;
                     addr < scmdev->address + ((u64) blk_rq_pos(req) << 9);
                     addr += PAGE_SIZE) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                        i++;
                }
                rq_for_each_segment(bv, req, iter) {
                        aidaw->data_addr = (u64) page_address(bv.bv_page);
                        aidaw++;
                        i++;
                }
                for (; i < msb->blk_count; i++) {
                        aidaw->data_addr = (u64) scmrq->cluster.buf[i];
                        aidaw++;
                }
                break;
        }
}

bool scm_need_cluster_request(struct scm_request *scmrq)
{
        if (rq_data_dir(scmrq->request) == READ)
                return false;

        return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE;
}

/* Called with queue lock held. */
void scm_initiate_cluster_request(struct scm_request *scmrq)
{
        scm_prepare_cluster_request(scmrq);
        if (eadm_start_aob(scmrq->aob))
                scm_request_requeue(scmrq);
}

bool scm_test_cluster_request(struct scm_request *scmrq)
{
        return scmrq->cluster.state != CLUSTER_NONE;
}

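/*
 * Interrupt handler continuation for cluster requests: once the read
 * phase has completed successfully, restart the request in CLUSTER_WRITE
 * state; otherwise finish it.
 */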
void scm_cluster_request_irq(struct scm_request *scmrq)
{
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;

        switch (scmrq->cluster.state) {
        case CLUSTER_NONE:
                BUG();
                break;
        case CLUSTER_READ:
                if (scmrq->error) {
                        scm_request_finish(scmrq);
                        break;
                }
                scmrq->cluster.state = CLUSTER_WRITE;
                spin_lock_irqsave(&bdev->rq_lock, flags);
                scm_initiate_cluster_request(scmrq);
                spin_unlock_irqrestore(&bdev->rq_lock, flags);
                break;
        case CLUSTER_WRITE:
                scm_request_finish(scmrq);
                break;
        }
}

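/* write_cluster_size must be 0 (clustering disabled) or a power of two <= 128. */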
bool scm_cluster_size_valid(void)
{
        if (write_cluster_size == 1 || write_cluster_size > 128)
                return false;

        return !(write_cluster_size & (write_cluster_size - 1));
}