/*
 * drivers/s390/block/scm_blk.h
 */
#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

#define SCM_NR_PARTS 8		/* number of minors (partitions) per gendisk */
#define SCM_QUEUE_DELAY 5	/* request queue retry delay, in msecs */
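
/*
 * Per-device state of an SCM block device; one instance is set up for
 * each scm_device registered with the block layer (editor's gloss).
 */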
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};
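
/*
 * State of one asynchronous I/O request. A scm_request lives in the data
 * area of an aob_rq_header (see to_aobrq() below).
 */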
struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;
	struct aidaw *aidaw;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};

#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)
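
/*
 * Illustrative sketch (scmrq is assumed to be a struct scm_request pointer
 * allocated inside an aob_rq_header): to_aobrq() recovers the enclosing
 * header from such a payload pointer, e.g.
 *
 *	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 */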

int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
/* Stubs used when cluster write support is compiled out. */
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */

extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {				\
	debug_text_event(scm_debug, imp, txt);		\
} while (0)

static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (!debug_level_enabled(scm_debug, level))
		return;
	/* each debug entry holds at most buf_size bytes, so log in pieces */
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}
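
/*
 * Example (hypothetical call site): hexdump a request's aob in buf_size
 * sized debug entries, e.g.
 *
 *	SCM_LOG_HEX(1, scmrq->aob, sizeof(*scmrq->aob));
 */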

static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}
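
/*
 * Example (hypothetical call site): trace a device's address, operational
 * state and rank when notified of a state change, e.g.
 *
 *	SCM_LOG_STATE(2, scmdev);
 */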

#endif /* SCM_BLK_H */