/*
 * drivers/firewire/core.h — private interfaces shared between the
 * FireWire (IEEE 1394) core modules (card, cdev, device, iso,
 * topology, transaction).
 */
1 #ifndef _FIREWIRE_CORE_H
2 #define _FIREWIRE_CORE_H
4 #include <linux/compiler.h>
5 #include <linux/device.h>
6 #include <linux/dma-mapping.h>
7 #include <linux/fs.h>
8 #include <linux/list.h>
9 #include <linux/idr.h>
10 #include <linux/mm_types.h>
11 #include <linux/rwsem.h>
12 #include <linux/slab.h>
13 #include <linux/types.h>
15 #include <linux/atomic.h>
17 struct device;
18 struct fw_card;
19 struct fw_device;
20 struct fw_iso_buffer;
21 struct fw_iso_context;
22 struct fw_iso_packet;
23 struct fw_node;
24 struct fw_packet;
27 /* -card */
29 extern __printf(2, 3)
30 void fw_err(const struct fw_card *card, const char *fmt, ...);
31 extern __printf(2, 3)
32 void fw_notice(const struct fw_card *card, const char *fmt, ...);
34 /* bitfields within the PHY registers */
35 #define PHY_LINK_ACTIVE 0x80
36 #define PHY_CONTENDER 0x40
37 #define PHY_BUS_RESET 0x40
38 #define PHY_EXTENDED_REGISTERS 0xe0
39 #define PHY_BUS_SHORT_RESET 0x40
40 #define PHY_INT_STATUS_BITS 0x3c
41 #define PHY_ENABLE_ACCEL 0x02
42 #define PHY_ENABLE_MULTI 0x01
43 #define PHY_PAGE_SELECT 0xe0
45 #define BANDWIDTH_AVAILABLE_INITIAL 4915
46 #define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
47 #define BROADCAST_CHANNEL_VALID (1 << 30)
49 #define CSR_STATE_BIT_CMSTR (1 << 8)
50 #define CSR_STATE_BIT_ABDICATE (1 << 10)
52 struct fw_card_driver {
54 * Enable the given card with the given initial config rom.
55 * This function is expected to activate the card, and either
56 * enable the PHY or set the link_on bit and initiate a bus
57 * reset.
59 int (*enable)(struct fw_card *card,
60 const __be32 *config_rom, size_t length);
62 int (*read_phy_reg)(struct fw_card *card, int address);
63 int (*update_phy_reg)(struct fw_card *card, int address,
64 int clear_bits, int set_bits);
67 * Update the config rom for an enabled card. This function
68 * should change the config rom that is presented on the bus
69 * and initiate a bus reset.
71 int (*set_config_rom)(struct fw_card *card,
72 const __be32 *config_rom, size_t length);
74 void (*send_request)(struct fw_card *card, struct fw_packet *packet);
75 void (*send_response)(struct fw_card *card, struct fw_packet *packet);
76 /* Calling cancel is valid once a packet has been submitted. */
77 int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);
80 * Allow the specified node ID to do direct DMA out and in of
81 * host memory. The card will disable this for all node when
82 * a bus reset happens, so driver need to reenable this after
83 * bus reset. Returns 0 on success, -ENODEV if the card
84 * doesn't support this, -ESTALE if the generation doesn't
85 * match.
87 int (*enable_phys_dma)(struct fw_card *card,
88 int node_id, int generation);
90 u32 (*read_csr)(struct fw_card *card, int csr_offset);
91 void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);
93 struct fw_iso_context *
94 (*allocate_iso_context)(struct fw_card *card,
95 int type, int channel, size_t header_size);
96 void (*free_iso_context)(struct fw_iso_context *ctx);
98 int (*start_iso)(struct fw_iso_context *ctx,
99 s32 cycle, u32 sync, u32 tags);
101 int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);
103 int (*queue_iso)(struct fw_iso_context *ctx,
104 struct fw_iso_packet *packet,
105 struct fw_iso_buffer *buffer,
106 unsigned long payload);
108 void (*flush_queue_iso)(struct fw_iso_context *ctx);
110 int (*flush_iso_completions)(struct fw_iso_context *ctx);
112 int (*stop_iso)(struct fw_iso_context *ctx);
115 void fw_card_initialize(struct fw_card *card,
116 const struct fw_card_driver *driver, struct device *device);
117 int fw_card_add(struct fw_card *card,
118 u32 max_receive, u32 link_speed, u64 guid);
119 void fw_core_remove_card(struct fw_card *card);
120 int fw_compute_block_crc(__be32 *block);
121 void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
/* -cdev */

extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);


/* -device */

extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;
138 static inline struct fw_device *fw_device_get(struct fw_device *device)
140 get_device(&device->device);
142 return device;
145 static inline void fw_device_put(struct fw_device *device)
147 put_device(&device->device);
150 struct fw_device *fw_device_get_by_devt(dev_t devt);
151 int fw_device_set_broadcast_channel(struct device *dev, void *gen);
152 void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
155 /* -iso */
157 int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
158 int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
159 enum dma_data_direction direction);
160 int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
161 struct vm_area_struct *vma);
/* -topology */

/* Events passed to fw_node_event() after a self-ID scan. */
enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};
175 struct fw_node {
176 u16 node_id;
177 u8 color;
178 u8 port_count;
179 u8 link_on:1;
180 u8 initiated_reset:1;
181 u8 b_path:1;
182 u8 phy_speed:2; /* As in the self ID packet. */
183 u8 max_speed:2; /* Minimum of all phy-speeds on the path from the
184 * local node to this node. */
185 u8 max_depth:4; /* Maximum depth to any leaf node */
186 u8 max_hops:4; /* Max hops in this sub tree */
187 atomic_t ref_count;
189 /* For serializing node topology into a list. */
190 struct list_head link;
192 /* Upper layer specific data. */
193 void *data;
195 struct fw_node *ports[0];
198 static inline struct fw_node *fw_node_get(struct fw_node *node)
200 atomic_inc(&node->ref_count);
202 return node;
205 static inline void fw_node_put(struct fw_node *node)
207 if (atomic_dec_and_test(&node->ref_count))
208 kfree(node);
211 void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
212 int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
213 void fw_destroy_nodes(struct fw_card *card);
/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
225 /* -transaction */
227 #define TCODE_LINK_INTERNAL 0xe
229 #define TCODE_IS_READ_REQUEST(tcode) (((tcode) & ~1) == 4)
230 #define TCODE_IS_BLOCK_PACKET(tcode) (((tcode) & 1) != 0)
231 #define TCODE_IS_LINK_INTERNAL(tcode) ((tcode) == TCODE_LINK_INTERNAL)
232 #define TCODE_IS_REQUEST(tcode) (((tcode) & 2) == 0)
233 #define TCODE_IS_RESPONSE(tcode) (((tcode) & 2) != 0)
234 #define TCODE_HAS_REQUEST_DATA(tcode) (((tcode) & 12) != 4)
235 #define TCODE_HAS_RESPONSE_DATA(tcode) (((tcode) & 12) != 0)
237 #define LOCAL_BUS 0xffc0
239 /* OHCI-1394's default upper bound for physical DMA: 4 GB */
240 #define FW_MAX_PHYSICAL_RANGE (1ULL << 32)
242 void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
243 void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
244 int fw_get_response_length(struct fw_request *request);
245 void fw_fill_response(struct fw_packet *response, u32 *request_header,
246 int rcode, void *payload, size_t length);
248 #define FW_PHY_CONFIG_NO_NODE_ID -1
249 #define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
250 void fw_send_phy_config(struct fw_card *card,
251 int node_id, int generation, int gap_count);
253 static inline bool is_ping_packet(u32 *data)
255 return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
258 #endif /* _FIREWIRE_CORE_H */