#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H

#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/atomic.h>

struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;

/* -card */

extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
void fw_notice(const struct fw_card *card, const char *fmt, ...);

/* bitfields within the PHY registers */
#define PHY_LINK_ACTIVE		0x80
#define PHY_CONTENDER		0x40
#define PHY_BUS_RESET		0x40
#define PHY_EXTENDED_REGISTERS	0xe0
#define PHY_BUS_SHORT_RESET	0x40
#define PHY_INT_STATUS_BITS	0x3c
#define PHY_ENABLE_ACCEL	0x02
#define PHY_ENABLE_MULTI	0x01
#define PHY_PAGE_SELECT		0xe0
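
/*
 * These masks select bits within the (extended) PHY register set and are
 * meant to be used with the read_phy_reg()/update_phy_reg() hooks declared
 * below.  Illustrative sketch, not part of this header: a controller driver
 * could set the link-active and contender bits (IEEE 1394a PHY register 4)
 * without disturbing the other bits via
 *
 *	card->driver->update_phy_reg(card, 4, 0,
 *				     PHY_LINK_ACTIVE | PHY_CONTENDER);
 *
 * where clear_bits names bits to clear and set_bits names bits to set.
 */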

#define BANDWIDTH_AVAILABLE_INITIAL	4915
#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID		(1 << 30)

#define CSR_STATE_BIT_CMSTR	(1 << 8)
#define CSR_STATE_BIT_ABDICATE	(1 << 10)

struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card, and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card,
		      const __be32 *config_rom, size_t length);

	int (*read_phy_reg)(struct fw_card *card, int address);
	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card.  This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      const __be32 *config_rom, size_t length);

	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out of and into
	 * host memory.  The card disables this for all nodes when a bus
	 * reset happens, so the driver needs to re-enable it after each
	 * bus reset.  Returns 0 on success, -ENODEV if the card doesn't
	 * support this, -ESTALE if the generation doesn't match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	u32 (*read_csr)(struct fw_card *card, int csr_offset);
	void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	void (*flush_queue_iso)(struct fw_iso_context *ctx);

	int (*flush_iso_completions)(struct fw_iso_context *ctx);

	int (*stop_iso)(struct fw_iso_context *ctx);
};
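
/*
 * Illustrative sketch only (the example_* names are hypothetical, not part
 * of this header): a controller driver typically fills in a static ops
 * table and registers it through fw_card_initialize(), e.g.
 *
 *	static const struct fw_card_driver example_driver = {
 *		.enable		= example_enable,
 *		.update_phy_reg	= example_update_phy_reg,
 *		.set_config_rom	= example_set_config_rom,
 *		.send_request	= example_send_request,
 *		.send_response	= example_send_response,
 *	};
 *
 *	fw_card_initialize(&example_card, &example_driver, dev);
 */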

void fw_card_initialize(struct fw_card *card,
		const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
int fw_compute_block_crc(__be32 *block);
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);

/* -cdev */

extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);

/* -device */

extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;

static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}

static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}

struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);

/* -iso */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
			  struct vm_area_struct *vma);

/* -topology */

enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};

struct fw_node {
	u16 node_id;
	u8 color;
	u8 port_count;
	u8 link_on:1;
	u8 initiated_reset:1;
	u8 b_path:1;
	u8 phy_speed:2;	/* As in the self ID packet. */
	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
			 * local node to this node. */
	u8 max_depth:4;	/* Maximum depth to any leaf node */
	u8 max_hops:4;	/* Max hops in this sub tree */
	atomic_t ref_count;

	/* For serializing node topology into a list. */
	struct list_head link;

	/* Upper layer specific data. */
	void *data;

	struct fw_node *ports[0];
};
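
/*
 * ports[] is a zero-length (flexible) array holding one pointer per port;
 * each fw_node is expected to be allocated with room for port_count
 * entries directly after the struct.  Illustrative sketch only (allocator
 * and flags are assumptions, not taken from this header):
 *
 *	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
 */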

static inline struct fw_node *fw_node_get(struct fw_node *node)
{
	atomic_inc(&node->ref_count);

	return node;
}

static inline void fw_node_put(struct fw_node *node)
{
	if (atomic_dec_and_test(&node->ref_count))
		kfree(node);
}

void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
	int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);

/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
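
/*
 * For example, is_next_generation(1, 0) and is_next_generation(0, 255) both
 * return true (the generation counter wraps from 255 back to 0), while
 * is_next_generation(255, 0) returns false.
 */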

/* -transaction */

#define TCODE_LINK_INTERNAL		0xe

#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)
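
/*
 * These predicates lean on the IEEE 1394 transaction code encoding, in
 * which bit 0 distinguishes quadlet from block payloads and bit 1
 * distinguishes requests from responses for the common tcodes.  For
 * example, TCODE_IS_READ_REQUEST() matches 0x4 (read quadlet request) and
 * 0x5 (read block request), and TCODE_HAS_REQUEST_DATA() is false for
 * read requests, whose request packets carry no data payload.
 */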

#define LOCAL_BUS 0xffc0

/* arbitrarily chosen maximum range for physical DMA: 128 TB */
#define FW_MAX_PHYSICAL_RANGE		(128ULL << 40)

void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length);

#define FW_PHY_CONFIG_NO_NODE_ID	-1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT	-1
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count);

static inline bool is_ping_packet(u32 *data)
{
	return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
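
/*
 * Per IEEE 1394a, a ping is a PHY configuration packet (the top two bits of
 * the first quadlet are zero) that sets neither the force-root nor the
 * gap-count fields, followed by the bitwise inverse of the first quadlet,
 * as required of all PHY packets; that is exactly what the check above
 * verifies.
 */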

#endif /* _FIREWIRE_CORE_H */