1 #ifndef _FIREWIRE_CORE_H
2 #define _FIREWIRE_CORE_H
5 #include <linux/list.h>
7 #include <linux/mm_types.h>
8 #include <linux/rwsem.h>
9 #include <linux/slab.h>
10 #include <linux/types.h>
12 #include <asm/atomic.h>
18 struct fw_iso_context
;
/*
 * Bitfields within the PHY registers.
 * PHY_CONTENDER, PHY_BUS_RESET and PHY_BUS_SHORT_RESET deliberately share
 * the value 0x40: they are bits in *different* PHY registers (1394a
 * extended PHY register map) — presumably C bit, IBR and ISBR; confirm
 * against the 1394a PHY register documentation.
 */
#define PHY_LINK_ACTIVE		0x80
#define PHY_CONTENDER		0x40
#define PHY_BUS_RESET		0x40
#define PHY_BUS_SHORT_RESET	0x40

/* Initial value of the BANDWIDTH_AVAILABLE register (1394a-2000 8.3.2.3.7) */
#define BANDWIDTH_AVAILABLE_INITIAL	4915
/*
 * Broadcast channel register: valid bit plus channel number 31.
 * Use unsigned constants — shifting 1 into the sign bit of a signed int
 * is undefined behavior in strict ISO C.
 */
#define BROADCAST_CHANNEL_INITIAL	(1u << 31 | 31)
#define BROADCAST_CHANNEL_VALID		(1u << 30)
36 struct fw_card_driver
{
38 * Enable the given card with the given initial config rom.
39 * This function is expected to activate the card, and either
40 * enable the PHY or set the link_on bit and initiate a bus
43 int (*enable
)(struct fw_card
*card
, u32
*config_rom
, size_t length
);
45 int (*update_phy_reg
)(struct fw_card
*card
, int address
,
46 int clear_bits
, int set_bits
);
49 * Update the config rom for an enabled card. This function
50 * should change the config rom that is presented on the bus
51 * an initiate a bus reset.
53 int (*set_config_rom
)(struct fw_card
*card
,
54 u32
*config_rom
, size_t length
);
56 void (*send_request
)(struct fw_card
*card
, struct fw_packet
*packet
);
57 void (*send_response
)(struct fw_card
*card
, struct fw_packet
*packet
);
58 /* Calling cancel is valid once a packet has been submitted. */
59 int (*cancel_packet
)(struct fw_card
*card
, struct fw_packet
*packet
);
62 * Allow the specified node ID to do direct DMA out and in of
63 * host memory. The card will disable this for all node when
64 * a bus reset happens, so driver need to reenable this after
65 * bus reset. Returns 0 on success, -ENODEV if the card
66 * doesn't support this, -ESTALE if the generation doesn't
69 int (*enable_phys_dma
)(struct fw_card
*card
,
70 int node_id
, int generation
);
72 u64 (*get_bus_time
)(struct fw_card
*card
);
74 struct fw_iso_context
*
75 (*allocate_iso_context
)(struct fw_card
*card
,
76 int type
, int channel
, size_t header_size
);
77 void (*free_iso_context
)(struct fw_iso_context
*ctx
);
79 int (*start_iso
)(struct fw_iso_context
*ctx
,
80 s32 cycle
, u32 sync
, u32 tags
);
82 int (*queue_iso
)(struct fw_iso_context
*ctx
,
83 struct fw_iso_packet
*packet
,
84 struct fw_iso_buffer
*buffer
,
85 unsigned long payload
);
87 int (*stop_iso
)(struct fw_iso_context
*ctx
);
90 void fw_card_initialize(struct fw_card
*card
,
91 const struct fw_card_driver
*driver
, struct device
*device
);
92 int fw_card_add(struct fw_card
*card
,
93 u32 max_receive
, u32 link_speed
, u64 guid
);
94 void fw_core_remove_card(struct fw_card
*card
);
95 int fw_core_initiate_bus_reset(struct fw_card
*card
, int short_reset
);
96 int fw_compute_block_crc(u32
*block
);
97 void fw_schedule_bm_work(struct fw_card
*card
, unsigned long delay
);
99 static inline struct fw_card
*fw_card_get(struct fw_card
*card
)
101 kref_get(&card
->kref
);
106 void fw_card_release(struct kref
*kref
);
108 static inline void fw_card_put(struct fw_card
*card
)
110 kref_put(&card
->kref
, fw_card_release
);
116 extern const struct file_operations fw_device_ops
;
118 void fw_device_cdev_update(struct fw_device
*device
);
119 void fw_device_cdev_remove(struct fw_device
*device
);
124 extern struct rw_semaphore fw_device_rwsem
;
125 extern struct idr fw_device_idr
;
126 extern int fw_cdev_major
;
128 struct fw_device
*fw_device_get_by_devt(dev_t devt
);
129 int fw_device_set_broadcast_channel(struct device
*dev
, void *gen
);
130 void fw_node_event(struct fw_card
*card
, struct fw_node
*node
, int event
);
135 int fw_iso_buffer_map(struct fw_iso_buffer
*buffer
, struct vm_area_struct
*vma
);
136 void fw_iso_resource_manage(struct fw_card
*card
, int generation
,
137 u64 channels_mask
, int *channel
, int *bandwidth
,
138 bool allocate
, __be32 buffer
[2]);
149 FW_NODE_INITIATED_RESET
,
157 u8 initiated_reset
:1;
159 u8 phy_speed
:2; /* As in the self ID packet. */
160 u8 max_speed
:2; /* Minimum of all phy-speeds on the path from the
161 * local node to this node. */
162 u8 max_depth
:4; /* Maximum depth to any leaf node */
163 u8 max_hops
:4; /* Max hops in this sub tree */
166 /* For serializing node topology into a list. */
167 struct list_head link
;
169 /* Upper layer specific data. */
172 struct fw_node
*ports
[0];
175 static inline struct fw_node
*fw_node_get(struct fw_node
*node
)
177 atomic_inc(&node
->ref_count
);
182 static inline void fw_node_put(struct fw_node
*node
)
184 if (atomic_dec_and_test(&node
->ref_count
))
188 void fw_core_handle_bus_reset(struct fw_card
*card
, int node_id
,
189 int generation
, int self_id_count
, u32
*self_ids
);
190 void fw_destroy_nodes(struct fw_card
*card
);
/*
 * Check whether new_generation is the immediate successor of old_generation.
 * Take counter roll-over at 255 (as per OHCI) into account.
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	return (new_generation & 0xff) == ((old_generation + 1) & 0xff);
}
/*
 * IEEE 1394 transaction codes, decoded bitwise:
 *   bit 0 - block (vs. quadlet) payload
 *   bit 1 - response (vs. request)
 * tcodes 4 and 5 are the quadlet/block read requests, the only packets
 * that carry no data payload.
 */
#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)

/* A node_id whose 10-bit bus-number field is 0x3ff addresses the local bus. */
#define LOCAL_BUS 0xffc0
213 void fw_core_handle_request(struct fw_card
*card
, struct fw_packet
*request
);
214 void fw_core_handle_response(struct fw_card
*card
, struct fw_packet
*packet
);
215 void fw_fill_response(struct fw_packet
*response
, u32
*request_header
,
216 int rcode
, void *payload
, size_t length
);
217 void fw_flush_transactions(struct fw_card
*card
);
218 void fw_send_phy_config(struct fw_card
*card
,
219 int node_id
, int generation
, int gap_count
);
221 #endif /* _FIREWIRE_CORE_H */