// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>

#define HEADER_DESTINATION_IS_BROADCAST(header) \
	((async_header_get_destination(header) & 0x3f) == 0x3f)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
			     u32 response_tstamp)
{
	struct fw_transaction *t = NULL, *iter;

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter == transaction) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	if (!t)
		return -ENOENT;

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
					t->callback_data);
	}

	return 0;
}

/*
 * Only valid for transactions that are potentially pending (i.e. have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	u32 tstamp;

	/*
	 * Cancel the packet transmission if it's still queued. That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	if (transaction->packet.ack == 0) {
		// The timestamp is reused since it was read just now.
		tstamp = transaction->packet.timestamp;
	} else {
		u32 curr_cycle_time = 0;

		(void)fw_card_read_cycle_time(card, &curr_cycle_time);
		tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
	}

	return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);

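/*
 * Usage sketch (illustrative only; the embedding driver structure and its
 * field names are hypothetical): a driver that keeps the struct fw_transaction
 * it passed to fw_send_request() can abort the pending transaction on
 * teardown.
 *
 *	if (fw_cancel_transaction(card, &my_driver_state->transaction) < 0)
 *		pr_debug("transaction already completed or completing\n");
 */
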
static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;

	scoped_guard(spinlock_irqsave, &card->lock) {
		if (list_empty(&t->link))
			return;
		list_del(&t->link);
		card->tlabel_mask &= ~(1ULL << t->tlabel);
	}

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
					t->split_timeout_cycle, NULL, 0, t->callback_data);
	}
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	guard(spinlock_irqsave)(&card->lock);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
		return;

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);
}

static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
					      packet->speed, status, packet->timestamp);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
		break;
	case ACK_PENDING:
	{
		t->split_timeout_cycle =
			compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
		start_split_transaction_timeout(t, card);
		break;
	}
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY, packet->timestamp);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status, packet->timestamp);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
			    int destination_id, int source_id, int generation, int speed,
			    unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		// The value of destination_id argument should include tag, channel, and sy fields
		// as isochronous packet header has.
		packet->header[0] = destination_id;
		isoc_header_set_data_length(packet->header, length);
		isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	async_header_set_retry(packet->header, RETRY_X);
	async_header_set_tlabel(packet->header, tlabel);
	async_header_set_tcode(packet->header, tcode);
	async_header_set_destination(packet->header, destination_id);
	async_header_set_source(packet->header, source_id);
	async_header_set_offset(packet->header, offset);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		async_header_set_quadlet_data(packet->header, *(u32 *)payload);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

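/*
 * Worked example of the allocator above: with current_tlabel = 5 and bit 5
 * still set in tlabel_mask, the scan wraps through labels 6, 7, ..., 63, 0,
 * ... until it finds a clear bit; if it comes back around to 5, all 64
 * transaction labels are in use and -EBUSY is returned.
 */
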
/**
 * __fw_send_request() - submit a request packet for transmission to generate callback for response
 *			 subaction with or without time stamp.
 * @card:		interface to send the request at
 * @t:			transaction instance to which the request belongs
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for the request subaction
 * @length:		length of the payload, in bytes
 * @callback:		union of two callback functions, for responses with or without time stamp
 * @with_tstamp:	whether to receive time stamp or not for response subaction
 * @callback_data:	data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request is in danger of being sent to a wrong node.
 *
 * In case of asynchronous stream packets, i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will then contain tag, channel, and sy data instead of a node ID.
 *
 * The buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests.  Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called.  Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR.  The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases:  fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire.  On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		int destination_id, int generation, int speed, unsigned long long offset,
		void *payload, size_t length, union fw_transaction_callback callback,
		bool with_tstamp, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		if (!with_tstamp) {
			callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		} else {
			// Timestamping on behalf of hardware.
			u32 curr_cycle_time = 0;
			u32 tstamp;

			(void)fw_card_read_cycle_time(card, &curr_cycle_time);
			tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);

			callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
					     callback_data);
		}
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->with_tstamp = with_tstamp;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
					      t->packet.header, payload,
					      tcode_is_read_request(tcode) ? 0 : length / 4);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);

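/*
 * Usage sketch for the fw_send_request() wrapper from <linux/firewire.h>
 * (illustrative only; my_complete(), my_transaction, offset, and buffer are
 * hypothetical and must stay valid until the callback has run):
 *
 *	static void my_complete(struct fw_card *card, int rcode, void *payload,
 *				size_t length, void *data)
 *	{
 *		if (rcode != RCODE_COMPLETE)
 *			pr_err("read failed: %s\n", fw_rcode_string(rcode));
 *	}
 *
 *	fw_send_request(device->card, &my_transaction, TCODE_READ_BLOCK_REQUEST,
 *			device->node_id, device->generation, device->max_speed,
 *			offset, buffer, 64, my_complete, NULL);
 */
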
struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card:		card interface for this request
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for the request subaction
 * @length:		length of the payload, in bytes
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * and/or to the payload of the response.  DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);

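/*
 * Usage sketch (assuming a valid struct fw_device *device): a blocking read of
 * the first Config ROM quadlet.  CSR_REGISTER_BASE and CSR_CONFIG_ROM come
 * from <linux/firewire-constants.h>.
 *
 *	__be32 quadlet;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
 *				   &quadlet, 4);
 *	if (rcode != RCODE_COMPLETE)
 *		return -EIO;
 */
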
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation, status,
					  packet->timestamp);
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = 0;

	phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
		phy_packet_phy_config_set_root_id(&data, node_id);
		phy_packet_phy_config_set_force_root_node(&data, true);
	}

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	phy_packet_phy_config_set_gap_count(&data, gap_count);
	phy_packet_phy_config_set_gap_count_optimization(&data, true);

	guard(mutex)(&phy_config_mutex);

	async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
					  phy_config_packet.generation, phy_config_packet.header[1],
					  phy_config_packet.header[2]);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);
}

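/*
 * Typical call pattern (sketch): after a bus reset, bus management code can
 * ask the local node to become root while re-announcing the current gap count:
 *
 *	fw_send_phy_config(card, card->node_id, generation,
 *			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
 */
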
static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif  /*  0  */

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler:	callback
 * @region:	region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value:  0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	guard(spinlock)(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);

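/*
 * Registration sketch (my_callback and my_handler are hypothetical names): a
 * 4-byte region allocated somewhere in fw_high_memory_region.  The callback
 * must answer every request with fw_send_response(); on success the allocated
 * start address is found in my_handler.offset.
 *
 *	static void my_callback(struct fw_card *card, struct fw_request *request,
 *				int tcode, int destination, int source,
 *				int generation, unsigned long long offset,
 *				void *payload, size_t length, void *callback_data)
 *	{
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 *
 *	static struct fw_address_handler my_handler = {
 *		.length			= 4,
 *		.address_callback	= my_callback,
 *	};
 *
 *	err = fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
 */
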
/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	scoped_guard(spinlock, &address_handler_list_lock)
		list_del_rcu(&handler->link);

	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
					       packet->speed, status, packet->timestamp);

	// Decrease the reference count since the response is no longer in-flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = async_header_get_tcode(r->request_header);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = async_header_get_data_length(r->request_header);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = async_header_get_extended_tcode(r->request_header);
		data_length = async_header_get_data_length(r->request_header);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

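/*
 * Worked example for the lock case above: a compare-swap request carries arg
 * and data values, e.g. 8 bytes for two quadlets, and the response returns
 * only the old value, data_length / 2 = 4 bytes; for EXTCODE_FETCH_ADD and
 * EXTCODE_LITTLE_ADD the response is as long as the request payload.
 */
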
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = async_header_get_tcode(request_header);
	tlabel = async_header_get_tlabel(request_header);
	source = async_header_get_destination(request_header);	// Exchange.
	destination = async_header_get_source(request_header);	// Exchange.
	extended_tcode = async_header_get_extended_tcode(request_header);

	async_header_set_retry(response->header, RETRY_1);
	async_header_set_tlabel(response->header, tlabel);
	async_header_set_destination(response->header, destination);
	async_header_set_source(response->header, source);
	async_header_set_rcode(response->header, rcode);
	response->header[2] = 0;	// The field is reserved.

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			async_header_set_quadlet_data(response->header, *(u32 *)payload);
		else
			async_header_set_quadlet_data(response->header, 0);
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		async_header_set_tcode(response->header, tcode + 2);
		async_header_set_data_length(response->header, length);
		async_header_set_extended_tcode(response->header, extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}

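/*
 * Worked example: with split_timeout_cycles = 800 and a request timestamp of
 * (3 << 13) | 3840 (second field 3, cycle 3840), the sum 800 + 3840 = 4640
 * stays below 8000, so the result is (3 << 13) | 4640.  With cycle 7800
 * instead, 800 + 7800 = 8600 carries one second: (4 << 13) | 600.
 */
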
static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = async_header_get_tcode(p->header);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = async_header_get_data_length(p->header);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = async_header_get_data_length(p->header);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			 p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

/**
 * fw_send_response() - send response packet for asynchronous transaction.
 * @card:	interface to send the response at.
 * @request:	firewire request data for the transaction.
 * @rcode:	response code to send.
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	u32 *data = NULL;
	unsigned int data_length = 0;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header)) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE) {
		data = request->data;
		data_length = fw_get_response_length(request);
	}

	fw_fill_response(&request->response, request->request_header, rcode, data, data_length);

	// Increase the reference count so that the object is kept during in-flight.
	fw_request_get(request);

	trace_async_response_outbound_initiate((uintptr_t)request, card->index,
					       request->response.generation, request->response.speed,
					       request->response.header, data,
					       data ? data_length / 4 : 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

/**
 * fw_request_get_timestamp: Get timestamp of the request.
 * @request: The opaque pointer to request structure.
 *
 * Get the timestamp at which the 1394 OHCI controller received the asynchronous request
 * subaction.  The timestamp consists of the low order 3 bits of the second field and the full
 * 13 bits of the count field of the isochronous cycle time register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);

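/*
 * Decoding sketch for the value returned above, e.g. inside an address
 * handler:
 *
 *	u32 tstamp = fw_request_get_timestamp(request);
 *	unsigned int sec = (tstamp >> 13) & 0x7;	// low 3 bits of second field
 *	unsigned int cycle = tstamp & 0x1fff;		// 13 bit cycle count
 */
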
static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);
	tcode = async_header_get_tcode(p->header);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + async_header_get_extended_tcode(p->header);

	scoped_guard(rcu) {
		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
							   request->length);
		if (handler)
			handler->address_callback(card, request, tcode, destination, source,
						  p->generation, offset, request->data,
						  request->length, handler->callback_data);
	}

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode = async_header_get_tcode(p->header);
	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	scoped_guard(rcu) {
		list_for_each_entry_rcu(handler, &address_handler_list, link) {
			if (is_enclosing_handler(handler, offset, request->length))
				handler->address_callback(card, request, tcode, destination, source,
							  p->generation, offset, request->data,
							  request->length, handler->callback_data);
		}
	}

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;
	unsigned int tcode;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	tcode = async_header_get_tcode(p->header);
	if (tcode_is_link_internal(tcode)) {
		trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack, p->timestamp,
					p->header[1], p->header[2]);
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
				    p->ack, p->timestamp, p->header, request->data,
				    tcode_is_read_request(tcode) ? 0 : request->length / 4);

	offset = async_header_get_offset(p->header);

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = async_header_get_tcode(p->header);
	tlabel = async_header_get_tlabel(p->header);
	source = async_header_get_source(p->header);
	rcode = async_header_get_rcode(p->header);

	// FIXME: sanity check packet: is the length correct, do tcodes and
	// addresses match the transaction request queried later?
	//
	// For the tracepoints event, let us decode the header here against the concern.

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = async_header_get_data_length(p->header);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter->node_id == source && iter->tlabel == tlabel) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
				     p->timestamp, p->header, data, data_length / 4);

	if (!t) {
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
					data_length, t->callback_data);
	}
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!tcode_is_read_request(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}

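/*
 * Worked example: split_timeout_hi = 0 and split_timeout_lo = 0x04000000 yield
 * 0x04000000 >> 19 = 128 cycles, which the clamp raises to the IEEE 1394
 * minimum of 800 cycles (100 ms); the timer then waits
 * DIV_ROUND_UP(800 * HZ, 8000) = DIV_ROUND_UP(HZ, 10) jiffies.
 */
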
static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests. If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	xa_destroy(&fw_device_xa);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);