// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/if_rmnet.h>
#include <linux/types.h>

#include "gsi_trans.h"
#include "ipa_endpoint.h"
#include "ipa_interrupt.h"
#include "ipa_modem.h"
#include "ipa_power.h"
#include "ipa_table.h"
#include "ipa_version.h"
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH	16		/* Must be non-zero */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
/** enum ipa_status_opcode - IPA status opcode field hardware values */
enum ipa_status_opcode {				/* *Not* a bitmask */
	IPA_STATUS_OPCODE_PACKET		= 1,
	IPA_STATUS_OPCODE_NEW_RULE_PACKET	= 2,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 4,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 8,
	IPA_STATUS_OPCODE_LOG			= 16,
	IPA_STATUS_OPCODE_DCMP			= 32,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 64,
};
/** enum ipa_status_exception - IPA status exception field hardware values */
enum ipa_status_exception {				/* *Not* a bitmask */
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 1,
	IPA_STATUS_EXCEPTION_IPTYPE		= 4,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 8,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 16,
	IPA_STATUS_EXCEPTION_SW_FILTER		= 32,
	IPA_STATUS_EXCEPTION_NAT		= 64,		/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK	= 64,		/* IPv6 */
	IPA_STATUS_EXCEPTION_UC			= 128,
	IPA_STATUS_EXCEPTION_INVALID_ENDPOINT	= 129,
	IPA_STATUS_EXCEPTION_HEADER_INSERT	= 136,
	IPA_STATUS_EXCEPTION_CHECKSUM		= 229,
};
/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
enum ipa_status_mask {
	IPA_STATUS_MASK_FRAG_PROCESS	= BIT(0),
	IPA_STATUS_MASK_FILT_PROCESS	= BIT(1),
	IPA_STATUS_MASK_NAT_PROCESS	= BIT(2),
	IPA_STATUS_MASK_ROUTE_PROCESS	= BIT(3),
	IPA_STATUS_MASK_TAG_VALID	= BIT(4),
	IPA_STATUS_MASK_FRAGMENT	= BIT(5),
	IPA_STATUS_MASK_FIRST_FRAGMENT	= BIT(6),
	IPA_STATUS_MASK_V4		= BIT(7),
	IPA_STATUS_MASK_CKSUM_PROCESS	= BIT(8),
	IPA_STATUS_MASK_AGGR_PROCESS	= BIT(9),
	IPA_STATUS_MASK_DEST_EOT	= BIT(10),
	IPA_STATUS_MASK_DEAGGR_PROCESS	= BIT(11),
	IPA_STATUS_MASK_DEAGG_FIRST	= BIT(12),
	IPA_STATUS_MASK_SRC_EOT		= BIT(13),
	IPA_STATUS_MASK_PREV_EOT	= BIT(14),
	IPA_STATUS_MASK_BYTE_LIMIT	= BIT(15),
};
/* Special IPA filter/router rule field value indicating "rule miss" */
#define IPA_STATUS_RULE_MISS	0x3ff	/* 10-bit filter/router rule fields */

/** The IPA status nat_type field uses enum ipa_nat_type hardware values */
/* enum ipa_status_field_id - IPA packet status structure field identifiers */
enum ipa_status_field_id {
	STATUS_OPCODE,			/* enum ipa_status_opcode */
	STATUS_EXCEPTION,		/* enum ipa_status_exception */
	STATUS_MASK,			/* enum ipa_status_mask (bitmask) */
	STATUS_LENGTH,
	STATUS_SRC_ENDPOINT,
	STATUS_DST_ENDPOINT,
	STATUS_METADATA,
	STATUS_FILTER_LOCAL,		/* Boolean */
	STATUS_FILTER_HASH,		/* Boolean */
	STATUS_FILTER_GLOBAL,		/* Boolean */
	STATUS_FILTER_RETAIN,		/* Boolean */
	STATUS_FILTER_RULE_INDEX,
	STATUS_ROUTER_LOCAL,		/* Boolean */
	STATUS_ROUTER_HASH,		/* Boolean */
	STATUS_UCP,			/* Boolean */
	STATUS_ROUTER_TABLE,
	STATUS_ROUTER_RULE_INDEX,
	STATUS_NAT_HIT,			/* Boolean */
	STATUS_NAT_INDEX,
	STATUS_NAT_TYPE,		/* enum ipa_nat_type */
	STATUS_TAG_LOW32,		/* Low-order 32 bits of 48-bit tag */
	STATUS_TAG_HIGH16,		/* High-order 16 bits of 48-bit tag */
	STATUS_SEQUENCE,
	STATUS_TIME_OF_DAY,
	STATUS_HEADER_LOCAL,		/* Boolean */
	STATUS_HEADER_OFFSET,
	STATUS_FRAG_HIT,		/* Boolean */
	STATUS_FRAG_RULE_INDEX,
};
/* Size in bytes of an IPA packet status structure */
#define IPA_STATUS_SIZE			sizeof(__le32[8])
/* IPA status structure decoder; looks up field values for a structure */
static u32 ipa_status_extract(struct ipa *ipa, const void *data,
			      enum ipa_status_field_id field)
{
	enum ipa_version version = ipa->version;
	const __le32 *word = data;

	switch (field) {
	case STATUS_OPCODE:
		return le32_get_bits(word[0], GENMASK(7, 0));
	case STATUS_EXCEPTION:
		return le32_get_bits(word[0], GENMASK(15, 8));
	case STATUS_MASK:
		return le32_get_bits(word[0], GENMASK(31, 16));
	case STATUS_LENGTH:
		return le32_get_bits(word[1], GENMASK(15, 0));
	case STATUS_SRC_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(20, 16));
		return le32_get_bits(word[1], GENMASK(23, 16));
	/* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
	/* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
	case STATUS_DST_ENDPOINT:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[1], GENMASK(28, 24));
		return le32_get_bits(word[7], GENMASK(23, 16));
	/* Status word 1, bits 29-31 are reserved */
	case STATUS_METADATA:
		return le32_to_cpu(word[2]);
	case STATUS_FILTER_LOCAL:
		return le32_get_bits(word[3], GENMASK(0, 0));
	case STATUS_FILTER_HASH:
		return le32_get_bits(word[3], GENMASK(1, 1));
	case STATUS_FILTER_GLOBAL:
		return le32_get_bits(word[3], GENMASK(2, 2));
	case STATUS_FILTER_RETAIN:
		return le32_get_bits(word[3], GENMASK(3, 3));
	case STATUS_FILTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(13, 4));
	/* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
	case STATUS_ROUTER_LOCAL:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(14, 14));
		return le32_get_bits(word[1], GENMASK(27, 27));
	case STATUS_ROUTER_HASH:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(15, 15));
		return le32_get_bits(word[1], GENMASK(28, 28));
	case STATUS_UCP:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(16, 16));
		return le32_get_bits(word[7], GENMASK(31, 31));
	case STATUS_ROUTER_TABLE:
		if (version < IPA_VERSION_5_0)
			return le32_get_bits(word[3], GENMASK(21, 17));
		return le32_get_bits(word[3], GENMASK(21, 14));
	case STATUS_ROUTER_RULE_INDEX:
		return le32_get_bits(word[3], GENMASK(31, 22));
	case STATUS_NAT_HIT:
		return le32_get_bits(word[4], GENMASK(0, 0));
	case STATUS_NAT_INDEX:
		return le32_get_bits(word[4], GENMASK(13, 1));
	case STATUS_NAT_TYPE:
		return le32_get_bits(word[4], GENMASK(15, 14));
	case STATUS_TAG_LOW32:
		return le32_get_bits(word[4], GENMASK(31, 16)) |
			(le32_get_bits(word[5], GENMASK(15, 0)) << 16);
	case STATUS_TAG_HIGH16:
		return le32_get_bits(word[5], GENMASK(31, 16));
	case STATUS_SEQUENCE:
		return le32_get_bits(word[6], GENMASK(7, 0));
	case STATUS_TIME_OF_DAY:
		return le32_get_bits(word[6], GENMASK(31, 8));
	case STATUS_HEADER_LOCAL:
		return le32_get_bits(word[7], GENMASK(0, 0));
	case STATUS_HEADER_OFFSET:
		return le32_get_bits(word[7], GENMASK(10, 1));
	case STATUS_FRAG_HIT:
		return le32_get_bits(word[7], GENMASK(11, 11));
	case STATUS_FRAG_RULE_INDEX:
		return le32_get_bits(word[7], GENMASK(15, 12));
	/* Status word 7, bits 16-30 are reserved */
	/* Status word 7, bit 31 is reserved (not IPA v5.0+) */
	default:
		WARN(true, "%s: bad field_id %u\n", __func__, field);
		return 0;
	}
}
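
/* Worked example (editorial illustration, not driver code): if word[1]
 * held the little-endian value 0x001405dc, STATUS_LENGTH would decode
 * from bits 15:0 as 0x5dc (1500 bytes), and on hardware before IPA
 * v5.0 STATUS_SRC_ENDPOINT would decode from bits 20:16 as 20.
 */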
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
	/* A hard aggregation limit will not be crossed; aggregation closes
	 * if saving incoming data would cross the hard byte limit boundary.
	 *
	 * With a soft limit, aggregation closes *after* the size boundary
	 * has been crossed.  In that case the limit must leave enough space
	 * after that limit to receive a full MTU of data plus overhead.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The byte limit is encoded as a number of kilobytes */

	return rx_buffer_size / SZ_1K;
}
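
/* Worked example (editorial illustration; sizes assumed): with a
 * hypothetical 8192-byte receive buffer, a hard limit encodes as
 * 8192 / SZ_1K = 8 KB.  With a soft limit, IPA_MTU plus
 * IPA_RX_BUFFER_OVERHEAD (say roughly 1800 bytes combined) is
 * reserved first, encoding 6 KB, so a full MTU arriving after the
 * boundary is crossed still fits in the buffer.
 */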
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	enum ipa_endpoint_name other_name;
	struct device *dev = ipa->dev;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		const struct ipa_endpoint_rx *rx_config;
		const struct reg *reg;
		u32 buffer_size;
		u32 aggr_size;
		u32 limit;

		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		/* Nothing more to check for non-AP RX */
		if (data->ee_id != GSI_EE_AP)
			return true;

		rx_config = &data->endpoint.config.rx;

		/* The buffer size must hold an MTU plus overhead */
		buffer_size = rx_config->buffer_size;
		limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
		if (buffer_size < limit) {
			dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
				data->endpoint_id, buffer_size, limit);
			return false;
		}

		if (!data->endpoint.config.aggregation) {
			bool result = true;

			/* No aggregation; check for bogus aggregation data */
			if (rx_config->aggr_time_limit) {
				dev_err(dev,
					"time limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_hard_limit) {
				dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			if (rx_config->aggr_close_eof) {
				dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
					data->endpoint_id);
				result = false;
			}

			return result;	/* Nothing more to check */
		}

		/* For an endpoint supporting receive aggregation, the byte
		 * limit defines the point at which aggregation closes.  This
		 * check ensures the receive buffer size doesn't result in a
		 * limit that exceeds what's representable in the aggregation
		 * byte limit field.
		 */
		aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
					     rx_config->aggr_hard_limit);
		reg = ipa_reg(ipa, ENDP_INIT_AGGR);

		limit = reg_field_max(reg, BYTE_LIMIT);
		if (aggr_size > limit) {
			dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
				data->endpoint_id, aggr_size, limit);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	/* Starting with IPA v4.5 sequencer replication is obsolete */
	if (ipa->version >= IPA_VERSION_4_5) {
		if (data->endpoint.config.tx.seq_rep_type) {
			dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
				data->endpoint_id);
			return false;
		}
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}
/* Validate endpoint configuration data.  Return max defined endpoint ID */
static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = ipa->dev;
	enum ipa_endpoint_name name;
	u32 max;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return 0;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return 0;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return 0;
	}

	max = 0;
	for (name = 0; name < count; name++, dp++) {
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return 0;
		max = max_t(u32, max, dp->endpoint_id);
	}

	return max;
}
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0, and
 * delay mode should not be used starting with IPA v4.2.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 field_id;
	u32 offset;
	bool state;
	u32 mask;
	u32 val;

	if (endpoint->toward_ipa)
		WARN_ON(ipa->version >= IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	reg = ipa_reg(ipa, ENDP_INIT_CTRL);
	offset = reg_n_offset(reg, endpoint->endpoint_id);
	val = ioread32(ipa->reg_virt + offset);

	field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
	mask = reg_bit(reg, field_id);

	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}
/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* Delay mode should not be used for IPA v4.2+ */
	WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
	WARN_ON(!endpoint->toward_ipa);

	(void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 unit = endpoint_id / 32;
	const struct reg *reg;
	u32 val;

	WARN_ON(!test_bit(endpoint_id, ipa->available));

	reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
	val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));

	return !!(val & BIT(endpoint_id % 32));
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 mask = BIT(endpoint_id % 32);
	struct ipa *ipa = endpoint->ipa;
	u32 unit = endpoint_id / 32;
	const struct reg *reg;

	WARN_ON(!test_bit(endpoint_id, ipa->available));

	reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
	iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->config.aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}
/* Put all modem RX endpoints into suspend mode, and stop transmission
 * on all modem TX endpoints.  Prior to IPA v4.2, endpoint DELAY mode is
 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
 * control instead.
 */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id = 0;

	while (endpoint_id < ipa->endpoint_count) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		if (!endpoint->toward_ipa)
			(void)ipa_endpoint_program_suspend(endpoint, enable);
		else if (ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			gsi_modem_channel_flow_control(&ipa->gsi,
						       endpoint->channel_id,
						       enable);
	}
}
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	struct gsi_trans *trans;
	u32 endpoint_id;
	u32 count;

	/* We need one command per modem TX endpoint, plus the commands
	 * that clear the pipeline.
	 */
	count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(ipa->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;
		const struct reg *reg;
		u32 offset;

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		reg = ipa_reg(ipa, ENDP_STATUS);
		offset = reg_n_offset(reg, endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	enum ipa_cs_offload_en enabled;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_CFG);
	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->config.checksum) {
		enum ipa_version version = ipa->version;

		if (endpoint->toward_ipa) {
			u32 off;

			/* Checksum header offset is in 4-byte units */
			off = sizeof(struct rmnet_map_header) / sizeof(u32);
			val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	reg = ipa_reg(ipa, ENDP_INIT_NAT);
	val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->config.checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
				  const struct reg *reg, u32 header_size)
{
	u32 field_max = reg_field_max(reg, HDR_LEN);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = reg_encode(reg, HDR_LEN, header_size & field_max);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(header_size > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	header_size >>= hweight32(field_max);
	WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
	val |= reg_encode(reg, HDR_LEN_MSB, header_size);

	return val;
}
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
				      const struct reg *reg, u32 offset)
{
	u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
	u32 val;

	/* We know field_max can be used as a mask (2^n - 1) */
	val = reg_encode(reg, HDR_OFST_METADATA, offset);
	if (version < IPA_VERSION_4_5) {
		WARN_ON(offset > field_max);
		return val;
	}

	/* IPA v4.5 adds a few more most-significant bits */
	offset >>= hweight32(field_max);
	WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
	val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);

	return val;
}
/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR);
	if (endpoint->config.qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encode(version, reg, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encode(version, reg, off);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);

			val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
			val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= reg_bit(reg, HDR_OFST_METADATA_VALID);

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
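
/* For reference, the QMAP header programmed above is the MAP header
 * declared in <linux/if_rmnet.h> (layout paraphrased here; see that
 * header for the authoritative definition):
 *
 *	struct rmnet_map_header {
 *		u8 flags;	// pad length, command/data flag
 *		u8 mux_id;	// filled via OFST_METADATA (RX)
 *		__be16 pkt_len;	// filled via HDR_OFST_PKT_SIZE (RX)
 *	};
 */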
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 pad_align = endpoint->config.rx.pad_align;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
	if (endpoint->config.qmap) {
		/* We have a header, so we must specify its endianness */
		val |= reg_bit(reg, HDR_ENDIANNESS);	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding.  The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->config.qmap && !endpoint->toward_ipa) {
			u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
			u32 off;	/* Field offset within header */

			off = offsetof(struct rmnet_map_header, pkt_len);
			/* Low bits are in the ENDP_INIT_HDR register */
			off >>= hweight32(mask);
			val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
	offset = reg_n_offset(reg, endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->config.qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_MODE);
	if (endpoint->config.dma_mode) {
		enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
		u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;

		val = reg_encode(reg, ENDP_MODE, IPA_DMA);
		val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
	} else {
		val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
	}
	/* All other bits unspecified (and 0) */

	offset = reg_n_offset(reg, endpoint->endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);
}
/* For IPA v4.5+, times are expressed using Qtime.  A time is represented
 * at one of several available granularities, which are configured in
 * ipa_qtime_config().  Three (or, starting with IPA v5.0, four) pulse
 * generators are set up with different "tick" periods.  A Qtime value
 * encodes a tick count along with an indication of a pulse generator
 * (which has a fixed tick period).  Two pulse generators are always
 * available to the AP; a third is available starting with IPA v5.0.
 * This function determines which pulse generator most accurately
 * represents the time period provided, and returns the tick count to
 * use to represent that time.
 */
static u32
ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
{
	u32 which = 0;
	u32 ticks;

	/* Pulse generator 0 has 100 microsecond granularity */
	ticks = DIV_ROUND_CLOSEST(microseconds, 100);
	if (ticks <= max)
		goto out;

	/* Pulse generator 1 has millisecond granularity */
	which = 1;
	ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
	if (ticks <= max)
		goto out;

	if (ipa->version >= IPA_VERSION_5_0) {
		/* Pulse generator 2 has 10 millisecond granularity */
		which = 2;
		ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
	}

	WARN_ON(ticks > max);
out:
	*select = which;

	return ticks;
}
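
/* Worked example (editorial illustration): a request for 100000
 * microseconds against an 8-bit field (max = 255) overflows pulse
 * generator 0 (1000 ticks of 100 microseconds), so generator 1 is
 * chosen instead: *select is set to 1 and 100 one-millisecond ticks
 * are returned.
 */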
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
				  u32 microseconds)
{
	u32 ticks;
	u32 max;

	if (!microseconds)
		return 0;	/* Nothing to compute if time limit is 0 */

	max = reg_field_max(reg, TIME_LIMIT);
	if (ipa->version >= IPA_VERSION_4_5) {
		u32 select;

		ticks = ipa_qtime_val(ipa, microseconds, max, &select);

		return reg_encode(reg, AGGR_GRAN_SEL, select) |
		       reg_encode(reg, TIME_LIMIT, ticks);
	}

	/* We program aggregation granularity in ipa_hardware_config() */
	ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
	WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
	     microseconds, max * IPA_AGGR_GRANULARITY);

	return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_INIT_AGGR);
	if (endpoint->config.aggregation) {
		if (!endpoint->toward_ipa) {
			const struct ipa_endpoint_rx *rx_config;
			u32 buffer_size;
			u32 limit;

			rx_config = &endpoint->config.rx;
			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
			val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);

			buffer_size = rx_config->buffer_size;
			limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
						 rx_config->aggr_hard_limit);
			val |= reg_encode(reg, BYTE_LIMIT, limit);

			limit = rx_config->aggr_time_limit;
			val |= aggr_time_limit_encode(ipa, reg, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			if (rx_config->aggr_close_eof)
				val |= reg_bit(reg, SW_EOF_ACTIVE);
		} else {
			val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
			val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
		/* other fields ignored */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value representing the timeout period provided
 * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
 */
static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
				  u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5) {
		u32 max = reg_field_max(reg, TIMER_LIMIT);
		u32 select;
		u32 ticks;

		ticks = ipa_qtime_val(ipa, microseconds, max, &select);

		return reg_encode(reg, TIMER_GRAN_SEL, select) |
		       reg_encode(reg, TIMER_LIMIT, ticks);
	}

	/* Use 64 bit arithmetic to avoid overflow */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);

	/* We still need the result to fit into the field */
	WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible.  Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
	 */
	high = fls(ticks);		/* 1..32 (or warning above) */
	width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = reg_encode(reg, TIMER_SCALE, scale);
	val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);

	return val;
}
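
/* Worked example for the IPA v4.2 base/scale encoding (editorial
 * illustration; a 5-bit base field is assumed): for ticks = 1000,
 * fls(1000) is 10, so scale = 10 - 5 = 5.  Rounding adds
 * 1 << 4 = 16 (ticks = 1016, high bit unchanged), and the encoded
 * base is 1016 >> 5 = 31, representing 31 << 5 = 992 ticks, within
 * about 1% of the requested period.
 */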
/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
	val = hol_block_timer_encode(ipa, reg, microseconds);

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 offset;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
	offset = reg_n_offset(reg, endpoint_id);
	val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;

	iowrite32(val, ipa->reg_virt + offset);

	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, ipa->reg_virt + offset);
}
/* Assumes HOL_BLOCK is in disabled state */
static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
					       u32 microseconds)
{
	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
	ipa_endpoint_init_hol_block_en(endpoint, true);
}

static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
{
	ipa_endpoint_init_hol_block_en(endpoint, false);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 endpoint_id = 0;

	while (endpoint_id < ipa->endpoint_count) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_disable(endpoint);
		ipa_endpoint_init_hol_block_enable(endpoint, 0);
	}
}
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 resource_group = endpoint->config.resource_group;
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
	val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	reg = ipa_reg(ipa, ENDP_INIT_SEQ);

	/* Low-order byte configures primary packet processing */
	val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);

	/* Second byte (if supported) configures replicated packet processing */
	if (ipa->version < IPA_VERSION_4_5)
		val |= reg_encode(reg, SEQ_REP_TYPE,
				  endpoint->config.tx.seq_rep_type);

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags > endpoint->skb_frag_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
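
/* Example (editorial illustration): an skb with three page fragments
 * needs 1 + 3 = 4 TREs.  If the endpoint's skb_frag_max is less than
 * 3 the skb is linearized first, after which a single TRE suffices.
 */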
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	const struct reg *reg;
	u32 val = 0;

	reg = ipa_reg(ipa, ENDP_STATUS);
	if (endpoint->config.status_enable) {
		val |= reg_bit(reg, STATUS_EN);
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->config.tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
		}
		/* STATUS_LOCATION is 0, meaning IPA packet status
		 * precedes the packet (not present for IPA v4.5+)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
	}

	iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
				      struct gsi_trans *trans)
{
	struct page *page;
	u32 buffer_size;
	u32 offset;
	u32 len;
	int ret;

	buffer_size = endpoint->config.rx.buffer_size;
	page = dev_alloc_pages(get_order(buffer_size));
	if (!page)
		return -ENOMEM;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = buffer_size - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		put_page(page);
	else
		trans->data = page;	/* transaction owns page now */

	return ret;
}
/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:	Endpoint to be replenished
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case buffers are not queued to
 * the hardware.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
		return;

	/* Skip it if it's already active */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
		return;

	while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
		bool doorbell;

		if (ipa_endpoint_replenish_one(endpoint, trans))
			goto try_again_later;

		/* Ring the doorbell if we've got a full batch */
		doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
		gsi_trans_commit(trans, doorbell);
	}

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	return;

try_again_later:
	gsi_trans_free(trans);
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * If the hardware has no receive buffers queued, schedule work to
	 * try replenishing again.
	 */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);

	/* Start replenishing if hardware currently has no buffers */
	if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
		ipa_endpoint_replenish(endpoint);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	if (!endpoint->netdev)
		return;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* Copy the data into the socket buffer and receive it */
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));

	skb = build_skb(page_address(page), buffer_size);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
/* The format of an IPA packet status structure is the same for several
 * status types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}
static bool
ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
{
	struct ipa *ipa = endpoint->ipa;
	enum ipa_status_opcode opcode;
	u32 endpoint_id;

	opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
	if (!ipa_status_format_packet(opcode))
		return true;

	endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}
static bool
ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
{
	struct ipa_endpoint *command_endpoint;
	enum ipa_status_mask status_mask;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
	if (!(status_mask & IPA_STATUS_MASK_TAG_VALID))
		return false;	/* No valid tag */

	/* The status contains a valid tag.  We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(ipa->dev, "unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}
/* Return whether the status indicates the packet should be dropped */
static bool
ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{
	enum ipa_status_exception exception;
	struct ipa *ipa = endpoint->ipa;
	u32 rule;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag_valid(endpoint, data))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
	if (exception)
		return exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);

	return rule == IPA_STATUS_RULE_MISS;
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	u32 buffer_size = endpoint->config.rx.buffer_size;
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = buffer_size - total_len;
	struct ipa *ipa = endpoint->ipa;
	struct device *dev = ipa->dev;
	u32 resid = total_len;

	while (resid) {
		u32 length;
		u32 align;
		u32 len;

		if (resid < IPA_STATUS_SIZE) {
			dev_err(dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, IPA_STATUS_SIZE);
			break;
		}

		/* Skip over status packets that lack packet data */
		length = ipa_status_extract(ipa, data, STATUS_LENGTH);
		if (!length || ipa_endpoint_status_skip(endpoint, data)) {
			data += IPA_STATUS_SIZE;
			resid -= IPA_STATUS_SIZE;
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status.  If the hardware is configured to
		 * pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->config.rx.pad_align ? : 1;
		len = IPA_STATUS_SIZE + ALIGN(length, align);
		if (endpoint->config.checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, data)) {
			void *data2;
			u32 extra;

			/* Client receives only packet data (no status) */
			data2 = data + IPA_STATUS_SIZE;

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer.  Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
			extra = DIV_ROUND_CLOSEST(unused * len, total_len);
			ipa_endpoint_skb_copy(endpoint, data2, length, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
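
/* Worked example (editorial illustration; sizes assumed): for a
 * 1000-byte packet with pad_align 4 and checksum offload enabled
 * (assuming an 8-byte trailer), len = 32 + ALIGN(1000, 4) + 8 = 1040
 * bytes of buffer consumed.  If 2048 bytes of an 8192-byte buffer
 * were unused, the skb is charged
 * extra = DIV_ROUND_CLOSEST(2048 * 1040, 6144) = 347 truesize bytes.
 */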
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	struct page *page;

	if (endpoint->toward_ipa)
		return;

	if (trans->cancelled)
		goto done;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->config.status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
done:
	ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	const struct reg *reg;
	u32 val;

	reg = ipa_reg(ipa, ROUTE);
	/* ROUTE_DIS is 0 */
	val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
	val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
	/* ROUTE_DEF_HDR_OFST is 0 */
	val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
	val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);

	iowrite32(val, ipa->reg_virt + reg_offset(reg));
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct device *dev = ipa->dev;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, true);

	usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 */
	special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
			endpoint->config.aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(ipa->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa) {
		/* Newer versions of IPA use GSI channel flow control
		 * instead of endpoint DELAY mode to prevent sending data.
		 * Flow control is disabled for newly-allocated channels,
		 * and we can assume flow control is not (ever) enabled
		 * for AP TX channels.
		 */
		if (endpoint->ipa->version < IPA_VERSION_4_2)
			ipa_endpoint_program_delay(endpoint, false);
	} else {
		/* Ensure suspend mode is off on all AP RX endpoints */
		(void)ipa_endpoint_program_suspend(endpoint, false);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_nat(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	if (!endpoint->toward_ipa) {
		if (endpoint->config.rx.holb_drop)
			ipa_endpoint_init_hol_block_enable(endpoint, 0);
		else
			ipa_endpoint_init_hol_block_disable(endpoint);
	}
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_rsrc_grp(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(ipa->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	__set_bit(endpoint_id, ipa->enabled);

	return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!test_bit(endpoint_id, ipa->enabled))
		return;

	__clear_bit(endpoint_id, endpoint->ipa->enabled);

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(ipa->dev, "error %d attempting to stop endpoint %u\n",
			ret, endpoint_id);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = endpoint->ipa->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = endpoint->ipa->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	int ret;

	if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	ret = gsi_channel_resume(gsi, endpoint->channel_id);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	__set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	__clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
}
void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available_count = 0;
	bitmap_free(ipa->available);
	ipa->available = NULL;
}
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct reg *reg;
	u32 endpoint_id;
	u32 hw_limit;
	u32 tx_count;
	u32 rx_count;
	u32 rx_base;
	u32 limit;
	u32 val;

	/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
	 * Furthermore, the endpoints were not grouped such that TX
	 * endpoint numbers started with 0 and RX endpoints had numbers
	 * higher than all TX endpoints, so we can't do the simple
	 * direction check used for newer hardware below.
	 *
	 * For hardware that doesn't support the FLAVOR_0 register,
	 * just set the available mask to support any endpoint, and
	 * assume the configuration is valid.
	 */
	if (ipa->version < IPA_VERSION_3_5) {
		ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
		if (!ipa->available)
			return -ENOMEM;
		ipa->available_count = IPA_ENDPOINT_MAX;

		bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);

		return 0;
	}

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number supported by software.
	 */
	reg = ipa_reg(ipa, FLAVOR_0);
	val = ioread32(ipa->reg_virt + reg_offset(reg));

	/* Our RX is an IPA producer; our TX is an IPA consumer. */
	tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
	rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
	rx_base = reg_decode(reg, PROD_LOWEST, val);

	limit = rx_base + rx_count;
	if (limit > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints, %u > %u\n",
			limit, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}

	/* Until IPA v5.0, the max endpoint ID was 32 */
	hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
	if (limit > hw_limit) {
		dev_err(dev, "unexpected endpoint count, %u > %u\n",
			limit, hw_limit);
		return -EINVAL;
	}

	/* Allocate and initialize the available endpoint bitmap */
	ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
	if (!ipa->available)
		return -ENOMEM;
	ipa->available_count = limit;

	/* Mark all supported RX and TX endpoints as available */
	bitmap_set(ipa->available, 0, tx_count);
	bitmap_set(ipa->available, rx_base, rx_count);

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
		struct ipa_endpoint *endpoint;

		if (endpoint_id >= limit) {
			dev_err(dev, "invalid endpoint id, %u > %u\n",
				endpoint_id, limit - 1);
			goto err_free_bitmap;
		}

		if (!test_bit(endpoint_id, ipa->available)) {
			dev_err(dev, "unavailable endpoint id %u\n",
				endpoint_id);
			goto err_free_bitmap;
		}

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->toward_ipa) {
			if (endpoint_id < tx_count)
				continue;
		} else if (endpoint_id >= rx_base) {
			continue;
		}

		dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
		goto err_free_bitmap;
	}

	return 0;

err_free_bitmap:
	ipa_endpoint_deconfig(ipa);

	return -EINVAL;
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->config = data->endpoint.config;

	__set_bit(endpoint->endpoint_id, ipa->defined);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	__clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);

	memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 endpoint_id;

	for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);

	bitmap_free(ipa->enabled);
	ipa->enabled = NULL;
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Initialize endpoints; a bitmask of endpoints that support filtering
 * is recorded in ipa->filtered.  Returns 0 on success, or a negative
 * error code.
 */
int ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filtered;

	BUILD_BUG_ON(!IPA_REPLENISH_BATCH);

	/* Number of endpoints is one more than the maximum ID */
	ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
	if (!ipa->endpoint_count)
		return -EINVAL;

	/* Initialize endpoint state bitmaps */
	ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->defined)
		return -ENOMEM;

	ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->set_up)
		goto err_free_defined;

	ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
	if (!ipa->enabled)
		goto err_free_set_up;

	filtered = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filtered |= BIT(data->endpoint_id);
		if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
			ipa->modem_tx_count++;
	}

	/* Make sure the set of filtered endpoints is valid */
	if (!ipa_filtered_valid(ipa, filtered)) {
		ipa_endpoint_exit(ipa);

		return -EINVAL;
	}

	ipa->filtered = filtered;

	return 0;

err_free_set_up:
	bitmap_free(ipa->set_up);
	ipa->set_up = NULL;
err_free_defined:
	bitmap_free(ipa->defined);
	ipa->defined = NULL;

	return -ENOMEM;
}