// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK		500
#define GVE_ADMINQ_SLEEP_LEN			20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK	100

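/* The admin queue is a single DMA-coherent page of gve_adminq_command
 * entries. The page frame number of that page is handed to the device
 * through the adminq_pfn register, and commands are issued by advancing a
 * producer counter that is masked into the ring.
 */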
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
					  &priv->adminq_bus_addr, GFP_KERNEL);
	if (unlikely(!priv->adminq))
		return -ENOMEM;

	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;

	/* Setup Admin queue with the device */
	iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
		    &priv->reg_bar0->adminq_pfn);

	gve_set_admin_queue_ok(priv);
	return 0;
}

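/* Release the admin queue page from the device by clearing the PFN
 * register and waiting for the device to acknowledge by reading back 0.
 */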
void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
	while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
		/* If this is reached the device is unrecoverable and still
		 * holding memory. Continue looping to avoid memory corruption,
		 * but WARN so it is visible what is going on.
		 */
		if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
			WARN(1, "Unrecoverable platform error!");
		i++;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

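/* Undo gve_adminq_alloc(): release the queue from the device, if it was
 * ever set up, and free the DMA page backing it.
 */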
void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
	gve_clear_admin_queue_ok(priv);
}

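/* The doorbell register tells the device how many commands have been
 * produced; the device advances adminq_event_counter as it consumes them,
 * which gve_adminq_wait_for_cmd() polls to detect completion.
 */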
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;

		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

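/* Translate a device admin queue status code into a kernel errno. */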
static int gve_adminq_parse_err(struct device *dev, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET)
		dev_err(dev, "AQ command failed with status %d\n", status);

	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -ENOTSUPP;
	default:
		dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
int gve_adminq_execute_cmd(struct gve_priv *priv,
			   union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 status = 0;
	u32 prod_cnt;

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;
	prod_cnt = priv->adminq_prod_cnt;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));

	gve_adminq_kick_cmd(priv, prod_cnt);
	if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
		dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
		return -ENOTRECOVERABLE;
	}

	memcpy(cmd_orig, cmd, sizeof(*cmd));
	status = be32_to_cpu(READ_ONCE(cmd->status));
	return gve_adminq_parse_err(&priv->pdev->dev, status);
}

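/* Every admin queue operation below follows the same pattern: zero a
 * command on the stack, set the big-endian opcode, fill in the
 * opcode-specific payload and hand it to gve_adminq_execute_cmd(). A
 * minimal sketch of a caller, mirroring the helpers below rather than
 * introducing any new device command:
 *
 *	union gve_adminq_command cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
 *	return gve_adminq_execute_cmd(priv, &cmd);
 */
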
/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
		.ntfy_blk_msix_base_idx =
					cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

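/* Queue creation commands point the device at rings and queue resources
 * the driver has already DMA-mapped; queue_page_list_id ties the queue to
 * a page list registered with gve_adminq_register_page_list().
 */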
int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.index = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
		.rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
		.rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
		.queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

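/* Queue destroy commands only carry the queue id; freeing the backing
 * memory is left to the caller once the device has released the queue.
 */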
int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

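/* Fetch the device descriptor into a temporary DMA page and use it to
 * fill in the ring sizes, MTU, MAC address and queue-page-list limits
 * advertised by the device, with basic sanity checks on the values.
 */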
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_descriptor *descriptor;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
						cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
			cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
		netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
			  priv->tx_desc_cnt);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
	    < PAGE_SIZE ||
	    priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
	    < PAGE_SIZE) {
		netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
			  priv->rx_desc_cnt);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->max_registered_pages =
				be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
			  mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
	mac = descriptor->mac;
	netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
	if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
		netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			  priv->rx_pages_per_qpl);
		priv->rx_desc_cnt = priv->rx_pages_per_qpl;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

free_device_descriptor:
	/* Free with the size used for the allocation above */
	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
			  descriptor_bus);
	return err;
}

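/* Register a queue page list: the page bus addresses are staged as
 * big-endian values in a temporary DMA buffer that the device reads while
 * processing the command, so the buffer can be freed as soon as the
 * command completes.
 */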
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

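/* MTU changes are reported to the device as a driver parameter. */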
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}