// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2019 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

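/* Return the next option in the device descriptor, or NULL if the next
 * option would run past the descriptor's total_length.
 */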
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
                                              struct gve_device_option *option)
{
        void *option_end, *descriptor_end;

        option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
        descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

        return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

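/* Enable the device features the driver recognizes; malformed or unknown
 * options are logged and skipped.
 */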
static
void gve_parse_device_option(struct gve_priv *priv,
                             struct gve_device_descriptor *device_descriptor,
                             struct gve_device_option *option)
{
        u16 option_length = be16_to_cpu(option->option_length);
        u16 option_id = be16_to_cpu(option->option_id);

        switch (option_id) {
        case GVE_DEV_OPT_ID_RAW_ADDRESSING:
                /* If the length or feature mask doesn't match,
                 * continue without enabling the feature.
                 */
                if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
                    option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
                                 GVE_DEV_OPT_LEN_RAW_ADDRESSING,
                                 cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
                                 option_length, option->feat_mask);
                        priv->raw_addressing = 0;
                } else {
                        dev_info(&priv->pdev->dev,
                                 "Raw addressing device option enabled.\n");
                        priv->raw_addressing = 1;
                }
                break;
        default:
                /* If we don't recognize the option just continue
                 * without doing anything.
                 */
                dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
                        option_id);
        }
}

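/* Allocate one DMA-coherent page for the admin queue, zero the command
 * counters, and register the queue's page frame number with the device.
 * The ring holds PAGE_SIZE / sizeof(union gve_adminq_command) commands,
 * so adminq_mask maps the monotonically increasing producer count onto
 * a ring slot.
 */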
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
        priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
                                          &priv->adminq_bus_addr, GFP_KERNEL);
        if (unlikely(!priv->adminq))
                return -ENOMEM;

        priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
        priv->adminq_prod_cnt = 0;
        priv->adminq_cmd_fail = 0;
        priv->adminq_timeouts = 0;
        priv->adminq_describe_device_cnt = 0;
        priv->adminq_cfg_device_resources_cnt = 0;
        priv->adminq_register_page_list_cnt = 0;
        priv->adminq_unregister_page_list_cnt = 0;
        priv->adminq_create_tx_queue_cnt = 0;
        priv->adminq_create_rx_queue_cnt = 0;
        priv->adminq_destroy_tx_queue_cnt = 0;
        priv->adminq_destroy_rx_queue_cnt = 0;
        priv->adminq_dcfg_device_resources_cnt = 0;
        priv->adminq_set_driver_parameter_cnt = 0;
        priv->adminq_report_stats_cnt = 0;
        priv->adminq_report_link_speed_cnt = 0;

        /* Setup Admin queue with the device */
        iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
                    &priv->reg_bar0->adminq_pfn);

        gve_set_admin_queue_ok(priv);
        return 0;
}

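/* Unregister the admin queue with the device, waiting until the PFN
 * register reads back zero.
 */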
void gve_adminq_release(struct gve_priv *priv)
{
        int i = 0;

        /* Tell the device the adminq is leaving */
        iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
        while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
                /* If this is reached the device is unrecoverable and still
                 * holding memory. Continue looping to avoid memory corruption,
                 * but WARN so it is visible what is going on.
                 */
                if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
                        WARN(1, "Unrecoverable platform error!");
                i++;
                msleep(GVE_ADMINQ_SLEEP_LEN);
        }
        gve_clear_device_rings_ok(priv);
        gve_clear_device_resources_ok(priv);
        gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
        if (!gve_get_admin_queue_ok(priv))
                return;
        gve_adminq_release(priv);
        dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
        gve_clear_admin_queue_ok(priv);
}

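/* Ring the admin queue doorbell with the latest producer count. */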
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
        iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

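/* Poll the device's event counter until it catches up to prod_cnt;
 * returns false if the poll budget is exhausted first.
 */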
static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
        int i;

        for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
                if (ioread32be(&priv->reg_bar0->adminq_event_counter)
                    == prod_cnt)
                        return true;
                msleep(GVE_ADMINQ_SLEEP_LEN);
        }

        return false;
}

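/* Translate a device status code into a Linux errno, counting failures. */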
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
        if (status != GVE_ADMINQ_COMMAND_PASSED &&
            status != GVE_ADMINQ_COMMAND_UNSET) {
                dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
                priv->adminq_cmd_fail++;
        }
        switch (status) {
        case GVE_ADMINQ_COMMAND_PASSED:
                return 0;
        case GVE_ADMINQ_COMMAND_UNSET:
                dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
                return -EINVAL;
        case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
        case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
        case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
        case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
        case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
                return -EAGAIN;
        case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
        case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
        case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
        case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
        case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
        case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
                return -EINVAL;
        case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
                return -ETIME;
        case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
        case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
                return -EACCES;
        case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
                return -ENOMEM;
        case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
                return -ENOTSUPP;
        default:
                dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
                return -EINVAL;
        }
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
        u32 tail, head;
        int i;

        tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
        head = priv->adminq_prod_cnt;

        gve_adminq_kick_cmd(priv, head);
        if (!gve_adminq_wait_for_cmd(priv, head)) {
                dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
                priv->adminq_timeouts++;
                return -ENOTRECOVERABLE;
        }

        for (i = tail; i < head; i++) {
                union gve_adminq_command *cmd;
                u32 status, err;

                cmd = &priv->adminq[i & priv->adminq_mask];
                status = be32_to_cpu(READ_ONCE(cmd->status));
                err = gve_adminq_parse_err(priv, status);
                if (err)
                        // Return the first error if we failed.
                        return err;
        }

        return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
                                union gve_adminq_command *cmd_orig)
{
        union gve_adminq_command *cmd;
        u32 opcode;
        u32 tail;

        tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

        // Check if next command will overflow the buffer.
        if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
                int err;

                // Flush existing commands to make room.
                err = gve_adminq_kick_and_wait(priv);
                if (err)
                        return err;

                // Retry.
                tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
                if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
                        // This should never happen. We just flushed the
                        // command queue so there should be enough space.
                        return -ENOMEM;
                }
        }

        cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
        priv->adminq_prod_cnt++;

        memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
        opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

        switch (opcode) {
        case GVE_ADMINQ_DESCRIBE_DEVICE:
                priv->adminq_describe_device_cnt++;
                break;
        case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
                priv->adminq_cfg_device_resources_cnt++;
                break;
        case GVE_ADMINQ_REGISTER_PAGE_LIST:
                priv->adminq_register_page_list_cnt++;
                break;
        case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
                priv->adminq_unregister_page_list_cnt++;
                break;
        case GVE_ADMINQ_CREATE_TX_QUEUE:
                priv->adminq_create_tx_queue_cnt++;
                break;
        case GVE_ADMINQ_CREATE_RX_QUEUE:
                priv->adminq_create_rx_queue_cnt++;
                break;
        case GVE_ADMINQ_DESTROY_TX_QUEUE:
                priv->adminq_destroy_tx_queue_cnt++;
                break;
        case GVE_ADMINQ_DESTROY_RX_QUEUE:
                priv->adminq_destroy_rx_queue_cnt++;
                break;
        case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
                priv->adminq_dcfg_device_resources_cnt++;
                break;
        case GVE_ADMINQ_SET_DRIVER_PARAMETER:
                priv->adminq_set_driver_parameter_cnt++;
                break;
        case GVE_ADMINQ_REPORT_STATS:
                priv->adminq_report_stats_cnt++;
                break;
        case GVE_ADMINQ_REPORT_LINK_SPEED:
                priv->adminq_report_link_speed_cnt++;
                break;
        default:
                dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
        }

        return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
{
        u32 tail, head;
        int err;

        tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
        head = priv->adminq_prod_cnt;
        if (tail != head)
                // This is not a valid path
                return -EINVAL;

        err = gve_adminq_issue_cmd(priv, cmd_orig);
        if (err)
                return err;

        return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
                                          dma_addr_t counter_array_bus_addr,
                                          u32 num_counters,
                                          dma_addr_t db_array_bus_addr,
                                          u32 num_ntfy_blks)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
        cmd.configure_device_resources =
                (struct gve_adminq_configure_device_resources) {
                .counter_array = cpu_to_be64(counter_array_bus_addr),
                .num_counters = cpu_to_be32(num_counters),
                .irq_db_addr = cpu_to_be64(db_array_bus_addr),
                .num_irq_dbs = cpu_to_be32(num_ntfy_blks),
                .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
                .ntfy_blk_msix_base_idx =
                                cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

        return gve_adminq_execute_cmd(priv, &cmd);
}

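/* Queue (but do not flush) a CREATE_TX_QUEUE command for one tx ring. */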
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
        struct gve_tx_ring *tx = &priv->tx[queue_index];
        union gve_adminq_command cmd;
        u32 qpl_id;
        int err;

        qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
        cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
                .reserved = 0,
                .queue_resources_addr =
                        cpu_to_be64(tx->q_resources_bus),
                .tx_ring_addr = cpu_to_be64(tx->bus),
                .queue_page_list_id = cpu_to_be32(qpl_id),
                .ntfy_id = cpu_to_be32(tx->ntfy_id),
        };

        err = gve_adminq_issue_cmd(priv, &cmd);
        if (err)
                return err;

        return 0;
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
{
        int err;
        int i;

        for (i = 0; i < num_queues; i++) {
                err = gve_adminq_create_tx_queue(priv, i);
                if (err)
                        return err;
        }

        return gve_adminq_kick_and_wait(priv);
}

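/* Queue (but do not flush) a CREATE_RX_QUEUE command for one rx ring. */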
static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
        struct gve_rx_ring *rx = &priv->rx[queue_index];
        union gve_adminq_command cmd;
        u32 qpl_id;
        int err;

        qpl_id = priv->raw_addressing ? GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
        cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
                .index = cpu_to_be32(queue_index),
                .reserved = 0,
                .ntfy_id = cpu_to_be32(rx->ntfy_id),
                .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
                .rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
                .rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
                .queue_page_list_id = cpu_to_be32(qpl_id),
        };

        err = gve_adminq_issue_cmd(priv, &cmd);
        if (err)
                return err;

        return 0;
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
        int err;
        int i;

        for (i = 0; i < num_queues; i++) {
                err = gve_adminq_create_rx_queue(priv, i);
                if (err)
                        return err;
        }

        return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
        union gve_adminq_command cmd;
        int err;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
        cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
                .queue_id = cpu_to_be32(queue_index),
        };

        err = gve_adminq_issue_cmd(priv, &cmd);
        if (err)
                return err;

        return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
{
        int err;
        int i;

        for (i = 0; i < num_queues; i++) {
                err = gve_adminq_destroy_tx_queue(priv, i);
                if (err)
                        return err;
        }

        return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
        union gve_adminq_command cmd;
        int err;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
        cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
                .queue_id = cpu_to_be32(queue_index),
        };

        err = gve_adminq_issue_cmd(priv, &cmd);
        if (err)
                return err;

        return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
        int err;
        int i;

        for (i = 0; i < num_queues; i++) {
                err = gve_adminq_destroy_rx_queue(priv, i);
                if (err)
                        return err;
        }

        return gve_adminq_kick_and_wait(priv);
}

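/* Fetch the device descriptor into a DMA page, validate the ring sizes and
 * MTU it reports, then walk and parse the trailing device options.
 */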
int gve_adminq_describe_device(struct gve_priv *priv)
{
        struct gve_device_descriptor *descriptor;
        struct gve_device_option *dev_opt;
        union gve_adminq_command cmd;
        dma_addr_t descriptor_bus;
        u16 num_options;
        int err = 0;
        u8 *mac;
        u16 mtu;
        int i;

        memset(&cmd, 0, sizeof(cmd));
        descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
                                        &descriptor_bus, GFP_KERNEL);
        if (!descriptor)
                return -ENOMEM;
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
        cmd.describe_device.device_descriptor_addr =
                                                cpu_to_be64(descriptor_bus);
        cmd.describe_device.device_descriptor_version =
                        cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
        cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

        err = gve_adminq_execute_cmd(priv, &cmd);
        if (err)
                goto free_device_descriptor;

        priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
        if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
                dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
        if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
            < PAGE_SIZE ||
            priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
            < PAGE_SIZE) {
                dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->max_registered_pages =
                                be64_to_cpu(descriptor->max_registered_pages);
        mtu = be16_to_cpu(descriptor->mtu);
        if (mtu < ETH_MIN_MTU) {
                dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
                err = -EINVAL;
                goto free_device_descriptor;
        }
        priv->dev->max_mtu = mtu;
        priv->num_event_counters = be16_to_cpu(descriptor->counters);
        ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
        mac = descriptor->mac;
        dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
        priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
        priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
        if (priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
                dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
                        priv->rx_data_slot_cnt);
                priv->rx_desc_cnt = priv->rx_data_slot_cnt;
        }
        priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
        dev_opt = (void *)(descriptor + 1);

        num_options = be16_to_cpu(descriptor->num_device_options);
        for (i = 0; i < num_options; i++) {
                struct gve_device_option *next_opt;

                next_opt = gve_get_next_option(descriptor, dev_opt);
                if (!next_opt) {
                        dev_err(&priv->dev->dev,
                                "options exceed device_descriptor's total length.\n");
                        err = -EINVAL;
                        goto free_device_descriptor;
                }
                gve_parse_device_option(priv, descriptor, dev_opt);
                dev_opt = next_opt;
        }

free_device_descriptor:
        /* Free with the same size that was allocated above. */
        dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                          descriptor_bus);
        return err;
}

int gve_adminq_register_page_list(struct gve_priv *priv,
                                  struct gve_queue_page_list *qpl)
{
        struct device *hdev = &priv->pdev->dev;
        u32 num_entries = qpl->num_entries;
        u32 size = num_entries * sizeof(qpl->page_buses[0]);
        union gve_adminq_command cmd;
        dma_addr_t page_list_bus;
        __be64 *page_list;
        int err;
        int i;

        memset(&cmd, 0, sizeof(cmd));
        page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        for (i = 0; i < num_entries; i++)
                page_list[i] = cpu_to_be64(qpl->page_buses[i]);

        cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
        cmd.reg_page_list = (struct gve_adminq_register_page_list) {
                .page_list_id = cpu_to_be32(qpl->id),
                .num_pages = cpu_to_be32(num_entries),
                .page_address_list_addr = cpu_to_be64(page_list_bus),
        };

        err = gve_adminq_execute_cmd(priv, &cmd);
        dma_free_coherent(hdev, size, page_list, page_list_bus);
        return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
        cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
                .page_list_id = cpu_to_be32(page_list_id),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

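/* Report the driver's MTU to the device as a driver parameter. */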
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
        cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
                .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
                .parameter_value = cpu_to_be64(mtu),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

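/* Hand the device a stats report buffer and the interval at which the
 * device should refresh it.
 */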
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
                            dma_addr_t stats_report_addr, u64 interval)
{
        union gve_adminq_command cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
        cmd.report_stats = (struct gve_adminq_report_stats) {
                .stats_report_len = cpu_to_be64(stats_report_len),
                .stats_report_addr = cpu_to_be64(stats_report_addr),
                .interval = cpu_to_be64(interval),
        };

        return gve_adminq_execute_cmd(priv, &cmd);
}

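/* Ask the device to write its link speed into a single __be64 DMA region,
 * then cache the result in priv->link_speed.
 */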
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
        union gve_adminq_command gvnic_cmd;
        dma_addr_t link_speed_region_bus;
        __be64 *link_speed_region;
        int err;

        link_speed_region =
                dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
                                   &link_speed_region_bus, GFP_KERNEL);

        if (!link_speed_region)
                return -ENOMEM;

        memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
        gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
        gvnic_cmd.report_link_speed.link_speed_address =
                cpu_to_be64(link_speed_region_bus);

        err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

        priv->link_speed = be64_to_cpu(*link_speed_region);
        dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
                          link_speed_region_bus);
        return err;
}