/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>

#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

#define EFX_SPI_VERIFY_BUF_LEN 16
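
/* A single MTD partition exported by this driver.  Falcon partitions are
 * addressed by an offset into the SPI device; Siena partitions are addressed
 * by MCDI NVRAM type, hence the union below.
 */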
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			bool updating;
			u8 nvram_type;
			u16 fw_subtype;
		} mcdi;
		size_t offset;
	};
	const char *type_name;
	char name[IFNAMSIZ + 20];
};
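
/* MTD operations provided by a NIC-generation-specific backend
 * (Falcon SPI or Siena MCDI).
 */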
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};
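
/* One MTD device and its partitions; part[] is allocated together with this
 * structure by the probe functions below.
 */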
struct efx_mtd {
	struct list_head node;
	struct efx_nic *efx;
	const struct efx_spi_device *spi;
	const char *name;
	const struct efx_mtd_ops *ops;
	size_t n_parts;
	struct efx_mtd_partition part[0];
};
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);

/* SPI utilities */
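
/* Poll the SPI status register until the device is no longer busy, sleeping
 * between reads.  Used after an erase and for sync on Falcon.
 */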
static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
	return -ETIMEDOUT;
}
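
/* Clear the block-protection bits in the SPI status register so that the
 * device can be erased and written.
 */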
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}
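
/* Erase one sector of the SPI device and then read it back to verify that it
 * is blank (all 0xff).
 */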
static int
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	unsigned pos, block_len;
	u8 empty[EFX_SPI_VERIFY_BUF_LEN];
	u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = efx_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = efx_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

/* MTD interface */
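
/* Generic erase entry point: delegate to the backend and report completion
 * through the erase_info callback.
 */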
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
	if (rc == 0) {
		erase->state = MTD_ERASE_DONE;
	} else {
		erase->state = MTD_ERASE_FAILED;
		erase->fail_addr = 0xffffffff;
	}
	mtd_erase_callback(erase);
	return rc;
}
static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->sync(mtd);
	if (rc)
		pr_err("%s: %s sync failed (%d)\n",
		       part->name, efx_mtd->name, rc);
}
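
/* Unregister a partition, retrying while the MTD device is still in use. */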
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = mtd_device_unregister(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);
	}
	WARN_ON(rc);
}
static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		efx_mtd_remove_partition(part);
	list_del(&efx_mtd->node);
	kfree(efx_mtd);
}
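
/* Name partitions after the net device, the partition type and, on Siena,
 * the firmware subtype.
 */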
static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
			snprintf(part->name, sizeof(part->name),
				 "%s %s:%02x", efx_mtd->efx->name,
				 part->type_name, part->mcdi.fw_subtype);
		else
			snprintf(part->name, sizeof(part->name),
				 "%s %s", efx_mtd->efx->name,
				 part->type_name);
}
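
/* Fill in the common mtd_info fields for each partition and register them
 * with the MTD core, unwinding any registrations on failure.
 */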
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;

	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;
	}

	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* mtd_device_register() returns 1 if the MTD table is full */
	return -ENOMEM;
}
void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd, *next;

	WARN_ON(efx_dev_registered(efx));

	list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
		efx_mtd_remove_device(efx_mtd);
}
void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;

	ASSERT_RTNL();

	list_for_each_entry(efx_mtd, &efx->mtd_list, node)
		efx_mtd_rename_device(efx_mtd);
}
int efx_mtd_probe(struct efx_nic *efx)
{
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		return siena_mtd_probe(efx);
	else
		return falcon_mtd_probe(efx);
}

/* Implementation of MTD operations for Falcon */
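
/* Falcon accesses flash and EEPROM directly over SPI, so every operation
 * must hold the NIC's SPI lock.
 */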
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, spi, part->offset + start, len,
			     retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = efx_spi_erase(part, part->offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, spi, part->offset + start, len,
			      retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = efx_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}
static struct efx_mtd_ops falcon_mtd_ops = {
	.read = falcon_mtd_read,
	.erase = falcon_mtd_erase,
	.write = falcon_mtd_write,
	.sync = falcon_mtd_sync,
};
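
/* Export up to two partitions on Falcon: the boot ROM region of the flash
 * and the boot configuration region of the EEPROM.
 */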
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct efx_spi_device *spi;
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;

	ASSERT_RTNL();

	spi = &nic_data->spi_flash;
	if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "flash";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_NORFLASH;
		efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
		efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].type_name = "sfc_flash_bootrom";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	spi = &nic_data->spi_eeprom;
	if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "EEPROM";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_RAM;
		efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
		efx_mtd->part[0].mtd.size =
			min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
			EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].type_name = "sfc_bootconfig";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	return rc;
}

/* Implementation of MTD operations for Siena */
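
/* Siena NVRAM is accessed through MCDI requests, which are limited to
 * EFX_MCDI_NVRAM_LEN_MAX bytes per call, so reads and writes are split
 * into chunks.
 */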
static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
			  size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->mtd.erasesize;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}
static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}
static int siena_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc = 0;

	if (part->mcdi.updating) {
		part->mcdi.updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
	}

	return rc;
}
static struct efx_mtd_ops siena_mtd_ops = {
	.read = siena_mtd_read,
	.erase = siena_mtd_erase,
	.write = siena_mtd_write,
	.sync = siena_mtd_sync,
};
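
/* Mapping from MCDI NVRAM type to the port it belongs to and the partition
 * name exposed to userland.
 */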
struct siena_nvram_type_info {
	int port;
	const char *name;
};
static struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO]	= { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW]		= { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP]	= { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0]	= { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1]	= { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0]	= { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1]	= { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM]		= { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0]	= { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]	= { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0]		= { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1]		= { 1, "sfc_phy_fw" },
};
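
/* Fill in one efx_mtd_partition for an NVRAM type, skipping types that
 * belong to the other port or are write-protected.
 */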
static int siena_mtd_probe_partition(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd,
				     unsigned int part_id,
				     unsigned int type)
{
	struct efx_mtd_partition *part = &efx_mtd->part[part_id];
	struct siena_nvram_type_info *info;
	size_t size, erase_size;
	bool protected;
	int rc;

	if (type >= ARRAY_SIZE(siena_nvram_types))
		return -ENODEV;

	info = &siena_nvram_types[type];

	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->mcdi.nvram_type = type;
	part->type_name = info->name;

	part->mtd.type = MTD_NORFLASH;
	part->mtd.flags = MTD_CAP_NORFLASH;
	part->mtd.size = size;
	part->mtd.erasesize = erase_size;

	return 0;
}
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;
	uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
				 sizeof(uint16_t)];
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
	if (rc)
		return rc;

	efx_for_each_partition(part, efx_mtd)
		part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];

	return 0;
}
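
/* Probe all NVRAM partitions advertised by the MC and register them as MTD
 * devices.
 */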
static int siena_mtd_probe(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;
	u32 nvram_types;
	unsigned int type;

	ASSERT_RTNL();

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	efx_mtd = kzalloc(sizeof(*efx_mtd) +
			  hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->name = "Siena NVRAM manager";

	efx_mtd->ops = &siena_mtd_ops;

	type = 0;
	efx_mtd->n_parts = 0;

	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = siena_mtd_probe_partition(efx, efx_mtd,
						       efx_mtd->n_parts, type);
			if (rc == 0)
				efx_mtd->n_parts++;
			else if (rc != -ENODEV)
				goto fail;
		}
		type++;
		nvram_types >>= 1;
	}

	rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
	if (rc)
		goto fail;

	rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
	if (rc)
		kfree(efx_mtd);
	return rc;
}