treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / net / ethernet / cavium / liquidio / octeon_mem_ops.c
blob 4c85ae643b7b7d3b53e43b4c6dc4dc5735c4fdd9
1 /**********************************************************************
2 * Author: Cavium, Inc.
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 **********************************************************************/
19 #include <linux/netdevice.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
26 #define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
#ifdef __BIG_ENDIAN_BITFIELD
/* Flip the byte-swap bit (bit 1) in the BAR1 index register @idx.
 * Called in pairs around 64-bit burst I/O so only the writeq/readq
 * phase runs with swapping toggled; a no-op on little-endian builds.
 */
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 mask = oct->fn_list.bar1_idx_read(oct, idx);

	/* Set-if-clear / clear-if-set of bit 1 is simply XOR with 2. */
	oct->fn_list.bar1_idx_write(oct, idx, mask ^ 2);
}
#else
#define octeon_toggle_bar1_swapmode(oct, idx)
#endif
42 static void
43 octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
44 u8 *hostbuf, u32 len)
46 while ((len) && ((unsigned long)mapped_addr) & 7) {
47 writeb(*(hostbuf++), mapped_addr++);
48 len--;
51 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
53 while (len >= 8) {
54 writeq(*((u64 *)hostbuf), mapped_addr);
55 mapped_addr += 8;
56 hostbuf += 8;
57 len -= 8;
60 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
62 while (len--)
63 writeb(*(hostbuf++), mapped_addr++);
66 static void
67 octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
68 u8 *hostbuf, u32 len)
70 while ((len) && ((unsigned long)mapped_addr) & 7) {
71 *(hostbuf++) = readb(mapped_addr++);
72 len--;
75 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
77 while (len >= 8) {
78 *((u64 *)hostbuf) = readq(mapped_addr);
79 mapped_addr += 8;
80 hostbuf += 8;
81 len -= 8;
84 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
86 while (len--)
87 *(hostbuf++) = readb(mapped_addr++);
/* Core mem read/write with temporary bar1 settings. */
/* op = 1 to read, op = 0 to write. */
/* Transfers @len bytes between host buffer @hostbuf and Octeon core
 * memory address @addr. If @addr falls inside the statically mapped
 * console DRAM region, uses that fixed BAR1 window directly (lock-free);
 * otherwise retargets the dynamic MEMOPS_IDX BAR1 window under
 * mem_access_lock, splitting the transfer at 4 MB window boundaries,
 * and restores the original index register before returning.
 */
static void
__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
			 u8 *hostbuf, u32 len, u32 op)
{
	u32 copy_len = 0, index_reg_val = 0;
	unsigned long flags;
	u8 __iomem *mapped_addr;
	u64 static_mapping_base;

	static_mapping_base = oct->console_nb_info.dram_region_base;

	/* Fast path: the whole region base matches — use the console's
	 * pre-programmed BAR1 entry; no locking or index rewriting needed.
	 * NOTE(review): this only checks that the window base matches @addr;
	 * presumably @len never crosses the OCTEON_BAR1_ENTRY_SIZE window
	 * end on this path — confirm against callers.
	 */
	if (static_mapping_base &&
	    static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
		int bar1_index = oct->console_nb_info.bar1_index;

		/* BAR1 virtual base + entry offset + offset within entry. */
		mapped_addr = oct->mmio[1].hw_addr
			+ (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
			+ (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));

		if (op)
			octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
		else
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);

		return;
	}

	/* Slow path: serialize users of the shared dynamic BAR1 window. */
	spin_lock_irqsave(&oct->mem_access_lock, flags);

	/* Save the original index reg value. */
	index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
	do {
		/* Point the dynamic BAR1 entry at the 4 MB region
		 * containing @addr, then compute the host-side address.
		 */
		oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
		mapped_addr = oct->mmio[1].hw_addr
			+ (MEMOPS_IDX << 22) + (addr & 0x3fffff);

		/* If operation crosses a 4MB boundary, split the transfer
		 * at the 4MB
		 * boundary.
		 * NOTE(review): the split length below is the distance from
		 * @addr to (addr & ~0x3fffff) + (MEMOPS_IDX << 22); that is
		 * the next 4 MB boundary only when MEMOPS_IDX == 1 — confirm
		 * the value of BAR1_INDEX_DYNAMIC_MAP.
		 */
		if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
			copy_len = (u32)(((addr & ~(0x3fffff)) +
					  (MEMOPS_IDX << 22)) - addr);
		} else {
			copy_len = len;
		}

		if (op) { /* read from core */
			octeon_pci_fastread(oct, mapped_addr, hostbuf,
					    copy_len);
		} else {
			octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
					     copy_len);
		}

		/* Advance to the next chunk (next 4 MB window if split). */
		len -= copy_len;
		addr += copy_len;
		hostbuf += copy_len;

	} while (len);

	/* Restore the index register so other users see it unchanged. */
	oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);

	spin_unlock_irqrestore(&oct->mem_access_lock, flags);
}
158 void
159 octeon_pci_read_core_mem(struct octeon_device *oct,
160 u64 coreaddr,
161 u8 *buf,
162 u32 len)
164 __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
167 void
168 octeon_pci_write_core_mem(struct octeon_device *oct,
169 u64 coreaddr,
170 const u8 *buf,
171 u32 len)
173 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
176 u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
178 __be64 ret;
180 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
182 return be64_to_cpu(ret);
185 u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
187 __be32 ret;
189 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
191 return be32_to_cpu(ret);
194 void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
195 u32 val)
197 __be32 t = cpu_to_be32(val);
199 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);