WIP FPC-III support
[linux/fpc-iii.git] / drivers / net / ethernet / cavium / liquidio / octeon_mem_ops.c
blob7ccab36143c1710bb8213db48c78d00e73a20059
1 /**********************************************************************
2 * Author: Cavium, Inc.
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 **********************************************************************/
19 #include <linux/netdevice.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_mem_ops.h"
27 #define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
#ifdef __BIG_ENDIAN_BITFIELD
/* Flip bit 1 of the BAR1 index register around the 64-bit copy loops.
 * Presumably this is the hardware byte-swap mode bit for the window --
 * only needed on big-endian hosts, hence the #ifdef.
 */
static inline void
octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
	u32 reg;

	reg = oct->fn_list.bar1_idx_read(oct, idx);
	reg ^= 0x2;	/* XOR toggles: clears the bit if set, sets it if clear */
	oct->fn_list.bar1_idx_write(oct, idx, reg);
}
#else
/* Little-endian hosts: no swap-mode toggling required. */
#define octeon_toggle_bar1_swapmode(oct, idx)
#endif
43 static void
44 octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
45 u8 *hostbuf, u32 len)
47 while ((len) && ((unsigned long)mapped_addr) & 7) {
48 writeb(*(hostbuf++), mapped_addr++);
49 len--;
52 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
54 while (len >= 8) {
55 writeq(*((u64 *)hostbuf), mapped_addr);
56 mapped_addr += 8;
57 hostbuf += 8;
58 len -= 8;
61 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
63 while (len--)
64 writeb(*(hostbuf++), mapped_addr++);
67 static void
68 octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
69 u8 *hostbuf, u32 len)
71 while ((len) && ((unsigned long)mapped_addr) & 7) {
72 *(hostbuf++) = readb(mapped_addr++);
73 len--;
76 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
78 while (len >= 8) {
79 *((u64 *)hostbuf) = readq(mapped_addr);
80 mapped_addr += 8;
81 hostbuf += 8;
82 len -= 8;
85 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
87 while (len--)
88 *(hostbuf++) = readb(mapped_addr++);
91 /* Core mem read/write with temporary bar1 settings. */
92 /* op = 1 to read, op = 0 to write. */
93 static void
94 __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
95 u8 *hostbuf, u32 len, u32 op)
97 u32 copy_len = 0, index_reg_val = 0;
98 unsigned long flags;
99 u8 __iomem *mapped_addr;
100 u64 static_mapping_base;
102 static_mapping_base = oct->console_nb_info.dram_region_base;
104 if (static_mapping_base &&
105 static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
106 int bar1_index = oct->console_nb_info.bar1_index;
108 mapped_addr = oct->mmio[1].hw_addr
109 + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
110 + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
112 if (op)
113 octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
114 else
115 octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);
117 return;
120 spin_lock_irqsave(&oct->mem_access_lock, flags);
122 /* Save the original index reg value. */
123 index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
124 do {
125 oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
126 mapped_addr = oct->mmio[1].hw_addr
127 + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
129 /* If operation crosses a 4MB boundary, split the transfer
130 * at the 4MB
131 * boundary.
133 if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
134 copy_len = (u32)(((addr & ~(0x3fffff)) +
135 (MEMOPS_IDX << 22)) - addr);
136 } else {
137 copy_len = len;
140 if (op) { /* read from core */
141 octeon_pci_fastread(oct, mapped_addr, hostbuf,
142 copy_len);
143 } else {
144 octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
145 copy_len);
148 len -= copy_len;
149 addr += copy_len;
150 hostbuf += copy_len;
152 } while (len);
154 oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
156 spin_unlock_irqrestore(&oct->mem_access_lock, flags);
159 void
160 octeon_pci_read_core_mem(struct octeon_device *oct,
161 u64 coreaddr,
162 u8 *buf,
163 u32 len)
165 __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
168 void
169 octeon_pci_write_core_mem(struct octeon_device *oct,
170 u64 coreaddr,
171 const u8 *buf,
172 u32 len)
174 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
177 u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
179 __be64 ret;
181 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
183 return be64_to_cpu(ret);
186 u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
188 __be32 ret;
190 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
192 return be32_to_cpu(ret);
195 void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
196 u32 val)
198 __be32 t = cpu_to_be32(val);
200 __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);