/*
 * Copyright IBM Corp. 2012
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
static inline u8 clp_instr(void *data)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	u8 cc;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
		"	ipm	%[cc]\n"			/* extract the condition code */
		"	srl	%[cc],28\n"			/* from the PSW into cc */
		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req)
		: "cc");
	return cc;
}
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}
static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->fmb_update = response->mui;

	pr_debug("Supported number of MSI vectors: %u\n", response->noi);
	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}
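/*
 * Issue a CLP Query PCI Function Group request for @pfgid and, on success,
 * cache the group properties (TLB refresh, DMA mask, MSI address, FMB
 * update interval) in @zdev.
 */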
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
		       rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}
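/*
 * Copy the per-function properties from the CLP Query PCI response into
 * @zdev: BAR values and sizes, the DMA address range, the PCI channel ID
 * (pchid) and the function group ID (pfgid).
 */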
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	return 0;
}
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		pr_err("Query PCI failed with response: %x cc: %d\n",
		       rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}
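/*
 * Allocate a zpci_dev for the function identified by @fid/@fh, query its
 * properties via CLP and register it with the zPCI core. @configured
 * selects the initial function state (configured vs. standby).
 */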
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = zpci_alloc_device();
	if (IS_ERR(zdev))
		return PTR_ERR(zdev);

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	zpci_free_device(zdev);
	return rc;
}
/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_instr(rrb);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			/* Wait a bit and retry while the function is busy */
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
			 rrb->response.hdr.rsp);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}
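/*
 * Enable the PCI function with @nr_dma_as DMA address spaces; on success
 * the enabled function handle returned by firmware replaces @zdev->fh.
 */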
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}
int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}
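/*
 * Walk the complete CLP PCI function list: re-issue the List PCI request
 * with the returned resume token until no token is left and call @cb for
 * every function handle list entry. The caller provides the CLP block.
 */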
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			pr_err("List PCI failed with response: 0x%x cc: %d\n",
			       rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			     sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			  rrb->response.entry_size;
		pr_info("Detected number of PCI functions: %u\n", entries);

		/* Store the returned resume token as input for the next call */
		resume_token = rrb->response.resume_token;

		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);

	pr_debug("Maximum number of supported PCI functions: %u\n",
		 rrb->response.max_fn);
out:
	return rc;
}
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, that means no iota/irq
		 * freeing via the firmware interfaces anymore. Need to free
		 * resources manually (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(zdev);
	}
}
static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}
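/* Initial scan: add a zpci_dev for every PCI function reported by CLP. */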
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}
int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}
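/*
 * Like clp_rescan_pci_devices(), but only updates the function handles of
 * already known devices; GFP_NOWAIT keeps the CLP block allocation from
 * sleeping.
 */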
int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}