/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>

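/*
 * Pack a CLP response code together with the kernel return code and dump
 * the pair into the zPCI error debug log via zpci_err_hex().
 */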
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
static inline u8 clp_instr(void *data)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	u8 cc;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req)
		: "cc");
	return cc;
}

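/*
 * CLP request/response blocks are CLP_BLK_SIZE bytes; they are backed by
 * whole pages so that the buffer handed to the CLP instruction is
 * physically contiguous.
 */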
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

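/*
 * Copy the queried PCI function group attributes (TLB refresh behaviour,
 * DMA address space mask, MSI address, maximum MSI count and FMB update
 * interval) from the CLP response into the zpci_dev.
 */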
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

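/*
 * Issue a CLP QUERY PCI FUNCTION GROUP request for the given function group
 * id and store the reported attributes in the zpci_dev. Returns 0 on
 * success, -ENOMEM if no request block could be allocated and -EIO if the
 * request fails.
 */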
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

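/*
 * Copy the queried PCI function attributes (BARs, DMA range, PCHID,
 * function group id, function type, VFN, UID, pfip and the optional
 * utility string) from the CLP response into the zpci_dev.
 */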
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

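/*
 * Issue a CLP QUERY PCI FUNCTION request for the given function handle and
 * update the zpci_dev with the result. If the response names a function
 * group, that group is queried as well.
 */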
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

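/*
 * Allocate and register a zpci_dev for a newly discovered PCI function.
 * The caller supplies the function id, the current function handle and
 * whether the function is already configured.
 */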
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_instr(rrb);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

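/*
 * Enable a PCI function for use with nr_dma_as DMA address spaces and, on
 * success, store the new (enabled) function handle in the zpci_dev.
 */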
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

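/*
 * Disable a PCI function and, on success, store the new (disabled) function
 * handle in the zpci_dev. Functions that are not enabled are left untouched.
 */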
int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

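/*
 * Walk the firmware's list of PCI functions via CLP LIST PCI and invoke the
 * callback for every returned entry. The resume token of each response is
 * fed into the next request until the firmware indicates that the list is
 * complete.
 */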
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

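/* clp_list_pci() callback: add a zpci_dev for every entry with a vendor id. */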
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

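/*
 * clp_list_pci() callback used for rescans: add functions that are not yet
 * known and stop functions that the firmware reports as deconfigured.
 */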
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, so iota/irq resources can
		 * no longer be freed via the firmware interfaces. Free the
		 * remaining resources manually (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(zdev);
	}
}

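/* clp_list_pci() callback: only refresh the stored function handle. */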
static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

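/* Rescan: pick up newly reported functions and stop deconfigured ones. */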
int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

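/*
 * Lightweight rescan that only refreshes the stored function handles. The
 * request block is allocated with GFP_NOWAIT so the allocation itself does
 * not sleep.
 */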
int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}