/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>

static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor
 * Retry logic is handled by the caller.
 */
static inline u8 clp_instr(void *data)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	u8 cc;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0x0,0x2\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req)
		: "cc");
	return cc;
}

static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

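/* Cache the PCI function group attributes reported by CLP in the zpci_dev. */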
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

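/* Query the attributes of PCI function group @pfgid and store them in @zdev. */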
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

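/* Copy the queried PCI function attributes (BARs, DMA range, IDs) into the zpci_dev. */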
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

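/* Query a PCI function by its handle and update @zdev, including its function group. */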
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_instr(rrb);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;

		if (rrb->response.pfgid)
			rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

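/* Allocate a zpci_dev for a newly discovered function, query its attributes and register it. */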
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_instr(rrb);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

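/* Enable the function and store the new (enabled) handle in the zpci_dev. */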
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

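/* Disable the function; a no-op if it is not currently enabled. */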
int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

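/* Walk the complete CLP PCI function list, following resume tokens, and call @cb for every entry. */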
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_instr(rrb);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

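/* List callback: add a zpci_dev for every populated (vendor id != 0) entry. */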
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

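/* List callback for rescan: add unknown functions, stop devices that are no longer configured. */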
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, that means no iota/irq freeing via
		 * the firmware interfaces anymore. Need to free resources manually
		 * (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(zdev);
	}
}

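/* List callback: refresh the stored function handle of an already known function. */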
static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

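/* Initial scan: list all PCI functions and add a zpci_dev for each of them. */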
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}