// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe PMU driver
 *
 * Copyright (C) 2021-2023 Alibaba Inc.
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define DWC_PCIE_VSEC_RAS_DES_ID		0x02
#define DWC_PCIE_EVENT_CNT_CTL			0x8
/*
 * Event Counter Data Select includes two parts:
 * - 27-24: Group number (4-bit: 0..0x7)
 * - 23-16: Event number (8-bit: 0..0x13) within the Group
 *
 * Put them together as in TRM.
 */
#define DWC_PCIE_CNT_EVENT_SEL			GENMASK(27, 16)
#define DWC_PCIE_CNT_LANE_SEL			GENMASK(11, 8)
#define DWC_PCIE_CNT_STATUS			BIT(7)
#define DWC_PCIE_CNT_ENABLE			GENMASK(4, 2)
#define DWC_PCIE_PER_EVENT_OFF			0x1
#define DWC_PCIE_PER_EVENT_ON			0x3
#define DWC_PCIE_EVENT_CLEAR			GENMASK(1, 0)
#define DWC_PCIE_EVENT_PER_CLEAR		0x1
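
/*
 * For example, lane event ID 0x605 (tx_nullified_tlp below) selects Group 6,
 * event 0x05: the two numbers are simply concatenated before being placed
 * into DWC_PCIE_CNT_EVENT_SEL.
 */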

#define DWC_PCIE_EVENT_CNT_DATA			0xC

#define DWC_PCIE_TIME_BASED_ANAL_CTL		0x10
#define DWC_PCIE_TIME_BASED_REPORT_SEL		GENMASK(31, 24)
#define DWC_PCIE_TIME_BASED_DURATION_SEL	GENMASK(15, 8)
#define DWC_PCIE_DURATION_MANUAL_CTL		0x0
#define DWC_PCIE_DURATION_1MS			0x1
#define DWC_PCIE_DURATION_10MS			0x2
#define DWC_PCIE_DURATION_100MS			0x3
#define DWC_PCIE_DURATION_1S			0x4
#define DWC_PCIE_DURATION_2S			0x5
#define DWC_PCIE_DURATION_4S			0x6
#define DWC_PCIE_DURATION_4US			0xFF
#define DWC_PCIE_TIME_BASED_TIMER_START		BIT(0)
#define DWC_PCIE_TIME_BASED_CNT_ENABLE		0x1

#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW	0x14
#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH	0x18

/* Event attributes */
#define DWC_PCIE_CONFIG_EVENTID			GENMASK(15, 0)
#define DWC_PCIE_CONFIG_TYPE			GENMASK(19, 16)
#define DWC_PCIE_CONFIG_LANE			GENMASK(27, 20)

#define DWC_PCIE_EVENT_ID(event)	FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
#define DWC_PCIE_EVENT_TYPE(event)	FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
#define DWC_PCIE_EVENT_LANE(event)	FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)
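
/*
 * Illustrative encoding: counting tx_ack_dllp (eventid 0x600, a lane event)
 * on lane 4 corresponds to
 * attr.config = FIELD_PREP(DWC_PCIE_CONFIG_LANE, 4) |
 *               FIELD_PREP(DWC_PCIE_CONFIG_TYPE, DWC_PCIE_LANE_EVENT) |
 *               FIELD_PREP(DWC_PCIE_CONFIG_EVENTID, 0x600) = 0x410600,
 * e.g. "perf stat -e dwc_rootport_<sbdf>/tx_ack_dllp,lane=4/" from user space.
 */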

enum dwc_pcie_event_type {
        DWC_PCIE_TIME_BASE_EVENT,
        DWC_PCIE_LANE_EVENT,
        DWC_PCIE_EVENT_TYPE_MAX,
};

#define DWC_PCIE_LANE_EVENT_MAX_PERIOD		GENMASK_ULL(31, 0)
#define DWC_PCIE_MAX_PERIOD			GENMASK_ULL(63, 0)

struct dwc_pcie_pmu {
        struct pmu              pmu;
        struct pci_dev          *pdev;          /* Root Port device */
        u16                     ras_des_offset;
        u32                     nr_lanes;

        struct hlist_node       cpuhp_node;
        struct perf_event       *event[DWC_PCIE_EVENT_TYPE_MAX];
        int                     on_cpu;
};

#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))

static int dwc_pcie_pmu_hp_state;
static struct list_head dwc_pcie_dev_info_head =
                                LIST_HEAD_INIT(dwc_pcie_dev_info_head);

struct dwc_pcie_dev_info {
        struct platform_device *plat_dev;
        struct pci_dev *pdev;
        struct list_head dev_node;
};

struct dwc_pcie_vendor_id {
        int vendor_id;
};

static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
        {.vendor_id = PCI_VENDOR_ID_ALIBABA },
        {.vendor_id = PCI_VENDOR_ID_AMPERE },
        {.vendor_id = PCI_VENDOR_ID_QCOM },
        {} /* terminator */
};

static ssize_t cpumask_show(struct device *dev,
                            struct device_attribute *attr,
                            char *buf)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL
};

static struct attribute_group dwc_pcie_cpumask_attr_group = {
        .attrs = dwc_pcie_pmu_cpumask_attrs,
};

struct dwc_pcie_format_attr {
        struct device_attribute attr;
        u64 field;
        int config;
};

PMU_FORMAT_ATTR(eventid, "config:0-15");
PMU_FORMAT_ATTR(type, "config:16-19");
PMU_FORMAT_ATTR(lane, "config:20-27");

static struct attribute *dwc_pcie_format_attrs[] = {
        &format_attr_type.attr,
        &format_attr_eventid.attr,
        &format_attr_lane.attr,
        NULL,
};

static struct attribute_group dwc_pcie_format_attrs_group = {
        .name = "format",
        .attrs = dwc_pcie_format_attrs,
};

struct dwc_pcie_event_attr {
        struct device_attribute attr;
        enum dwc_pcie_event_type type;
        u16 eventid;
        u8 lane;
};

static ssize_t dwc_pcie_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct dwc_pcie_event_attr *eattr;

        eattr = container_of(attr, typeof(*eattr), attr);

        if (eattr->type == DWC_PCIE_LANE_EVENT)
                return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
                                  eattr->eventid, eattr->type);
        else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
                return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
                                  eattr->eventid, eattr->type);

        return 0;
}

#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane)             \
        (&((struct dwc_pcie_event_attr[]) {{                            \
                .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \
                .type = _type,                                          \
                .eventid = _eventid,                                    \
                .lane = _lane,                                          \
        }})[0].attr.attr)

#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid)             \
        DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid)                  \
        DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)
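
/*
 * For example (illustrative), DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600)
 * creates a read-only sysfs attribute named "tx_ack_dllp" whose show()
 * callback prints "eventid=0x600,type=0x1,lane=?".
 */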

static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
        /* Group #0 */
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),

        /* Group #1 */
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22),
        DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23),

        /*
         * Leave it to the user to specify the lane ID to avoid generating
         * a list of hundreds of events.
         */
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
        DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
        NULL
};

static const struct attribute_group dwc_pcie_event_attrs_group = {
        .name = "events",
        .attrs = dwc_pcie_pmu_time_event_attrs,
};

static const struct attribute_group *dwc_pcie_attr_groups[] = {
        &dwc_pcie_event_attrs_group,
        &dwc_pcie_format_attrs_group,
        &dwc_pcie_cpumask_attr_group,
        NULL
};

static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
                                           bool enable)
{
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;

        if (enable)
                pci_clear_and_set_config_dword(pdev,
                                        ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                        DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
        else
                pci_clear_and_set_config_dword(pdev,
                                        ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                        DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
}

static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
                                                 bool enable)
{
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;

        pci_clear_and_set_config_dword(pdev,
                                       ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
                                       DWC_PCIE_TIME_BASED_TIMER_START, enable);
}

static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 val;

        pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);

        return val;
}

static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        int event_id = DWC_PCIE_EVENT_ID(event);
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 lo, hi, ss;
        u64 val;

        /*
         * The 64-bit value of the data counter is spread across two
         * registers that are not synchronized. In order to read them
         * atomically, ensure that the high 32 bits match before and after
         * reading the low 32 bits.
         */
        pci_read_config_dword(pdev,
                ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
        do {
                /* snapshot the high 32 bits */
                ss = hi;

                pci_read_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
                        &lo);
                pci_read_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
                        &hi);
        } while (hi != ss);

        val = ((u64)hi << 32) | lo;
        /*
         * The Group#1 event measures the amount of data processed in 16-byte
         * units. Simplify the end-user interface by multiplying the counter
         * at the point of read.
         */
        if (event_id >= 0x20 && event_id <= 0x23)
                val *= 16;

        return val;
}
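
/*
 * Worked example (illustrative): a raw Group#1 count of 1000 for
 * rx_pcie_tlp_data_payload (event 0x21) is reported as 16000, i.e.
 * 16000 bytes of received TLP payload.
 */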

static void dwc_pcie_pmu_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        u64 delta, prev, now = 0;

        do {
                prev = local64_read(&hwc->prev_count);

                if (type == DWC_PCIE_LANE_EVENT)
                        now = dwc_pcie_pmu_read_lane_event_counter(event);
                else if (type == DWC_PCIE_TIME_BASE_EVENT)
                        now = dwc_pcie_pmu_read_time_based_counter(event);

        } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

        delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
        /* 32-bit counter for Lane Event Counting */
        if (type == DWC_PCIE_LANE_EVENT)
                delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;

        local64_add(delta, &event->count);
}
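
/*
 * Delta example (illustrative): a 32-bit lane counter wrapping from
 * prev = 0xfffffff0 to now = 0x10 yields
 * ((now - prev) & DWC_PCIE_MAX_PERIOD) & DWC_PCIE_LANE_EVENT_MAX_PERIOD = 0x20,
 * so the wrap-around is accounted for correctly.
 */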

static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        struct perf_event *sibling;
        u32 lane;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* We don't support sampling */
        if (is_sampling_event(event))
                return -EINVAL;

        /* We cannot support task bound events */
        if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->group_leader != event &&
            !is_software_event(event->group_leader))
                return -EINVAL;

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu && !is_software_event(sibling))
                        return -EINVAL;
        }

        if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
                return -EINVAL;

        if (type == DWC_PCIE_LANE_EVENT) {
                lane = DWC_PCIE_EVENT_LANE(event);
                if (lane < 0 || lane >= pcie_pmu->nr_lanes)
                        return -EINVAL;
        }

        event->cpu = pcie_pmu->on_cpu;

        return 0;
}

static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

        hwc->state = 0;
        local64_set(&hwc->prev_count, 0);

        if (type == DWC_PCIE_LANE_EVENT)
                dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
        else if (type == DWC_PCIE_TIME_BASE_EVENT)
                dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
}

static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        struct hw_perf_event *hwc = &event->hw;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        if (type == DWC_PCIE_LANE_EVENT)
                dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
        else if (type == DWC_PCIE_TIME_BASE_EVENT)
                dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);

        dwc_pcie_pmu_event_update(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        struct pci_dev *pdev = pcie_pmu->pdev;
        struct hw_perf_event *hwc = &event->hw;
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
        int event_id = DWC_PCIE_EVENT_ID(event);
        int lane = DWC_PCIE_EVENT_LANE(event);
        u16 ras_des_offset = pcie_pmu->ras_des_offset;
        u32 ctrl;

        /* one counter for each type and it is in use */
        if (pcie_pmu->event[type])
                return -ENOSPC;

        pcie_pmu->event[type] = event;
        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

        if (type == DWC_PCIE_LANE_EVENT) {
                /* EVENT_COUNTER_DATA_REG needs to be cleared manually */
                ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
                        FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
                        FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
                        FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
                pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
                                       ctrl);
        } else if (type == DWC_PCIE_TIME_BASE_EVENT) {
                /*
                 * TIME_BASED_ANAL_DATA_REG is a 64 bit register, so it can
                 * safely be used with any manually controlled duration. It is
                 * cleared when the next measurement starts.
                 */
                ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
                        FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
                                   DWC_PCIE_DURATION_MANUAL_CTL) |
                        DWC_PCIE_TIME_BASED_CNT_ENABLE;
                pci_write_config_dword(
                        pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
        }

        if (flags & PERF_EF_START)
                dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);

        return 0;
}

static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
{
        struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
        enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

        dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
        perf_event_update_userpage(event);
        pcie_pmu->event[type] = NULL;
}

static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
{
        cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
}

/*
 * Find the bound DES capability device info of a PCI device.
 * @pdev: The PCI device.
 */
static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
{
        struct dwc_pcie_dev_info *dev_info;

        list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
                if (dev_info->pdev == pdev)
                        return dev_info;

        return NULL;
}

static void dwc_pcie_unregister_pmu(void *data)
{
        struct dwc_pcie_pmu *pcie_pmu = data;

        perf_pmu_unregister(&pcie_pmu->pmu);
}

static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
{
        const struct dwc_pcie_vendor_id *vid;
        u16 vsec = 0;
        u32 val;

        if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
                return false;

        for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
                vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
                                                DWC_PCIE_VSEC_RAS_DES_ID);
                if (vsec)
                        break;
        }
        if (!vsec)
                return false;

        pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
        if (PCI_VNDR_HEADER_REV(val) != 0x04)
                return false;

        pci_dbg(pdev,
                "Detected PCIe Vendor-Specific Extended Capability RAS DES\n");

        return true;
}

static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
{
        platform_device_unregister(dev_info->plat_dev);
        list_del(&dev_info->dev_node);
        kfree(dev_info);
}

static int dwc_pcie_register_dev(struct pci_dev *pdev)
{
        struct platform_device *plat_dev;
        struct dwc_pcie_dev_info *dev_info;
        u32 sbdf;

        sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
        plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
                                                 pdev, sizeof(*pdev));

        if (IS_ERR(plat_dev))
                return PTR_ERR(plat_dev);

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info)
                return -ENOMEM;

        /* Cache platform device to handle pci device hotplug */
        dev_info->plat_dev = plat_dev;
        dev_info->pdev = pdev;
        list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);

        return 0;
}

static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct device *dev = data;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct dwc_pcie_dev_info *dev_info;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                if (!dwc_pcie_match_des_cap(pdev))
                        return NOTIFY_DONE;
                if (dwc_pcie_register_dev(pdev))
                        return NOTIFY_BAD;
                break;
        case BUS_NOTIFY_DEL_DEVICE:
                dev_info = dwc_pcie_find_dev_info(pdev);
                if (!dev_info)
                        return NOTIFY_DONE;
                dwc_pcie_unregister_dev(dev_info);
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block dwc_pcie_pmu_nb = {
        .notifier_call = dwc_pcie_pmu_notifier,
};

static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
        struct pci_dev *pdev = plat_dev->dev.platform_data;
        struct dwc_pcie_pmu *pcie_pmu;
        char *name;
        u32 sbdf, val;
        u16 vsec;
        int ret;

        vsec = pci_find_vsec_capability(pdev, pdev->vendor,
                                        DWC_PCIE_VSEC_RAS_DES_ID);
        pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
        sbdf = plat_dev->id;
        name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
        if (!name)
                return -ENOMEM;

        pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
        if (!pcie_pmu)
                return -ENOMEM;

        pcie_pmu->pdev = pdev;
        pcie_pmu->ras_des_offset = vsec;
        pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
        pcie_pmu->on_cpu = -1;
        pcie_pmu->pmu = (struct pmu){
                .name           = name,
                .parent         = &pdev->dev,
                .module         = THIS_MODULE,
                .attr_groups    = dwc_pcie_attr_groups,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
                .task_ctx_nr    = perf_invalid_context,
                .event_init     = dwc_pcie_pmu_event_init,
                .add            = dwc_pcie_pmu_event_add,
                .del            = dwc_pcie_pmu_event_del,
                .start          = dwc_pcie_pmu_event_start,
                .stop           = dwc_pcie_pmu_event_stop,
                .read           = dwc_pcie_pmu_event_update,
        };

        /* Add this instance to the list used by the offline callback */
        ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
                                       &pcie_pmu->cpuhp_node);
        if (ret) {
                pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf);
                return ret;
        }

        /* Unwind when platform driver removes */
        ret = devm_add_action_or_reset(&plat_dev->dev,
                                       dwc_pcie_pmu_remove_cpuhp_instance,
                                       &pcie_pmu->cpuhp_node);
        if (ret)
                return ret;

        ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
        if (ret) {
                pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf);
                return ret;
        }
        ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
                                       pcie_pmu);
        if (ret)
                return ret;

        return 0;
}

static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
        struct dwc_pcie_pmu *pcie_pmu;

        pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
        if (pcie_pmu->on_cpu == -1)
                pcie_pmu->on_cpu = cpumask_local_spread(
                        0, dev_to_node(&pcie_pmu->pdev->dev));

        return 0;
}

static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
        struct dwc_pcie_pmu *pcie_pmu;
        struct pci_dev *pdev;
        unsigned int target;
        int node;

        pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
        /* Nothing to do if this CPU doesn't own the PMU */
        if (cpu != pcie_pmu->on_cpu)
                return 0;

        pcie_pmu->on_cpu = -1;
        pdev = pcie_pmu->pdev;
        node = dev_to_node(&pdev->dev);

        target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                target = cpumask_any_but(cpu_online_mask, cpu);

        if (target >= nr_cpu_ids) {
                pci_err(pdev, "There is no CPU to set\n");
                return -1;
        }

        /* This PMU does NOT support interrupt, just migrate context. */
        perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
        pcie_pmu->on_cpu = target;

        return 0;
}

static struct platform_driver dwc_pcie_pmu_driver = {
        .probe = dwc_pcie_pmu_probe,
        .driver = {.name = "dwc_pcie_pmu",},
};

static int __init dwc_pcie_pmu_init(void)
{
        struct pci_dev *pdev = NULL;
        int ret;

        for_each_pci_dev(pdev) {
                if (!dwc_pcie_match_des_cap(pdev))
                        continue;

                ret = dwc_pcie_register_dev(pdev);
                if (ret) {
                        pci_dev_put(pdev);
                        return ret;
                }
        }

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "perf/dwc_pcie_pmu:online",
                                      dwc_pcie_pmu_online_cpu,
                                      dwc_pcie_pmu_offline_cpu);
        if (ret < 0)
                return ret;

        dwc_pcie_pmu_hp_state = ret;

        ret = platform_driver_register(&dwc_pcie_pmu_driver);
        if (ret)
                goto platform_driver_register_err;

        ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
        if (ret)
                goto platform_driver_register_err;

        return 0;

platform_driver_register_err:
        cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);

        return ret;
}

static void __exit dwc_pcie_pmu_exit(void)
{
        struct dwc_pcie_dev_info *dev_info, *tmp;

        bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
        list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
                dwc_pcie_unregister_dev(dev_info);
        platform_driver_unregister(&dwc_pcie_pmu_driver);
        cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}

module_init(dwc_pcie_pmu_init);
module_exit(dwc_pcie_pmu_exit);

MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");