/*
 * Copyright(C) 2023 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */
10 #include "qemu/osdep.h"
12 #include "qemu/bswap.h"
13 #include "qemu/error-report.h"
14 #include "hw/pci/msi.h"
15 #include "hw/pci/msix.h"
16 #include "hw/cxl/cxl.h"
17 #include "hw/cxl/cxl_events.h"
19 /* Artificial limit on the number of events a log can hold */
20 #define CXL_TEST_EVENT_OVERFLOW 8
22 static void reset_overflow(CXLEventLog
*log
)
24 log
->overflow_err_count
= 0;
25 log
->first_overflow_timestamp
= 0;
26 log
->last_overflow_timestamp
= 0;
29 void cxl_event_init(CXLDeviceState
*cxlds
, int start_msg_num
)
34 for (i
= 0; i
< CXL_EVENT_TYPE_MAX
; i
++) {
35 log
= &cxlds
->event_logs
[i
];
37 log
->overflow_err_count
= 0;
38 log
->first_overflow_timestamp
= 0;
39 log
->last_overflow_timestamp
= 0;
40 log
->irq_enabled
= false;
41 log
->irq_vec
= start_msg_num
++;
42 qemu_mutex_init(&log
->lock
);
43 QSIMPLEQ_INIT(&log
->events
);
46 /* Override -- Dynamic Capacity uses the same vector as info */
47 cxlds
->event_logs
[CXL_EVENT_TYPE_DYNAMIC_CAP
].irq_vec
=
48 cxlds
->event_logs
[CXL_EVENT_TYPE_INFO
].irq_vec
;
52 static CXLEvent
*cxl_event_get_head(CXLEventLog
*log
)
54 return QSIMPLEQ_FIRST(&log
->events
);
57 static CXLEvent
*cxl_event_get_next(CXLEvent
*entry
)
59 return QSIMPLEQ_NEXT(entry
, node
);
62 static int cxl_event_count(CXLEventLog
*log
)
67 QSIMPLEQ_FOREACH(event
, &log
->events
, node
) {
74 static bool cxl_event_empty(CXLEventLog
*log
)
76 return QSIMPLEQ_EMPTY(&log
->events
);
79 static void cxl_event_delete_head(CXLDeviceState
*cxlds
,
80 CXLEventLogType log_type
,
83 CXLEvent
*entry
= cxl_event_get_head(log
);
86 QSIMPLEQ_REMOVE_HEAD(&log
->events
, node
);
87 if (cxl_event_empty(log
)) {
88 cxl_event_set_status(cxlds
, log_type
, false);
94 * return true if an interrupt should be generated as a result
95 * of inserting this event.
97 bool cxl_event_insert(CXLDeviceState
*cxlds
, CXLEventLogType log_type
,
98 CXLEventRecordRaw
*event
)
104 if (log_type
>= CXL_EVENT_TYPE_MAX
) {
108 time
= cxl_device_get_timestamp(cxlds
);
110 log
= &cxlds
->event_logs
[log_type
];
112 QEMU_LOCK_GUARD(&log
->lock
);
114 if (cxl_event_count(log
) >= CXL_TEST_EVENT_OVERFLOW
) {
115 if (log
->overflow_err_count
== 0) {
116 log
->first_overflow_timestamp
= time
;
118 log
->overflow_err_count
++;
119 log
->last_overflow_timestamp
= time
;
123 entry
= g_new0(CXLEvent
, 1);
125 memcpy(&entry
->data
, event
, sizeof(*event
));
127 entry
->data
.hdr
.handle
= cpu_to_le16(log
->next_handle
);
129 /* 0 handle is never valid */
130 if (log
->next_handle
== 0) {
133 entry
->data
.hdr
.timestamp
= cpu_to_le64(time
);
135 QSIMPLEQ_INSERT_TAIL(&log
->events
, entry
, node
);
136 cxl_event_set_status(cxlds
, log_type
, true);
138 /* Count went from 0 to 1 */
139 return cxl_event_count(log
) == 1;
142 void cxl_discard_all_event_records(CXLDeviceState
*cxlds
)
144 CXLEventLogType log_type
;
147 for (log_type
= 0; log_type
< CXL_EVENT_TYPE_MAX
; log_type
++) {
148 log
= &cxlds
->event_logs
[log_type
];
149 while (!cxl_event_empty(log
)) {
150 cxl_event_delete_head(cxlds
, log_type
, log
);
155 CXLRetCode
cxl_event_get_records(CXLDeviceState
*cxlds
, CXLGetEventPayload
*pl
,
156 uint8_t log_type
, int max_recs
,
163 if (log_type
>= CXL_EVENT_TYPE_MAX
) {
164 return CXL_MBOX_INVALID_INPUT
;
167 log
= &cxlds
->event_logs
[log_type
];
169 QEMU_LOCK_GUARD(&log
->lock
);
171 entry
= cxl_event_get_head(log
);
172 for (nr
= 0; entry
&& nr
< max_recs
; nr
++) {
173 memcpy(&pl
->records
[nr
], &entry
->data
, CXL_EVENT_RECORD_SIZE
);
174 entry
= cxl_event_get_next(entry
);
177 if (!cxl_event_empty(log
)) {
178 pl
->flags
|= CXL_GET_EVENT_FLAG_MORE_RECORDS
;
181 if (log
->overflow_err_count
) {
182 pl
->flags
|= CXL_GET_EVENT_FLAG_OVERFLOW
;
183 pl
->overflow_err_count
= cpu_to_le16(log
->overflow_err_count
);
184 pl
->first_overflow_timestamp
=
185 cpu_to_le64(log
->first_overflow_timestamp
);
186 pl
->last_overflow_timestamp
=
187 cpu_to_le64(log
->last_overflow_timestamp
);
190 pl
->record_count
= cpu_to_le16(nr
);
191 *len
= CXL_EVENT_PAYLOAD_HDR_SIZE
+ (CXL_EVENT_RECORD_SIZE
* nr
);
193 return CXL_MBOX_SUCCESS
;
196 CXLRetCode
cxl_event_clear_records(CXLDeviceState
*cxlds
,
197 CXLClearEventPayload
*pl
)
204 log_type
= pl
->event_log
;
206 if (log_type
>= CXL_EVENT_TYPE_MAX
) {
207 return CXL_MBOX_INVALID_INPUT
;
210 log
= &cxlds
->event_logs
[log_type
];
212 QEMU_LOCK_GUARD(&log
->lock
);
214 * Must iterate the queue twice.
215 * "The device shall verify the event record handles specified in the input
216 * payload are in temporal order. If the device detects an older event
217 * record that will not be cleared when Clear Event Records is executed,
218 * the device shall return the Invalid Handle return code and shall not
219 * clear any of the specified event records."
220 * -- CXL r3.1 Section 8.2.9.2.3: Clear Event Records (0101h)
222 entry
= cxl_event_get_head(log
);
223 for (nr
= 0; entry
&& nr
< pl
->nr_recs
; nr
++) {
224 uint16_t handle
= pl
->handle
[nr
];
226 /* NOTE: Both handles are little endian. */
227 if (handle
== 0 || entry
->data
.hdr
.handle
!= handle
) {
228 return CXL_MBOX_INVALID_INPUT
;
230 entry
= cxl_event_get_next(entry
);
233 entry
= cxl_event_get_head(log
);
234 for (nr
= 0; entry
&& nr
< pl
->nr_recs
; nr
++) {
235 cxl_event_delete_head(cxlds
, log_type
, log
);
236 entry
= cxl_event_get_head(log
);
239 return CXL_MBOX_SUCCESS
;
242 void cxl_event_irq_assert(CXLType3Dev
*ct3d
)
244 CXLDeviceState
*cxlds
= &ct3d
->cxl_dstate
;
245 PCIDevice
*pdev
= &ct3d
->parent_obj
;
248 for (i
= 0; i
< CXL_EVENT_TYPE_MAX
; i
++) {
249 CXLEventLog
*log
= &cxlds
->event_logs
[i
];
251 if (!log
->irq_enabled
|| cxl_event_empty(log
)) {
255 /* Notifies interrupt, legacy IRQ is not supported */
256 if (msix_enabled(pdev
)) {
257 msix_notify(pdev
, log
->irq_vec
);
258 } else if (msi_enabled(pdev
)) {
259 msi_notify(pdev
, log
->irq_vec
);