// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>

#include "../watermark.h"
#include "mock.h"

static int interleave_arithmetic;

#define FAKE_QTG_ID	42

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
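/*
 * Summary of the mock topology sized by the macros above: two multi-port
 * host bridges (NR_CXL_HOST_BRIDGES) with NR_CXL_ROOT_PORTS root ports
 * each, one single-port host bridge, and one restricted CXL host (RCH).
 * Each root port hosts a switch with NR_CXL_SWITCH_PORTS downstream
 * ports, and each port is given NR_CXL_PORT_DECODERS mock decoders.
 */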
static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];
static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
	},
	[1] = {
		.handle = &host_bridge[1],
	},
	[2] = {
		.handle = &host_bridge[2],
	},
	[3] = {
		.handle = &host_bridge[3],
	},
};
static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[4];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
	},
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
	},
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 2,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 16UL,
		},
		.target = { 0, 1, 0, 1, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};
struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};
static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};
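/*
 * The interleave_arithmetic module parameter picks which slice of
 * mock_cfmws[] gets published: indices 0-5 exercise modulo interleave
 * math, indices 6-8 exercise XOR interleave math backed by mock_cxims[].
 */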
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;
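/*
 * Fake host physical address space for the mock CHBS and CFMWS ranges is
 * carved out of this gen_pool; cxl_test_init() seeds it with the top
 * SZ_64G of iomem_resource so the mock ranges sit away from the ranges a
 * real platform is likely to use.
 */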
static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}
static bool is_mock_port(struct device *dev);

/*
 * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
 * and 'struct cxl_chbs_context' share the property that the first
 * struct member is a cxl_test device being probed by the cxl_acpi
 * driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};
static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *) mock_cfmws[i];
			end = (unsigned long) h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}
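/*
 * Only CEDT lookups made on behalf of cxl_test devices are intercepted
 * above; everything else falls through to the real acpi_table_parse_cedt(),
 * so the mock tables can coexist with genuine platform CXL firmware tables.
 */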
static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}
static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}
static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}
static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};
static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}
static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
	struct device *dev = &port->dev;

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;
	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}
struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static void mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
	cxld->flags &= ~CXL_DECODER_F_ENABLE;
}
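/*
 * The mock commit/reset callbacks only enforce ordering: decoders must be
 * committed in ascending id order and reset starting from the most
 * recently committed decoder, mirroring the HDM decoder hardware rules.
 */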
static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}

static int first_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev))
		return 0;
	cxld = to_cxl_decoder(dev);
	if (cxld->id == 0)
		return 1;
	return 0;
}
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		while (port) {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		}
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else {
			cxlsd->target[0] = dport;
		}
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * 1 root-port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		put_device(dev);
	}
}
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);
		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}
/*
 * Faking the cxl_dpa_perf for the memdev when appropriate.
 */
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
			   struct cxl_dpa_perf *dpa_perf)
{
	dpa_perf->qos_class = FAKE_QTG_ID;
	dpa_perf->dpa_range = *range;
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i].read_latency = 500;
		dpa_perf->coord[i].write_latency = 500;
		dpa_perf->coord[i].read_bandwidth = 1000;
		dpa_perf->coord[i].write_bandwidth = 1000;
	}
}
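/*
 * Every mocked DPA performance entry reports the same fixed latency (500)
 * and bandwidth (1000) in each access class, so QoS-class and perf
 * plumbing can be tested against predictable values.
 */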
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
	struct range pmem_range = {
		.start = cxlds->pmem_res.start,
		.end = cxlds->pmem_res.end,
	};
	struct range ram_range = {
		.start = cxlds->ram_res.start,
		.end = cxlds->ram_res.end,
	};

	if (!cxl_root)
		return;

	if (range_len(&ram_range))
		dpa_perf_setup(port, &ram_range, &mds->ram_perf);

	if (range_len(&pmem_range))
		dpa_perf_setup(port, &pmem_range, &mds->pmem_perf);

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif
static __init int cxl_rch_topo_init(void)
{
	int rc, i;

	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
		struct acpi_device *adev = &host_bridge[idx];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_rch[i] = pdev;
		mock_pci_bus[idx].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "firmware_node");
		if (rc)
			goto err_bridge;
	}

	return 0;

err_bridge:
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}

	return rc;
}

static void cxl_rch_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}
static __init int cxl_single_topo_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	/* device ids continue after the multi-bridge devices to stay unique */
	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport",
					     NR_MEM_MULTI + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	return 0;

err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}
static void cxl_single_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}
static void cxl_mem_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
}
static int cxl_mem_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_single;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_single;
		}
		cxl_mem_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
		struct platform_device *rch = cxl_rch[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_rcd", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_rcd;
		}
		pdev->dev.parent = &rch->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_rcd;
		}
		cxl_rcd[i] = pdev;
	}

	return 0;

err_rcd:
	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
err_single:
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);

	return rc;
}
static __init int cxl_test_init(void)
{
	int rc, i;

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}

	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
			  SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	if (interleave_arithmetic == 1) {
		cfmws_start = CFMWS_XOR_ARRAY_START;
		cfmws_end = CFMWS_XOR_ARRAY_END;
	} else {
		cfmws_start = CFMWS_MOD_ARRAY_START;
		cfmws_end = CFMWS_MOD_ARRAY_END;
	}

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		mock_pci_bus[i].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	rc = cxl_single_topo_init();
	if (rc)
		goto err_dport;

	rc = cxl_rch_topo_init();
	if (rc)
		goto err_single;

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi) {
		rc = -ENOMEM;
		goto err_rch;
	}

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_acpi;

	rc = cxl_mem_init();
	if (rc)
		goto err_mem;

	return 0;

err_mem:
	platform_device_del(cxl_acpi);
err_acpi:
	platform_device_put(cxl_acpi);
err_rch:
	cxl_rch_topo_exit();
err_single:
	cxl_single_topo_exit();
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);

	return rc;
}
static __exit void cxl_test_exit(void)
{
	int i;

	cxl_mem_exit();
	platform_device_unregister(cxl_acpi);
	cxl_rch_topo_exit();
	cxl_single_topo_exit();
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");