/* adi_64.c: support for ADI (Application Data Integrity) feature on
 * sparc m7 and newer processors. This feature is also known as
 * SSM (Silicon Secured Memory).
 *
 * Copyright (C) 2016 Oracle and/or its affiliates. All rights reserved.
 * Author: Khalid Aziz (khalid.aziz@oracle.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
#include <asm/mdesc.h>
#include <asm/adi_64.h>
#include <asm/mmu_64.h>
#include <asm/pgtable_64.h>
/* Each page of storage for ADI tags can accommodate tags for 128
 * pages. When ADI-enabled pages are being swapped out, it would be
 * prudent to allocate at least enough tag storage space to accommodate
 * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
 * store tags for four SWAPFILE_CLUSTER pages to reduce the need for
 * further allocations for the same vma.
 */
#define TAG_STORAGE_PAGES	8
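
/* Worked example of the "128 pages" figure above (a sketch, assuming the
 * common sparc64 values of an 8 KB PAGE_SIZE and a 64-byte ADI block size):
 * one tag storage page holds 8192 bytes * 2 tags per byte = 16384 tags, and
 * 16384 tags * 64 bytes covered per tag = 1 MB, i.e. 128 pages of 8 KB each.
 */
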
struct adi_config adi_state;
EXPORT_SYMBOL(adi_state);

/* mdesc_adi_init() : Parse machine description provided by the
 * hypervisor to detect ADI capabilities
 *
 * Hypervisor reports ADI capabilities of platform in "hwcap-list" property
 * for "cpu" node. If the platform supports ADI, "hwcap-list" property
 * contains the keyword "adp". If the platform supports ADI, "platform"
 * node will contain "adp-blksz", "adp-nbits" and "ue-on-adp" properties
 * to describe the ADI capabilities.
 */
void __init mdesc_adi_init(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	const char *prop;
	u64 pn, *val;
	int len;

	if (!hp)
		goto adi_not_found;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto adi_not_found;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto adi_not_found;

	/*
	 * Look for "adp" keyword in hwcap-list which would indicate
	 * ADI support
	 */
	adi_state.enabled = false;
	while (len) {
		int plen;

		if (!strcmp(prop, "adp")) {
			adi_state.enabled = true;
			break;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

	if (!adi_state.enabled)
		goto adi_not_found;

	/* Find the ADI properties in the "platform" node. If any of these
	 * properties is missing, ADI support is incomplete; do not enable
	 * ADI in the kernel.
	 */
	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (pn == MDESC_NODE_NULL)
		goto adi_not_found;

	val = (u64 *) mdesc_get_property(hp, pn, "adp-blksz", &len);
	if (!val)
		goto adi_not_found;
	adi_state.caps.blksz = *val;

	val = (u64 *) mdesc_get_property(hp, pn, "adp-nbits", &len);
	if (!val)
		goto adi_not_found;
	adi_state.caps.nbits = *val;

	val = (u64 *) mdesc_get_property(hp, pn, "ue-on-adp", &len);
	if (!val)
		goto adi_not_found;
	adi_state.caps.ue_on_adi = *val;

	/* Some of the code to support swapping ADI tags is written with
	 * the assumption that two ADI tags can fit inside one byte. If
	 * this assumption is broken by a future architecture change,
	 * that code will have to be revisited. If that were to happen,
	 * disable ADI support so we do not get unpredictable results
	 * with programs trying to use ADI and their pages getting
	 * swapped out.
	 */
	if (adi_state.caps.nbits > 4) {
		pr_warn("WARNING: ADI tag size >4 on this platform. Disabling ADI support\n");
		adi_state.enabled = false;
	}

	mdesc_release(hp);
	return;

adi_not_found:
	adi_state.enabled = false;
	adi_state.caps.blksz = 0;
	adi_state.caps.nbits = 0;
	if (hp)
		mdesc_release(hp);
}

tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long addr)
{
	tag_storage_desc_t *tag_desc = NULL;
	unsigned long i, max_desc, flags;

	/* Check if this vma already has tag storage descriptor
	 * allocated for it.
	 */
	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
	if (mm->context.tag_store) {
		tag_desc = mm->context.tag_store;
		spin_lock_irqsave(&mm->context.tag_lock, flags);
		for (i = 0; i < max_desc; i++) {
			if ((addr >= tag_desc->start) &&
			    ((addr + PAGE_SIZE - 1) <= tag_desc->end))
				break;
			tag_desc++;
		}
		spin_unlock_irqrestore(&mm->context.tag_lock, flags);

		/* If no matching entries were found, this must be a
		 * freshly allocated page
		 */
		if (i >= max_desc)
			tag_desc = NULL;
	}

	return tag_desc;
}

tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
				    struct vm_area_struct *vma,
				    unsigned long addr)
{
	unsigned char *tags;
	unsigned long i, size, max_desc, flags;
	tag_storage_desc_t *tag_desc, *open_desc;
	unsigned long end_addr, hole_start, hole_end;

	max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
	open_desc = NULL;
	hole_start = 0;
	hole_end = ULONG_MAX;
	end_addr = addr + PAGE_SIZE - 1;

	/* Check if this vma already has tag storage descriptor
	 * allocated for it.
	 */
	spin_lock_irqsave(&mm->context.tag_lock, flags);
	if (mm->context.tag_store) {
		tag_desc = mm->context.tag_store;

		/* Look for a matching entry for this address. While doing
		 * that, look for the first open slot as well and find
		 * the hole in already allocated range where this request
		 * will fit in.
		 */
		for (i = 0; i < max_desc; i++) {
			if (tag_desc->tag_users == 0) {
				if (open_desc == NULL)
					open_desc = tag_desc;
			} else {
				if ((addr >= tag_desc->start) &&
				    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
					tag_desc->tag_users++;
					goto out;
				}
			}
			if ((tag_desc->start > end_addr) &&
			    (tag_desc->start < hole_end))
				hole_end = tag_desc->start;
			if ((tag_desc->end < addr) &&
			    (tag_desc->end > hole_start))
				hole_start = tag_desc->end;
			tag_desc++;
		}
	} else {
		size = sizeof(tag_storage_desc_t)*max_desc;
		mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
		if (mm->context.tag_store == NULL) {
			tag_desc = NULL;
			goto out;
		}
		tag_desc = mm->context.tag_store;
		for (i = 0; i < max_desc; i++, tag_desc++)
			tag_desc->tag_users = 0;
		open_desc = mm->context.tag_store;
	}

	/* Check if we ran out of tag storage descriptors */
	if (open_desc == NULL) {
		tag_desc = NULL;
		goto out;
	}

	/* Mark this tag descriptor slot in use and then initialize it */
	tag_desc = open_desc;
	tag_desc->tag_users = 1;

	/* Tag storage has not been allocated for this vma and space
	 * is available in the tag storage descriptor. Since this page is
	 * being swapped out, there is a high probability subsequent pages
	 * in the VMA will be swapped out as well. Allocate pages to
	 * store tags for as many pages in this vma as possible but not
	 * more than TAG_STORAGE_PAGES. Each byte in tag space holds
	 * two ADI tags since each ADI tag is 4 bits. Each ADI tag
	 * covers adi_blksize() worth of addresses. Check if the hole is
	 * big enough to accommodate the full address range for using
	 * TAG_STORAGE_PAGES number of tag pages.
	 */
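	/* Worked example (a sketch, assuming an 8 KB PAGE_SIZE and a 64-byte
	 * ADI block size): TAG_STORAGE_PAGES * PAGE_SIZE = 64 KB of tag
	 * storage which, at two tags per byte and 64 bytes covered per tag,
	 * spans 64 KB * 2 * 64 = 8 MB of the vma's address range.
	 */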
	size = TAG_STORAGE_PAGES * PAGE_SIZE;
	end_addr = addr + (size*2*adi_blksize()) - 1;
	/* Check for overflow. If overflow occurs, allocate only one page */
	if (end_addr < addr) {
		size = PAGE_SIZE;
		end_addr = addr + (size*2*adi_blksize()) - 1;
		/* If overflow happens with the minimum tag storage
		 * allocation as well, adjust ending address for this
		 * tag storage.
		 */
		if (end_addr < addr)
			end_addr = ULONG_MAX;
	}
	if (hole_end < end_addr) {
		/* Available hole is too small on the upper end of
		 * address. Can we expand the range towards the lower
		 * address and maximize use of this slot?
		 */
		unsigned long tmp_addr;

		end_addr = hole_end - 1;
		tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
		/* Check for underflow. If underflow occurs, allocate
		 * only one page for storing ADI tags
		 */
		if (tmp_addr > addr) {
			size = PAGE_SIZE;
			tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
			/* If underflow happens with the minimum tag storage
			 * allocation as well, adjust starting address for
			 * this tag storage.
			 */
			if (tmp_addr > addr)
				tmp_addr = hole_start + 1;
		}
		if (tmp_addr < hole_start) {
			/* Available hole is restricted on lower address
			 * end as well
			 */
			tmp_addr = hole_start + 1;
		}
		addr = tmp_addr;
	}
	size = (end_addr + 1 - addr)/(2*adi_blksize());
	size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
	size = size * PAGE_SIZE;
	tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
	if (tags == NULL) {
		tag_desc->tag_users = 0;
		tag_desc = NULL;
		goto out;
	}
	tag_desc->start = addr;
	tag_desc->tags = tags;
	tag_desc->end = end_addr;

out:
	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
	return tag_desc;
}

void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
{
	unsigned long flags;
	unsigned char *tags = NULL;

	spin_lock_irqsave(&mm->context.tag_lock, flags);
	tag_desc->tag_users--;
	if (tag_desc->tag_users == 0) {
		tag_desc->start = tag_desc->end = 0;
		/* Do not free up the tag storage space allocated
		 * by the first descriptor. This is persistent
		 * emergency tag storage space for the task.
		 */
		if (tag_desc != mm->context.tag_store) {
			tags = tag_desc->tags;
			tag_desc->tags = NULL;
		}
	}
	spin_unlock_irqrestore(&mm->context.tag_lock, flags);
	kfree(tags);
}

#define tag_start(addr, tag_desc)		\
	((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
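
/* For example (a sketch, assuming a 64-byte ADI block size): an address
 * 1024 bytes past tag_desc->start maps to byte 1024/(2*64) = 8 of the tags
 * array, since each tag byte covers two adi_blksize() blocks.
 */
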
/* Retrieve any saved ADI tags for the page being swapped back in and
 * restore these tags to the newly allocated physical page.
 */
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long paddr, tmp, version1, version2;

	/* Check if the swapped out page has an ADI version
	 * saved. If yes, restore version tag to the newly
	 * allocated page.
	 */
	tag_desc = find_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(pte) & _PAGE_PADDR_4V;
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		version1 = (*tag) >> 4;
		version2 = (*tag) & 0x0f;
		*tag++ = 0;
		/* Write one 4-bit version tag per adi_blksize() block of
		 * the physical page through the MCD real ASI.
		 */
		asm volatile("stxa %0, [%1] %2\n\t"
			:
			: "r" (version1), "r" (tmp),
			  "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("stxa %0, [%1] %2\n\t"
			:
			: "r" (version2), "r" (tmp),
			  "i" (ASI_MCD_REAL));
	}
	asm volatile("membar #Sync\n\t");

	/* Check and mark this tag space for release later if
	 * the swapped in page was the last user of tag space
	 */
	del_tag_store(tag_desc, mm);
}

/* A page is about to be swapped out. Save any ADI tags associated with
 * this physical page so they can be restored later when the page is
 * swapped back in.
 */
int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte)
{
	unsigned char *tag;
	tag_storage_desc_t *tag_desc;
	unsigned long version1, version2, paddr, tmp;

	tag_desc = alloc_tag_store(mm, vma, addr);
	if (tag_desc == NULL)
		return -1;

	tag = tag_start(addr, tag_desc);
	paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
	for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
		/* Read the 4-bit version tags for two consecutive
		 * adi_blksize() blocks and pack them into one byte.
		 */
		asm volatile("ldxa [%1] %2, %0\n\t"
				: "=r" (version1)
				: "r" (tmp), "i" (ASI_MCD_REAL));
		tmp += adi_blksize();
		asm volatile("ldxa [%1] %2, %0\n\t"
				: "=r" (version2)
				: "r" (tmp), "i" (ASI_MCD_REAL));
		*tag = (version1 << 4) | version2;
		tag++;
	}

	return 0;
}