arch/tile/kernel/module.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Based on i386 version, copyright (C) 2001 Rusty Russell.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/pgtable.h>
#include <asm/homecache.h>
#include <arch/opcode.h>

#ifdef MODULE_DEBUG
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
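
/*
 * Note: MODULE_DEBUG is not defined anywhere in this file; to get the
 * DEBUGP() output below it would have to be supplied at build time
 * (for instance by adding -DMODULE_DEBUG to this file's kbuild flags).
 * Otherwise the DEBUGP() calls compile away entirely.
 */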

/*
 * Allocate some address space in the range MEM_MODULE_START to
 * MEM_MODULE_END and populate it with memory.
 */
void *module_alloc(unsigned long size)
{
	struct page **pages;
	pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
	struct vm_struct *area;
	int i = 0;
	int npages;

	npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (pages == NULL)
		return NULL;
	for (; i < npages; ++i) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[i])
			goto free_pages;
	}

	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
	if (!area)
		goto free_pages;
	area->nr_pages = npages;
	area->pages = pages;

	if (map_vm_area(area, prot_rwx, pages)) {
		vunmap(area->addr);
		goto free_pages;
	}

	return area->addr;

free_pages:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}
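
/*
 * module_alloc() is the arch hook used by the generic module loader
 * (kernel/module.c) when it lays out a module's sections; the generic
 * code hands the space back via module_memfree() below on unload or
 * on a failed load.
 */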

/* Free memory returned from module_alloc */
void module_memfree(void *module_region)
{
	vfree(module_region);

	/* Globally flush the L1 icache. */
	flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
		     0, 0, 0, NULL, NULL, 0);

	/*
	 * FIXME: Add module_arch_freeing_init to trim exception
	 * table entries.
	 */
}

#ifdef __tilegx__
/*
 * Validate that the high 16 bits of "value" are just the sign-extension
 * of the low 48 bits.
 */
static int validate_hw2_last(long value, struct module *me)
{
	if (((value << 16) >> 16) != value) {
		pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
			me->name, value);
		return 0;
	}
	return 1;
}
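
/*
 * Example: 0x00007fffffffffff survives the shift round trip above and
 * is accepted, while 0x0000800000000000 is not: sign-extending its low
 * 48 bits yields 0xffff800000000000, so the compare fails and the
 * relocation is rejected.
 */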

/*
 * Validate that "value" isn't too big to hold in a JumpOff relocation.
 */
static int validate_jumpoff(long value)
{
	/* Determine size of jump offset. */
	int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));

	/* Check to see if it fits into the relocation slot. */
	long f = get_JumpOff_X1(create_JumpOff_X1(value));
	f = (f << shift) >> shift;

	return f == value;
}
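
/*
 * The check is a round trip: encode "value" into the JumpOff field,
 * decode it back, and sign-extend from the field's width (encoding -1
 * sets every field bit, so clzl of the decoded mask gives the number
 * of unused high bits).  Any bits lost in the encoding make the final
 * comparison fail.
 */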

#endif

int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u64 *location;
	unsigned long value;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/*
		 * This is the symbol it is referring to.
		 * Note that all undefined symbols have been resolved.
		 */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_R_SYM(rel[i].r_info);
		value = sym->st_value + rel[i].r_addend;

		switch (ELF_R_TYPE(rel[i].r_info)) {

#ifdef __LITTLE_ENDIAN
# define MUNGE(func) \
	(*location = ((*location & ~func(-1)) | func(value)))
#else
/*
 * Instructions are always little-endian, so when we read them as data,
 * we have to swap them around before and after modifying them.
 */
# define MUNGE(func) \
	(*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
#endif
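
/*
 * Each MUNGE(create_X) is a read-modify-write of one instruction
 * bundle: func(-1) produces a mask of the field's bits within the
 * bundle, which are cleared and then replaced with the bits of
 * "value" encoded into that field.
 */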

#ifndef __tilegx__
		case R_TILE_32:
			*(uint32_t *)location = value;
			break;
		case R_TILE_IMM16_X0_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X0_LO:
			MUNGE(create_Imm16_X0);
			break;
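
		/*
		 * The +0x8000 in the _HA cases rounds the high half up
		 * when bit 15 of the low half is set, so that a paired
		 * _LO relocation (treated as signed) restores the full
		 * value: for 0x12348765, HA = 0x1235 and LO = 0x8765
		 * (-0x789b as a signed 16-bit value), and
		 * (0x1235 << 16) - 0x789b == 0x12348765.
		 */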
		case R_TILE_IMM16_X1_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X1_LO:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILE_JOFFLONG_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;  /* count by instrs */
			MUNGE(create_JOffLong_X1);
			break;
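
		/*
		 * Tile instruction bundles are eight bytes, so the
		 * pc-relative byte offset is shifted right by three to
		 * count in bundles, which is what the jump encodings
		 * (here and in the tilegx JUMPOFF case below) expect.
		 */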
#else
		case R_TILEGX_64:
			*location = value;
			break;
		case R_TILEGX_IMM16_X0_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW0:
			MUNGE(create_Imm16_X0);
			break;
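
		/*
		 * The HW cases cascade via fallthrough: HW0 patches
		 * bits 15..0 of the symbol value, HW1 shifts once and
		 * patches bits 31..16, and HW2_LAST shifts twice and
		 * patches bits 47..32 (validated above to carry the
		 * sign of the full 64-bit value).
		 */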
		case R_TILEGX_IMM16_X1_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW0:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILEGX_JUMPOFF_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;  /* count by instrs */
			if (!validate_jumpoff(value)) {
				pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
					me->name,
					sym->st_value + rel[i].r_addend,
					rel[i].r_offset, location);
				return -ENOEXEC;
			}
			MUNGE(create_JumpOff_X1);
			break;
#endif

#undef MUNGE

		default:
			pr_err("module %s: Unknown relocation: %d\n",
			       me->name, (int) ELF_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
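
/*
 * apply_relocate_add() is invoked by the generic module loader once
 * per RELA section in the module image; a nonzero return here aborts
 * the whole module load.
 */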