/*
 *  linux/fs/proc/mem.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/bigmem.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
/*
 * mem_write isn't really a good idea right now. It needs
 * to check a lot more: if the process we try to write to
 * dies in the middle right now, mem_write will overwrite
 * kernel memory.. This disables it altogether.
 */
#define mem_write NULL
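
/*
 * check_range() returns how many bytes, starting at addr, may be read
 * from the target mm: addr must fall inside a readable vma, and the
 * readable span is extended across directly adjacent readable vmas,
 * then clamped to the requested count.
 */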
static int check_range(struct mm_struct * mm, unsigned long addr, int count)
{
	struct vm_area_struct *vma;
	int retval;
	vma = find_vma(mm, addr);
	if (!vma)
		return -EACCES;
	if (vma->vm_start > addr)
		return -EACCES;
	if (!(vma->vm_flags & VM_READ))
		return -EACCES;
	while ((retval = vma->vm_end - addr) < count) {
		struct vm_area_struct *next = vma->vm_next;
		if (!next)
			break;
		if (vma->vm_end != next->vm_start)
			break;
		if (!(next->vm_flags & VM_READ))
			break;
		vma = next;
	}
	if (retval > count)
		retval = count;
	return retval;
}
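
/*
 * get_task() maps a pid to a task_struct. A process other than current
 * itself is handed back only under the conditions ptrace would accept:
 * the target must be traced, stopped, and a child of the caller.
 */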
static struct task_struct * get_task(int pid)
{
	struct task_struct * tsk = current;

	if (pid != tsk->pid) {
		tsk = find_task_by_pid(pid);
		/* Allow accesses only under the same circumstances
		 * that we would allow ptrace to work.
		 */
		if (tsk) {
			if (!(tsk->flags & PF_PTRACED)
			    || tsk->state != TASK_STOPPED
			    || tsk->p_pptr != current)
				tsk = NULL;
		}
	}
	return tsk;
}
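
/*
 * mem_read() implements read() on /proc/<pid>/mem: the target pid is
 * encoded in the upper bits of the procfs inode number, and the data
 * is fetched by walking the target's page tables directly.
 */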
static ssize_t mem_read(struct file * file, char * buf,
			size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	ssize_t scount, i;
	read_lock(&tasklist_lock);
	tsk = get_task(inode->i_ino >> 16);
	read_unlock(&tasklist_lock);	/* FIXME: This should really be done only after not using tsk any more!!! */
	if (!tsk)
		return -ESRCH;
	addr = *ppos;
	scount = check_range(tsk->mm, addr, count);
	if (scount < 0)
		return scount;
	tmp = buf;
	while (scount > 0) {
		if (signal_pending(current))
			break;
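		/*
		 * Walk the target's page tables by hand; a hole or a bad
		 * entry simply ends the loop, yielding a short read.
		 */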
		page_dir = pgd_offset(tsk->mm,addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir,addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle,addr);
		if (!pte_present(pte))
			break;
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > scount)
			i = scount;
		page = (char *) kmap((unsigned long) page, KM_READ);
		copy_to_user(tmp, page, i);
		kunmap((unsigned long) page, KM_READ);
		addr += i;
		tmp += i;
		scount -= i;
	}
	*ppos = addr;
	return tmp-buf;
}
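
/*
 * The mem_write() below is compiled out: the #define at the top of
 * this file puts NULL in the file_operations write slot instead, for
 * the reasons given there. The code is kept for reference only.
 */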
#ifndef mem_write
static ssize_t mem_write(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	struct inode * inode = file->f_dentry->d_inode;
	pgd_t *page_dir;
	pmd_t *page_middle;
	pte_t pte;
	char * page;
	struct task_struct * tsk;
	unsigned long addr;
	char *tmp;
	long i;

	addr = *ppos;
	tsk = get_task(inode->i_ino >> 16);
	if (!tsk)
		return -ESRCH;
	tmp = buf;
	while (count > 0) {
		if (signal_pending(current))
			break;
		page_dir = pgd_offset(tsk->mm,addr);
		if (pgd_none(*page_dir))
			break;
		if (pgd_bad(*page_dir)) {
			printk("Bad page dir entry %08lx\n", pgd_val(*page_dir));
			pgd_clear(page_dir);
			break;
		}
		page_middle = pmd_offset(page_dir,addr);
		if (pmd_none(*page_middle))
			break;
		if (pmd_bad(*page_middle)) {
			printk("Bad page middle entry %08lx\n", pmd_val(*page_middle));
			pmd_clear(page_middle);
			break;
		}
		pte = *pte_offset(page_middle,addr);
		if (!pte_present(pte))
			break;
		if (!pte_write(pte))
			break;
		page = (char *) pte_page(pte) + (addr & ~PAGE_MASK);
		i = PAGE_SIZE-(addr & ~PAGE_MASK);
		if (i > count)
			i = count;
		page = (char *) kmap((unsigned long) page, KM_WRITE);
		copy_from_user(page, tmp, i);
		kunmap((unsigned long) page, KM_WRITE);
		addr += i;
		tmp += i;
		count -= i;
	}
	*ppos = addr;
	if (tmp != buf)
		return tmp-buf;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
#endif
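
/*
 * Plain absolute (orig == 0) and relative (orig == 1) seeks; seeking
 * relative to the end makes no sense for a process address space, so
 * anything else is rejected.
 */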
static long long mem_lseek(struct file * file, long long offset, int orig)
{
	switch (orig) {
		case 0:
			file->f_pos = offset;
			return file->f_pos;
		case 1:
			file->f_pos += offset;
			return file->f_pos;
		default:
			return -EINVAL;
	}
}
/*
 * This isn't really reliable by any means..
 */
int mem_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct task_struct *tsk;
	pgd_t *src_dir, *dest_dir;
	pmd_t *src_middle, *dest_middle;
	pte_t *src_table, *dest_table;
	unsigned long stmp, dtmp, mapnr;
	struct vm_area_struct *src_vma = NULL;
	struct inode *inode = file->f_dentry->d_inode;
	/* Get the source's task information */

	tsk = get_task(inode->i_ino >> 16);

	if (!tsk)
		return -ESRCH;

	/* Ensure that we have a valid source area.  (Has to be mmap'ed and
	   have valid page information.)  We can't map shared memory at the
	   moment because working out the vm_area_struct & nattach stuff isn't
	   worth it. */
	src_vma = tsk->mm->mmap;
	stmp = vma->vm_offset;
	while (stmp < vma->vm_offset + (vma->vm_end - vma->vm_start)) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;
		if (!src_vma || (src_vma->vm_flags & VM_SHM))
			return -EINVAL;
		src_dir = pgd_offset(tsk->mm, stmp);
		if (pgd_none(*src_dir))
			return -EINVAL;
		if (pgd_bad(*src_dir)) {
			printk("Bad source page dir entry %08lx\n", pgd_val(*src_dir));
			return -EINVAL;
		}
		src_middle = pmd_offset(src_dir, stmp);
		if (pmd_none(*src_middle))
			return -EINVAL;
		if (pmd_bad(*src_middle)) {
			printk("Bad source page middle entry %08lx\n", pmd_val(*src_middle));
			return -EINVAL;
		}
		src_table = pte_offset(src_middle, stmp);
		if (pte_none(*src_table))
			return -EINVAL;
		if (stmp < src_vma->vm_start) {
			if (!(src_vma->vm_flags & VM_GROWSDOWN))
				return -EINVAL;
			if (src_vma->vm_end - stmp > current->rlim[RLIMIT_STACK].rlim_cur)
				return -EINVAL;
		}
		stmp += PAGE_SIZE;
	}
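
	/* The source range checked out fine; now make a second pass that
	   actually copies the page table entries into the caller's
	   address space, faulting source pages in where needed. */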
	src_vma = tsk->mm->mmap;
	stmp    = vma->vm_offset;
	dtmp    = vma->vm_start;

	flush_cache_range(vma->vm_mm, vma->vm_start, vma->vm_end);
	flush_cache_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
	while (dtmp < vma->vm_end) {
		while (src_vma && stmp > src_vma->vm_end)
			src_vma = src_vma->vm_next;

		src_dir = pgd_offset(tsk->mm, stmp);
		src_middle = pmd_offset(src_dir, stmp);
		src_table = pte_offset(src_middle, stmp);
		dest_dir = pgd_offset(current->mm, dtmp);
		dest_middle = pmd_alloc(dest_dir, dtmp);
		if (!dest_middle)
			return -ENOMEM;
		dest_table = pte_alloc(dest_middle, dtmp);
		if (!dest_table)
			return -ENOMEM;
		if (!pte_present(*src_table))
			handle_mm_fault(tsk, src_vma, stmp, 1);

		if ((vma->vm_flags & VM_WRITE) && !pte_write(*src_table))
			handle_mm_fault(tsk, src_vma, stmp, 1);
		set_pte(src_table, pte_mkdirty(*src_table));
		set_pte(dest_table, *src_table);
		mapnr = MAP_NR(pte_page(*src_table));
		if (mapnr < max_mapnr)
			get_page(mem_map + MAP_NR(pte_page(*src_table)));

		stmp += PAGE_SIZE;
		dtmp += PAGE_SIZE;
	}
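
	/* Both sets of page tables were modified behind the MMU's back;
	   flush the stale translations on both sides. */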
	flush_tlb_range(vma->vm_mm, vma->vm_start, vma->vm_end);
	flush_tlb_range(src_vma->vm_mm, src_vma->vm_start, src_vma->vm_end);
	return 0;
}
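
/*
 * Note that the write slot below expands to NULL through the
 * "#define mem_write NULL" at the top, so /proc/<pid>/mem stays
 * read-only.
 */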
static struct file_operations proc_mem_operations = {
	mem_lseek,
	mem_read,
	mem_write,
	NULL,		/* mem_readdir */
	NULL,		/* mem_poll */
	NULL,		/* mem_ioctl */
	mem_mmap,	/* mmap */
	NULL,		/* no special open code */
	NULL,		/* flush */
	NULL,		/* no special release code */
	NULL		/* can't fsync */
};
struct inode_operations proc_mem_inode_operations = {
	&proc_mem_operations,	/* default base directory file-ops */
	NULL,			/* create */
	NULL,			/* lookup */
	NULL,			/* link */
	NULL,			/* unlink */
	NULL,			/* symlink */
	NULL,			/* mkdir */
	NULL,			/* rmdir */
	NULL,			/* mknod */
	NULL,			/* rename */
	NULL,			/* readlink */
	NULL,			/* follow_link */
	NULL,			/* get_block */
	NULL,			/* readpage */
	NULL,			/* writepage */
	NULL,			/* flushpage */
	NULL,			/* truncate */
	proc_permission,	/* permission */
	NULL,			/* smap */
	NULL			/* revalidate */
};