/* The kernel call implemented in this file:
 *   m_type:	SYS_VUMAP
 *
 * The parameters for this kernel call are:
 *   m_lsys_krn_sys_vumap.endpt	(grant owner, or SELF for local addresses)
 *   m_lsys_krn_sys_vumap.vaddr	(address of virtual (input) vector)
 *   m_lsys_krn_sys_vumap.vcount	(number of elements in virtual vector)
 *   m_lsys_krn_sys_vumap.offset	(offset into first entry of input vector)
 *   m_lsys_krn_sys_vumap.access	(safecopy access requested for input)
 *   m_lsys_krn_sys_vumap.paddr	(address of physical (output) vector)
 *   m_lsys_krn_sys_vumap.pmax	(maximum number of physical vector elements)
 *   m_krn_lsys_sys_vumap.pcount	(upon return: number of elements filled)
 */
#include "kernel/system.h"

#include <assert.h>
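
/* Illustrative use (a sketch only, not code used by this file): a driver that
 * wants to DMA to or from a client's buffer fills in the request fields
 * listed above and lets the kernel resolve the ranges. The endpoint, grant
 * and size variables below are hypothetical, and in practice a driver would
 * go through the corresponding libsys wrapper rather than build the message
 * by hand.
 *
 *	struct vumap_vir vvec[1];
 *	struct vumap_phys pvec[MAPVEC_NR];
 *	message m;
 *
 *	vvec[0].vv_grant = grant;		(memory grant from the client)
 *	vvec[0].vv_size = size;			(number of bytes to transfer)
 *
 *	m.m_lsys_krn_sys_vumap.endpt = client_endpt;
 *	m.m_lsys_krn_sys_vumap.vaddr = (vir_bytes) vvec;
 *	m.m_lsys_krn_sys_vumap.vcount = 1;
 *	m.m_lsys_krn_sys_vumap.offset = 0;
 *	m.m_lsys_krn_sys_vumap.access = VUA_READ;
 *	m.m_lsys_krn_sys_vumap.paddr = (vir_bytes) pvec;
 *	m.m_lsys_krn_sys_vumap.pmax = MAPVEC_NR;
 *
 * On success, m_krn_lsys_sys_vumap.pcount tells how many (vp_addr, vp_size)
 * pairs were written to pvec; each pair describes one physically contiguous
 * range that can be handed to a DMA controller.
 */
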
/*===========================================================================*
 *				do_vumap				     *
 *===========================================================================*/
int do_vumap(struct proc *caller, message *m_ptr)
{
/* Map a vector of grants or local virtual addresses to physical addresses.
 * Designed to be used by drivers to perform an efficient lookup of physical
 * addresses for the purpose of direct DMA from/to a remote process.
 */
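  /* In outline: the virtual (input) vector is first copied in from the
   * caller, each entry is resolved to an address in the granter's address
   * space (through verify_grant() for grants, or directly for SELF), that
   * range is then walked one physically contiguous chunk at a time, and
   * finally the physical (output) vector is copied back to the caller.
   */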
  endpoint_t endpt, source, granter;
  struct proc *procp;
  struct vumap_vir vvec[MAPVEC_NR];
  struct vumap_phys pvec[MAPVEC_NR];
  vir_bytes vaddr, paddr, vir_addr;
  phys_bytes phys_addr;
  int i, r, proc_nr, vcount, pcount, pmax, access;
  size_t size, chunk, offset;

  endpt = caller->p_endpoint;

  /* Retrieve and check input parameters. */
  source = m_ptr->m_lsys_krn_sys_vumap.endpt;
  vaddr = m_ptr->m_lsys_krn_sys_vumap.vaddr;
  vcount = m_ptr->m_lsys_krn_sys_vumap.vcount;
  offset = m_ptr->m_lsys_krn_sys_vumap.offset;
  access = m_ptr->m_lsys_krn_sys_vumap.access;
  paddr = m_ptr->m_lsys_krn_sys_vumap.paddr;
  pmax = m_ptr->m_lsys_krn_sys_vumap.pmax;

  if (vcount <= 0 || pmax <= 0)
        return EINVAL;

  if (vcount > MAPVEC_NR) vcount = MAPVEC_NR;
  if (pmax > MAPVEC_NR) pmax = MAPVEC_NR;

  /* Convert access to safecopy access flags. */
  switch (access) {
  case VUA_READ:		access = CPF_READ; break;
  case VUA_WRITE:		access = CPF_WRITE; break;
  case VUA_READ|VUA_WRITE:	access = CPF_READ|CPF_WRITE; break;
  default:			return EINVAL;
  }

  /* Copy in the vector of virtual addresses. */
  size = vcount * sizeof(vvec[0]);

  if (data_copy(endpt, vaddr, KERNEL, (vir_bytes) vvec, size) != OK)
        return EFAULT;

  pcount = 0;

  /* Go through the input entries, one at a time. Stop early in case the output
   * vector has filled up.
   */
  for (i = 0; i < vcount && pcount < pmax; i++) {
        size = vvec[i].vv_size;
        if (size <= offset)
                return EINVAL;
        size -= offset;

        if (source != SELF) {
                r = verify_grant(source, endpt, vvec[i].vv_grant, size, access,
                        offset, &vir_addr, &granter, NULL);
                if (r != OK)
                        return r;
        } else {
                vir_addr = vvec[i].vv_addr + offset;
                granter = endpt;
        }

        okendpt(granter, &proc_nr);
        procp = proc_addr(proc_nr);

        /* Each virtual range is made up of one or more physical ranges. */
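        /* For example, a virtually contiguous three-page range may be backed
         * by two discontiguous physical areas, in which case it produces two
         * (vp_addr, vp_size) entries in the output vector.
         */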
        while (size > 0 && pcount < pmax) {
                chunk = vm_lookup_range(procp, vir_addr, &phys_addr, size);

                if (!chunk) {
                        /* Try to get the memory allocated, unless the memory
                         * is supposed to be there to be read from.
                         */
                        if (access & CPF_READ)
                                return EFAULT;

                        /* This call may suspend the current call, or return an
                         * error for a previous invocation.
                         */
                        return vm_check_range(caller, procp, vir_addr, size, 1);
                }

                pvec[pcount].vp_addr = phys_addr;
                pvec[pcount].vp_size = chunk;
                pcount++;

                vir_addr += chunk;
                size -= chunk;
        }

        /* Any offset applies to the first input entry only. */
        offset = 0;
  }

  /* Copy out the resulting vector of physical addresses. */
  assert(pcount > 0);

  size = pcount * sizeof(pvec[0]);

  r = data_copy_vmcheck(caller, KERNEL, (vir_bytes) pvec, endpt, paddr, size);

  if (r == OK)
        m_ptr->m_krn_lsys_sys_vumap.pcount = pcount;