1 /*-------------------------------------------------------------
3 ipc.c -- Interprocess Communication with Starlet
6 Michael Wiedenbauer (shagkur)
7 Dave Murphy (WinterMute)
10 This software is provided 'as-is', without any express or implied
11 warranty. In no event will the authors be held liable for any
12 damages arising from the use of this software.
14 Permission is granted to anyone to use this software for any
15 purpose, including commercial applications, and to alter it and
16 redistribute it freely, subject to the following restrictions:
18 1. The origin of this software must not be misrepresented; you
19 must not claim that you wrote the original software. If you use
20 this software in a product, an acknowledgment in the product
21 documentation would be appreciated but is not required.
23 2. Altered source versions must be plainly marked as such, and
24 must not be misrepresented as being the original software.
26 3. This notice may not be removed or altered from any source
29 -------------------------------------------------------------*/
42 #include "processor.h"
49 #include "lwp_wkspace.h"
/* IPC heap / request sizing */
#define IPC_HEAP_SIZE			4096
#define IPC_REQUESTSIZE			64
#define IPC_NUMHEAPS			16

/* maximum number of parameters accepted by the ioctlv-format helpers */
#define IOS_MAXFMT_PARAMS		32

/* IOS command codes (written into req->cmd).
 * IOS_OPEN/IOS_READ/IOS_SEEK were missing from the mangled source but are
 * referenced below (IOS_READ in the reply handler, IOS_SEEK in IOS_Seek). */
#define IOS_OPEN				0x01
#define IOS_CLOSE				0x02
#define IOS_READ				0x03
#define IOS_WRITE				0x04
#define IOS_SEEK				0x05
#define IOS_IOCTL				0x06
#define IOS_IOCTLV				0x07

/* flags for requests that make IOS relaunch (reload) itself */
#define RELNCH_RELAUNCH			1
#define RELNCH_BACKGROUND		2
71 { //ipc struct size: 32
102 struct _ioctlv
*argv
;
110 lwpq_t syncqueue
; //44
111 u32 magic
; //48 - used to avoid spurious responses, like from zelda.
112 u8 pad1
[12]; //52 - 60
121 struct _ipcreq
*reqs
[16];
131 struct _ioctlvfmt_bufent
138 struct _ioctlvfmt_cbdata
144 struct _ioctlvfmt_bufent
*bufs
;
147 static u32 IPC_REQ_MAGIC
;
149 static s32 _ipc_hid
= -1;
150 static s32 _ipc_mailboxack
= 1;
151 static u32 _ipc_relnchFl
= 0;
152 static u32 _ipc_initialized
= 0;
153 static u32 _ipc_clntinitialized
= 0;
154 static u64 _ipc_spuriousresponsecnt
= 0;
155 static struct _ipcreq
*_ipc_relnchRpc
= NULL
;
157 static void *_ipc_bufferlo
= NULL
;
158 static void *_ipc_bufferhi
= NULL
;
159 static void *_ipc_currbufferlo
= NULL
;
160 static void *_ipc_currbufferhi
= NULL
;
162 static u32 _ipc_seed
= 0xffffffff;
164 static struct _ipcreqres _ipc_responses
;
166 static struct _ipcheap _ipc_heaps
[IPC_NUMHEAPS
] =
168 {NULL
, 0, {}} // all other elements should be inited to zero, says C standard, so this should do
171 static vu32
* const _ipcReg
= (u32
*)0xCD000000;
173 extern void __MaskIrq(u32 nMask
);
174 extern void __UnmaskIrq(u32 nMask
);
175 extern void* __SYS_GetIPCBufferLo(void);
176 extern void* __SYS_GetIPCBufferHi(void);
178 extern u32
gettick();
180 static __inline__ u32
IPC_ReadReg(u32 reg
)
185 static __inline__
void IPC_WriteReg(u32 reg
,u32 val
)
190 static __inline__
void ACR_WriteReg(u32 reg
,u32 val
)
192 _ipcReg
[reg
>>2] = val
;
195 static __inline__
void* __ipc_allocreq()
197 return iosAlloc(_ipc_hid
,IPC_REQUESTSIZE
);
200 static __inline__
void __ipc_freereq(void *ptr
)
202 iosFree(_ipc_hid
,ptr
);
205 static __inline__
void __ipc_srand(u32 seed
)
210 static __inline__ u32
__ipc_rand()
212 _ipc_seed
= (214013*_ipc_seed
) + 2531011;
216 static s32
__ioctlvfmtCB(s32 result
,void *userdata
)
220 struct _ioctlvfmt_cbdata
*cbdata
;
221 struct _ioctlvfmt_bufent
*pbuf
;
223 cbdata
= (struct _ioctlvfmt_cbdata
*)userdata
;
225 // deal with data buffers
228 while(cbdata
->num_bufs
--) {
230 // copy data if needed
231 if(pbuf
->io_buf
&& pbuf
->copy_len
)
232 memcpy(pbuf
->io_buf
, pbuf
->ipc_buf
, pbuf
->copy_len
);
233 // then free the buffer
234 iosFree(cbdata
->hId
, pbuf
->ipc_buf
);
240 user_cb
= cbdata
->user_cb
;
241 user_data
= cbdata
->user_data
;
244 __lwp_wkspace_free(cbdata
->bufs
);
246 // free callback data
247 __lwp_wkspace_free(cbdata
);
249 // call the user callback
251 return user_cb(result
, user_data
);
256 static s32
__ipc_queuerequest(struct _ipcreq
*req
)
261 printf("__ipc_queuerequest(0x%p)\n",req
);
263 _CPU_ISR_Disable(level
);
265 cnt
= (_ipc_responses
.cnt_queue
- _ipc_responses
.cnt_sent
);
267 _CPU_ISR_Restore(level
);
268 return IPC_EQUEUEFULL
;
271 _ipc_responses
.reqs
[_ipc_responses
.req_queue_no
] = req
;
272 _ipc_responses
.req_queue_no
= ((_ipc_responses
.req_queue_no
+1)&0x0f);
273 _ipc_responses
.cnt_queue
++;
275 _CPU_ISR_Restore(level
);
279 static s32
__ipc_syncqueuerequest(struct _ipcreq
*req
)
283 printf("__ipc_syncqueuerequest(0x%p)\n",req
);
285 cnt
= (_ipc_responses
.cnt_queue
- _ipc_responses
.cnt_sent
);
287 return IPC_EQUEUEFULL
;
290 _ipc_responses
.reqs
[_ipc_responses
.req_queue_no
] = req
;
291 _ipc_responses
.req_queue_no
= ((_ipc_responses
.req_queue_no
+1)&0x0f);
292 _ipc_responses
.cnt_queue
++;
297 static void __ipc_sendrequest()
303 printf("__ipc_sendrequest()\n");
305 cnt
= (_ipc_responses
.cnt_queue
- _ipc_responses
.cnt_sent
);
307 req
= _ipc_responses
.reqs
[_ipc_responses
.req_send_no
];
309 req
->magic
= IPC_REQ_MAGIC
;
310 if(req
->relnch
&RELNCH_RELAUNCH
) {
312 _ipc_relnchRpc
= req
;
313 if(!(req
->relnch
&RELNCH_BACKGROUND
))
316 DCFlushRange(req
,sizeof(struct _ipcreq
));
318 IPC_WriteReg(0,MEM_VIRTUAL_TO_PHYSICAL(req
));
319 _ipc_responses
.req_send_no
= ((_ipc_responses
.req_send_no
+1)&0x0f);
320 _ipc_responses
.cnt_sent
++;
323 ipc_send
= ((IPC_ReadReg(1)&0x30)|0x01);
324 IPC_WriteReg(1,ipc_send
);
329 static void __ipc_replyhandler()
332 struct _ipcreq
*req
= NULL
;
335 printf("__ipc_replyhandler()\n");
337 req
= (struct _ipcreq
*)IPC_ReadReg(2);
338 if(req
==NULL
) return;
340 ipc_ack
= ((IPC_ReadReg(1)&0x30)|0x04);
341 IPC_WriteReg(1,ipc_ack
);
342 ACR_WriteReg(48,0x40000000);
344 req
= MEM_PHYSICAL_TO_K0(req
);
345 DCInvalidateRange(req
,32);
347 if(req
->magic
==IPC_REQ_MAGIC
) {
349 printf("IPC res: cmd %08x rcmd %08x res %08x\n",req
->cmd
,req
->req_cmd
,req
->result
);
351 if(req
->req_cmd
==IOS_READ
) {
352 if(req
->read
.data
!=NULL
) {
353 req
->read
.data
= MEM_PHYSICAL_TO_K0(req
->read
.data
);
354 if(req
->result
>0) DCInvalidateRange(req
->read
.data
,req
->result
);
356 } else if(req
->req_cmd
==IOS_IOCTL
) {
357 if(req
->ioctl
.buffer_io
!=NULL
) {
358 req
->ioctl
.buffer_io
= MEM_PHYSICAL_TO_K0(req
->ioctl
.buffer_io
);
359 DCInvalidateRange(req
->ioctl
.buffer_io
,req
->ioctl
.len_io
);
361 DCInvalidateRange(req
->ioctl
.buffer_in
,req
->ioctl
.len_in
);
362 } else if(req
->req_cmd
==IOS_IOCTLV
) {
363 if(req
->ioctlv
.argv
!=NULL
) {
364 req
->ioctlv
.argv
= MEM_PHYSICAL_TO_K0(req
->ioctlv
.argv
);
365 DCInvalidateRange(req
->ioctlv
.argv
,((req
->ioctlv
.argcin
+req
->ioctlv
.argcio
)*sizeof(struct _ioctlv
)));
369 v
= (ioctlv
*)req
->ioctlv
.argv
;
370 while(cnt
<(req
->ioctlv
.argcin
+req
->ioctlv
.argcio
)) {
371 if(v
[cnt
].data
!=NULL
) {
372 v
[cnt
].data
= MEM_PHYSICAL_TO_K0(v
[cnt
].data
);
373 DCInvalidateRange(v
[cnt
].data
,v
[cnt
].len
);
377 if(_ipc_relnchFl
&& _ipc_relnchRpc
==req
) {
379 if(_ipc_mailboxack
<1) _ipc_mailboxack
++;
385 req
->cb(req
->result
,req
->usrdata
);
388 LWP_ThreadSignal(req
->syncqueue
);
390 // NOTE: we really want to find out if this ever happens
391 // and take steps to prevent it beforehand (because it will
392 // clobber memory, among other things). I suggest leaving this in
393 // even in non-DEBUG mode. Maybe even cause a system halt.
394 // It is the responsibility of the loader to clear these things,
395 // but we want to find out if they happen so loaders can be fixed.
397 printf("Received unknown IPC response (magic %08x):\n", req
->magic
);
398 printf(" CMD %08x RES %08x REQCMD %08x\n", req
->cmd
, req
->result
, req
->req_cmd
);
399 printf(" Args: %08x %08x %08x %08x %08x\n", req
->args
[0], req
->args
[1], req
->args
[2], req
->args
[3], req
->args
[4]);
400 printf(" CB %08x DATA %08x REL %08x QUEUE %08x\n", (u32
)req
->cb
, (u32
)req
->usrdata
, req
->relnch
, (u32
)req
->syncqueue
);
402 _ipc_spuriousresponsecnt
++;
404 ipc_ack
= ((IPC_ReadReg(1)&0x30)|0x08);
405 IPC_WriteReg(1,ipc_ack
);
408 static void __ipc_ackhandler()
412 printf("__ipc_ackhandler()\n");
414 ipc_ack
= ((IPC_ReadReg(1)&0x30)|0x02);
415 IPC_WriteReg(1,ipc_ack
);
416 ACR_WriteReg(48,0x40000000);
418 if(_ipc_mailboxack
<1) _ipc_mailboxack
++;
419 if(_ipc_mailboxack
>0) {
421 _ipc_relnchRpc
->result
= 0;
424 LWP_ThreadSignal(_ipc_relnchRpc
->syncqueue
);
426 ipc_ack
= ((IPC_ReadReg(1)&0x30)|0x08);
427 IPC_WriteReg(1,ipc_ack
);
434 static void __ipc_interrupthandler(u32 irq
,void *ctx
)
438 printf("__ipc_interrupthandler(%d)\n",irq
);
440 ipc_int
= IPC_ReadReg(1);
441 if((ipc_int
&0x0014)==0x0014) __ipc_replyhandler();
443 ipc_int
= IPC_ReadReg(1);
444 if((ipc_int
&0x0022)==0x0022) __ipc_ackhandler();
447 static s32
__ios_ioctlvformat_parse(const char *format
,va_list args
,struct _ioctlvfmt_cbdata
*cbdata
,s32
*cnt_in
,s32
*cnt_io
,struct _ioctlv
**argv
,s32 hId
)
455 struct _ioctlvfmt_bufent
*bufp
;
457 if(hId
== IPC_HEAP
) hId
= _ipc_hid
;
458 if(hId
< 0) return IPC_EINVAL
;
460 maxbufs
= strnlen(format
,IOS_MAXFMT_PARAMS
);
461 if(maxbufs
>=IOS_MAXFMT_PARAMS
) return IPC_EINVAL
;
464 cbdata
->bufs
= __lwp_wkspace_allocate((sizeof(struct _ioctlvfmt_bufent
)*(maxbufs
+1)));
465 if(cbdata
->bufs
==NULL
) return IPC_ENOMEM
;
467 argp
= iosAlloc(hId
,(sizeof(struct _ioctlv
)*(maxbufs
+1)));
469 __lwp_wkspace_free(cbdata
->bufs
);
475 memset(argp
,0,(sizeof(struct _ioctlv
)*(maxbufs
+1)));
476 memset(bufp
,0,(sizeof(struct _ioctlvfmt_bufent
)*(maxbufs
+1)));
478 cbdata
->num_bufs
= 1;
479 bufp
->ipc_buf
= argp
;
487 type
= tolower((int)*format
);
490 pdata
= iosAlloc(hId
,sizeof(u8
));
495 *(u8
*)pdata
= va_arg(args
,u32
);
497 argp
->len
= sizeof(u8
);
498 bufp
->ipc_buf
= pdata
;
505 pdata
= iosAlloc(hId
,sizeof(u16
));
510 *(u16
*)pdata
= va_arg(args
,u32
);
512 argp
->len
= sizeof(u16
);
513 bufp
->ipc_buf
= pdata
;
520 pdata
= iosAlloc(hId
,sizeof(u32
));
525 *(u32
*)pdata
= va_arg(args
,u32
);
527 argp
->len
= sizeof(u32
);
528 bufp
->ipc_buf
= pdata
;
535 pdata
= iosAlloc(hId
,sizeof(u64
));
540 *(u64
*)pdata
= va_arg(args
,u64
);
542 argp
->len
= sizeof(u64
);
543 bufp
->ipc_buf
= pdata
;
550 argp
->data
= va_arg(args
, void*);
551 argp
->len
= va_arg(args
, u32
);
556 ps
= va_arg(args
, char*);
557 len
= strnlen(ps
,256);
563 pdata
= iosAlloc(hId
,(len
+1));
568 memcpy(pdata
,ps
,(len
+1));
571 bufp
->ipc_buf
= pdata
;
579 goto parse_io_params
;
589 type
= tolower((int)*format
);
592 pdata
= iosAlloc(hId
,sizeof(u8
));
597 iodata
= va_arg(args
,u8
*);
598 *(u8
*)pdata
= *(u8
*)iodata
;
600 argp
->len
= sizeof(u8
);
601 bufp
->ipc_buf
= pdata
;
602 bufp
->io_buf
= iodata
;
603 bufp
->copy_len
= sizeof(u8
);
610 pdata
= iosAlloc(hId
,sizeof(u16
));
615 iodata
= va_arg(args
,u16
*);
616 *(u16
*)pdata
= *(u16
*)iodata
;
618 argp
->len
= sizeof(u16
);
619 bufp
->ipc_buf
= pdata
;
620 bufp
->io_buf
= iodata
;
621 bufp
->copy_len
= sizeof(u16
);
628 pdata
= iosAlloc(hId
,sizeof(u32
));
633 iodata
= va_arg(args
,u32
*);
634 *(u32
*)pdata
= *(u32
*)iodata
;
636 argp
->len
= sizeof(u32
);
637 bufp
->ipc_buf
= pdata
;
638 bufp
->io_buf
= iodata
;
639 bufp
->copy_len
= sizeof(u32
);
646 pdata
= iosAlloc(hId
,sizeof(u64
));
651 iodata
= va_arg(args
,u64
*);
652 *(u64
*)pdata
= *(u64
*)iodata
;
654 argp
->len
= sizeof(u64
);
655 bufp
->ipc_buf
= pdata
;
656 bufp
->io_buf
= iodata
;
657 bufp
->copy_len
= sizeof(u64
);
664 argp
->data
= va_arg(args
, void*);
665 argp
->len
= va_arg(args
, u32
);
678 for(i
=0;i
<cbdata
->num_bufs
;i
++) {
679 if(cbdata
->bufs
[i
].ipc_buf
!=NULL
) iosFree(hId
,cbdata
->bufs
[i
].ipc_buf
);
681 __lwp_wkspace_free(cbdata
->bufs
);
685 static s32
__ipc_asyncrequest(struct _ipcreq
*req
)
690 ret
= __ipc_queuerequest(req
);
691 if(ret
) __ipc_freereq(req
);
693 _CPU_ISR_Disable(level
);
694 if(_ipc_mailboxack
>0) __ipc_sendrequest();
695 _CPU_ISR_Restore(level
);
700 static s32
__ipc_syncrequest(struct _ipcreq
*req
)
705 LWP_InitQueue(&req
->syncqueue
);
707 _CPU_ISR_Disable(level
);
708 ret
= __ipc_syncqueuerequest(req
);
710 if(_ipc_mailboxack
>0) __ipc_sendrequest();
711 LWP_ThreadSleep(req
->syncqueue
);
714 _CPU_ISR_Restore(level
);
716 LWP_CloseQueue(req
->syncqueue
);
720 s32
iosCreateHeap(s32 size
)
727 printf("iosCreateHeap(%d)\n",size
);
729 _CPU_ISR_Disable(level
);
732 while(i
<IPC_NUMHEAPS
) {
733 if(_ipc_heaps
[i
].membase
==NULL
) break;
736 if(i
>=IPC_NUMHEAPS
) {
737 _CPU_ISR_Restore(level
);
741 ipclo
= (((u32
)IPC_GetBufferLo()+0x1f)&~0x1f);
742 ipchi
= (u32
)IPC_GetBufferHi();
743 free
= (ipchi
- (ipclo
+ size
));
744 if(free
<0) return IPC_ENOMEM
;
746 _ipc_heaps
[i
].membase
= (void*)ipclo
;
747 _ipc_heaps
[i
].size
= size
;
749 ret
= __lwp_heap_init(&_ipc_heaps
[i
].heap
,(void*)ipclo
,size
,PPC_CACHE_ALIGNMENT
);
750 if(ret
<=0) return IPC_ENOMEM
;
752 IPC_SetBufferLo((void*)(ipclo
+size
));
753 _CPU_ISR_Restore(level
);
757 void* iosAlloc(s32 hid
,s32 size
)
760 printf("iosAlloc(%d,%d)\n",hid
,size
);
762 if(hid
<0 || hid
>=IPC_NUMHEAPS
|| size
<=0) return NULL
;
763 return __lwp_heap_allocate(&_ipc_heaps
[hid
].heap
,size
);
766 void iosFree(s32 hid
,void *ptr
)
769 printf("iosFree(%d,0x%p)\n",hid
,ptr
);
771 if(hid
<0 || hid
>=IPC_NUMHEAPS
|| ptr
==NULL
) return;
772 __lwp_heap_free(&_ipc_heaps
[hid
].heap
,ptr
);
775 void* IPC_GetBufferLo()
777 return _ipc_currbufferlo
;
780 void* IPC_GetBufferHi()
782 return _ipc_currbufferhi
;
785 void IPC_SetBufferLo(void *bufferlo
)
787 if(_ipc_bufferlo
<=bufferlo
) _ipc_currbufferlo
= bufferlo
;
790 void IPC_SetBufferHi(void *bufferhi
)
792 if(bufferhi
<=_ipc_bufferhi
) _ipc_currbufferhi
= bufferhi
;
795 void __IPC_Init(void)
797 if(!_ipc_initialized
) {
798 _ipc_bufferlo
= _ipc_currbufferlo
= __SYS_GetIPCBufferLo();
799 _ipc_bufferhi
= _ipc_currbufferhi
= __SYS_GetIPCBufferHi();
800 _ipc_initialized
= 1;
804 u32
__IPC_ClntInit(void)
806 if(!_ipc_clntinitialized
) {
807 _ipc_clntinitialized
= 1;
809 // generate a random request magic
810 __ipc_srand(gettick());
811 IPC_REQ_MAGIC
= __ipc_rand();
815 _ipc_hid
= iosCreateHeap(IPC_HEAP_SIZE
);
816 IRQ_Request(IRQ_PI_ACR
,__ipc_interrupthandler
,NULL
);
817 __UnmaskIrq(IM_PI_ACR
);
823 void __IPC_Reinitialize(void)
827 _CPU_ISR_Disable(level
);
833 _ipc_relnchRpc
= NULL
;
835 _ipc_responses
.req_queue_no
= 0;
836 _ipc_responses
.cnt_queue
= 0;
837 _ipc_responses
.req_send_no
= 0;
838 _ipc_responses
.cnt_sent
= 0;
840 _CPU_ISR_Restore(level
);
843 s32
IOS_Open(const char *filepath
,u32 mode
)
848 if(filepath
==NULL
) return IPC_EINVAL
;
850 req
= __ipc_allocreq();
851 if(req
==NULL
) return IPC_ENOMEM
;
857 DCFlushRange((void*)filepath
,strnlen(filepath
,IPC_MAXPATH_LEN
) + 1);
859 req
->open
.filepath
= (char*)MEM_VIRTUAL_TO_PHYSICAL(filepath
);
860 req
->open
.mode
= mode
;
862 ret
= __ipc_syncrequest(req
);
864 if(req
!=NULL
) __ipc_freereq(req
);
868 s32
IOS_OpenAsync(const char *filepath
,u32 mode
,ipccallback ipc_cb
,void *usrdata
)
872 req
= __ipc_allocreq();
873 if(req
==NULL
) return IPC_ENOMEM
;
877 req
->usrdata
= usrdata
;
880 DCFlushRange((void*)filepath
,strnlen(filepath
,IPC_MAXPATH_LEN
) + 1);
882 req
->open
.filepath
= (char*)MEM_VIRTUAL_TO_PHYSICAL(filepath
);
883 req
->open
.mode
= mode
;
885 return __ipc_asyncrequest(req
);
888 s32
IOS_Close(s32 fd
)
893 req
= __ipc_allocreq();
894 if(req
==NULL
) return IPC_ENOMEM
;
896 req
->cmd
= IOS_CLOSE
;
901 ret
= __ipc_syncrequest(req
);
903 if(req
!=NULL
) __ipc_freereq(req
);
907 s32
IOS_CloseAsync(s32 fd
,ipccallback ipc_cb
,void *usrdata
)
911 req
= __ipc_allocreq();
912 if(req
==NULL
) return IPC_ENOMEM
;
914 req
->cmd
= IOS_CLOSE
;
917 req
->usrdata
= usrdata
;
920 return __ipc_asyncrequest(req
);
923 s32
IOS_Read(s32 fd
,void *buf
,s32 len
)
928 req
= __ipc_allocreq();
929 if(req
==NULL
) return IPC_ENOMEM
;
936 DCInvalidateRange(buf
,len
);
937 req
->read
.data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buf
);
940 ret
= __ipc_syncrequest(req
);
942 if(req
!=NULL
) __ipc_freereq(req
);
946 s32
IOS_ReadAsync(s32 fd
,void *buf
,s32 len
,ipccallback ipc_cb
,void *usrdata
)
950 req
= __ipc_allocreq();
951 if(req
==NULL
) return IPC_ENOMEM
;
956 req
->usrdata
= usrdata
;
959 DCInvalidateRange(buf
,len
);
960 req
->read
.data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buf
);
963 return __ipc_asyncrequest(req
);
966 s32
IOS_Write(s32 fd
,const void *buf
,s32 len
)
971 req
= __ipc_allocreq();
972 if(req
==NULL
) return IPC_ENOMEM
;
974 req
->cmd
= IOS_WRITE
;
979 DCFlushRange((void*)buf
,len
);
980 req
->write
.data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buf
);
981 req
->write
.len
= len
;
983 ret
= __ipc_syncrequest(req
);
985 if(req
!=NULL
) __ipc_freereq(req
);
989 s32
IOS_WriteAsync(s32 fd
,const void *buf
,s32 len
,ipccallback ipc_cb
,void *usrdata
)
993 req
= __ipc_allocreq();
994 if(req
==NULL
) return IPC_ENOMEM
;
996 req
->cmd
= IOS_WRITE
;
999 req
->usrdata
= usrdata
;
1002 DCFlushRange((void*)buf
,len
);
1003 req
->write
.data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buf
);
1004 req
->write
.len
= len
;
1006 return __ipc_asyncrequest(req
);
1009 s32
IOS_Seek(s32 fd
,s32 where
,s32 whence
)
1012 struct _ipcreq
*req
;
1014 req
= __ipc_allocreq();
1015 if(req
==NULL
) return IPC_ENOMEM
;
1017 req
->cmd
= IOS_SEEK
;
1022 req
->seek
.where
= where
;
1023 req
->seek
.whence
= whence
;
1025 ret
= __ipc_syncrequest(req
);
1027 if(req
!=NULL
) __ipc_freereq(req
);
1031 s32
IOS_SeekAsync(s32 fd
,s32 where
,s32 whence
,ipccallback ipc_cb
,void *usrdata
)
1033 struct _ipcreq
*req
;
1035 req
= __ipc_allocreq();
1036 if(req
==NULL
) return IPC_ENOMEM
;
1038 req
->cmd
= IOS_SEEK
;
1041 req
->usrdata
= usrdata
;
1044 req
->seek
.where
= where
;
1045 req
->seek
.whence
= whence
;
1047 return __ipc_asyncrequest(req
);
1050 s32
IOS_Ioctl(s32 fd
,s32 ioctl
,void *buffer_in
,s32 len_in
,void *buffer_io
,s32 len_io
)
1053 struct _ipcreq
*req
;
1055 req
= __ipc_allocreq();
1056 if(req
==NULL
) return IPC_ENOMEM
;
1058 req
->cmd
= IOS_IOCTL
;
1063 req
->ioctl
.ioctl
= ioctl
;
1064 req
->ioctl
.buffer_in
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_in
);
1065 req
->ioctl
.len_in
= len_in
;
1066 req
->ioctl
.buffer_io
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_io
);
1067 req
->ioctl
.len_io
= len_io
;
1069 DCFlushRange(buffer_in
,len_in
);
1070 DCFlushRange(buffer_io
,len_io
);
1072 ret
= __ipc_syncrequest(req
);
1074 if(req
!=NULL
) __ipc_freereq(req
);
1078 s32
IOS_IoctlAsync(s32 fd
,s32 ioctl
,void *buffer_in
,s32 len_in
,void *buffer_io
,s32 len_io
,ipccallback ipc_cb
,void *usrdata
)
1080 struct _ipcreq
*req
;
1082 req
= __ipc_allocreq();
1083 if(req
==NULL
) return IPC_ENOMEM
;
1085 req
->cmd
= IOS_IOCTL
;
1088 req
->usrdata
= usrdata
;
1091 req
->ioctl
.ioctl
= ioctl
;
1092 req
->ioctl
.buffer_in
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_in
);
1093 req
->ioctl
.len_in
= len_in
;
1094 req
->ioctl
.buffer_io
= (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_io
);
1095 req
->ioctl
.len_io
= len_io
;
1097 DCFlushRange(buffer_in
,len_in
);
1098 DCFlushRange(buffer_io
,len_io
);
1100 return __ipc_asyncrequest(req
);
1103 s32
IOS_Ioctlv(s32 fd
,s32 ioctl
,s32 cnt_in
,s32 cnt_io
,ioctlv
*argv
)
1106 struct _ipcreq
*req
;
1108 req
= __ipc_allocreq();
1109 if(req
==NULL
) return IPC_ENOMEM
;
1111 req
->cmd
= IOS_IOCTLV
;
1116 req
->ioctlv
.ioctl
= ioctl
;
1117 req
->ioctlv
.argcin
= cnt_in
;
1118 req
->ioctlv
.argcio
= cnt_io
;
1119 req
->ioctlv
.argv
= (struct _ioctlv
*)MEM_VIRTUAL_TO_PHYSICAL(argv
);
1123 if(argv
[i
].data
!=NULL
&& argv
[i
].len
>0) {
1124 DCFlushRange(argv
[i
].data
,argv
[i
].len
);
1125 argv
[i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[i
].data
);
1132 if(argv
[cnt_in
+i
].data
!=NULL
&& argv
[cnt_in
+i
].len
>0) {
1133 DCFlushRange(argv
[cnt_in
+i
].data
,argv
[cnt_in
+i
].len
);
1134 argv
[cnt_in
+i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[cnt_in
+i
].data
);
1138 DCFlushRange(argv
,((cnt_in
+cnt_io
)<<3));
1140 ret
= __ipc_syncrequest(req
);
1142 if(req
!=NULL
) __ipc_freereq(req
);
1147 s32
IOS_IoctlvAsync(s32 fd
,s32 ioctl
,s32 cnt_in
,s32 cnt_io
,ioctlv
*argv
,ipccallback ipc_cb
,void *usrdata
)
1150 struct _ipcreq
*req
;
1152 req
= __ipc_allocreq();
1153 if(req
==NULL
) return IPC_ENOMEM
;
1155 req
->cmd
= IOS_IOCTLV
;
1158 req
->usrdata
= usrdata
;
1161 req
->ioctlv
.ioctl
= ioctl
;
1162 req
->ioctlv
.argcin
= cnt_in
;
1163 req
->ioctlv
.argcio
= cnt_io
;
1164 req
->ioctlv
.argv
= (struct _ioctlv
*)MEM_VIRTUAL_TO_PHYSICAL(argv
);
1168 if(argv
[i
].data
!=NULL
&& argv
[i
].len
>0) {
1169 DCFlushRange(argv
[i
].data
,argv
[i
].len
);
1170 argv
[i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[i
].data
);
1177 if(argv
[cnt_in
+i
].data
!=NULL
&& argv
[cnt_in
+i
].len
>0) {
1178 DCFlushRange(argv
[cnt_in
+i
].data
,argv
[cnt_in
+i
].len
);
1179 argv
[cnt_in
+i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[cnt_in
+i
].data
);
1183 DCFlushRange(argv
,((cnt_in
+cnt_io
)<<3));
1185 return __ipc_asyncrequest(req
);
1188 s32
IOS_IoctlvFormat(s32 hId
,s32 fd
,s32 ioctl
,const char *format
,...)
1193 struct _ioctlv
*argv
;
1194 struct _ioctlvfmt_cbdata
*cbdata
;
1196 cbdata
= __lwp_wkspace_allocate(sizeof(struct _ioctlvfmt_cbdata
));
1197 if(cbdata
==NULL
) return IPC_ENOMEM
;
1199 memset(cbdata
,0,sizeof(struct _ioctlvfmt_cbdata
));
1201 va_start(args
,format
);
1202 ret
= __ios_ioctlvformat_parse(format
,args
,cbdata
,&cnt_in
,&cnt_io
,&argv
,hId
);
1205 __lwp_wkspace_free(cbdata
);
1209 ret
= IOS_Ioctlv(fd
,ioctl
,cnt_in
,cnt_io
,argv
);
1210 __ioctlvfmtCB(ret
,cbdata
);
1215 s32
IOS_IoctlvFormatAsync(s32 hId
,s32 fd
,s32 ioctl
,ipccallback usr_cb
,void *usr_data
,const char *format
,...)
1220 struct _ioctlv
*argv
;
1221 struct _ioctlvfmt_cbdata
*cbdata
;
1223 cbdata
= __lwp_wkspace_allocate(sizeof(struct _ioctlvfmt_cbdata
));
1224 if(cbdata
==NULL
) return IPC_ENOMEM
;
1226 memset(cbdata
,0,sizeof(struct _ioctlvfmt_cbdata
));
1228 va_start(args
,format
);
1229 ret
= __ios_ioctlvformat_parse(format
,args
,cbdata
,&cnt_in
,&cnt_io
,&argv
,hId
);
1232 __lwp_wkspace_free(cbdata
);
1236 cbdata
->user_cb
= usr_cb
;
1237 cbdata
->user_data
= usr_data
;
1238 return IOS_IoctlvAsync(fd
,ioctl
,cnt_in
,cnt_io
,argv
,__ioctlvfmtCB
,cbdata
);
1241 s32
IOS_IoctlvReboot(s32 fd
,s32 ioctl
,s32 cnt_in
,s32 cnt_io
,ioctlv
*argv
)
1244 struct _ipcreq
*req
;
1246 req
= __ipc_allocreq();
1247 if(req
==NULL
) return IPC_ENOMEM
;
1249 req
->cmd
= IOS_IOCTLV
;
1252 req
->relnch
= RELNCH_RELAUNCH
;
1254 req
->ioctlv
.ioctl
= ioctl
;
1255 req
->ioctlv
.argcin
= cnt_in
;
1256 req
->ioctlv
.argcio
= cnt_io
;
1257 req
->ioctlv
.argv
= (struct _ioctlv
*)MEM_VIRTUAL_TO_PHYSICAL(argv
);
1261 if(argv
[i
].data
!=NULL
&& argv
[i
].len
>0) {
1262 DCFlushRange(argv
[i
].data
,argv
[i
].len
);
1263 argv
[i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[i
].data
);
1270 if(argv
[cnt_in
+i
].data
!=NULL
&& argv
[cnt_in
+i
].len
>0) {
1271 DCFlushRange(argv
[cnt_in
+i
].data
,argv
[cnt_in
+i
].len
);
1272 argv
[cnt_in
+i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[cnt_in
+i
].data
);
1276 DCFlushRange(argv
,((cnt_in
+cnt_io
)<<3));
1278 ret
= __ipc_syncrequest(req
);
1280 if(req
!=NULL
) __ipc_freereq(req
);
1284 s32
IOS_IoctlvRebootBackground(s32 fd
,s32 ioctl
,s32 cnt_in
,s32 cnt_io
,ioctlv
*argv
)
1287 struct _ipcreq
*req
;
1289 req
= __ipc_allocreq();
1290 if(req
==NULL
) return IPC_ENOMEM
;
1292 req
->cmd
= IOS_IOCTLV
;
1296 req
->relnch
= RELNCH_BACKGROUND
|RELNCH_RELAUNCH
;
1298 req
->ioctlv
.ioctl
= ioctl
;
1299 req
->ioctlv
.argcin
= cnt_in
;
1300 req
->ioctlv
.argcio
= cnt_io
;
1301 req
->ioctlv
.argv
= (struct _ioctlv
*)MEM_VIRTUAL_TO_PHYSICAL(argv
);
1305 if(argv
[i
].data
!=NULL
&& argv
[i
].len
>0) {
1306 DCFlushRange(argv
[i
].data
,argv
[i
].len
);
1307 argv
[i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[i
].data
);
1314 if(argv
[cnt_in
+i
].data
!=NULL
&& argv
[cnt_in
+i
].len
>0) {
1315 DCFlushRange(argv
[cnt_in
+i
].data
,argv
[cnt_in
+i
].len
);
1316 argv
[cnt_in
+i
].data
= (void*)MEM_VIRTUAL_TO_PHYSICAL(argv
[cnt_in
+i
].data
);
1320 DCFlushRange(argv
,((cnt_in
+cnt_io
)<<3));
1322 ret
= __ipc_syncrequest(req
);
1324 if(req
!=NULL
) __ipc_freereq(req
);