/*-------------------------------------------------------------

ipc.c -- Interprocess Communication with Starlet

Michael Wiedenbauer (shagkur)
Dave Murphy (WinterMute)

This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any
damages arising from the use of this software.

Permission is granted to anyone to use this software for any
purpose, including commercial applications, and to alter it and
redistribute it freely, subject to the following restrictions:

1. The origin of this software must not be misrepresented; you
must not claim that you wrote the original software. If you use
this software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.

2. Altered source versions must be plainly marked as such, and
must not be misrepresented as being the original software.

3. This notice may not be removed or altered from any source
distribution.

-------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stdarg.h>

#include "asm.h"
#include "processor.h"
#include "lwp.h"
#include "irq.h"
#include "ipc.h"
#include "cache.h"
#include "lwp_heap.h"
#include "lwp_wkspace.h"
#define IPC_REQ_MAGIC 0x4C4F4743

#define IPC_HEAP_SIZE 4096
#define IPC_REQUESTSIZE 64
#define IPC_NUMHEAPS 8

#define IOS_MAXFMT_PARAMS 32

#define IOS_OPEN 0x01
#define IOS_CLOSE 0x02
#define IOS_READ 0x03
#define IOS_WRITE 0x04
#define IOS_SEEK 0x05
#define IOS_IOCTL 0x06
#define IOS_IOCTLV 0x07

#define RELNCH_RELAUNCH 1
#define RELNCH_BACKGROUND 2
struct _ipcreq
{			//ipc struct size: 32
	u32 cmd;			//0
	s32 result;			//4
	union {				//8
		s32 fd;
		u32 req_cmd;
	};
	union {				//12
		struct { char *filepath; u32 mode; } open;
		struct { void *data; u32 len; } read, write;
		struct { s32 where; s32 whence; } seek;
		struct { u32 ioctl; void *buffer_in; u32 len_in; void *buffer_io; u32 len_io; } ioctl;
		struct { u32 ioctl; u32 argcin; u32 argcio; struct _ioctlv *argv; } ioctlv;
		u32 args[5];
	};
	ipccallback cb;		//32
	void *usrdata;		//36
	u32 relnch;			//40
	lwpq_t syncqueue;	//44
	u32 magic;			//48 - used to avoid spurious responses, like from zelda.
	u8 pad1[12];		//52 - 60
};
struct _ipcreqres
{
	u32 cnt_sent;
	u32 cnt_queue;
	u32 req_send_no;
	u32 req_queue_no;
	struct _ipcreq *reqs[16];
};

struct _ipcheap
{
	void *membase;
	s32 size;
	heap_cntrl heap;
};
struct _ioctlvfmt_bufent
{
	void *ipc_buf;
	void *io_buf;
	s32 copy_len;
};

struct _ioctlvfmt_cbdata
{
	ipccallback user_cb;
	void *user_data;
	s32 num_bufs;
	s32 hId;
	struct _ioctlvfmt_bufent *bufs;
};
static s32 _ipc_hid = -1;
static s32 _ipc_mailboxack = 1;
static u32 _ipc_relnchFl = 0;
static u32 _ipc_initialized = 0;
static u32 _ipc_clntinitialized = 0;
static u64 _ipc_spuriousresponsecnt = 0;
static struct _ipcreq *_ipc_relnchRpc = NULL;

static void *_ipc_bufferlo = NULL;
static void *_ipc_bufferhi = NULL;
static void *_ipc_currbufferlo = NULL;
static void *_ipc_currbufferhi = NULL;

static struct _ipcreqres _ipc_responses;

static struct _ipcheap _ipc_heaps[IPC_NUMHEAPS] =
{
	{NULL, 0, {}} // all other elements should be inited to zero, says C standard, so this should do
};
static vu32 *_ipcReg = (vu32*)0xCD000000;

extern void __MaskIrq(u32 nMask);
extern void __UnmaskIrq(u32 nMask);
extern void* __SYS_GetIPCBufferLo();
extern void* __SYS_GetIPCBufferHi();
// IPC registers are indexed by 32-bit word (0 = PPCMSG, 1 = PPCCTRL, 2 = ARMMSG);
// ACR registers are addressed by byte offset, hence the >>2 below.
static __inline__ u32 IPC_ReadReg(u32 reg)
{
	return _ipcReg[reg];
}

static __inline__ void IPC_WriteReg(u32 reg,u32 val)
{
	_ipcReg[reg] = val;
}

static __inline__ void ACR_WriteReg(u32 reg,u32 val)
{
	_ipcReg[reg>>2] = val;
}
static __inline__ void* __ipc_allocreq()
{
	return iosAlloc(_ipc_hid,IPC_REQUESTSIZE);
}

static __inline__ void __ipc_freereq(void *ptr)
{
	iosFree(_ipc_hid,ptr);
}
static s32 __ioctlvfmtCB(s32 result,void *userdata)
{
	ipccallback user_cb;
	void *user_data;
	struct _ioctlvfmt_cbdata *cbdata;
	struct _ioctlvfmt_bufent *pbuf;

	cbdata = (struct _ioctlvfmt_cbdata*)userdata;

	// deal with data buffers
	pbuf = cbdata->bufs;
	while(cbdata->num_bufs--) {
		// copy data if needed
		if(pbuf->io_buf && pbuf->copy_len)
			memcpy(pbuf->io_buf, pbuf->ipc_buf, pbuf->copy_len);
		// then free the buffer
		iosFree(cbdata->hId, pbuf->ipc_buf);
		pbuf++;
	}

	user_cb = cbdata->user_cb;
	user_data = cbdata->user_data;

	// free the buffer entry list
	__lwp_wkspace_free(cbdata->bufs);

	// free callback data
	__lwp_wkspace_free(cbdata);

	// call the user callback, if any
	if(user_cb)
		return user_cb(result, user_data);

	return result;
}
static s32 __ipc_queuerequest(struct _ipcreq *req)
{
	u32 cnt;
	u32 level;
#ifdef IPC_DEBUG
	printf("__ipc_queuerequest(0x%p)\n",req);
#endif
	_CPU_ISR_Disable(level);

	cnt = (_ipc_responses.cnt_queue - _ipc_responses.cnt_sent);
	if(cnt>=16) {
		_CPU_ISR_Restore(level);
		return IPC_EQUEUEFULL;
	}

	_ipc_responses.reqs[_ipc_responses.req_queue_no] = req;
	_ipc_responses.req_queue_no = ((_ipc_responses.req_queue_no+1)&0x0f);
	_ipc_responses.cnt_queue++;

	_CPU_ISR_Restore(level);
	return IPC_OK;
}
static s32 __ipc_syncqueuerequest(struct _ipcreq *req)
{
	u32 cnt;
#ifdef IPC_DEBUG
	printf("__ipc_syncqueuerequest(0x%p)\n",req);
#endif
	cnt = (_ipc_responses.cnt_queue - _ipc_responses.cnt_sent);
	if(cnt>=16) {
		return IPC_EQUEUEFULL;
	}

	_ipc_responses.reqs[_ipc_responses.req_queue_no] = req;
	_ipc_responses.req_queue_no = ((_ipc_responses.req_queue_no+1)&0x0f);
	_ipc_responses.cnt_queue++;

	return IPC_OK;
}
static void __ipc_sendrequest()
{
	u32 cnt;
	u32 ipc_send;
	struct _ipcreq *req;
#ifdef IPC_DEBUG
	printf("__ipc_sendrequest()\n");
#endif
	cnt = (_ipc_responses.cnt_queue - _ipc_responses.cnt_sent);
	if(cnt>0) {
		req = _ipc_responses.reqs[_ipc_responses.req_send_no];

		req->magic = IPC_REQ_MAGIC;
		if(req->relnch&RELNCH_RELAUNCH) {
			_ipc_relnchFl = 1;
			_ipc_relnchRpc = req;
			if(!(req->relnch&RELNCH_BACKGROUND))
				_ipc_mailboxack--;
		}
		DCFlushRange(req,sizeof(struct _ipcreq));

		IPC_WriteReg(0,MEM_VIRTUAL_TO_PHYSICAL(req));
		_ipc_responses.req_send_no = ((_ipc_responses.req_send_no+1)&0x0f);
		_ipc_responses.cnt_sent++;

		_ipc_mailboxack--;
		ipc_send = ((IPC_ReadReg(1)&0x30)|0x01);
		IPC_WriteReg(1,ipc_send);
	}
}
static void __ipc_replyhandler()
{
	u32 cnt,ipc_ack;
	struct _ipcreq *req = NULL;
	ioctlv *v = NULL;
#ifdef IPC_DEBUG
	printf("__ipc_replyhandler()\n");
#endif
	req = (struct _ipcreq*)IPC_ReadReg(2);
	if(req==NULL) return;

	ipc_ack = ((IPC_ReadReg(1)&0x30)|0x04);
	IPC_WriteReg(1,ipc_ack);
	ACR_WriteReg(48,0x40000000);

	req = MEM_PHYSICAL_TO_K0(req);
	DCInvalidateRange(req,32);

	if(req->magic==IPC_REQ_MAGIC) {
#ifdef IPC_DEBUG
		printf("IPC res: cmd %08x rcmd %08x res %08x\n",req->cmd,req->req_cmd,req->result);
#endif
		if(req->req_cmd==IOS_READ) {
			if(req->read.data!=NULL) {
				req->read.data = MEM_PHYSICAL_TO_K0(req->read.data);
				if(req->result>0) DCInvalidateRange(req->read.data,req->result);
			}
		} else if(req->req_cmd==IOS_IOCTL) {
			if(req->ioctl.buffer_io!=NULL) {
				req->ioctl.buffer_io = MEM_PHYSICAL_TO_K0(req->ioctl.buffer_io);
				DCInvalidateRange(req->ioctl.buffer_io,req->ioctl.len_io);
			}
			DCInvalidateRange(req->ioctl.buffer_in,req->ioctl.len_in);
		} else if(req->req_cmd==IOS_IOCTLV) {
			if(req->ioctlv.argv!=NULL) {
				req->ioctlv.argv = MEM_PHYSICAL_TO_K0(req->ioctlv.argv);
				DCInvalidateRange(req->ioctlv.argv,((req->ioctlv.argcin+req->ioctlv.argcio)*sizeof(struct _ioctlv)));
			}

			cnt = 0;
			v = (ioctlv*)req->ioctlv.argv;
			while(cnt<(req->ioctlv.argcin+req->ioctlv.argcio)) {
				if(v[cnt].data!=NULL) {
					v[cnt].data = MEM_PHYSICAL_TO_K0(v[cnt].data);
					DCInvalidateRange(v[cnt].data,v[cnt].len);
				}
				cnt++;
			}
			if(_ipc_relnchFl && _ipc_relnchRpc==req) {
				_ipc_relnchFl = 0;
				if(_ipc_mailboxack<1) _ipc_mailboxack++;
			}
		}

		if(req->cb!=NULL) {
			req->cb(req->result,req->usrdata);
			__ipc_freereq(req);
		} else
			LWP_ThreadSignal(req->syncqueue);
	} else {
		// NOTE: we really want to find out if this ever happens
		// and take steps to prevent it beforehand (because it will
		// clobber memory, among other things). I suggest leaving this in
		// even in non-DEBUG mode. Maybe even cause a system halt.
		// It is the responsibility of the loader to clear these things,
		// but we want to find out if they happen so loaders can be fixed.
		printf("Received unknown IPC response (magic %08x):\n", req->magic);
		printf(" CMD %08x RES %08x REQCMD %08x\n", req->cmd, req->result, req->req_cmd);
		printf(" Args: %08x %08x %08x %08x %08x\n", req->args[0], req->args[1], req->args[2], req->args[3], req->args[4]);
		printf(" CB %08x DATA %08x REL %08x QUEUE %08x\n", (u32)req->cb, (u32)req->usrdata, req->relnch, (u32)req->syncqueue);
		_ipc_spuriousresponsecnt++;
	}
	ipc_ack = ((IPC_ReadReg(1)&0x30)|0x08);
	IPC_WriteReg(1,ipc_ack);
}
static void __ipc_ackhandler()
{
	u32 ipc_ack;
#ifdef IPC_DEBUG
	printf("__ipc_ackhandler()\n");
#endif
	ipc_ack = ((IPC_ReadReg(1)&0x30)|0x02);
	IPC_WriteReg(1,ipc_ack);
	ACR_WriteReg(48,0x40000000);

	if(_ipc_mailboxack<1) _ipc_mailboxack++;
	if(_ipc_mailboxack>0) {
		if(_ipc_relnchFl) {
			_ipc_relnchRpc->result = 0;
			_ipc_relnchFl = 0;

			LWP_ThreadSignal(_ipc_relnchRpc->syncqueue);

			ipc_ack = ((IPC_ReadReg(1)&0x30)|0x08);
			IPC_WriteReg(1,ipc_ack);
		}
		__ipc_sendrequest();
	}
}
static void __ipc_interrupthandler(u32 irq,void *ctx)
{
	u32 ipc_int;
#ifdef IPC_DEBUG
	printf("__ipc_interrupthandler(%d)\n",irq);
#endif
	ipc_int = IPC_ReadReg(1);
	if((ipc_int&0x0014)==0x0014) __ipc_replyhandler();

	ipc_int = IPC_ReadReg(1);
	if((ipc_int&0x0022)==0x0022) __ipc_ackhandler();
}
static s32 __ios_ioctlvformat_parse(const char *format,va_list args,struct _ioctlvfmt_cbdata *cbdata,s32 *cnt_in,s32 *cnt_io,struct _ioctlv **argv,s32 hId)
{
	s32 maxbufs,len,i;
	s32 type;
	void *pdata;
	void *iodata;
	char *ps;
	struct _ioctlv *argp;
	struct _ioctlvfmt_bufent *bufp;

	if(hId == IPC_HEAP) hId = _ipc_hid;
	if(hId < 0) return IPC_EINVAL;

	maxbufs = strnlen(format,IOS_MAXFMT_PARAMS);
	if(maxbufs>=IOS_MAXFMT_PARAMS) return IPC_EINVAL;

	cbdata->hId = hId;
	cbdata->bufs = __lwp_wkspace_allocate((sizeof(struct _ioctlvfmt_bufent)*(maxbufs+1)));
	if(cbdata->bufs==NULL) return IPC_ENOMEM;

	argp = iosAlloc(hId,(sizeof(struct _ioctlv)*(maxbufs+1)));
	if(argp==NULL) {
		__lwp_wkspace_free(cbdata->bufs);
		return IPC_ENOMEM;
	}

	*argv = argp;
	bufp = cbdata->bufs;
	memset(argp,0,(sizeof(struct _ioctlv)*(maxbufs+1)));
	memset(bufp,0,(sizeof(struct _ioctlvfmt_bufent)*(maxbufs+1)));

	// the first buffer entry tracks the ioctlv array itself so it is freed with the rest
	cbdata->num_bufs = 1;
	bufp->ipc_buf = argp;
	bufp++;

	// ---- input parameters: each format character selects one of the groups below ----
	type = tolower(*format);

	// 8-bit input scalar, copied into IPC heap memory
	pdata = iosAlloc(hId,sizeof(u8));
	*(u8*)pdata = va_arg(args,u32);
	argp->len = sizeof(u8);
	bufp->ipc_buf = pdata;

	// 16-bit input scalar
	pdata = iosAlloc(hId,sizeof(u16));
	*(u16*)pdata = va_arg(args,u32);
	argp->len = sizeof(u16);
	bufp->ipc_buf = pdata;

	// 32-bit input scalar
	pdata = iosAlloc(hId,sizeof(u32));
	*(u32*)pdata = va_arg(args,u32);
	argp->len = sizeof(u32);
	bufp->ipc_buf = pdata;

	// 64-bit input scalar
	pdata = iosAlloc(hId,sizeof(u64));
	*(u64*)pdata = va_arg(args,u64);
	argp->len = sizeof(u64);
	bufp->ipc_buf = pdata;

	// raw data pointer plus length, passed through as-is
	argp->data = va_arg(args, void*);
	argp->len = va_arg(args, u32);

	// string argument, copied (with terminator) into IPC heap memory
	ps = va_arg(args, char*);
	len = strnlen(ps,256);
	pdata = iosAlloc(hId,(len+1));
	memcpy(pdata,ps,(len+1));
	bufp->ipc_buf = pdata;

	goto parse_io_params;

parse_io_params:
	// ---- io parameters: current value copied in now, result copied back on completion ----
	type = tolower(*format);

	// 8-bit io scalar
	pdata = iosAlloc(hId,sizeof(u8));
	iodata = va_arg(args,u8*);
	*(u8*)pdata = *(u8*)iodata;
	argp->len = sizeof(u8);
	bufp->ipc_buf = pdata;
	bufp->io_buf = iodata;
	bufp->copy_len = sizeof(u8);

	// 16-bit io scalar
	pdata = iosAlloc(hId,sizeof(u16));
	iodata = va_arg(args,u16*);
	*(u16*)pdata = *(u16*)iodata;
	argp->len = sizeof(u16);
	bufp->ipc_buf = pdata;
	bufp->io_buf = iodata;
	bufp->copy_len = sizeof(u16);

	// 32-bit io scalar
	pdata = iosAlloc(hId,sizeof(u32));
	iodata = va_arg(args,u32*);
	*(u32*)pdata = *(u32*)iodata;
	argp->len = sizeof(u32);
	bufp->ipc_buf = pdata;
	bufp->io_buf = iodata;
	bufp->copy_len = sizeof(u32);

	// 64-bit io scalar
	pdata = iosAlloc(hId,sizeof(u64));
	iodata = va_arg(args,u64*);
	*(u64*)pdata = *(u64*)iodata;
	argp->len = sizeof(u64);
	bufp->ipc_buf = pdata;
	bufp->io_buf = iodata;
	bufp->copy_len = sizeof(u64);

	// raw io data pointer plus length, passed through as-is
	argp->data = va_arg(args, void*);
	argp->len = va_arg(args, u32);

	// error path: release everything allocated so far
	for(i=0;i<cbdata->num_bufs;i++) {
		if(cbdata->bufs[i].ipc_buf!=NULL) iosFree(hId,cbdata->bufs[i].ipc_buf);
	}
	__lwp_wkspace_free(cbdata->bufs);
	return IPC_ENOMEM;
}
static s32 __ipc_asyncrequest(struct _ipcreq *req)
{
	s32 ret;
	u32 level;

	ret = __ipc_queuerequest(req);
	if(ret) __ipc_freereq(req);
	else {
		_CPU_ISR_Disable(level);
		if(_ipc_mailboxack>0) __ipc_sendrequest();
		_CPU_ISR_Restore(level);
	}
	return ret;
}
static s32 __ipc_syncrequest(struct _ipcreq *req)
{
	s32 ret;
	u32 level;

	LWP_InitQueue(&req->syncqueue);

	_CPU_ISR_Disable(level);
	ret = __ipc_syncqueuerequest(req);
	if(ret==0) {
		if(_ipc_mailboxack>0) __ipc_sendrequest();
		LWP_ThreadSleep(req->syncqueue);
		ret = req->result;
	}
	_CPU_ISR_Restore(level);

	LWP_CloseQueue(req->syncqueue);
	return ret;
}
s32 iosCreateHeap(s32 size)
{
	s32 i,ret;
	s32 free;
	u32 level;
	u32 ipclo,ipchi;
#ifdef IPC_DEBUG
	printf("iosCreateHeap(%d)\n",size);
#endif
	_CPU_ISR_Disable(level);

	i = 0;
	while(i<IPC_NUMHEAPS) {
		if(_ipc_heaps[i].membase==NULL) break;
		i++;
	}
	if(i>=IPC_NUMHEAPS) {
		_CPU_ISR_Restore(level);
		return IPC_ENOHEAP;
	}

	ipclo = (((u32)IPC_GetBufferLo()+0x1f)&~0x1f);
	ipchi = (u32)IPC_GetBufferHi();
	free = (ipchi - (ipclo + size));
	if(free<0) {
		_CPU_ISR_Restore(level);
		return IPC_ENOMEM;
	}

	_ipc_heaps[i].membase = (void*)ipclo;
	_ipc_heaps[i].size = size;

	ret = __lwp_heap_init(&_ipc_heaps[i].heap,(void*)ipclo,size,PPC_CACHE_ALIGNMENT);
	if(ret<=0) {
		_CPU_ISR_Restore(level);
		return IPC_ENOMEM;
	}

	IPC_SetBufferLo((void*)(ipclo+size));
	_CPU_ISR_Restore(level);
	return i;
}
s32 iosDestroyHeap(s32 hid)
{
	s32 ret = 0;
	u32 level;
#ifdef IPC_DEBUG
	printf("iosDestroyHeap(%d)\n",hid);
#endif
	_CPU_ISR_Disable(level);

	if(hid>=0 && hid<IPC_NUMHEAPS) {
		if(_ipc_heaps[hid].membase!=NULL) {
			_ipc_heaps[hid].membase = NULL;
			_ipc_heaps[hid].size = 0;
		}
	} else
		ret = IPC_EINVAL;

	_CPU_ISR_Restore(level);
	return ret;
}
void* iosAlloc(s32 hid,s32 size)
{
#ifdef IPC_DEBUG
	printf("iosAlloc(%d,%d)\n",hid,size);
#endif
	if(hid<0 || hid>=IPC_NUMHEAPS || size<=0) return NULL;
	return __lwp_heap_allocate(&_ipc_heaps[hid].heap,size);
}
void iosFree(s32 hid,void *ptr)
{
#ifdef IPC_DEBUG
	printf("iosFree(%d,0x%p)\n",hid,ptr);
#endif
	if(hid<0 || hid>=IPC_NUMHEAPS || ptr==NULL) return;
	__lwp_heap_free(&_ipc_heaps[hid].heap,ptr);
}
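
/* Example (illustrative sketch, not part of the driver): carving a private
   heap out of the IPC arena and using it for a cache-line-aligned buffer
   that IOS can safely DMA to. The 4096/64 sizes are arbitrary assumptions.

	static void example_ipc_heap(void)
	{
		s32 hid = iosCreateHeap(4096);     // negative return = error (no slot / no memory)
		if(hid<0) return;

		void *buf = iosAlloc(hid,64);      // aligned to PPC_CACHE_ALIGNMENT by the heap
		if(buf!=NULL) {
			// ... hand 'buf' to an IOS request here ...
			iosFree(hid,buf);
		}
		// iosDestroyHeap(hid) only releases the slot; the arena space stays reserved
	}
*/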
void* IPC_GetBufferLo()
{
	return _ipc_currbufferlo;
}

void* IPC_GetBufferHi()
{
	return _ipc_currbufferhi;
}

void IPC_SetBufferLo(void *bufferlo)
{
	if(_ipc_bufferlo<=bufferlo) _ipc_currbufferlo = bufferlo;
}

void IPC_SetBufferHi(void *bufferhi)
{
	if(bufferhi<=_ipc_bufferhi) _ipc_currbufferhi = bufferhi;
}
void __IPC_Init(void)
{
	if(!_ipc_initialized) {
		_ipc_bufferlo = _ipc_currbufferlo = __SYS_GetIPCBufferLo();
		_ipc_bufferhi = _ipc_currbufferhi = __SYS_GetIPCBufferHi();
		_ipc_initialized = 1;
	}
}
u32 __IPC_ClntInit(void)
{
	if(!_ipc_clntinitialized) {
		_ipc_clntinitialized = 1;

		__IPC_Init();

		_ipc_hid = iosCreateHeap(IPC_HEAP_SIZE);
		IRQ_Request(IRQ_PI_ACR,__ipc_interrupthandler,NULL);
		__UnmaskIrq(IM_PI_ACR);
	}
	return IPC_OK;
}
void __IPC_Reinitialize(void)
{
	u32 level;

	_CPU_ISR_Disable(level);

	_ipc_mailboxack = 1;
	_ipc_relnchFl = 0;
	_ipc_relnchRpc = NULL;

	_ipc_responses.req_queue_no = 0;
	_ipc_responses.cnt_queue = 0;
	_ipc_responses.req_send_no = 0;
	_ipc_responses.cnt_sent = 0;

	_CPU_ISR_Restore(level);
}
s32 IOS_Open(const char *filepath,u32 mode)
{
	s32 ret;
	struct _ipcreq *req;

	if(filepath==NULL) return IPC_EINVAL;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_OPEN;
	req->result = 0;
	req->cb = NULL;
	req->relnch = 0;

	DCFlushRange((void*)filepath,strnlen(filepath,IPC_MAXPATH_LEN) + 1);

	req->open.filepath = (char*)MEM_VIRTUAL_TO_PHYSICAL(filepath);
	req->open.mode = mode;

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
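
/* Example (illustrative sketch, not part of the driver): synchronous open and
   close of an IOS device node. "/dev/stm/immediate" is only an assumed device
   path; mode 0 requests no particular read/write access.

	static void example_open_close(void)
	{
		s32 fd = IOS_Open("/dev/stm/immediate",0);
		if(fd<0) return;                  // negative IPC/IOS error code
		// ... IOS_Ioctl()/IOS_Read()/IOS_Write() against fd ...
		IOS_Close(fd);
	}
*/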
s32 IOS_OpenAsync(const char *filepath,u32 mode,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_OPEN;
	req->result = 0;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	DCFlushRange((void*)filepath,strnlen(filepath,IPC_MAXPATH_LEN) + 1);

	req->open.filepath = (char*)MEM_VIRTUAL_TO_PHYSICAL(filepath);
	req->open.mode = mode;

	return __ipc_asyncrequest(req);
}
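
/* Example (illustrative sketch, not part of the driver): the *Async variants
   return immediately and deliver the result to the callback from the IPC
   interrupt handler, so callbacks should stay short and IRQ-safe. The device
   path and callback body are assumptions for illustration only.

	static s32 example_open_cb(s32 result,void *usrdata)
	{
		*(s32*)usrdata = result;          // result is the new fd, or a negative error
		return 0;
	}

	static void example_open_async(s32 *fd_out)
	{
		*fd_out = -1;
		IOS_OpenAsync("/dev/stm/immediate",0,example_open_cb,fd_out);
	}
*/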
s32 IOS_Close(s32 fd)
{
	s32 ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_CLOSE;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
s32 IOS_CloseAsync(s32 fd,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_CLOSE;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	return __ipc_asyncrequest(req);
}
s32 IOS_Read(s32 fd,void *buf,s32 len)
{
	s32 ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_READ;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	DCInvalidateRange(buf,len);
	req->read.data = (void*)MEM_VIRTUAL_TO_PHYSICAL(buf);
	req->read.len = len;

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
s32 IOS_ReadAsync(s32 fd,void *buf,s32 len,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_READ;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	DCInvalidateRange(buf,len);
	req->read.data = (void*)MEM_VIRTUAL_TO_PHYSICAL(buf);
	req->read.len = len;

	return __ipc_asyncrequest(req);
}
s32 IOS_Write(s32 fd,const void *buf,s32 len)
{
	s32 ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_WRITE;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	DCFlushRange((void*)buf,len);
	req->write.data = (void*)MEM_VIRTUAL_TO_PHYSICAL(buf);
	req->write.len = len;

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
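
/* Example (illustrative sketch, not part of the driver): reading and writing
   through an open fd. Buffers handed to IOS should be 32-byte aligned, since
   the driver flushes/invalidates whole cache lines around them; ATTRIBUTE_ALIGN
   is the usual libogc way to get that. The fd and transfer size are assumptions.

	static u8 xfer_buf[64] ATTRIBUTE_ALIGN(32);

	static void example_read_write(s32 fd)
	{
		s32 res = IOS_Read(fd,xfer_buf,sizeof(xfer_buf));   // bytes read, or negative error
		if(res>0)
			IOS_Write(fd,xfer_buf,res);                     // write back what was read
	}
*/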
s32 IOS_WriteAsync(s32 fd,const void *buf,s32 len,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_WRITE;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	DCFlushRange((void*)buf,len);
	req->write.data = (void*)MEM_VIRTUAL_TO_PHYSICAL(buf);
	req->write.len = len;

	return __ipc_asyncrequest(req);
}
s32 IOS_Seek(s32 fd,s32 where,s32 whence)
{
	s32 ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_SEEK;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	req->seek.where = where;
	req->seek.whence = whence;

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
s32 IOS_SeekAsync(s32 fd,s32 where,s32 whence,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_SEEK;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	req->seek.where = where;
	req->seek.whence = whence;

	return __ipc_asyncrequest(req);
}
s32 IOS_Ioctl(s32 fd,s32 ioctl,void *buffer_in,s32 len_in,void *buffer_io,s32 len_io)
{
	s32 ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTL;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	req->ioctl.ioctl = ioctl;
	req->ioctl.buffer_in = (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_in);
	req->ioctl.len_in = len_in;
	req->ioctl.buffer_io = (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_io);
	req->ioctl.len_io = len_io;

	DCFlushRange(buffer_in,len_in);
	DCFlushRange(buffer_io,len_io);

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
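
/* Example (illustrative sketch, not part of the driver): a plain ioctl with one
   input and one output buffer. The ioctl number 0x1234 and the buffer layout
   are made up; real values come from the IOS module behind the fd.

	static u32 ioctl_in[8]  ATTRIBUTE_ALIGN(32);
	static u32 ioctl_out[8] ATTRIBUTE_ALIGN(32);

	static s32 example_ioctl(s32 fd)
	{
		ioctl_in[0] = 1;
		return IOS_Ioctl(fd,0x1234,ioctl_in,sizeof(ioctl_in),ioctl_out,sizeof(ioctl_out));
	}
*/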
s32 IOS_IoctlAsync(s32 fd,s32 ioctl,void *buffer_in,s32 len_in,void *buffer_io,s32 len_io,ipccallback ipc_cb,void *usrdata)
{
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTL;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	req->ioctl.ioctl = ioctl;
	req->ioctl.buffer_in = (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_in);
	req->ioctl.len_in = len_in;
	req->ioctl.buffer_io = (void*)MEM_VIRTUAL_TO_PHYSICAL(buffer_io);
	req->ioctl.len_io = len_io;

	DCFlushRange(buffer_in,len_in);
	DCFlushRange(buffer_io,len_io);

	return __ipc_asyncrequest(req);
}
s32 IOS_Ioctlv(s32 fd,s32 ioctl,s32 cnt_in,s32 cnt_io,ioctlv *argv)
{
	s32 i,ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTLV;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = 0;

	req->ioctlv.ioctl = ioctl;
	req->ioctlv.argcin = cnt_in;
	req->ioctlv.argcio = cnt_io;
	req->ioctlv.argv = (struct _ioctlv*)MEM_VIRTUAL_TO_PHYSICAL(argv);

	i = 0;
	while(i<cnt_in) {
		if(argv[i].data!=NULL && argv[i].len>0) {
			DCFlushRange(argv[i].data,argv[i].len);
			argv[i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[i].data);
		}
		i++;
	}

	i = 0;
	while(i<cnt_io) {
		if(argv[cnt_in+i].data!=NULL && argv[cnt_in+i].len>0) {
			DCFlushRange(argv[cnt_in+i].data,argv[cnt_in+i].len);
			argv[cnt_in+i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[cnt_in+i].data);
		}
		i++;
	}
	DCFlushRange(argv,((cnt_in+cnt_io)<<3));

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
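
/* Example (illustrative sketch, not part of the driver): a vectored ioctl with
   two input vectors and one io (output) vector. The ioctlv array and all data
   buffers must stay valid until the call completes; the command number and
   payloads here are assumptions.

	static ioctlv vec[3]   ATTRIBUTE_ALIGN(32);
	static u32 arg0        ATTRIBUTE_ALIGN(32);
	static u32 arg1        ATTRIBUTE_ALIGN(32);
	static u8  reply[32]   ATTRIBUTE_ALIGN(32);

	static s32 example_ioctlv(s32 fd)
	{
		vec[0].data = &arg0; vec[0].len = sizeof(arg0);
		vec[1].data = &arg1; vec[1].len = sizeof(arg1);
		vec[2].data = reply; vec[2].len = sizeof(reply);
		return IOS_Ioctlv(fd,0x05,2,1,vec);   // 2 in-vectors, 1 io-vector
	}
*/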
s32 IOS_IoctlvAsync(s32 fd,s32 ioctl,s32 cnt_in,s32 cnt_io,ioctlv *argv,ipccallback ipc_cb,void *usrdata)
{
	s32 i;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTLV;
	req->result = 0;
	req->fd = fd;
	req->cb = ipc_cb;
	req->usrdata = usrdata;
	req->relnch = 0;

	req->ioctlv.ioctl = ioctl;
	req->ioctlv.argcin = cnt_in;
	req->ioctlv.argcio = cnt_io;
	req->ioctlv.argv = (struct _ioctlv*)MEM_VIRTUAL_TO_PHYSICAL(argv);

	i = 0;
	while(i<cnt_in) {
		if(argv[i].data!=NULL && argv[i].len>0) {
			DCFlushRange(argv[i].data,argv[i].len);
			argv[i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[i].data);
		}
		i++;
	}

	i = 0;
	while(i<cnt_io) {
		if(argv[cnt_in+i].data!=NULL && argv[cnt_in+i].len>0) {
			DCFlushRange(argv[cnt_in+i].data,argv[cnt_in+i].len);
			argv[cnt_in+i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[cnt_in+i].data);
		}
		i++;
	}
	DCFlushRange(argv,((cnt_in+cnt_io)<<3));

	return __ipc_asyncrequest(req);
}
s32 IOS_IoctlvFormat(s32 hId,s32 fd,s32 ioctl,const char *format,...)
{
	s32 ret,cnt_in,cnt_io;
	struct _ioctlv *argv;
	struct _ioctlvfmt_cbdata *cbdata;
	va_list args;

	cbdata = __lwp_wkspace_allocate(sizeof(struct _ioctlvfmt_cbdata));
	if(cbdata==NULL) return IPC_ENOMEM;

	memset(cbdata,0,sizeof(struct _ioctlvfmt_cbdata));

	va_start(args,format);
	ret = __ios_ioctlvformat_parse(format,args,cbdata,&cnt_in,&cnt_io,&argv,hId);
	va_end(args);
	if(ret<0) {
		__lwp_wkspace_free(cbdata);
		return ret;
	}

	ret = IOS_Ioctlv(fd,ioctl,cnt_in,cnt_io,argv);
	__ioctlvfmtCB(ret,cbdata);

	return ret;
}
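
/* Example (illustrative sketch, not part of the driver): the format-string
   variant marshals scalar arguments into IPC heap memory for you, assuming the
   usual libogc convention: 'b'/'h'/'i'/'q' for 8/16/32/64-bit scalars, 's' for
   strings, 'd' for raw data pointer + length, with ':' separating input
   parameters from io parameters. The ioctl number here is made up.

	static s32 example_ioctlv_format(s32 fd)
	{
		u32 out = 0;
		// one u32 input, one u32 io result, using the driver's internal heap
		s32 res = IOS_IoctlvFormat(IPC_HEAP,fd,0x03,"i:i",0x80000001,&out);
		return (res<0) ? res : (s32)out;
	}
*/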
s32 IOS_IoctlvFormatAsync(s32 hId,s32 fd,s32 ioctl,ipccallback usr_cb,void *usr_data,const char *format,...)
{
	s32 ret,cnt_in,cnt_io;
	struct _ioctlv *argv;
	struct _ioctlvfmt_cbdata *cbdata;
	va_list args;

	cbdata = __lwp_wkspace_allocate(sizeof(struct _ioctlvfmt_cbdata));
	if(cbdata==NULL) return IPC_ENOMEM;

	memset(cbdata,0,sizeof(struct _ioctlvfmt_cbdata));

	va_start(args,format);
	ret = __ios_ioctlvformat_parse(format,args,cbdata,&cnt_in,&cnt_io,&argv,hId);
	va_end(args);
	if(ret<0) {
		__lwp_wkspace_free(cbdata);
		return ret;
	}

	cbdata->user_cb = usr_cb;
	cbdata->user_data = usr_data;
	return IOS_IoctlvAsync(fd,ioctl,cnt_in,cnt_io,argv,__ioctlvfmtCB,cbdata);
}
s32 IOS_IoctlvReboot(s32 fd,s32 ioctl,s32 cnt_in,s32 cnt_io,ioctlv *argv)
{
	s32 i,ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTLV;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = RELNCH_RELAUNCH;

	req->ioctlv.ioctl = ioctl;
	req->ioctlv.argcin = cnt_in;
	req->ioctlv.argcio = cnt_io;
	req->ioctlv.argv = (struct _ioctlv*)MEM_VIRTUAL_TO_PHYSICAL(argv);

	i = 0;
	while(i<cnt_in) {
		if(argv[i].data!=NULL && argv[i].len>0) {
			DCFlushRange(argv[i].data,argv[i].len);
			argv[i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[i].data);
		}
		i++;
	}

	i = 0;
	while(i<cnt_io) {
		if(argv[cnt_in+i].data!=NULL && argv[cnt_in+i].len>0) {
			DCFlushRange(argv[cnt_in+i].data,argv[cnt_in+i].len);
			argv[cnt_in+i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[cnt_in+i].data);
		}
		i++;
	}
	DCFlushRange(argv,((cnt_in+cnt_io)<<3));

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}
s32 IOS_IoctlvRebootBackground(s32 fd,s32 ioctl,s32 cnt_in,s32 cnt_io,ioctlv *argv)
{
	s32 i,ret;
	struct _ipcreq *req;

	req = __ipc_allocreq();
	if(req==NULL) return IPC_ENOMEM;

	req->cmd = IOS_IOCTLV;
	req->result = 0;
	req->fd = fd;
	req->cb = NULL;
	req->relnch = RELNCH_BACKGROUND|RELNCH_RELAUNCH;

	req->ioctlv.ioctl = ioctl;
	req->ioctlv.argcin = cnt_in;
	req->ioctlv.argcio = cnt_io;
	req->ioctlv.argv = (struct _ioctlv*)MEM_VIRTUAL_TO_PHYSICAL(argv);

	i = 0;
	while(i<cnt_in) {
		if(argv[i].data!=NULL && argv[i].len>0) {
			DCFlushRange(argv[i].data,argv[i].len);
			argv[i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[i].data);
		}
		i++;
	}

	i = 0;
	while(i<cnt_io) {
		if(argv[cnt_in+i].data!=NULL && argv[cnt_in+i].len>0) {
			DCFlushRange(argv[cnt_in+i].data,argv[cnt_in+i].len);
			argv[cnt_in+i].data = (void*)MEM_VIRTUAL_TO_PHYSICAL(argv[cnt_in+i].data);
		}
		i++;
	}
	DCFlushRange(argv,((cnt_in+cnt_io)<<3));

	ret = __ipc_syncrequest(req);

	if(req!=NULL) __ipc_freereq(req);
	return ret;
}