/**
 * @brief XMEM Reflective Memory Library Implementation
 * @date Created on 09/02/2005
 * @version 1.1 Emilio G. Cota 16/01/2009
 * @version 1.0 Julian Lewis
 */
16 #include <sys/types.h>
19 #include <sys/ioctl.h>
31 /*! @name device specific backend code
35 #define LN 128 //!< length of string (e.g. for full filenames)
37 static void *attach_address
[XmemMAX_TABLES
];
39 static XmemDrvrSegTable seg_tab
;
40 static int segcnt
= 0;
42 static XmemDrvrNodeTable node_tab
;
43 static int nodecnt
= 0;
45 static XmemNodeId my_nid
= 0; //!< My node ID
46 static int warm_start
= 0; //!< Warm vs Cold start
47 /* see description in function XmemCheckForWarmStart for further info on this */
49 static XmemMarkersMask markers_mask
= XmemMarkersDISABLE
;
52 static char gbConfigPath
[LN
] = "";
55 * @brief Set the default path for initialisation files
57 * @param pbPath - Path where the configuration files are stored
61 XmemError
XmemSetPath(char *pbPath
)
64 return XmemErrorSUCCESS
;
66 if (strlen(pbPath
) < (LN
- 20)) {
67 strcpy(gbConfigPath
, pbPath
);
68 if (gbConfigPath
[strlen(gbConfigPath
) - 1] != '/')
69 strcat(gbConfigPath
, "/");
72 return XmemErrorSUCCESS
;
76 * @brief Form the full path of a given configuration file
78 * @param name - file name
80 * @return full path of the given file name
82 char *XmemGetFile(char *name
)
87 configpath
= strlen(gbConfigPath
) > 0 ? gbConfigPath
: XMEM_PATH
;
88 sprintf(path
, "%s%s", configpath
, name
);
92 /*! @name Static variables, constants and functions
94 * These are not exported to the users of this library.
97 #include "./vmic/VmicLib.c"
98 #include "./network/NetworkLib.c"
99 #include "./shmem/ShmemLib.c"
102 XmemNodeId (*GetAllNodeIds
)();
103 XmemError (*RegisterCallback
)();
104 XmemEventMask (*Wait
)();
105 XmemEventMask (*Poll
)();
106 XmemError (*SendTable
)();
107 XmemError (*RecvTable
)();
108 XmemError (*SendMessage
)();
109 XmemTableId (*CheckTables
)();
110 XmemError (*SendSoftWakeup
)();
113 static int libinitialized
= 0;
114 static XmemEventMask libcallmask
= 0;
115 static XmemLibRoutines routines
;
116 static void (*libcallback
)(XmemCallbackStruct
*cbs
) = NULL
;
118 static char *estr
[XmemErrorCOUNT
] = {
120 "Timeout expired while waiting for interrupt",
121 "The Xmem library is not initialized",
122 "Write access to that table is denied for this node",
123 "Could not read table descriptors from file: " SEG_TABLE_NAME
,
124 "Syntax error in table descriptors file: " SEG_TABLE_NAME
,
125 "Could not read node descriptors from file: " NODE_TABLE_NAME
,
126 "Syntax error in node descriptors file: " NODE_TABLE_NAME
,
127 "There are currently no tables defined",
128 "That table is not defined in: " SEG_TABLE_NAME
,
129 "That node is not defined in: " NODE_TABLE_NAME
,
130 "Illegal message ID",
131 "A run time hardware IO error has occured, see: IOError",
132 "System error, see: errno",
133 "Incoherent markers: header/footer mismatch",
139 * Marker's (header/footer) implementation
141 * Tables may or may not be wrapped by markers (header/footer).
142 * This is enabled via a configurable parameter and has to be transparent
143 * to the users of the library.
144 * If markers are enabled, the resulting available space on a given
145 * table is smaller (but not by much, check the structs below).
146 * To take account of this size/offset mangling, we refer to the real (i.e.
147 * physical XMEM addresses) as private (priv), and the addresses that users
148 * operate with as public (pub).
149 * NOTE: The simple markers implementation is not atomic; use the flag
150 * XmemMarkersATOMIC if you really need to ensure atomicity, at the price of
151 * using a bounce buffer for each access.
158 } __attribute__((__packed__
));
164 #define XMEM_H_SIZE sizeof(struct header)
165 #define XMEM_F_SIZE sizeof(struct footer)
166 #define XMEM_HF_SIZE ((XMEM_H_SIZE)+(XMEM_F_SIZE))
168 #define XMEM_H_ELEMS ((XMEM_H_SIZE)/sizeof(uint32_t))
169 #define XMEM_F_ELEMS ((XMEM_F_SIZE)/sizeof(uint32_t))
170 #define XMEM_HF_ELEMS ((XMEM_HF_SIZE)/sizeof(uint32_t))
173 * header's element offset
174 * Note. We call 'element offset' an offset whose unit is 4 bytes.
176 static int __h_eloff(int pub_eloff
)
181 /* physical's element offset of a given address */
182 static int phys_eloff(int pub_eloff
)
184 if (markers_mask
& XmemMarkersENABLE
)
185 return pub_eloff
+ XMEM_H_ELEMS
;
/*
 * Footer's element offset: the footer sits immediately after the
 * payload, whose physical start is phys_eloff(pub_eloff).
 */
static int __f_eloff(int pub_elems, int pub_eloff)
{
	int payload_start = phys_eloff(pub_eloff);

	return payload_start + pub_elems;
}
195 static int priv_to_pub_elems(int priv_elems
)
197 if (markers_mask
& XmemMarkersENABLE
)
198 return priv_elems
- XMEM_HF_ELEMS
;
203 * XmemReadNodeTableFile - Reads the node table
207 * The node table is in the default place (NODE_TABLE_NAME).
209 * @return Appropriate error code (XmemError)
211 static XmemError
XmemReadNodeTableFile()
219 fp
= fopen(XmemGetFile(NODE_TABLE_NAME
), "r");
221 return XmemErrorCallback(XmemErrorNODE_TABLE_READ
, 0);
224 for (i
= 0; i
< XmemDrvrNODES
;) {
225 if (fscanf(fp
, "{ %s 0x%lx }\n", node_tab
.Descriptors
[i
].Name
,
226 &node_tab
.Descriptors
[i
].Id
) == 2) {
227 node_tab
.Used
|= node_tab
.Descriptors
[i
].Id
;
230 } else if (fscanf(fp
, "%c", &c
) == 1) {
232 * Empty lines, lines with only blank characters and
233 * lines starting with a hash (#) are ignored.
237 } else if (c
== '#') {
238 while (fscanf(fp
, "%c", &c
) != EOF
) {
242 } else if (isspace(c
)) {
243 while (fscanf(fp
, "%c", &c
) != EOF
) {
257 return XmemErrorSUCCESS
;
259 return XmemErrorCallback(XmemErrorNODE_TABLE_SYNTAX
, 0);
264 * XmemReadSegTableFile - Reads the segment table
268 * The segment table is in the default place (SEG_TABLE_NAME).
270 * @return Appropriate error code (XmemError)
272 static XmemError
XmemReadSegTableFile()
280 fp
= fopen(XmemGetFile(SEG_TABLE_NAME
), "r");
282 return XmemErrorCallback(XmemErrorSEG_TABLE_READ
, 0);
284 for (i
= 0; i
< XmemDrvrSEGMENTS
;) {
285 if (fscanf(fp
, "{ %s 0x%lx 0x%lx 0x%x 0x%lx 0x%lx }\n",
286 seg_tab
.Descriptors
[i
].Name
,
287 &seg_tab
.Descriptors
[i
].Id
,
288 &seg_tab
.Descriptors
[i
].Size
,
289 (unsigned int *)&seg_tab
.Descriptors
[i
].Address
,
290 &seg_tab
.Descriptors
[i
].Nodes
,
291 &seg_tab
.Descriptors
[i
].User
) == 6) {
292 seg_tab
.Used
|= seg_tab
.Descriptors
[i
].Id
;
295 } else if (fscanf(fp
, "%c", &c
) == 1) {
297 * Empty lines, lines with only blank characters and
298 * lines starting with a hash (#) are ignored.
302 } else if (c
== '#') {
303 while (fscanf(fp
, "%c", &c
) != EOF
) {
307 } else if (isspace(c
)) {
308 while (fscanf(fp
, "%c", &c
) != EOF
) {
322 return XmemErrorSUCCESS
;
324 return XmemErrorCallback(XmemErrorSEG_TABLE_SYNTAX
, 0);
329 * InitDevice - Local routine to initialise one real device
331 * @param device: type of device
333 * Remember that device can be VMIC, SHMEM or NETWORK.
335 * @return device initialisation on success; Not initialised error otherwise.
337 static XmemError
InitDevice(XmemDevice device
)
342 routines
.GetAllNodeIds
= VmicGetAllNodeIds
;
343 routines
.RegisterCallback
= VmicRegisterCallback
;
344 routines
.Wait
= VmicWait
;
345 routines
.Poll
= VmicPoll
;
346 routines
.SendTable
= VmicSendTable
;
347 routines
.RecvTable
= VmicRecvTable
;
348 routines
.SendMessage
= VmicSendMessage
;
349 routines
.CheckTables
= VmicCheckTables
;
350 routines
.SendSoftWakeup
= VmicSendSoftWakeup
;
351 return VmicInitialize();
353 case XmemDeviceSHMEM
:
354 routines
.GetAllNodeIds
= ShmemGetAllNodeIds
;
355 routines
.RegisterCallback
= ShmemRegisterCallback
;
356 routines
.Wait
= ShmemWait
;
357 routines
.Poll
= ShmemPoll
;
358 routines
.SendTable
= ShmemSendTable
;
359 routines
.RecvTable
= ShmemRecvTable
;
360 routines
.SendMessage
= ShmemSendMessage
;
361 routines
.CheckTables
= ShmemCheckTables
;
362 routines
.SendSoftWakeup
= ShmemSendSoftWakeup
;
363 return ShmemInitialize();
365 case XmemDeviceNETWORK
:
366 routines
.GetAllNodeIds
= NetworkGetAllNodeIds
;
367 routines
.RegisterCallback
= NetworkRegisterCallback
;
368 routines
.Wait
= NetworkWait
;
369 routines
.Poll
= NetworkPoll
;
370 routines
.SendTable
= NetworkSendTable
;
371 routines
.RecvTable
= NetworkRecvTable
;
372 routines
.SendMessage
= NetworkSendMessage
;
373 routines
.CheckTables
= NetworkCheckTables
;
374 routines
.SendSoftWakeup
= NetworkSendSoftWakeup
;
375 return NetworkInitialize();
381 return XmemErrorNOT_INITIALIZED
;
/*
 * Compute the adler32 checksum of a public buffer of pub_elems
 * 32-bit elements, using the zlib seed convention.
 */
static unsigned long calc_adler32(void *pub_buf, int pub_elems)
{
	unsigned long seed;
	size_t nbytes = pub_elems * sizeof(uint32_t);

	seed = zlib_adler32(0L, NULL, 0);
	return zlib_adler32(seed, pub_buf, nbytes);
}
391 static XmemError
evaluate_hf(struct header
*header
, struct footer
*footer
,
392 int pub_elems
, void *pub_buf
)
394 if (header
->val
!= footer
->val
)
395 return XmemErrorINCOHERENT_MARKERS
;
397 if (pub_buf
!= NULL
&& markers_mask
& XmemMarkersCHECKSUM
) {
398 if (header
->checksum
!= calc_adler32(pub_buf
, pub_elems
))
399 return XmemErrorCHECKSUM
;
401 return XmemErrorSUCCESS
;
404 static XmemError
check_markers(XmemTableId table
, int pub_elems
, int pub_eloff
)
406 struct header header
;
407 struct footer footer
;
410 if (markers_mask
& XmemMarkersDISABLE
)
411 return XmemErrorSUCCESS
;
414 err
= routines
.RecvTable(table
, &header
, XMEM_H_ELEMS
,
415 __h_eloff(pub_eloff
));
416 if (err
!= XmemErrorSUCCESS
)
420 err
= routines
.RecvTable(table
, &footer
, XMEM_F_ELEMS
,
421 __f_eloff(pub_elems
, pub_eloff
));
422 if (err
!= XmemErrorSUCCESS
)
426 err
= evaluate_hf(&header
, &footer
, pub_elems
, NULL
);
427 if (err
!= XmemErrorSUCCESS
)
430 return XmemErrorSUCCESS
;
433 static void fill_hf(struct header
*header
, struct footer
*footer
, int pub_elems
,
436 uint32_t randval
= rand();
438 header
->val
= randval
;
439 header
->size
= pub_elems
* sizeof(uint32_t);
440 footer
->val
= randval
;
442 if (pub_buf
!= NULL
&& markers_mask
& XmemMarkersCHECKSUM
)
443 header
->checksum
= calc_adler32(pub_buf
, pub_elems
);
447 * When XmemMarkersATOMIC is set, a bounce buffer is allocated when reading
448 * any table. The table is copied atomically to the bounce buffer, and the
449 * data coherency is evaluated on that local bounce buffer.
450 * If the data are coherent, they're copied to the user's buffer from
451 * the bounce buffer. We proceed in a similar fashion for writes.
452 * We may want to implement this on a per-segment basis, but for the time
453 * being as a per-process parameter seems enough for our needs.
455 static XmemError
send_table_atomic(XmemTableId table
, void *buf
, int pub_elems
,
456 int pub_eloff
, int upflag
)
458 size_t priv_elems
= pub_elems
+ XMEM_HF_ELEMS
;
459 uint32_t *bounce
= NULL
;
460 struct header
*header
;
461 struct footer
*footer
;
464 bounce
= malloc(priv_elems
* sizeof(uint32_t));
466 return XmemErrorENOMEM
;
468 /* copy the public data to the bounce buffer */
469 memcpy(bounce
+ XMEM_H_ELEMS
, buf
, pub_elems
* sizeof(uint32_t));
471 /* fill in the markers */
472 header
= (void *)bounce
;
473 footer
= (void *)(bounce
+ __f_eloff(pub_elems
, pub_eloff
));
474 fill_hf(header
, footer
, pub_elems
, bounce
+ XMEM_H_ELEMS
);
476 /* copy the table to XMEM */
477 err
= routines
.SendTable(table
, bounce
, priv_elems
,
478 __h_eloff(pub_eloff
), upflag
);
479 if (err
!= XmemErrorSUCCESS
)
483 return XmemErrorSUCCESS
;
491 static XmemError
send_table(XmemTableId table
, void *buf
, int pub_elems
,
492 int pub_eloff
, int upflag
)
494 struct header header
;
495 struct footer footer
;
498 fill_hf(&header
, &footer
, pub_elems
, NULL
);
500 /* write header, do not send SEGMENT_UPDATE */
501 err
= routines
.SendTable(table
, &header
, XMEM_H_ELEMS
,
502 __h_eloff(pub_eloff
), 0);
503 if (err
!= XmemErrorSUCCESS
)
506 /* write footer, do not send SEGMENT_UPDATE */
507 err
= routines
.SendTable(table
, &footer
, XMEM_F_ELEMS
,
508 __f_eloff(pub_elems
, pub_eloff
), 0);
509 if (err
!= XmemErrorSUCCESS
)
512 /* write the table itself, send SEGMENT_UPDATE if requested */
513 err
= routines
.SendTable(table
, buf
, pub_elems
, phys_eloff(pub_eloff
),
515 if (err
!= XmemErrorSUCCESS
)
518 return XmemErrorSUCCESS
;
521 static XmemError
receive_table_atomic(XmemTableId table
, void *buf
,
522 int pub_elems
, int pub_eloff
)
524 size_t priv_elems
= pub_elems
+ XMEM_HF_ELEMS
;
525 uint32_t *bounce
= NULL
;
526 struct header
*header
;
527 struct footer
*footer
;
530 bounce
= malloc(priv_elems
* sizeof(uint32_t));
532 return XmemErrorENOMEM
;
534 err
= routines
.RecvTable(table
, bounce
, priv_elems
,
535 __h_eloff(pub_eloff
));
536 if (err
!= XmemErrorSUCCESS
)
540 header
= (void *)bounce
;
541 footer
= (void *)(bounce
+ __f_eloff(pub_elems
, pub_eloff
));
542 err
= evaluate_hf(header
, footer
, pub_elems
, bounce
+ XMEM_H_ELEMS
);
543 if (err
!= XmemErrorSUCCESS
)
546 /* copy from the bounce buffer to the user's buffer */
547 memcpy(buf
, bounce
+ XMEM_H_ELEMS
, pub_elems
* sizeof(uint32_t));
550 return XmemErrorSUCCESS
;
561 * The following are exported (non-static) Xmem Lib functions
562 * These are documented in the header file.
566 XmemError
XmemInitialize(XmemDevice device
)
574 return XmemErrorSUCCESS
;
575 bzero((void *)attach_address
, XmemMAX_TABLES
* sizeof(void *));
576 bzero((void *)&node_tab
, sizeof(XmemDrvrNodeTable
));
578 err
= XmemReadNodeTableFile();
579 if (err
!= XmemErrorSUCCESS
)
582 bzero((void *) &seg_tab
, sizeof(XmemDrvrSegTable
));
583 err
= XmemReadSegTableFile();
584 if (err
!= XmemErrorSUCCESS
)
587 if (device
== XmemDeviceANY
) {
588 fdev
= XmemDeviceVMIC
;
589 ldev
= XmemDeviceNETWORK
;
595 for (dev
= fdev
; dev
<= ldev
; dev
++) {
596 err
= InitDevice(dev
);
597 if (err
== XmemErrorSUCCESS
) {
602 return XmemErrorNOT_INITIALIZED
;
607 XmemNodeId
XmemWhoAmI()
614 int XmemCheckForWarmStart()
620 char *XmemErrorToString(XmemError err
)
623 static char result
[XmemErrorSTRING_SIZE
];
625 if (err
< 0 || err
>= XmemErrorCOUNT
)
626 cp
= "No such error number";
628 cp
= estr
[(int)err
]; /* estr: global error string array */
629 bzero((void *)result
, XmemErrorSTRING_SIZE
);
635 XmemNodeId
XmemGetAllNodeIds()
638 return routines
.GetAllNodeIds();
643 XmemError
XmemRegisterCallback(void (*cb
)(XmemCallbackStruct
*cbs
),
648 if (! libinitialized
)
649 return XmemErrorNOT_INITIALIZED
;
650 err
= routines
.RegisterCallback(cb
, mask
);
651 if (err
== XmemErrorSUCCESS
) {
665 XmemEventMask
XmemGetRegisteredEvents()
667 return (XmemEventMask
)libcallmask
;
671 XmemEventMask
XmemWait(int timeout
)
674 return routines
.Wait(timeout
);
679 XmemEventMask
XmemPoll()
682 return routines
.Poll();
687 XmemError
XmemSendTable(XmemTableId table
, void *buf
, int elems
,
688 int offset
, int upflag
)
691 return XmemErrorNOT_INITIALIZED
;
693 if (markers_mask
& XmemMarkersDISABLE
) {
694 return routines
.SendTable(table
, buf
, elems
, phys_eloff(offset
),
698 if (markers_mask
& XmemMarkersATOMIC
)
699 return send_table_atomic(table
, buf
, elems
, offset
, upflag
);
701 return send_table(table
, buf
, elems
, offset
, upflag
);
705 XmemError
XmemRecvTable(XmemTableId table
, void *buf
, int elems
,
711 return XmemErrorNOT_INITIALIZED
;
713 if (markers_mask
& XmemMarkersATOMIC
)
714 return receive_table_atomic(table
, buf
, elems
, offset
);
716 err
= check_markers(table
, elems
, offset
);
717 if (err
!= XmemErrorSUCCESS
)
720 return routines
.RecvTable(table
, buf
, elems
, phys_eloff(offset
));
724 XmemError
XmemSendMessage(XmemNodeId nodes
, XmemMessage
*mess
)
727 return routines
.SendMessage(nodes
, mess
);
728 return XmemErrorNOT_INITIALIZED
;
732 XmemTableId
XmemCheckTables()
735 return routines
.CheckTables();
736 return XmemErrorNOT_INITIALIZED
;
740 XmemError
XmemErrorCallback(XmemError err
, unsigned long ioe
)
742 XmemCallbackStruct cbs
;
746 bzero((void *)&cbs
, sizeof(XmemCallbackStruct
));
749 case XmemErrorSUCCESS
:
752 case XmemErrorTIMEOUT
:
754 cbs
.Mask
= XmemEventMaskTIMEOUT
;
755 if (libcallmask
& XmemEventMaskTIMEOUT
)
761 cbs
.Mask
= XmemEventMaskIO
;
763 if (libcallmask
& XmemEventMaskIO
)
767 case XmemErrorSYSTEM
:
769 cbs
.Mask
= XmemEventMaskSYSTEM
;
771 if (libcallmask
& XmemEventMaskSYSTEM
)
777 cbs
.Mask
= XmemEventMaskSOFTWARE
;
778 cbs
.Data
= (unsigned long)err
;
779 if (libcallmask
& XmemEventMaskSOFTWARE
)
/*
 * Build a (non-cryptographic) integer key from a name by folding each
 * character into a left-shifting accumulator. A NULL name yields 0.
 */
int XmemGetKey(char *name)
{
	int key = 0;
	size_t idx, len;

	if (name == NULL)
		return 0;

	len = strlen(name); /* hoisted: invariant over the loop */
	for (idx = 0; idx < len; idx++)
		key = (key << 1) + (int)name[idx];

	return key;
}
801 void *XmemGetSharedMemory(XmemTableId tid
)
804 unsigned long bytes
, smemid
;
813 if (! libinitialized
)
815 for (tnum
= 0; tnum
< XmemMAX_TABLES
; tnum
++) {
819 if (attach_address
[tnum
])
820 return attach_address
[tnum
];
822 break; /* it doesn't exist yet */
826 cp
= XmemGetTableName(tid
);
830 err
= XmemGetTableDesc(tid
, &elems
, &nid
, &user
);
831 if (err
!= XmemErrorSUCCESS
)
834 bytes
= elems
* sizeof(uint32_t);
835 smemky
= XmemGetKey(cp
);
836 smemid
= shmget(smemky
, bytes
, 0666);
841 /* segment does not exist; create it */
842 smemid
= shmget(smemky
, bytes
, IPC_CREAT
| 0666);
845 /* attach memory segment to smemid */
846 table
= shmat(smemid
, (char *)0, 0);
847 if (-1 == (int)table
)
849 if (tnum
< XmemMAX_TABLES
)
850 attach_address
[tnum
] = table
;
852 err
= XmemRecvTable(tid
, table
, elems
, 0);
853 if (XmemErrorSUCCESS
!= err
)
857 else { /* segment was already there */
858 table
= shmat(smemid
, (char *)0, 0);
859 if (-1 == (int)table
)
861 if (tnum
< XmemMAX_TABLES
)
862 attach_address
[tnum
] = table
;
866 XmemErrorCallback(XmemErrorSYSTEM
, errno
);
871 XmemTableId
XmemGetAllTableIds()
877 char *XmemGetNodeName(XmemNodeId node
)
882 if (!xmem
) /* global variable, means a device is open */
884 for (i
= 0; i
< XmemMAX_NODES
; i
++) {
886 if (node_tab
.Used
& msk
&& node
== node_tab
.Descriptors
[i
].Id
)
887 return node_tab
.Descriptors
[i
].Name
;
889 XmemErrorCallback(XmemErrorNO_SUCH_NODE
, 0);
894 XmemNodeId
XmemGetNodeId(XmemName name
)
901 for (i
= 0; i
< XmemMAX_NODES
; i
++) {
903 if (strcmp(name
, node_tab
.Descriptors
[i
].Name
) == 0 &&
905 return node_tab
.Descriptors
[i
].Id
;
907 XmemErrorCallback(XmemErrorNO_SUCH_NODE
, 0);
912 char * XmemGetTableName(XmemTableId table
)
919 for (i
= 0; i
< XmemMAX_TABLES
; i
++) {
921 if (seg_tab
.Used
& msk
&& table
== seg_tab
.Descriptors
[i
].Id
)
922 return seg_tab
.Descriptors
[i
].Name
;
924 XmemErrorCallback(XmemErrorNO_SUCH_TABLE
, 0);
930 XmemTableId
XmemGetTableId(XmemName name
)
937 for (i
= 0; i
< XmemMAX_TABLES
; i
++) {
939 if (strcmp(name
,seg_tab
.Descriptors
[i
].Name
) == 0 &&
941 return seg_tab
.Descriptors
[i
].Id
;
943 XmemErrorCallback(XmemErrorNO_SUCH_TABLE
, 0);
949 XmemError
XmemGetTableDesc(XmemTableId table
, int *elems
,
950 XmemNodeId
*nodes
, uint32_t *user
)
956 return XmemErrorCallback(XmemErrorNOT_INITIALIZED
, 0);
957 for (i
= 0; i
< XmemMAX_TABLES
; i
++) {
959 if (seg_tab
.Used
& msk
&& table
== seg_tab
.Descriptors
[i
].Id
) {
960 *elems
= priv_to_pub_elems(seg_tab
.Descriptors
[i
].Size
/
962 *nodes
= seg_tab
.Descriptors
[i
].Nodes
;
963 *user
= seg_tab
.Descriptors
[i
].User
;
964 return XmemErrorSUCCESS
;
967 return XmemErrorCallback(XmemErrorNO_SUCH_TABLE
, 0);
971 XmemError
XmemReadTableFile(XmemTableId tid
)
985 for (i
= 0; i
< XmemMAX_TABLES
; i
++) {
989 cp
= XmemGetTableName(msk
);
990 err
= XmemGetTableDesc(msk
, &elems
, &nodes
, &user
);
991 if (XmemErrorSUCCESS
!= err
)
993 table
= XmemGetSharedMemory(msk
);
995 return XmemErrorNO_SUCH_TABLE
;
997 bzero((void *)tbnam
, 64);
1001 fp
= fopen(XmemGetFile(tbnam
), "r");
1003 return XmemErrorCallback(XmemErrorSYSTEM
, errno
);
1004 bytes
= elems
* sizeof(uint32_t);
1005 cnt
= fread(table
, bytes
, 1, fp
);
1007 err
= XmemErrorCallback(XmemErrorSYSTEM
, errno
);
1011 if (XmemErrorSUCCESS
!= err
)
1014 return XmemErrorSUCCESS
;
1018 XmemError
XmemWriteTableFile(XmemTableId tid
)
1022 unsigned long bytes
;
1033 for (i
= 0; i
< XmemMAX_TABLES
; i
++) {
1037 cp
= XmemGetTableName(msk
);
1038 err
= XmemGetTableDesc(msk
, &elems
, &nodes
, &user
);
1039 if (XmemErrorSUCCESS
!= err
)
1041 table
= XmemGetSharedMemory(msk
);
1043 return XmemErrorNO_SUCH_TABLE
;
1045 bzero((void *)tbnam
, 64);
1049 fp
= fopen(XmemGetFile(tbnam
), "w");
1051 return XmemErrorCallback(XmemErrorSYSTEM
, errno
);
1052 bytes
= elems
* sizeof(uint32_t);
1053 cnt
= fwrite(table
, bytes
, 1, fp
);
1055 err
= XmemErrorCallback(XmemErrorSYSTEM
,errno
);
1059 if (err
!= XmemErrorSUCCESS
)
1062 return XmemErrorSUCCESS
;
1065 XmemError
XmemSendSoftWakeup(uint32_t nodeid
, uint32_t data
)
1067 if (!libinitialized
)
1068 return XmemErrorNOT_INITIALIZED
;
1069 return routines
.SendSoftWakeup(nodeid
, data
);
/*
 * XmemLibUsleep - Sleep for 'dly' us.
 *
 * @param dly: desired delay, in us
 *
 * The delay is split into seconds and nanoseconds: putting dly * 1000
 * straight into tv_nsec exceeds the legal range [0, 999999999] for
 * dly >= 1000000, making nanosleep() fail with EINVAL (no sleep at
 * all). The remainder-of-sleep value (rmtp) is ignored, as before.
 */
void XmemLibUsleep(int dly)
{
	struct timespec rqtp, rmtp;

	rqtp.tv_sec = dly / 1000000;
	rqtp.tv_nsec = (long)(dly % 1000000) * 1000L;
	nanosleep(&rqtp, &rmtp);
}
1087 XmemMarkersMask
XmemSetMarkersMask(XmemMarkersMask mask
)
1089 XmemMarkersMask omask
= markers_mask
;
1092 * mask clean-up: enforce single ENABLE/DISABLE. Require one of them.
1094 if (mask
& XmemMarkersENABLE
) {
1095 if (mask
& XmemMarkersDISABLE
)
1096 mask
= XmemMarkersDISABLE
;
1098 mask
= XmemMarkersDISABLE
;
1100 mask
&= XmemMarkersALL
;
1102 if (mask
& XmemMarkersCHECKSUM
&& !(mask
& XmemMarkersATOMIC
))
1103 mask
&= ~XmemMarkersCHECKSUM
;
1106 markers_mask
= mask
;