/**
 * @file libxmem.c
 *
 * @brief XMEM Reflective Memory Library Implementation
 *
 * @author Julian Lewis
 *
 * @date Created on 09/02/2005
 *
 * @version 1.1 Emilio G. Cota 16/01/2009
 * @version 1.0 Julian Lewis
 */
#include <unistd.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <strings.h>	/* bzero() */
#include <sys/ioctl.h>
#include <stdio.h>
#include <mqueue.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <errno.h>	/* errno, ENOENT */
#include <sys/shm.h>

#include <xmemDrvr.h>
#include <libxmem.h>
#include <adler32.h>

/*! @name device specific backend code
 */
//@{
#define LN 128	//!< length of string (e.g. for full filenames)

static void *attach_address[XmemMAX_TABLES];

static XmemDrvrSegTable seg_tab;
static int segcnt = 0;

static XmemDrvrNodeTable node_tab;
static int nodecnt = 0;

static XmemNodeId my_nid = 0;	//!< My node ID
static int warm_start = 0;	//!< Warm vs Cold start
/* see description in function XmemCheckForWarmStart for further info on this */

static XmemMarkersMask markers_mask = XmemMarkersDISABLE;
//@}

static char gbConfigPath[LN] = "";

/**
 * @brief Set the default path for initialisation files
 *
 * @param pbPath - Path where the configuration files are stored
 *
 * @return XmemError
 */
XmemError XmemSetPath(char *pbPath)
{
	if (pbPath == NULL || !strlen(pbPath))
		return XmemErrorSUCCESS;
	if (strlen(pbPath) < (LN - 20)) {
		strcpy(gbConfigPath, pbPath);
		if (gbConfigPath[strlen(gbConfigPath) - 1] != '/')
			strcat(gbConfigPath, "/");
	}
	return XmemErrorSUCCESS;
}

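/*
 * Usage sketch (illustrative only; the directory name below is
 * hypothetical): point the library at a non-default configuration
 * directory before XmemInitialize() reads the node/segment tables.
 *
 *	XmemSetPath("/dsc/data/xmem");
 *	XmemInitialize(XmemDeviceANY);
 */
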
/**
 * @brief Form the full path of a given configuration file
 *
 * @param name - file name
 *
 * @return full path of the given file name
 */
char *XmemGetFile(char *name)
{
	static char path[LN];
	char *configpath;

	configpath = strlen(gbConfigPath) > 0 ? gbConfigPath : XMEM_PATH;
	snprintf(path, sizeof(path), "%s%s", configpath, name);
	return path;
}

/*! @name Static variables, constants and functions
 *
 * These are not exported to the users of this library.
 */
//@{
#include "./vmic/VmicLib.c"
#include "./network/NetworkLib.c"
#include "./shmem/ShmemLib.c"

typedef struct {
	XmemNodeId (*GetAllNodeIds)();
	XmemError (*RegisterCallback)();
	XmemEventMask (*Wait)();
	XmemEventMask (*Poll)();
	XmemError (*SendTable)();
	XmemError (*RecvTable)();
	XmemError (*SendMessage)();
	XmemTableId (*CheckTables)();
	XmemError (*SendSoftWakeup)();
} XmemLibRoutines;

static int libinitialized = 0;
static XmemEventMask libcallmask = 0;
static XmemLibRoutines routines;
static void (*libcallback)(XmemCallbackStruct *cbs) = NULL;

static char *estr[XmemErrorCOUNT] = {
	"No Error",
	"Timeout expired while waiting for interrupt",
	"The Xmem library is not initialized",
	"Write access to that table is denied for this node",
	"Could not read table descriptors from file: " SEG_TABLE_NAME,
	"Syntax error in table descriptors file: " SEG_TABLE_NAME,
	"Could not read node descriptors from file: " NODE_TABLE_NAME,
	"Syntax error in node descriptors file: " NODE_TABLE_NAME,
	"There are currently no tables defined",
	"That table is not defined in: " SEG_TABLE_NAME,
	"That node is not defined in: " NODE_TABLE_NAME,
	"Illegal message ID",
	"A run time hardware IO error has occurred, see: IOError",
	"System error, see: errno",
	"Incoherent markers: header/footer mismatch",
	"Not enough memory",
	"Checksum error"
};

/*
 * Markers (header/footer) implementation
 *
 * Tables may or may not be wrapped by markers (header/footer).
 * This is enabled via a configurable parameter and has to be transparent
 * to the users of the library.
 * If markers are enabled, the resulting space available on a given
 * table is smaller (though not by much; check the structs below).
 * To account for this size/offset mangling, we refer to the real (i.e.
 * physical XMEM) addresses as private (priv), and to the addresses that
 * users operate with as public (pub).
 * NOTE: the simple markers implementation is not atomic; use the flag
 * XmemMarkersATOMIC if you really need atomicity, at the price of
 * using a bounce buffer for each access.
 */
struct header {
	uint32_t val;
	uint32_t checksum;
	uint32_t size;
} __attribute__((__packed__));

struct footer {
	uint32_t val;
} __attribute__((__packed__));

#define XMEM_H_SIZE	sizeof(struct header)
#define XMEM_F_SIZE	sizeof(struct footer)
#define XMEM_HF_SIZE	((XMEM_H_SIZE)+(XMEM_F_SIZE))

#define XMEM_H_ELEMS	((XMEM_H_SIZE)/sizeof(uint32_t))
#define XMEM_F_ELEMS	((XMEM_F_SIZE)/sizeof(uint32_t))
#define XMEM_HF_ELEMS	((XMEM_HF_SIZE)/sizeof(uint32_t))

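/*
 * Resulting private layout of a wrapped table, in 4-byte elements
 * (XMEM_H_ELEMS = 3, XMEM_F_ELEMS = 1): a table of N public elements
 * occupies N + XMEM_HF_ELEMS = N + 4 private elements.
 *
 *	| val | checksum | size | pub[0] ... pub[N-1] | val |
 *	|<------- header ------>|<---- user data ---->|<-f->|
 */
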
/*
 * header's element offset
 *
 * Note: we call 'element offset' an offset whose unit is 4 bytes.
 */
static int __h_eloff(int pub_eloff)
{
	return pub_eloff;
}

/* physical element offset of a given address */
static int phys_eloff(int pub_eloff)
{
	if (markers_mask & XmemMarkersENABLE)
		return pub_eloff + XMEM_H_ELEMS;
	return pub_eloff;
}

/* footer's offset */
static int __f_eloff(int pub_elems, int pub_eloff)
{
	return phys_eloff(pub_eloff) + pub_elems;
}

static int priv_to_pub_elems(int priv_elems)
{
	if (markers_mask & XmemMarkersENABLE)
		return priv_elems - XMEM_HF_ELEMS;
	return priv_elems;
}

/**
 * XmemReadNodeTableFile - Reads the node table
 *
 * @param : none
 *
 * The node table is in the default place (NODE_TABLE_NAME).
 *
 * @return Appropriate error code (XmemError)
 */
static XmemError XmemReadNodeTableFile()
{
	int i;
	FILE *fp;
	char c;

	nodecnt = 0;
	umask(0);
	fp = fopen(XmemGetFile(NODE_TABLE_NAME), "r");
	if (NULL == fp)
		return XmemErrorCallback(XmemErrorNODE_TABLE_READ, 0);

	node_tab.Used = 0;
	for (i = 0; i < XmemDrvrNODES;) {
		if (fscanf(fp, "{ %s 0x%lx }\n", node_tab.Descriptors[i].Name,
			   &node_tab.Descriptors[i].Id) == 2) {
			node_tab.Used |= node_tab.Descriptors[i].Id;
			nodecnt++;
			i++;
		} else if (fscanf(fp, "%c", &c) == 1) {
			/*
			 * Empty lines, lines with only blank characters and
			 * lines starting with a hash (#) are ignored.
			 */
			if (c == '\n') {
				continue;
			} else if (c == '#') {
				while (fscanf(fp, "%c", &c) != EOF) {
					if (c == '\n')
						break;
				}
			} else if (isspace(c)) {
				while (fscanf(fp, "%c", &c) != EOF) {
					if (c == '\n')
						break;
					if (!isspace(c))
						goto out_err;
				}
			} else {
				goto out_err;
			}
		} else {
			goto out;
		}
	}
out:
	fclose(fp);
	return XmemErrorSUCCESS;
out_err:
	fclose(fp);
	return XmemErrorCallback(XmemErrorNODE_TABLE_SYNTAX, 0);
}

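/*
 * Node table syntax accepted by the parser above; names and ids below
 * are made-up examples. Each entry is one '{ name 0xid }' pair; '#'
 * starts a comment line.
 *
 *	# node name / node id
 *	{ NODE01 0x01 }
 *	{ NODE02 0x02 }
 */
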
/**
 * XmemReadSegTableFile - Reads the segment table
 *
 * @param : none
 *
 * The segment table is in the default place (SEG_TABLE_NAME).
 *
 * @return Appropriate error code (XmemError)
 */
static XmemError XmemReadSegTableFile()
{
	int i;
	FILE *fp;
	char c;

	segcnt = 0;
	umask(0);
	fp = fopen(XmemGetFile(SEG_TABLE_NAME), "r");
	if (NULL == fp)
		return XmemErrorCallback(XmemErrorSEG_TABLE_READ, 0);

	seg_tab.Used = 0;
	for (i = 0; i < XmemDrvrSEGMENTS;) {
		if (fscanf(fp, "{ %s 0x%lx 0x%lx 0x%x 0x%lx 0x%lx }\n",
			   seg_tab.Descriptors[i].Name,
			   &seg_tab.Descriptors[i].Id,
			   &seg_tab.Descriptors[i].Size,
			   (unsigned int *)&seg_tab.Descriptors[i].Address,
			   &seg_tab.Descriptors[i].Nodes,
			   &seg_tab.Descriptors[i].User) == 6) {
			seg_tab.Used |= seg_tab.Descriptors[i].Id;
			segcnt++;
			i++;
		} else if (fscanf(fp, "%c", &c) == 1) {
			/*
			 * Empty lines, lines with only blank characters and
			 * lines starting with a hash (#) are ignored.
			 */
			if (c == '\n') {
				continue;
			} else if (c == '#') {
				while (fscanf(fp, "%c", &c) != EOF) {
					if (c == '\n')
						break;
				}
			} else if (isspace(c)) {
				while (fscanf(fp, "%c", &c) != EOF) {
					if (c == '\n')
						break;
					if (!isspace(c))
						goto out_err;
				}
			} else {
				goto out_err;
			}
		} else {
			goto out;
		}
	}
out:
	fclose(fp);
	return XmemErrorSUCCESS;
out_err:
	fclose(fp);
	return XmemErrorCallback(XmemErrorSEG_TABLE_SYNTAX, 0);
}

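/*
 * Segment table syntax accepted by the six-field fscanf above; all
 * values below are made-up examples. The fields are, in order:
 * name, id, size (bytes), address, node mask, user word.
 *
 *	# name  id   size    addr nodes      user
 *	{ TAB1 0x01 0x10000 0x0  0xffffffff 0x0 }
 */
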
/**
 * InitDevice - Local routine to initialise one real device
 *
 * @param device: type of device
 *
 * Remember that device can be VMIC, SHMEM or NETWORK.
 *
 * @return result of the device's initialisation on success; a 'not initialised' error otherwise.
 */
static XmemError InitDevice(XmemDevice device)
{
	switch (device) {

	case XmemDeviceVMIC:
		routines.GetAllNodeIds = VmicGetAllNodeIds;
		routines.RegisterCallback = VmicRegisterCallback;
		routines.Wait = VmicWait;
		routines.Poll = VmicPoll;
		routines.SendTable = VmicSendTable;
		routines.RecvTable = VmicRecvTable;
		routines.SendMessage = VmicSendMessage;
		routines.CheckTables = VmicCheckTables;
		routines.SendSoftWakeup = VmicSendSoftWakeup;
		return VmicInitialize();

	case XmemDeviceSHMEM:
		routines.GetAllNodeIds = ShmemGetAllNodeIds;
		routines.RegisterCallback = ShmemRegisterCallback;
		routines.Wait = ShmemWait;
		routines.Poll = ShmemPoll;
		routines.SendTable = ShmemSendTable;
		routines.RecvTable = ShmemRecvTable;
		routines.SendMessage = ShmemSendMessage;
		routines.CheckTables = ShmemCheckTables;
		routines.SendSoftWakeup = ShmemSendSoftWakeup;
		return ShmemInitialize();

	case XmemDeviceNETWORK:
		routines.GetAllNodeIds = NetworkGetAllNodeIds;
		routines.RegisterCallback = NetworkRegisterCallback;
		routines.Wait = NetworkWait;
		routines.Poll = NetworkPoll;
		routines.SendTable = NetworkSendTable;
		routines.RecvTable = NetworkRecvTable;
		routines.SendMessage = NetworkSendMessage;
		routines.CheckTables = NetworkCheckTables;
		routines.SendSoftWakeup = NetworkSendSoftWakeup;
		return NetworkInitialize();

	default:
		break;
	}
	return XmemErrorNOT_INITIALIZED;
}

static unsigned long calc_adler32(void *pub_buf, int pub_elems)
{
	unsigned long adler = zlib_adler32(0L, NULL, 0);

	return zlib_adler32(adler, pub_buf, pub_elems * sizeof(uint32_t));
}

static XmemError evaluate_hf(struct header *header, struct footer *footer,
			     int pub_elems, void *pub_buf)
{
	if (header->val != footer->val)
		return XmemErrorINCOHERENT_MARKERS;

	if (pub_buf != NULL && markers_mask & XmemMarkersCHECKSUM) {
		if (header->checksum != calc_adler32(pub_buf, pub_elems))
			return XmemErrorCHECKSUM;
	}
	return XmemErrorSUCCESS;
}

static XmemError check_markers(XmemTableId table, int pub_elems, int pub_eloff)
{
	struct header header;
	struct footer footer;
	XmemError err;

	if (markers_mask & XmemMarkersDISABLE)
		return XmemErrorSUCCESS;

	/* read header */
	err = routines.RecvTable(table, &header, XMEM_H_ELEMS,
				 __h_eloff(pub_eloff));
	if (err != XmemErrorSUCCESS)
		return err;

	/* read footer */
	err = routines.RecvTable(table, &footer, XMEM_F_ELEMS,
				 __f_eloff(pub_elems, pub_eloff));
	if (err != XmemErrorSUCCESS)
		return err;

	/* check markers */
	err = evaluate_hf(&header, &footer, pub_elems, NULL);
	if (err != XmemErrorSUCCESS)
		return err;

	return XmemErrorSUCCESS;
}

static void fill_hf(struct header *header, struct footer *footer, int pub_elems,
		    void *pub_buf)
{
	uint32_t randval = rand();

	header->val = randval;
	header->size = pub_elems * sizeof(uint32_t);
	footer->val = randval;

	if (pub_buf != NULL && markers_mask & XmemMarkersCHECKSUM)
		header->checksum = calc_adler32(pub_buf, pub_elems);
}

/*
 * When XmemMarkersATOMIC is set, a bounce buffer is allocated when reading
 * any table. The table is copied atomically to the bounce buffer, and the
 * data coherency is evaluated on that local bounce buffer.
 * If the data are coherent, they're copied from the bounce buffer to the
 * user's buffer. We proceed in a similar fashion for writes.
 * We may want to implement this on a per-segment basis, but for the time
 * being a per-process parameter seems enough for our needs.
 */
static XmemError send_table_atomic(XmemTableId table, void *buf, int pub_elems,
				   int pub_eloff, int upflag)
{
	size_t priv_elems = pub_elems + XMEM_HF_ELEMS;
	uint32_t *bounce = NULL;
	struct header *header;
	struct footer *footer;
	XmemError err;

	bounce = malloc(priv_elems * sizeof(uint32_t));
	if (bounce == NULL)
		return XmemErrorENOMEM;

	/* copy the public data to the bounce buffer */
	memcpy(bounce + XMEM_H_ELEMS, buf, pub_elems * sizeof(uint32_t));

	/* fill in the markers */
	header = (void *)bounce;
	footer = (void *)(bounce + __f_eloff(pub_elems, pub_eloff));
	fill_hf(header, footer, pub_elems, bounce + XMEM_H_ELEMS);

	/* copy the table to XMEM */
	err = routines.SendTable(table, bounce, priv_elems,
				 __h_eloff(pub_eloff), upflag);
	if (err != XmemErrorSUCCESS)
		goto out_err;

	free(bounce);
	return XmemErrorSUCCESS;

out_err:
	free(bounce);
	return err;
}

static XmemError send_table(XmemTableId table, void *buf, int pub_elems,
			    int pub_eloff, int upflag)
{
	struct header header;
	struct footer footer;
	XmemError err;

	fill_hf(&header, &footer, pub_elems, NULL);

	/* write header, do not send SEGMENT_UPDATE */
	err = routines.SendTable(table, &header, XMEM_H_ELEMS,
				 __h_eloff(pub_eloff), 0);
	if (err != XmemErrorSUCCESS)
		return err;

	/* write footer, do not send SEGMENT_UPDATE */
	err = routines.SendTable(table, &footer, XMEM_F_ELEMS,
				 __f_eloff(pub_elems, pub_eloff), 0);
	if (err != XmemErrorSUCCESS)
		return err;

	/* write the table itself, send SEGMENT_UPDATE if requested */
	err = routines.SendTable(table, buf, pub_elems, phys_eloff(pub_eloff),
				 upflag);
	if (err != XmemErrorSUCCESS)
		return err;

	return XmemErrorSUCCESS;
}

static XmemError receive_table_atomic(XmemTableId table, void *buf,
				      int pub_elems, int pub_eloff)
{
	size_t priv_elems = pub_elems + XMEM_HF_ELEMS;
	uint32_t *bounce = NULL;
	struct header *header;
	struct footer *footer;
	XmemError err;

	bounce = malloc(priv_elems * sizeof(uint32_t));
	if (bounce == NULL)
		return XmemErrorENOMEM;

	err = routines.RecvTable(table, bounce, priv_elems,
				 __h_eloff(pub_eloff));
	if (err != XmemErrorSUCCESS)
		goto out_err;

	/* check markers */
	header = (void *)bounce;
	footer = (void *)(bounce + __f_eloff(pub_elems, pub_eloff));
	err = evaluate_hf(header, footer, pub_elems, bounce + XMEM_H_ELEMS);
	if (err != XmemErrorSUCCESS)
		goto out_err;

	/* copy from the bounce buffer to the user's buffer */
	memcpy(buf, bounce + XMEM_H_ELEMS, pub_elems * sizeof(uint32_t));

	free(bounce);
	return XmemErrorSUCCESS;

out_err:
	free(bounce);
	return err;
}

//@}

/*
 * The following are exported (non-static) Xmem Lib functions.
 * These are documented in the header file.
 */

XmemError XmemInitialize(XmemDevice device)
{
	XmemDevice fdev;
	XmemDevice ldev;
	XmemDevice dev;
	XmemError err;

	if (libinitialized)
		return XmemErrorSUCCESS;

	bzero((void *)attach_address, XmemMAX_TABLES * sizeof(void *));
	bzero((void *)&node_tab, sizeof(XmemDrvrNodeTable));

	err = XmemReadNodeTableFile();
	if (err != XmemErrorSUCCESS)
		return err;

	bzero((void *)&seg_tab, sizeof(XmemDrvrSegTable));
	err = XmemReadSegTableFile();
	if (err != XmemErrorSUCCESS)
		return err;

	if (device == XmemDeviceANY) {
		fdev = XmemDeviceVMIC;
		ldev = XmemDeviceNETWORK;
	} else {
		fdev = device;
		ldev = device;
	}
	for (dev = fdev; dev <= ldev; dev++) {
		err = InitDevice(dev);
		if (err == XmemErrorSUCCESS) {
			libinitialized = 1;
			return err;
		}
	}
	return XmemErrorNOT_INITIALIZED;
}

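/*
 * Start-up sketch (illustrative only): with XmemDeviceANY the backends
 * are tried in order (VMIC, SHMEM, NETWORK) and the first one that
 * initialises wins.
 *
 *	if (XmemInitialize(XmemDeviceANY) != XmemErrorSUCCESS) {
 *		fprintf(stderr, "xmem: %s\n",
 *			XmemErrorToString(XmemErrorNOT_INITIALIZED));
 *		exit(EXIT_FAILURE);
 *	}
 */
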
XmemNodeId XmemWhoAmI()
{
	return my_nid;
}

int XmemCheckForWarmStart()
{
	return warm_start;
}

char *XmemErrorToString(XmemError err)
{
	char *cp;
	static char result[XmemErrorSTRING_SIZE];

	if (err < 0 || err >= XmemErrorCOUNT)
		cp = "No such error number";
	else
		cp = estr[(int)err]; /* estr: global error string array */
	bzero((void *)result, XmemErrorSTRING_SIZE);
	strcpy(result, cp);
	return result;
}

XmemNodeId XmemGetAllNodeIds()
{
	if (libinitialized)
		return routines.GetAllNodeIds();
	return 0;
}

XmemError XmemRegisterCallback(void (*cb)(XmemCallbackStruct *cbs),
			       XmemEventMask mask)
{
	XmemError err;

	if (!libinitialized)
		return XmemErrorNOT_INITIALIZED;

	err = routines.RegisterCallback(cb, mask);
	if (err == XmemErrorSUCCESS) {
		if (mask) {
			libcallmask |= mask;
			libcallback = cb;
		} else {
			libcallmask = 0;
			libcallback = NULL;
		}
	}
	return err;
}

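/*
 * Callback usage sketch; the handler name is hypothetical. Passing a
 * zero mask unregisters the callback, as implemented above.
 *
 *	static void my_handler(XmemCallbackStruct *cbs)
 *	{
 *		if (cbs->Mask & XmemEventMaskTIMEOUT)
 *			;	// handle timeout
 *	}
 *	...
 *	XmemRegisterCallback(my_handler,
 *			     XmemEventMaskTIMEOUT | XmemEventMaskIO);
 */
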
XmemEventMask XmemGetRegisteredEvents()
{
	return (XmemEventMask)libcallmask;
}

XmemEventMask XmemWait(int timeout)
{
	if (libinitialized)
		return routines.Wait(timeout);
	return 0;
}

XmemEventMask XmemPoll()
{
	if (libinitialized)
		return routines.Poll();
	return 0;
}

XmemError XmemSendTable(XmemTableId table, void *buf, int elems,
			int offset, int upflag)
{
	if (!libinitialized)
		return XmemErrorNOT_INITIALIZED;

	if (markers_mask & XmemMarkersDISABLE)
		return routines.SendTable(table, buf, elems,
					  phys_eloff(offset), upflag);

	if (markers_mask & XmemMarkersATOMIC)
		return send_table_atomic(table, buf, elems, offset, upflag);

	return send_table(table, buf, elems, offset, upflag);
}

XmemError XmemRecvTable(XmemTableId table, void *buf, int elems,
			int offset)
{
	XmemError err;

	if (!libinitialized)
		return XmemErrorNOT_INITIALIZED;

	if (markers_mask & XmemMarkersATOMIC)
		return receive_table_atomic(table, buf, elems, offset);

	err = check_markers(table, elems, offset);
	if (err != XmemErrorSUCCESS)
		return err;

	return routines.RecvTable(table, buf, elems, phys_eloff(offset));
}

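/*
 * Table I/O sketch (table id and size are hypothetical): 'elems' and
 * 'offset' are in 4-byte elements, in public (marker-free) addressing.
 * A non-zero upflag asks the backend to signal SEGMENT_UPDATE.
 *
 *	uint32_t buf[256];
 *	XmemSendTable(tid, buf, 256, 0, 1);
 *	XmemRecvTable(tid, buf, 256, 0);
 */
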
XmemError XmemSendMessage(XmemNodeId nodes, XmemMessage *mess)
{
	if (libinitialized)
		return routines.SendMessage(nodes, mess);
	return XmemErrorNOT_INITIALIZED;
}

XmemTableId XmemCheckTables()
{
	if (libinitialized)
		return routines.CheckTables();
	return XmemErrorNOT_INITIALIZED;
}

XmemError XmemErrorCallback(XmemError err, unsigned long ioe)
{
	XmemCallbackStruct cbs;

	if (!libcallback)
		return err;

	bzero((void *)&cbs, sizeof(XmemCallbackStruct));
	switch (err) {

	case XmemErrorSUCCESS:
		break;

	case XmemErrorTIMEOUT:
		cbs.Mask = XmemEventMaskTIMEOUT;
		if (libcallmask & XmemEventMaskTIMEOUT)
			libcallback(&cbs);
		break;

	case XmemErrorIO:
		cbs.Mask = XmemEventMaskIO;
		cbs.Data = ioe;
		if (libcallmask & XmemEventMaskIO)
			libcallback(&cbs);
		break;

	case XmemErrorSYSTEM:
		cbs.Mask = XmemEventMaskSYSTEM;
		cbs.Data = ioe;
		if (libcallmask & XmemEventMaskSYSTEM)
			libcallback(&cbs);
		break;

	default:
		cbs.Mask = XmemEventMaskSOFTWARE;
		cbs.Data = (unsigned long)err;
		if (libcallmask & XmemEventMaskSOFTWARE)
			libcallback(&cbs);
		break;
	}
	return err;
}

int XmemGetKey(char *name)
{
	int i;
	int key;

	key = 0;
	if (NULL == name)
		return key;
	for (i = 0; i < strlen(name); i++)
		key = (key << 1) + (int)name[i];
	return key;
}

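/*
 * The key is built by left-shifting and adding each character. For a
 * (hypothetical) table name "AB": ((0 << 1) + 'A') gives 65, then
 * ((65 << 1) + 'B') gives 196.
 */
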
void *XmemGetSharedMemory(XmemTableId tid)
{
	int tnum, msk;
	unsigned long bytes, smemid;
	int elems;
	uint32_t user;
	key_t smemky;
	XmemError err;
	XmemNodeId nid;
	void *table;
	char *cp;

	if (!libinitialized)
		goto error;

	for (tnum = 0; tnum < XmemMAX_TABLES; tnum++) {
		msk = 1 << tnum;
		if (!(msk & tid))
			continue;
		if (attach_address[tnum])
			return attach_address[tnum];
		else
			break; /* it doesn't exist yet */
	}

	cp = XmemGetTableName(tid);
	if (!cp)
		goto error;

	err = XmemGetTableDesc(tid, &elems, &nid, &user);
	if (err != XmemErrorSUCCESS)
		goto error;

	bytes = elems * sizeof(uint32_t);
	smemky = XmemGetKey(cp);
	smemid = shmget(smemky, bytes, 0666);
	if (smemid == -1) {
		if (ENOENT != errno)
			goto error;
		/* segment does not exist; create it */
		smemid = shmget(smemky, bytes, IPC_CREAT | 0666);
		if (-1 == smemid)
			goto error;
		/* attach memory segment to smemid */
		table = shmat(smemid, (char *)0, 0);
		if ((void *)-1 == table)
			goto error;
		if (tnum < XmemMAX_TABLES)
			attach_address[tnum] = table;
		err = XmemRecvTable(tid, table, elems, 0);
		if (XmemErrorSUCCESS != err)
			goto error;
		return table;
	} else { /* segment was already there */
		table = shmat(smemid, (char *)0, 0);
		if ((void *)-1 == table)
			goto error;
		if (tnum < XmemMAX_TABLES)
			attach_address[tnum] = table;
		return table;
	}
error:
	XmemErrorCallback(XmemErrorSYSTEM, errno);
	return NULL;
}

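/*
 * Usage sketch: attach (or create and populate) the shared-memory copy
 * of one table; the id is hypothetical. A NULL return means the library
 * is not initialized, the table is unknown, or a system error occurred
 * (reported via XmemErrorCallback).
 *
 *	uint32_t *tab = XmemGetSharedMemory(tid);
 *	if (!tab)
 *		return -1;
 */
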
XmemTableId XmemGetAllTableIds()
{
	return seg_tab.Used;
}

char *XmemGetNodeName(XmemNodeId node)
{
	int i;
	unsigned long msk;

	if (!xmem)	/* global variable, means a device is open */
		return (char *)0;

	for (i = 0; i < XmemMAX_NODES; i++) {
		msk = 1 << i;
		if (node_tab.Used & msk && node == node_tab.Descriptors[i].Id)
			return node_tab.Descriptors[i].Name;
	}
	XmemErrorCallback(XmemErrorNO_SUCH_NODE, 0);
	return (char *)0;
}

XmemNodeId XmemGetNodeId(XmemName name)
{
	int i;
	unsigned long msk;

	if (!xmem)
		return 0;

	for (i = 0; i < XmemMAX_NODES; i++) {
		msk = 1 << i;
		if (strcmp(name, node_tab.Descriptors[i].Name) == 0 &&
		    node_tab.Used & msk)
			return node_tab.Descriptors[i].Id;
	}
	XmemErrorCallback(XmemErrorNO_SUCH_NODE, 0);
	return 0;
}

char *XmemGetTableName(XmemTableId table)
{
	int i;
	unsigned long msk;

	if (!xmem)
		return (char *)0;

	for (i = 0; i < XmemMAX_TABLES; i++) {
		msk = 1 << i;
		if (seg_tab.Used & msk && table == seg_tab.Descriptors[i].Id)
			return seg_tab.Descriptors[i].Name;
	}
	XmemErrorCallback(XmemErrorNO_SUCH_TABLE, 0);
	return (char *)0;
}

XmemTableId XmemGetTableId(XmemName name)
{
	int i;
	unsigned long msk;

	if (!xmem)
		return 0;

	for (i = 0; i < XmemMAX_TABLES; i++) {
		msk = 1 << i;
		if (strcmp(name, seg_tab.Descriptors[i].Name) == 0 &&
		    seg_tab.Used & msk)
			return seg_tab.Descriptors[i].Id;
	}
	XmemErrorCallback(XmemErrorNO_SUCH_TABLE, 0);
	return 0;
}

XmemError XmemGetTableDesc(XmemTableId table, int *elems,
			   XmemNodeId *nodes, uint32_t *user)
{
	int i;
	unsigned long msk;

	if (!xmem)
		return XmemErrorCallback(XmemErrorNOT_INITIALIZED, 0);

	for (i = 0; i < XmemMAX_TABLES; i++) {
		msk = 1 << i;
		if (seg_tab.Used & msk && table == seg_tab.Descriptors[i].Id) {
			*elems = priv_to_pub_elems(seg_tab.Descriptors[i].Size /
						   sizeof(uint32_t));
			*nodes = seg_tab.Descriptors[i].Nodes;
			*user = seg_tab.Descriptors[i].User;
			return XmemErrorSUCCESS;
		}
	}
	return XmemErrorCallback(XmemErrorNO_SUCH_TABLE, 0);
}

XmemError XmemReadTableFile(XmemTableId tid)
{
	int i, cnt;
	unsigned long msk;
	unsigned long bytes;
	int elems;
	uint32_t user;
	XmemNodeId nodes;
	XmemError err;
	char *cp;
	char tbnam[64];
	FILE *fp;
	void *table;

	for (i = 0; i < XmemMAX_TABLES; i++) {
		msk = 1 << i;
		if (!(msk & tid))
			continue;
		cp = XmemGetTableName(msk);
		err = XmemGetTableDesc(msk, &elems, &nodes, &user);
		if (XmemErrorSUCCESS != err)
			return err;
		table = XmemGetSharedMemory(msk);
		if (!table)
			return XmemErrorNO_SUCH_TABLE;

		bzero((void *)tbnam, 64);
		strcat(tbnam, cp);
		umask(0);

		fp = fopen(XmemGetFile(tbnam), "r");
		if (!fp)
			return XmemErrorCallback(XmemErrorSYSTEM, errno);

		bytes = elems * sizeof(uint32_t);
		cnt = fread(table, bytes, 1, fp);
		if (cnt <= 0)
			err = XmemErrorCallback(XmemErrorSYSTEM, errno);
		fclose(fp);
		fp = NULL;

		if (XmemErrorSUCCESS != err)
			return err;
	}
	return XmemErrorSUCCESS;
}

XmemError XmemWriteTableFile(XmemTableId tid)
{
	int i, cnt;
	unsigned long msk;
	unsigned long bytes;
	int elems;
	uint32_t user;
	XmemError err;
	XmemNodeId nodes;
	char *cp;
	char tbnam[64];
	void *table;
	FILE *fp;

	for (i = 0; i < XmemMAX_TABLES; i++) {
		msk = 1 << i;
		if (!(msk & tid))
			continue;
		cp = XmemGetTableName(msk);
		err = XmemGetTableDesc(msk, &elems, &nodes, &user);
		if (XmemErrorSUCCESS != err)
			return err;
		table = XmemGetSharedMemory(msk);
		if (!table)
			return XmemErrorNO_SUCH_TABLE;

		bzero((void *)tbnam, 64);
		strcat(tbnam, cp);
		umask(0);

		fp = fopen(XmemGetFile(tbnam), "w");
		if (!fp)
			return XmemErrorCallback(XmemErrorSYSTEM, errno);

		bytes = elems * sizeof(uint32_t);
		cnt = fwrite(table, bytes, 1, fp);
		if (cnt <= 0)
			err = XmemErrorCallback(XmemErrorSYSTEM, errno);
		fclose(fp);
		fp = NULL;

		if (err != XmemErrorSUCCESS)
			return err;
	}
	return XmemErrorSUCCESS;
}

XmemError XmemSendSoftWakeup(uint32_t nodeid, uint32_t data)
{
	if (!libinitialized)
		return XmemErrorNOT_INITIALIZED;
	return routines.SendSoftWakeup(nodeid, data);
}

/**
 * XmemLibUsleep - Sleep for 'dly' microseconds
 *
 * @param dly: desired delay, in us (must be less than 1s, since it is
 * passed to nanosleep() via tv_nsec only)
 */
void XmemLibUsleep(int dly)
{
	struct timespec rqtp, rmtp;

	rqtp.tv_sec = 0;
	rqtp.tv_nsec = dly * 1000;
	nanosleep(&rqtp, &rmtp);
}

XmemMarkersMask XmemSetMarkersMask(XmemMarkersMask mask)
{
	XmemMarkersMask omask = markers_mask;

	/*
	 * mask clean-up: enforce a single ENABLE/DISABLE flag;
	 * one of them is required.
	 */
	if (mask & XmemMarkersENABLE) {
		if (mask & XmemMarkersDISABLE)
			mask = XmemMarkersDISABLE;
	} else {
		mask = XmemMarkersDISABLE;
	}
	mask &= XmemMarkersALL;

	/* checksums are only supported with atomic (bounce-buffer) access */
	if (mask & XmemMarkersCHECKSUM && !(mask & XmemMarkersATOMIC))
		mask &= ~XmemMarkersCHECKSUM;

	if (mask != 0)
		markers_mask = mask;
	return omask;
}
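
/*
 * Usage sketch: enable markers with atomic bounce-buffer access and
 * checksumming (CHECKSUM is only honoured together with ATOMIC, as
 * enforced above). The previous mask is returned.
 *
 *	XmemMarkersMask old;
 *
 *	old = XmemSetMarkersMask(XmemMarkersENABLE | XmemMarkersATOMIC |
 *				 XmemMarkersCHECKSUM);
 */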