target/mips/lmmi_helper.c

/*
 * Loongson Multimedia Instruction emulation helpers for QEMU.
 *
 * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"

/*
 * If the byte ordering doesn't matter, i.e. all columns are treated
 * identically, then this union can be used directly.  If byte ordering
 * does matter, we generally ignore dumping to memory.
 */
typedef union {
    uint8_t  ub[8];
    int8_t   sb[8];
    uint16_t uh[4];
    int16_t  sh[4];
    uint32_t uw[2];
    int32_t  sw[2];
    uint64_t d;
} LMIValue;

/* Some byte ordering issues can be mitigated by XORing in the following. */
#ifdef HOST_WORDS_BIGENDIAN
# define BYTE_ORDER_XOR(N) N
#else
# define BYTE_ORDER_XOR(N) 0
#endif
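
/*
 * Example: element 0 is meant to be the least significant halfword of
 * the 64-bit value.  On a little-endian host uh[0] already is, so the
 * XOR is a no-op; on a big-endian host uh[3] is, and XORing the index
 * with BYTE_ORDER_XOR(3) (or BYTE_ORDER_XOR(7) for bytes) compensates.
 */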

#define SATSB(x)  (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
/* Unsigned saturation must also clamp negative results to zero.  */
#define SATUB(x)  (x < 0 ? 0 : x > 0xff ? 0xff : x)

#define SATSH(x)  (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
#define SATUH(x)  (x < 0 ? 0 : x > 0xffff ? 0xffff : x)

#define SATSW(x) \
    (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
#define SATUW(x)  (x > 0xffffffffull ? 0xffffffffull : x)
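
/*
 * For example, SATSB(0x95) == 0x7f, SATSB(-0x95) == -0x80, and
 * SATUB(-1) == 0; values already in range pass through unchanged.
 */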

uint64_t helper_paddsb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        int r = vs.sb[i] + vt.sb[i];
        vs.sb[i] = SATSB(r);
    }
    return vs.d;
}

uint64_t helper_paddusb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        int r = vs.ub[i] + vt.ub[i];
        vs.ub[i] = SATUB(r);
    }
    return vs.d;
}

uint64_t helper_paddsh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        int r = vs.sh[i] + vt.sh[i];
        vs.sh[i] = SATSH(r);
    }
    return vs.d;
}

uint64_t helper_paddush(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        int r = vs.uh[i] + vt.uh[i];
        vs.uh[i] = SATUH(r);
    }
    return vs.d;
}

uint64_t helper_paddb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        vs.ub[i] += vt.ub[i];
    }
    return vs.d;
}

uint64_t helper_paddh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        vs.uh[i] += vt.uh[i];
    }
    return vs.d;
}

uint64_t helper_paddw(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 2; ++i) {
        vs.uw[i] += vt.uw[i];
    }
    return vs.d;
}

uint64_t helper_psubsb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        int r = vs.sb[i] - vt.sb[i];
        vs.sb[i] = SATSB(r);
    }
    return vs.d;
}

uint64_t helper_psubusb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        int r = vs.ub[i] - vt.ub[i];
        vs.ub[i] = SATUB(r);
    }
    return vs.d;
}

uint64_t helper_psubsh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        int r = vs.sh[i] - vt.sh[i];
        vs.sh[i] = SATSH(r);
    }
    return vs.d;
}

uint64_t helper_psubush(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        int r = vs.uh[i] - vt.uh[i];
        vs.uh[i] = SATUH(r);
    }
    return vs.d;
}

uint64_t helper_psubb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        vs.ub[i] -= vt.ub[i];
    }
    return vs.d;
}

uint64_t helper_psubh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        vs.uh[i] -= vt.uh[i];
    }
    return vs.d;
}

uint64_t helper_psubw(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned int i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 2; ++i) {
        vs.uw[i] -= vt.uw[i];
    }
    return vs.d;
}
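
/*
 * PSHUFH: each 2-bit field of the immediate in ft selects which source
 * halfword of fs supplies the corresponding destination halfword.
 */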
uint64_t helper_pshufh(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(3);
    LMIValue vd, vs;
    unsigned i;

    vs.d = fs;
    vd.d = 0;
    for (i = 0; i < 4; i++, ft >>= 2) {
        vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host];
    }
    return vd.d;
}

uint64_t helper_packsswh(uint64_t fs, uint64_t ft)
{
    uint64_t fd = 0;
    int64_t tmp;

    tmp = (int32_t)(fs >> 0);
    tmp = SATSH(tmp);
    fd |= (tmp & 0xffff) << 0;

    tmp = (int32_t)(fs >> 32);
    tmp = SATSH(tmp);
    fd |= (tmp & 0xffff) << 16;

    tmp = (int32_t)(ft >> 0);
    tmp = SATSH(tmp);
    fd |= (tmp & 0xffff) << 32;

    tmp = (int32_t)(ft >> 32);
    tmp = SATSH(tmp);
    fd |= (tmp & 0xffff) << 48;

    return fd;
}

uint64_t helper_packsshb(uint64_t fs, uint64_t ft)
{
    uint64_t fd = 0;
    unsigned int i;

    for (i = 0; i < 4; ++i) {
        int16_t tmp = fs >> (i * 16);
        tmp = SATSB(tmp);
        fd |= (uint64_t)(tmp & 0xff) << (i * 8);
    }
    for (i = 0; i < 4; ++i) {
        int16_t tmp = ft >> (i * 16);
        tmp = SATSB(tmp);
        fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
    }

    return fd;
}

uint64_t helper_packushb(uint64_t fs, uint64_t ft)
{
    uint64_t fd = 0;
    unsigned int i;

    for (i = 0; i < 4; ++i) {
        int16_t tmp = fs >> (i * 16);
        tmp = SATUB(tmp);
        fd |= (uint64_t)(tmp & 0xff) << (i * 8);
    }
    for (i = 0; i < 4; ++i) {
        int16_t tmp = ft >> (i * 16);
        tmp = SATUB(tmp);
        fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
    }

    return fd;
}

uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft)
{
    return (fs & 0xffffffff) | (ft << 32);
}

uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft)
{
    return (fs >> 32) | (ft & ~0xffffffffull);
}

uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(3);
    LMIValue vd, vs, vt;

    vs.d = fs;
    vt.d = ft;
    vd.uh[0 ^ host] = vs.uh[0 ^ host];
    vd.uh[1 ^ host] = vt.uh[0 ^ host];
    vd.uh[2 ^ host] = vs.uh[1 ^ host];
    vd.uh[3 ^ host] = vt.uh[1 ^ host];

    return vd.d;
}

uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(3);
    LMIValue vd, vs, vt;

    vs.d = fs;
    vt.d = ft;
    vd.uh[0 ^ host] = vs.uh[2 ^ host];
    vd.uh[1 ^ host] = vt.uh[2 ^ host];
    vd.uh[2 ^ host] = vs.uh[3 ^ host];
    vd.uh[3 ^ host] = vt.uh[3 ^ host];

    return vd.d;
}

uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(7);
    LMIValue vd, vs, vt;

    vs.d = fs;
    vt.d = ft;
    vd.ub[0 ^ host] = vs.ub[0 ^ host];
    vd.ub[1 ^ host] = vt.ub[0 ^ host];
    vd.ub[2 ^ host] = vs.ub[1 ^ host];
    vd.ub[3 ^ host] = vt.ub[1 ^ host];
    vd.ub[4 ^ host] = vs.ub[2 ^ host];
    vd.ub[5 ^ host] = vt.ub[2 ^ host];
    vd.ub[6 ^ host] = vs.ub[3 ^ host];
    vd.ub[7 ^ host] = vt.ub[3 ^ host];

    return vd.d;
}

uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(7);
    LMIValue vd, vs, vt;

    vs.d = fs;
    vt.d = ft;
    vd.ub[0 ^ host] = vs.ub[4 ^ host];
    vd.ub[1 ^ host] = vt.ub[4 ^ host];
    vd.ub[2 ^ host] = vs.ub[5 ^ host];
    vd.ub[3 ^ host] = vt.ub[5 ^ host];
    vd.ub[4 ^ host] = vs.ub[6 ^ host];
    vd.ub[5 ^ host] = vt.ub[6 ^ host];
    vd.ub[6 ^ host] = vs.ub[7 ^ host];
    vd.ub[7 ^ host] = vt.ub[7 ^ host];

    return vd.d;
}

uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; i++) {
        vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
    }
    return vs.d;
}

uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; i++) {
        vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
    }
    return vs.d;
}

uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; i++) {
        vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
    }
    return vs.d;
}

uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; i++) {
        vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
    }
    return vs.d;
}

uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; i++) {   /* all eight byte lanes */
        vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
    }
    return vs.d;
}

uint64_t helper_pminub(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; i++) {   /* all eight byte lanes */
        vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 2; i++) {
        vs.uw[i] = -(vs.uw[i] == vt.uw[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 2; i++) {
        vs.uw[i] = -(vs.uw[i] > vt.uw[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; i++) {
        vs.uh[i] = -(vs.uh[i] == vt.uh[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; i++) {
        vs.uh[i] = -(vs.uh[i] > vt.uh[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; i++) {
        vs.ub[i] = -(vs.ub[i] == vt.ub[i]);
    }
    return vs.d;
}

uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; i++) {
        vs.ub[i] = -(vs.ub[i] > vt.ub[i]);
    }
    return vs.d;
}
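
/*
 * Shift helpers: the shift amount in ft is first masked to 7 bits.
 * An amount larger than the element width then yields zero for the
 * logical shifts, while the arithmetic shifts clamp it to the element
 * width minus one, i.e. a pure sign-fill.
 */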
uint64_t helper_psllw(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 31) {
        return 0;
    }
    vs.d = fs;
    for (i = 0; i < 2; ++i) {
        vs.uw[i] <<= ft;
    }
    return vs.d;
}

uint64_t helper_psrlw(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 31) {
        return 0;
    }
    vs.d = fs;
    for (i = 0; i < 2; ++i) {
        vs.uw[i] >>= ft;
    }
    return vs.d;
}

uint64_t helper_psraw(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 31) {
        ft = 31;
    }
    vs.d = fs;
    for (i = 0; i < 2; ++i) {
        vs.sw[i] >>= ft;
    }
    return vs.d;
}

uint64_t helper_psllh(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 15) {
        return 0;
    }
    vs.d = fs;
    for (i = 0; i < 4; ++i) {
        vs.uh[i] <<= ft;
    }
    return vs.d;
}

uint64_t helper_psrlh(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 15) {
        return 0;
    }
    vs.d = fs;
    for (i = 0; i < 4; ++i) {
        vs.uh[i] >>= ft;
    }
    return vs.d;
}

uint64_t helper_psrah(uint64_t fs, uint64_t ft)
{
    LMIValue vs;
    unsigned i;

    ft &= 0x7f;
    if (ft > 15) {
        ft = 15;
    }
    vs.d = fs;
    for (i = 0; i < 4; ++i) {
        vs.sh[i] >>= ft;
    }
    return vs.d;
}

uint64_t helper_pmullh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        vs.sh[i] *= vt.sh[i];
    }
    return vs.d;
}

uint64_t helper_pmulhh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        int32_t r = vs.sh[i] * vt.sh[i];
        vs.sh[i] = r >> 16;
    }
    return vs.d;
}

uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 4; ++i) {
        /* Promote before multiplying so the 16x16->32 product
           cannot overflow a signed int.  */
        uint32_t r = (uint32_t)vs.uh[i] * vt.uh[i];
        vs.uh[i] = r >> 16;
    }
    return vs.d;
}
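
/*
 * PMADDHW: multiply the four signed halfword pairs and add the
 * products pairwise, producing two 32-bit sums packed into the
 * 64-bit result.
 */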
uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft)
{
    unsigned host = BYTE_ORDER_XOR(3);
    LMIValue vs, vt;
    uint32_t p0, p1;

    vs.d = fs;
    vt.d = ft;
    p0  = vs.sh[0 ^ host] * vt.sh[0 ^ host];
    p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host];
    p1  = vs.sh[2 ^ host] * vt.sh[2 ^ host];
    p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host];

    return ((uint64_t)p1 << 32) | p0;
}

uint64_t helper_pasubub(uint64_t fs, uint64_t ft)
{
    LMIValue vs, vt;
    unsigned i;

    vs.d = fs;
    vt.d = ft;
    for (i = 0; i < 8; ++i) {
        int r = vs.ub[i] - vt.ub[i];
        vs.ub[i] = (r < 0 ? -r : r);
    }
    return vs.d;
}
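
/* BIADD: horizontal sum of all eight unsigned bytes of fs. */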
uint64_t helper_biadd(uint64_t fs)
{
    unsigned i, fd;

    for (i = fd = 0; i < 8; ++i) {
        fd += (fs >> (i * 8)) & 0xff;
    }
    return fd & 0xffff;
}
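
/* PMOVMSKB: gather the most significant bit of each byte into bits 0..7. */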
uint64_t helper_pmovmskb(uint64_t fs)
{
    unsigned fd = 0;

    fd |= ((fs >> 7) & 1) << 0;
    fd |= ((fs >> 15) & 1) << 1;
    fd |= ((fs >> 23) & 1) << 2;
    fd |= ((fs >> 31) & 1) << 3;
    fd |= ((fs >> 39) & 1) << 4;
    fd |= ((fs >> 47) & 1) << 5;
    fd |= ((fs >> 55) & 1) << 6;
    fd |= ((fs >> 63) & 1) << 7;

    return fd & 0xff;
}