/* frv cache model.
   Copyright (C) 1999-2019 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"

void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
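
/* For reference, the default geometries above work out as follows
   (sets x ways x line size):
     fr400/fr450: 512 x 2 x 32 bytes = 32KB
     fr550:       128 x 4 x 64 bytes = 32KB
     other:        64 x 4 x 64 bytes = 16KB  */
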
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
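
/* Worked example of the arithmetic above, for the fr550 insn cache:
   start = 0xfe000000 and end = 0xfe008000, so way_size = 0x8000 / 4
   = 0x2000.  With ICWE == 3, end becomes 0xfe008000 - 3 * 0x2000
   = 0xfe002000, so only addresses 0xfe000000..0xfe001fff are reported
   as RAM access.  */
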
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
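
/* Worked example of the set calculation above, assuming the default
   geometry of 64 sets with 64-byte lines: the loop shifts the address
   right by log2 (line_size) = 6 bits and the mask keeps
   log2 (sets) = 6 bits.  For address 0x12345678:
     0x12345678 >> 6 = 0x48d159
     0x48d159 & 63   = 0x19, so the address falls in set 25.  */
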
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
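
/* LRU bookkeeping sketch, assuming ways == 4: within a set, tag->lru == 4
   marks the most recently used way and 0 the least recently used.
   set_most_recently_used decrements every tag that ranked above the
   touched one and then puts the touched tag at 4; set_least_recently_used
   shifts the non-zero tags below it up by one and puts the touched tag
   at 0.  get_tag picks its eviction candidate as the unlocked tag with
   the smallest lru value.  */
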
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
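
/* Example of the line count above, assuming a 64-byte line: preloading
   length == 8 starting at offset 60 within a line spans
   1 + (60 + 8 - 1) / 64 = 2 lines, since the last byte falls in the
   following line.  */
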
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* Walk every tag in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */

/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   required length.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
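
/* For example, if the numerically largest (lowest) priority found above
   is 7, this returns 9.  A WAR request later spawned by the new request
   takes priority 9 - 1 = 8 (see wait_in_WAR below), which still ranks
   between the existing request at 7 and the new one at 9.  */
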
static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same
         pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
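
/* A request parked in a WAR waits memory_latency + 1 cycles (counted down
   in decrease_latencies below); when the latency expires it is resubmitted
   to the pipeline as a req_WAR at a priority one higher than the request
   that spawned it.  If no WAR is free, the request overflows into BARS,
   then NARS, and is simply requeued once both are busy.  */
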
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
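
/* Usage sketch for the cycle-accurate interface (illustrative only, not
   part of the simulator): a caller issues a request into a pipeline and
   then steps the cache until the data shows up in the return buffer:

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
       frv_cache_run (cache, 1);

   Each cycle of frv_cache_run shifts the pipeline stages forward, executes
   the requests that reach the last stage and counts down the memory
   latency of any pending WAR entries.  */
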
/* Read an SI value from the cache without affecting its state or
   statistics (a passive read).  */
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    /* Peek at the tag without disturbing the hit/miss statistics.  */
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE *cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}