/* frv cache model.
   Copyright (C) 1999-2024 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* This must come before any other includes.  */
#include "defs.h"

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
#include <stdlib.h>
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
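
/* Editor's note: an illustrative sketch, not part of the original
   sources, showing how the geometry fields initialized above determine
   total cache capacity.  With the default settings used above (64 sets,
   4 ways, 64-byte lines) this works out to 16KB.  */
#if 0
static unsigned
example_cache_capacity (const FRV_CACHE *cache)
{
  /* Capacity in bytes = sets * ways * bytes-per-line.  */
  return cache->sets * cache->ways * cache->line_size;
}
#endif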
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      ATTRIBUTE_FALLTHROUGH;
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
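
/* Editor's note: illustrative only, not part of the original sources.
   The ranges tested above encode the FRV memory map: everything at or
   above 0xff000000, the I/O region at 0xfeff0000..0xfeffffff, and the
   per-machine on-chip insn/data RAM windows at 0xfe000000/0xfe400000
   bypass the cache.  A standalone check of one such range:  */
#if 0
static int
example_in_io_region (USI address)
{
  return address >= 0xfeff0000 && address <= 0xfeffffff;
}
#endif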
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
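
/* Editor's note: an illustrative sketch, not part of the original
   sources, of the set-index computation performed by get_tag above.
   For power-of-two line_size and sets it reduces to a shift and a
   mask; e.g. with 64-byte lines and 128 sets, address 0x12345 maps to
   set (0x12345 >> 6) & 0x7f == 0xd.  */
#if 0
static int
example_set_index (SI address, int line_size, int sets)
{
  /* Discard the offset-within-line bits, then keep log2(sets) bits.  */
  return (address / line_size) & (sets - 1);
}
#endif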
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}
/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
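
/* Editor's note: an illustrative sketch, not part of the original
   sources, of the LRU bookkeeping used by the two functions above.
   Each tag in a set carries an lru counter; larger means more recently
   used, with 'ways' marking the most recent and 0 the least recent.
   Promoting one way simply demotes every way that outranked it:  */
#if 0
static void
example_lru_promote (int lru[], int ways, int way)
{
  int i;
  for (i = 0; i < ways; ++i)
    if (lru[i] > lru[way])
      --lru[i];
  lru[way] = ways; /* Now the most recently used.  */
}
#endif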
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
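
/* Editor's note: an illustrative check, not part of the original
   sources, of the line-count arithmetic above.  A preload of length
   100 starting at offset 40 into a 64-byte line covers 24 bytes of the
   first line, 64 of the second and 12 of the third, i.e.
   1 + (40 + 100 - 1) / 64 == 3 lines.  */
#if 0
static int
example_lines_spanned (SI address, USI length, int line_size)
{
  int offset = address & (line_size - 1);
  return 1 + (offset + length - 1) / line_size;
}
#endif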
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);

      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE  16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;
static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}
/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}
/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
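
/* Editor's note: an illustrative sketch, not part of the original
   sources, of the doubly-linked, priority-ordered queue maintained
   above.  An incoming request is inserted after the last entry whose
   priority is numerically <= its own, so priority 0 stays at the head
   and equal priorities preserve submission order.  An invariant check:  */
#if 0
static int
example_queue_is_sorted (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *item;
  for (item = p->requests; item != NULL && item->next != NULL;
       item = item->next)
    if (item->priority > item->next->priority)
      return 0; /* Out of order -- should never happen.  */
  return 1;
}
#endif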
/* Requeue the given request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
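
/* Editor's note: illustrative only, not part of the original sources.
   The gap of 2 left by next_priority above means that if the lowest
   (numerically largest) priority in flight is 6, a new request is
   assigned 8, leaving 7 free for a WAR entry spawned on its behalf
   (WAR entries take the spawning request's priority minus 1):  */
#if 0
static void
example_priority_gap (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int p = next_priority (cache, pipeline); /* lowest in flight + 2 */
  int war_priority = p - 1;                /* slots into the gap */
  (void) war_priority;
}
#endif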
static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}
/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}
/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in BARS or NARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  int lock;
  int length;
  int offset;
  int lines;
  int line;
  FRV_CACHE_TAG *tag;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
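
/* Editor's note: an illustrative sketch, not part of the original
   sources, of the cycle-accurate driver loop implied by the API above:
   queue a load on a pipeline, then run the cache one cycle at a time
   until the requested line shows up in that pipeline's return buffer.
   The request number 42 is arbitrary.  */
#if 0
static void
example_timed_load (FRV_CACHE *cache, SI address, int slot)
{
  unsigned reqno = 42;
  int pipe = convert_slot_to_index (slot);

  frv_cache_request_load (cache, reqno, address, slot);
  while (! frv_cache_data_in_buffer (cache, pipe, address, reqno))
    frv_cache_run (cache, 1);
}
#endif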
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
1659 frv_cache_data_flushed (FRV_CACHE
* cache
, int pipe
, SI address
, unsigned reqno
)
1661 return cache
->pipeline
[pipe
].status
.flush
.valid
1662 && cache
->pipeline
[pipe
].status
.flush
.reqno
== reqno
1663 && cache
->pipeline
[pipe
].status
.flush
.address
<= address
1664 && cache
->pipeline
[pipe
].status
.flush
.address
+ cache
->line_size