/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
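
/* Illustrative sketch, not part of the original file and kept out of the
   build: how the dimensions configured above determine total capacity.
   The same sets * ways product sizes tag_storage and data_storage.  With
   the fr550 defaults (128 sets, 4 ways, 64-byte lines) this is 32KB.  */
#if 0
static unsigned int
example_cache_capacity (const FRV_CACHE *cache)
{
  /* Total bytes of data storage: one line per (set, way) pair.  */
  return cache->sets * cache->ways * cache->line_size;
}
#endif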
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
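
/* Worked example, not from the original file: on the fr550, address
   0xfe004000 falls in 0xfe000000..0xfe007fff, so it bypasses the insn
   cache, yet the same address is cacheable by the data cache, whose
   bypass window only starts at 0xfe400000.  Addresses at or above
   0xff000000 bypass both caches on every variant.  */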
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
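
/* Illustrative sketch, not part of the original file and kept out of the
   build: the shift loop in get_tag divides the address by the power-of-two
   line size, so the set index is just the low-order bits of the line
   number.  For a 64-byte line and 128 sets, address 0x1234 maps to set
   (0x1234 >> 6) & 127 = 72.  */
#if 0
static int
example_set_index (FRV_CACHE *cache, SI address)
{
  int shift = 0;
  int bits;
  /* Count log2 (line_size) exactly as the loop in get_tag does.  */
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    ++shift;
  /* Cast to USI so a negative SI address is not right-shifted.  */
  return (int) (((USI) address >> shift) & (cache->sets - 1));
}
#endif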
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}
/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
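
/* Worked example, not from the original file: with 4 ways, the lru ranks
   in a set might be {4, 3, 2, 1}.  Touching the way ranked 2 through
   set_most_recently_used decrements the ranks above 2 and re-ranks that
   way as cache->ways, giving {3, 2, 4, 1}.  get_tag then picks its
   eviction candidate as the unlocked way with the smallest rank.  */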
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;
  int found;
  FRV_CACHE_TAG *tag;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
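
/* Summary sketch, not from the original file, of the write policy
   implemented above (copy_back reflects HSR0.CBM):

     hit,  copy-back      write the line only and mark it dirty
     hit,  write-through  write the line and memory, clear dirty
     miss, copy-back      fill the line from memory, then as a hit
     miss, write-through  write memory only
     no unlocked way      write memory only  */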
/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
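
/* Worked example, not from the original file: with a 64-byte line,
   preloading length 100 from address 0x1010 gives offset 0x10, so
   lines = 1 + (16 + 100 - 1) / 64 = 2; the span 0x1010..0x1073 indeed
   touches the two lines at 0x1000 and 0x1040.  */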
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* See if this data is already in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for(i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);

      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_FM0:
      return LS;
    case UNIT_I1:
    case UNIT_FM1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;
static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}
/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
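
/* Illustrative sketch, not part of the original file and kept out of the
   build: the intended life cycle of a request through the free chains
   above.  Store requests carry a data buffer, so they are recycled on
   their own chain rather than the generic one.  */
#if 0
static void
example_request_life_cycle (void)
{
  FRV_CACHE_REQUEST *req = new_cache_request (); /* pop the generic chain */
  req->kind = req_load;
  /* ... fill in and queue the request, let the pipeline process it ... */
  free_cache_request (req); /* pushed back on the chain matching its kind */
}
#endif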
/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;
  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;
  if (next != NULL)
    next->prev = prev;
}
/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
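
/* Worked example, not from the original file: inserting a request of
   priority 5 into a queue holding priorities (2, 4, 6) yields
   (2, 4, 5, 6).  Because the scan only stops at a strictly greater
   priority, equal-priority requests keep their FIFO order.  */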
/* Requeue the given request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
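
/* Worked example, not from the original file: if the numerically largest
   priority found above is 7, next_priority returns 9 for the new request.
   A WAR later spawned by that request is given priority 8 (see
   wait_in_WAR), slotting between the existing requests and the request
   that spawned it.  */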
static void
add_WAR_request (FRV_CACHE_PIPELINE *pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}
/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}
/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR register to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }

  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
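
/* Illustrative sketch, not part of the original file and kept out of the
   build: how a caller might drive the cycle accurate interface.  The
   request number and the UNIT_I0 slot are assumptions for the example;
   UNIT_I0 issues into the LS pipeline via convert_slot_to_index.  */
#if 0
static void
example_cycle_accurate_load (SIM_CPU *current_cpu, unsigned reqno, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  frv_cache_request_load (cache, reqno, address, UNIT_I0);
  /* Advance the model one cycle at a time until the return buffer holds
     the requested data.  */
  while (! frv_cache_data_in_buffer (cache, LS, address, reqno))
    frv_cache_run (cache, 1);
}
#endif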
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
1662 frv_cache_data_flushed (FRV_CACHE
* cache
, int pipe
, SI address
, unsigned reqno
)
1664 return cache
->pipeline
[pipe
].status
.flush
.valid
1665 && cache
->pipeline
[pipe
].status
.flush
.reqno
== reqno
1666 && cache
->pipeline
[pipe
].status
.flush
.address
<= address
1667 && cache
->pipeline
[pipe
].status
.flush
.address
+ cache
->line_size