/* frv cache model.
   Copyright (C) 1999-2019 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (cache->configured_sets == 0)
        cache->configured_sets = 512;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
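
/* For example, with the fr550 defaults above (128 sets x 4 ways x 64-byte
   lines), 'elements' is 512, so tag_storage holds 512 FRV_CACHE_TAG entries
   and data_storage is 512 * 64 = 32768 bytes (32K) of line data.  */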
void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
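
/* Note that the ICDM reshaping above keeps the total capacity constant:
   e.g. a 128-set x 4-way instruction cache becomes a 512-set x 1-way
   (direct mapped) cache, 512 lines either way.  */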
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address is RAM access, assuming that HSR0.RME
   is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
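
/* For example, with the insn cache window above, way_size is
   (0xfe008000 - 0xfe000000) / 4 = 0x2000 (8K per way); if ICWE is 2, 'end'
   drops to 0xfe004000, so only the first two way-sized blocks of the
   window are treated as RAM.  */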
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into. Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address. At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
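
/* The set computation above shifts 'set' right once per bit of
   (line_size - 1), i.e. by log2(line_size).  Assuming the power-of-two
   geometry set up in frv_cache_init (e.g. line_size 64, sets 128), it
   reduces to set = (address >> 6) & 127.  */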
/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}
/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
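
/* This is the inverse of the set computation in get_tag: the stored tag
   supplies the high address bits, shifting the set number left by
   log2(line_size) restores the middle bits, and the line offset bits are
   zero since a whole line is written back.  */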
static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}
/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
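
/* Within a set, 'lru' acts as a rank, larger meaning more recently used.
   set_most_recently_used demotes by one every line ranked above the touched
   line before giving it the top rank (cache->ways); set_least_recently_used
   does the inverse, pushing the line to rank 0 so it becomes the preferred
   victim in get_tag.  */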
/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were used and locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}
static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}
static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}
/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;
  FRV_CACHE_TAG *tag;
  int found;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
    }

  return 1; /* TODO - number of cycles unknown */
}
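
/* In write-through mode (HSR0.CBM clear) a store updates both the cache
   line and memory, so lines never stay dirty; in copy-back mode the line is
   only marked dirty here and is written back later by fill_line_from_memory
   or by an explicit flush.  */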
/* Preload the cache line containing the given address.  Lock the
   line if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}
/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}
static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}
/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;
  SIM_CPU *cpu;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* See if this data is already in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   -------------------------------------------------------------------------  */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_FM0:
      return LS;
    case UNIT_I1:
    case UNIT_FM1:
      return LD;
    default:
      abort ();
    }
  return 0;
}
/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;
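
/* Requests are recycled through these free chains rather than freed.  Store
   requests are kept on their own chain so that a recycled request's data
   buffer can be reused when its length matches; see free_cache_request and
   new_store_request below.  */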
static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}
/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}
/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}
/* Search the free chain for an existing store request with a buffer that's
   large enough.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}
/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}
/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}
/* Requeue the given request from the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}
/* Return the priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
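
/* For example, if the numerically largest (i.e. lowest) priority found
   above is 7, the new request gets priority 9; a WAR request it spawns
   later runs at priority - 1 = 8, which still sits between the existing
   requests and the new one.  */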
static void
add_WAR_request (FRV_CACHE_PIPELINE * pipeline, FRV_CACHE_WAR *war)
{
  /* Add the load request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}
/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}
/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}
static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}
/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}
/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}
/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}
/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline or to a higher priority request in the same pipeline.
      */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL)
                {
                  if (other_req->kind == req_WAR)
                    return 1;
                  if (i == pipe
                      && (address == (other_req->address & line_mask)
                          || address == all_address)
                      && priority > other_req->priority)
                    return 1;
                }
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}
/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE * cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        default:
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        default:
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}
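
/* BARS and NARS thus act as a two-deep overflow queue for requests that
   find every WAR register busy; when a WAR frees up, move_ARS_to_WAR
   (below) promotes BARS into it and shifts NARS into BARS.  */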
/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE * cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    default:
      break;
    }
  pipeline->WAR[war].valid = 1;
}
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI cur_address;
  SI address = req->address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  /* Careful with this loop -- length is unsigned.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the meantime.
          */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
    }
}
static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}
static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats before
         and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}
/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}
/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to the
                 pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}
/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
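
/* A typical cycle-accurate caller queues a request and then runs the cache,
   e.g. (assuming UNIT_I0 is a slot that convert_slot_to_index maps to
   pipeline LS):

     frv_cache_request_load (cache, reqno, address, UNIT_I0);
     frv_cache_run (cache, 1);
     if (frv_cache_data_in_buffer (cache, LS, address, reqno))
       ... the loaded line is now in the LS return buffer ...

   Requests that miss wait out cache->memory_latency cycles in a WAR
   register before their data appears.  */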
int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  int offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}
/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE *cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}
1661 frv_cache_data_flushed (FRV_CACHE
* cache
, int pipe
, SI address
, unsigned reqno
)
1663 return cache
->pipeline
[pipe
].status
.flush
.valid
1664 && cache
->pipeline
[pipe
].status
.flush
.reqno
== reqno
1665 && cache
->pipeline
[pipe
].status
.flush
.address
<= address
1666 && cache
->pipeline
[pipe
].status
.flush
.address
+ cache
->line_size