void factor_vm::deallocate_inline_cache(cell return_address) {
  // Find the call target.
  void* old_entry_point = get_call_target(return_address);
  code_block* old_block = (code_block*)old_entry_point - 1;

  // Free the old PIC since we know it is unreachable.
  if (old_block->pic_p())
    code->free(old_block);
}

// Figure out what kind of type check the PIC needs based on the methods
// it contains.
static cell determine_inline_cache_type(array* cache_entries) {
  for (cell i = 0; i < array_capacity(cache_entries); i += 2) {
    // Is it a tuple layout?
    if (TAG(array_nth(cache_entries, i)) == ARRAY_TYPE)
      return PIC_TUPLE;
  }
  return PIC_TAG;
}
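
// Illustrative sketch (not code from this file): the cache_entries array the
// loop above walks is a flat sequence of class/method pairs, roughly
//
//   { class-0, method-0, class-1, method-1, ... }
//
// where a class stored as a fixnum is a built-in type tag and a class stored
// as an array is a tuple layout; a single tuple-layout entry is enough to
// force the PIC_TUPLE check.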

void factor_vm::update_pic_count(cell type) {
  if (type == PIC_TAG)
    dispatch_stats.pic_tag_count++;
  else
    dispatch_stats.pic_tuple_count++;
}

struct inline_cache_jit : public jit {
  inline_cache_jit(cell generic_word, factor_vm* vm) : jit(generic_word, vm) {}

  void emit_check_and_jump(cell ic_type, cell i, cell klass, cell method);
  void emit_inline_cache(fixnum index, cell generic_word_, cell methods_,
                         cell cache_entries_, bool tail_call_p);
};

void inline_cache_jit::emit_check_and_jump(cell ic_type, cell i,
                                           cell klass, cell method) {
  cell check_type = PIC_CHECK_TAG;
  if (TAG(klass) != FIXNUM_TYPE)
    check_type = PIC_CHECK_TUPLE;

  // The tag check can be skipped if it is the first one and we are
  // checking for the fixnum type, which is 0. That is because the
  // AND instruction in the PIC_TAG template already sets the zero
  // flag.
  if (!(i == 0 && ic_type == PIC_TAG && klass == 0)) {
    emit_with_literal(parent->special_objects[check_type], klass);
  }

  // Yes? Jump to the method.
  emit_with_literal(parent->special_objects[PIC_HIT], method);
}
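
// Rough shape of what the two emits above expand to (the machine-code
// templates themselves live with the other PIC_* JIT templates, not in this
// file): the check template compares the cached class literal against the
// tag or tuple layout loaded earlier, and PIC_HIT jumps to the method's
// entry point when the comparison succeeded.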

// index: 0 = top of stack, 1 = item underneath, etc.
// cache_entries: array of class/method pairs
void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
                                         cell methods_, cell cache_entries_,
                                         bool tail_call_p) {
  data_root<word> generic_word(generic_word_, parent);
  data_root<array> methods(methods_, parent);
  data_root<array> cache_entries(cache_entries_, parent);

  cell ic_type = determine_inline_cache_type(cache_entries.untagged());
  parent->update_pic_count(ic_type);

  // Generate machine code to determine the object's class.
  emit_with_literal(parent->special_objects[PIC_LOAD],
                    tag_fixnum(-index * sizeof(cell)));

  // Put the tag of the object, or the class of the tuple, in a register.
  emit(parent->special_objects[ic_type]);

  // Generate machine code to check, in turn, if the class is one of the
  // cached entries.
  for (cell i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
    cell klass = array_nth(cache_entries.untagged(), i);
    cell method = array_nth(cache_entries.untagged(), i + 1);

    emit_check_and_jump(ic_type, i, klass, method);
  }

  // If none of the above checks passed, execution "falls through" to the
  // code below.

  // A stack frame is set up, since the inline-cache-miss sub-primitive
  // makes a subroutine call to the VM.
  emit(parent->special_objects[JIT_PROLOG]);

  // The inline-cache-miss sub-primitive call receives enough information to
  // reconstruct the PIC with the new entry.
  push(generic_word.value());
  push(methods.value());
  push(tag_fixnum(index));
  push(cache_entries.value());

  emit_subprimitive(
      parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
      true,   // tail_call_p
      true);  // stack_frame_p
}
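
// Illustrative sketch (pseudo-assembly, not emitted verbatim) of the stub
// emit_inline_cache() builds for a two-entry cache:
//
//   load the object at datastack[-index]
//   compute its tag (PIC_TAG) or tuple layout (PIC_TUPLE)
//   compare against class 0   -> on match, jump to method 0
//   compare against class 1   -> on match, jump to method 1
//   set up a stack frame, push generic-word/methods/index/cache-entries,
//   and call the inline-cache-miss sub-primitive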

cell factor_vm::add_inline_cache_entry(cell cache_entries_, cell klass_,
                                       cell method_) {
  data_root<array> cache_entries(cache_entries_, this);
  data_root<object> klass(klass_, this);
  data_root<word> method(method_, this);

  cell pic_size = array_capacity(cache_entries.untagged());
  data_root<array> new_cache_entries(
      reallot_array(cache_entries.untagged(), pic_size + 2), this);
  set_array_nth(new_cache_entries.untagged(), pic_size, klass.value());
  set_array_nth(new_cache_entries.untagged(), pic_size + 1, method.value());
  return new_cache_entries.value();
}

void factor_vm::update_pic_transitions(cell pic_size) {
  if (pic_size == max_pic_size)
    dispatch_stats.pic_to_mega_transitions++;
  else if (pic_size == 0)
    dispatch_stats.cold_call_to_ic_transitions++;
  else if (pic_size == 1)
    dispatch_stats.ic_to_pic_transitions++;
}

// The cache_entries parameter is empty (on a cold call site) or has entries
// (on a cache miss). Called from assembly with the actual return address.
// Compilation of the inline cache may trigger a GC, which may trigger a
// compaction; also, the block containing the return address may now be dead.
// Use a code_root to take care of the details.
cell factor_vm::inline_cache_miss(cell return_address_) {
  code_root return_address(return_address_, this);
  bool tail_call_site = tail_call_site_p(return_address.value);

#ifdef PIC_DEBUG
  FACTOR_PRINT("Inline cache miss at "
               << (tail_call_site ? "tail" : "non-tail")
               << " call site 0x" << std::hex << return_address.value
               << std::dec);
#endif

  data_root<array> cache_entries(ctx->pop(), this);
  fixnum index = untag_fixnum(ctx->pop());
  data_root<array> methods(ctx->pop(), this);
  data_root<word> generic_word(ctx->pop(), this);
  data_root<object> object(((cell*)ctx->datastack)[-index], this);
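
  // Note: these pops mirror the pushes in emit_inline_cache(), so the values
  // come off in reverse order (cache_entries first, generic_word last), and
  // datastack[-index] is the object the generic word is dispatching on.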

  cell pic_size = array_capacity(cache_entries.untagged()) / 2;

  update_pic_transitions(pic_size);

  cell xt = generic_word->entry_point;
  if (pic_size < max_pic_size) {
    cell klass = object_class(object.value());
    cell method = lookup_method(object.value(), methods.value());

    data_root<array> new_cache_entries(
        add_inline_cache_entry(cache_entries.value(), klass, method), this);

    inline_cache_jit jit(generic_word.value(), this);
    jit.emit_inline_cache(index, generic_word.value(), methods.value(),
                          new_cache_entries.value(), tail_call_site);
    code_block* code = jit.to_code_block(CODE_BLOCK_PIC, JIT_FRAME_SIZE);
    initialize_code_block(code);
    xt = code->entry_point();
  }

  // Install the new stub.
  if (return_address.valid) {
    // Since each PIC is only referenced from a single call site,
    // if the old call target was a PIC, we can deallocate it immediately,
    // instead of leaving dead PICs around until the next GC.
    deallocate_inline_cache(return_address.value);
    set_call_target(return_address.value, xt);

#ifdef PIC_DEBUG
    FACTOR_PRINT("Updated " << (tail_call_site ? "tail" : "non-tail")
                 << " call site 0x" << std::hex << return_address.value
                 << std::dec << " with 0x" << std::hex << (cell)xt
                 << std::dec);
#endif
  }

  return xt;
}

VM_C_API cell inline_cache_miss(cell return_address, factor_vm* parent) {
  return parent->inline_cache_miss(return_address);
}