// NOTE(review): numbered listing (the leading "55".."207" are listing line
// numbers, not code) of the middle of a C2 runtime-stub generator; it matches
// HotSpot's GraphKit::gen_stub. The enclosing function's signature and tail
// are outside this excerpt, so only comments are added/improved here.
55
56 // Make a map, with JVM state
57 uint parm_cnt = jdomain->cnt();
// Map size: twice the Java parm count plus one (presumably headroom for
// two-slot long/double values -- TODO confirm) or the return tuple,
// whichever is larger.
58 uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
59 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
60 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
61 JVMState* jvms = new (C) JVMState(0);
62 jvms->set_bci(InvocationEntryBci);
// monoff == scloff == endoff: the monitor and scalar-replacement sections of
// the debug info are empty for a stub.
63 jvms->set_monoff(max_map);
64 jvms->set_scloff(max_map);
65 jvms->set_endoff(max_map);
66 {
67 SafePointNode *map = new SafePointNode( max_map, jvms );
68 jvms->set_map(map);
69 set_jvms(jvms);
70 assert(map == this->map(), "kit.map is set");
71 }
72
73 // Make up the parameters
// Incoming Java parameters become ParmNodes hung off 'start'; remaining map
// slots are padded with top.
74 uint i;
75 for( i = 0; i < parm_cnt; i++ )
76 map()->init_req(i, _gvn.transform(new ParmNode(start, i)));
77 for( ; i<map()->req(); i++ )
78 map()->init_req(i, top()); // For nicer debugging
79
80 // GraphKit requires memory to be a MergeMemNode:
81 set_all_memory(map()->memory());
82
83 // Get base of thread-local storage area
84 Node* thread = _gvn.transform( new ThreadLocalNode() );
85
86 const int NoAlias = Compile::AliasIdxBot;
87
// NOTE(review): adr_last_Java_pc (and, on SPARC, adr_flags) is computed but
// not stored to within this excerpt -- presumably consumed by code outside
// this view; verify against the full function.
88 Node* adr_last_Java_pc = basic_plus_adr(top(),
89 thread,
90 in_bytes(JavaThread::frame_anchor_offset()) +
91 in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
92 #if defined(SPARC)
93 Node* adr_flags = basic_plus_adr(top(),
94 thread,
95 in_bytes(JavaThread::frame_anchor_offset()) +
96 in_bytes(JavaFrameAnchor::flags_offset()));
97 #endif /* defined(SPARC) */
98
99
100 // Drop in the last_Java_sp. last_Java_fp is not touched.
101 // Always do this after the other "last_Java_frame" fields are set since
102 // as soon as last_Java_sp != NULL the has_last_Java_frame is true and
103 // users will look at the other fields.
104 //
105 Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
// The published SP is the frame pointer adjusted by STACK_BIAS (non-zero
// only on biased-stack platforms such as SPARC V9 -- TODO confirm).
106 Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
107 store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
108
109 // Set _thread_in_native
110 // The order of stores into TLS is critical! Setting _thread_in_native MUST
111 // be last, because a GC is allowed at any time after setting it and the GC
112 // will require last_Java_pc and last_Java_sp.
113
114 //-----------------------------
115 // Compute signature for C call. Varies from the Java signature!
116 const Type **fields = TypeTuple::fields(2*parm_cnt+2);
117 uint cnt = TypeFunc::Parms;
118 // The C routine gets the base of thread-local storage passed in as an
119 // extra argument. Not all calls need it, but it is cheap to add here.
// NOTE(review): this revision copies int parameters through unchanged; the
// later revision of this listing widens them to LONG+HALF on platforms where
// CCallingConventionRequiresIntsAsLongs is set.
120 for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
121 fields[cnt] = jdomain->field_at(pcnt);
122 }
123
124 fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
125 // Also pass in the caller's PC, if asked for.
126 if (return_pc) {
127 fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
128 }
129
130 const TypeTuple* domain = TypeTuple::make(cnt,fields);
131 // The C routine we are about to call cannot return an oop; it can block on
132 // exit and a GC will trash the oop while it sits in C-land. Instead, we
133 // return the oop through TLS for runtime calls.
134 // Also, C routines returning integer subword values leave the high
135 // order bits dirty; these must be cleaned up by explicit sign extension.
136 const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
137 // Make a private copy of jrange->fields();
138 const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
139 // Fixup oop returns
140 int retval_ptr = retval->isa_oop_ptr();
141 if (retval_ptr) {
142 assert( pass_tls, "Oop must be returned thru TLS" );
143 // Fancy-jumps return address; others return void
144 rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;
145
146 } else if (retval->isa_int()) { // Returning any integer subtype?
147 // "Fatten" byte, char & short return types to 'int' to show that
148 // the native C code can return values with junk high order bits.
149 // We'll sign-extend it below later.
150 rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext
151
152 } else if (jrange->cnt() >= TypeFunc::Parms+1) { // Else copy other types
153 rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
// A two-slot return value (long/double) copies its second half as well.
154 if (jrange->cnt() == TypeFunc::Parms+2) {
155 rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
156 }
157 }
158 const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);
159
160 // Final C signature
161 const TypeFunc *c_sig = TypeFunc::make(domain,range);
162
163 //-----------------------------
164 // Make the call node
165 CallRuntimeNode *call = new CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
166 //-----------------------------
167
168 // Fix-up the debug info for the call
// 'cnt' here is the C-signature field count computed above; it is reused as
// the uniform debug-info offset for the call's JVMState.
169 call->set_jvms( new (C) JVMState(0) );
170 call->jvms()->set_bci(0);
171 call->jvms()->set_offsets(cnt);
172
173 // Set fixed predefined input arguments
174 cnt = 0;
175 for (i = 0; i < TypeFunc::Parms; i++)
176 call->init_req(cnt++, map()->in(i));
177 // A little too aggressive on the parm copy; return address is not an input
178 call->set_req(TypeFunc::ReturnAdr, top());
179 for (; i < parm_cnt; i++) { // Regular input arguments
180 call->init_req(cnt++, map()->in(i));
181 }
182
// Trailing fixed arguments must mirror the domain built above: TLS base,
// then optionally the caller's PC.
183 call->init_req( cnt++, thread );
184 if( return_pc ) // Return PC, if asked for
185 call->init_req( cnt++, returnadr() );
186 _gvn.transform_no_reclaim(call);
187
188
189 //-----------------------------
190 // Now set up the return results
191 set_control( _gvn.transform( new ProjNode(call,TypeFunc::Control)) );
192 set_i_o( _gvn.transform( new ProjNode(call,TypeFunc::I_O )) );
193 set_all_memory_call(call);
194 if (range->cnt() > TypeFunc::Parms) {
195 Node* retnode = _gvn.transform( new ProjNode(call,TypeFunc::Parms) );
196 // C-land is allowed to return sub-word values. Convert to integer type.
197 assert( retval != Type::TOP, "" );
// Unsigned subword types (bool, char) are masked (zero-extended); signed
// ones (byte, short) are shifted left then right (RShiftI is the signed
// shift in C2, so this sign-extends).
198 if (retval == TypeInt::BOOL) {
199 retnode = _gvn.transform( new AndINode(retnode, intcon(0xFF)) );
200 } else if (retval == TypeInt::CHAR) {
201 retnode = _gvn.transform( new AndINode(retnode, intcon(0xFFFF)) );
202 } else if (retval == TypeInt::BYTE) {
203 retnode = _gvn.transform( new LShiftINode(retnode, intcon(24)) );
204 retnode = _gvn.transform( new RShiftINode(retnode, intcon(24)) );
205 } else if (retval == TypeInt::SHORT) {
206 retnode = _gvn.transform( new LShiftINode(retnode, intcon(16)) );
207 retnode = _gvn.transform( new RShiftINode(retnode, intcon(16)) );
|
// NOTE(review): revised version of the preceding listing. The functional
// change is the widening of int arguments to long (LONG + HALF slots, plus a
// ConvI2L at the call site) on platforms where
// CCallingConventionRequiresIntsAsLongs is set (presumably 64-bit ABIs that
// pass ints as full registers, e.g. PPC64 -- confirm); the rest is
// brace/whitespace cleanup. Listing numbers "55".."222" are part of the
// listing, not code.
55
56 // Make a map, with JVM state
57 uint parm_cnt = jdomain->cnt();
// Map size: twice the Java parm count plus one, or the return tuple,
// whichever is larger.
58 uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
59 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
60 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
61 JVMState* jvms = new (C) JVMState(0);
62 jvms->set_bci(InvocationEntryBci);
// monoff == scloff == endoff: the monitor and scalar-replacement sections of
// the debug info are empty for a stub.
63 jvms->set_monoff(max_map);
64 jvms->set_scloff(max_map);
65 jvms->set_endoff(max_map);
66 {
67 SafePointNode *map = new SafePointNode( max_map, jvms );
68 jvms->set_map(map);
69 set_jvms(jvms);
70 assert(map == this->map(), "kit.map is set");
71 }
72
73 // Make up the parameters
// Incoming Java parameters become ParmNodes hung off 'start'; remaining map
// slots are padded with top.
74 uint i;
75 for (i = 0; i < parm_cnt; i++) {
76 map()->init_req(i, _gvn.transform(new ParmNode(start, i)));
77 }
78 for ( ; i<map()->req(); i++) {
79 map()->init_req(i, top()); // For nicer debugging
80 }
81
82 // GraphKit requires memory to be a MergeMemNode:
83 set_all_memory(map()->memory());
84
85 // Get base of thread-local storage area
86 Node* thread = _gvn.transform(new ThreadLocalNode());
87
88 const int NoAlias = Compile::AliasIdxBot;
89
// NOTE(review): adr_last_Java_pc (and, on SPARC, adr_flags) is computed but
// not stored to within this excerpt -- presumably consumed by code outside
// this view; verify against the full function.
90 Node* adr_last_Java_pc = basic_plus_adr(top(),
91 thread,
92 in_bytes(JavaThread::frame_anchor_offset()) +
93 in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
94 #if defined(SPARC)
95 Node* adr_flags = basic_plus_adr(top(),
96 thread,
97 in_bytes(JavaThread::frame_anchor_offset()) +
98 in_bytes(JavaFrameAnchor::flags_offset()));
99 #endif /* defined(SPARC) */
100
101
102 // Drop in the last_Java_sp. last_Java_fp is not touched.
103 // Always do this after the other "last_Java_frame" fields are set since
104 // as soon as last_Java_sp != NULL the has_last_Java_frame is true and
105 // users will look at the other fields.
106 //
107 Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
// The published SP is the frame pointer adjusted by STACK_BIAS (non-zero
// only on biased-stack platforms such as SPARC V9 -- TODO confirm).
108 Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
109 store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
110
111 // Set _thread_in_native
112 // The order of stores into TLS is critical! Setting _thread_in_native MUST
113 // be last, because a GC is allowed at any time after setting it and the GC
114 // will require last_Java_pc and last_Java_sp.
115
116 //-----------------------------
117 // Compute signature for C call. Varies from the Java signature!
118
119 const Type **fields = TypeTuple::fields(2*parm_cnt+2);
120 uint cnt = TypeFunc::Parms;
121 // The C routine gets the base of thread-local storage passed in as an
122 // extra argument. Not all calls need it, but it is cheap to add here.
123 for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
124 const Type *f = jdomain->field_at(pcnt);
// A widened int consumes TWO signature slots (LONG followed by HALF): cnt is
// bumped once here and once by the loop update. This worst case is why
// 'fields' was allocated with 2*parm_cnt+2 entries.
125 if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
126 fields[cnt++] = TypeLong::LONG;
127 fields[cnt] = Type::HALF; // Must add an additional half for a long.
128 } else {
129 fields[cnt] = f;
130 }
131 }
132 fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
133 // Also pass in the caller's PC, if asked for.
134 if (return_pc) {
135 fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
136 }
137 const TypeTuple* domain = TypeTuple::make(cnt, fields);
138
139 // The C routine we are about to call cannot return an oop; it can block on
140 // exit and a GC will trash the oop while it sits in C-land. Instead, we
141 // return the oop through TLS for runtime calls.
142 // Also, C routines returning integer subword values leave the high
143 // order bits dirty; these must be cleaned up by explicit sign extension.
144 const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
145 // Make a private copy of jrange->fields();
146 const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
147 // Fixup oop returns
148 int retval_ptr = retval->isa_oop_ptr();
149 if (retval_ptr) {
150 assert( pass_tls, "Oop must be returned thru TLS" );
151 // Fancy-jumps return address; others return void
152 rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;
153
154 } else if (retval->isa_int()) { // Returning any integer subtype?
155 // "Fatten" byte, char & short return types to 'int' to show that
156 // the native C code can return values with junk high order bits.
157 // We'll sign-extend it below later.
158 rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext
159
160 } else if (jrange->cnt() >= TypeFunc::Parms+1) { // Else copy other types
161 rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
// A two-slot return value (long/double) copies its second half as well.
162 if (jrange->cnt() == TypeFunc::Parms+2) {
163 rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
164 }
165 }
166 const TypeTuple* range = TypeTuple::make(jrange->cnt(), rfields);
167
168 // Final C signature
169 const TypeFunc *c_sig = TypeFunc::make(domain, range);
170
171 //-----------------------------
172 // Make the call node.
173 CallRuntimeNode *call = new CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
174 //-----------------------------
175
176 // Fix-up the debug info for the call.
// 'cnt' here is the C-signature field count computed above; it is reused as
// the uniform debug-info offset for the call's JVMState.
177 call->set_jvms(new (C) JVMState(0));
178 call->jvms()->set_bci(0);
179 call->jvms()->set_offsets(cnt);
180
181 // Set fixed predefined input arguments.
182 cnt = 0;
183 for (i = 0; i < TypeFunc::Parms; i++) {
184 call->init_req(cnt++, map()->in(i));
185 }
186 // A little too aggressive on the parm copy; return address is not an input.
187 call->set_req(TypeFunc::ReturnAdr, top());
188 for (; i < parm_cnt; i++) { // Regular input arguments.
// Mirror the domain built above: widened int arguments get a ConvI2L plus a
// top() filler for the HALF slot; everything else passes straight through.
189 const Type *f = jdomain->field_at(i);
190 if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
191 call->init_req(cnt++, _gvn.transform(new ConvI2LNode(map()->in(i))));
192 call->init_req(cnt++, top());
193 } else {
194 call->init_req(cnt++, map()->in(i));
195 }
196 }
// Trailing fixed arguments must mirror the domain: TLS base, then optionally
// the caller's PC.
197 call->init_req(cnt++, thread);
198 if (return_pc) { // Return PC, if asked for.
199 call->init_req(cnt++, returnadr());
200 }
201
202 _gvn.transform_no_reclaim(call);
203
204 //-----------------------------
205 // Now set up the return results
206 set_control( _gvn.transform( new ProjNode(call,TypeFunc::Control)) );
207 set_i_o( _gvn.transform( new ProjNode(call,TypeFunc::I_O )) );
208 set_all_memory_call(call);
209 if (range->cnt() > TypeFunc::Parms) {
210 Node* retnode = _gvn.transform( new ProjNode(call,TypeFunc::Parms) );
211 // C-land is allowed to return sub-word values. Convert to integer type.
212 assert( retval != Type::TOP, "" );
// Unsigned subword types (bool, char) are masked (zero-extended); signed
// ones (byte, short) are shifted left then right (RShiftI is the signed
// shift in C2, so this sign-extends).
213 if (retval == TypeInt::BOOL) {
214 retnode = _gvn.transform( new AndINode(retnode, intcon(0xFF)) );
215 } else if (retval == TypeInt::CHAR) {
216 retnode = _gvn.transform( new AndINode(retnode, intcon(0xFFFF)) );
217 } else if (retval == TypeInt::BYTE) {
218 retnode = _gvn.transform( new LShiftINode(retnode, intcon(24)) );
219 retnode = _gvn.transform( new RShiftINode(retnode, intcon(24)) );
220 } else if (retval == TypeInt::SHORT) {
221 retnode = _gvn.transform( new LShiftINode(retnode, intcon(16)) );
222 retnode = _gvn.transform( new RShiftINode(retnode, intcon(16)) );
|