153 if (con != NULL) {
154 push_node(field->layout_type(), con);
155 return;
156 }
157 }
158
159 ciType* field_klass = field->type();
160 bool is_vol = field->is_volatile();
161
162 // Compute address and memory type.
163 int offset = field->offset_in_bytes();
164 const TypePtr* adr_type = C->alias_type(field)->adr_type();
165 Node *adr = basic_plus_adr(obj, obj, offset);
166 BasicType bt = field->layout_type();
167
168 // Build the resultant type of the load
169 const Type *type;
170
171 bool must_assert_null = false;
172
173 if( bt == T_OBJECT ) {
174 if (!field->type()->is_loaded()) {
175 type = TypeInstPtr::BOTTOM;
176 must_assert_null = true;
177 } else if (field->is_static_constant()) {
178 // This can happen if the constant oop is non-perm.
179 ciObject* con = field->constant_value().as_object();
180 // Do not "join" in the previous type; it doesn't add value,
181 // and may yield a vacuous result if the field is of interface type.
182 if (con->is_null_object()) {
183 type = TypePtr::NULL_PTR;
184 } else {
185 type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
186 }
187 assert(type != NULL, "field singleton type must be consistent");
188 } else {
189 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
190 }
191 } else {
192 type = Type::get_const_basic_type(bt);
193 }
194 if (support_IRIW_for_not_multiple_copy_atomic_cpu && field->is_volatile()) {
195 insert_mem_bar(Op_MemBarVolatile); // StoreLoad barrier
196 }
197 // Build the load.
198 //
199 MemNode::MemOrd mo = is_vol ? MemNode::acquire : MemNode::unordered;
200 bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
201 Node* ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access);
202
203 // Adjust Java stack
204 if (type2size[bt] == 1)
205 push(ld);
206 else
207 push_pair(ld);
208
209 if (must_assert_null) {
210 // Do not take a trap here. It's possible that the program
211 // will never load the field's class, and will happily see
212 // null values in this field forever. Don't stumble into a
213 // trap for such a program, or we might get a long series
214 // of useless recompilations. (Or, we might load a class
215 // which should not be loaded.) If we ever see a non-null
216 // value, we will then trap and recompile. (The trap will
217 // not need to mention the class index, since the class will
218 // already have been loaded if we ever see a non-null value.)
219 // uncommon_trap(iter().get_field_signature_index());
220 if (PrintOpto && (Verbose || WizardMode)) {
221 method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
222 }
223 if (C->log() != NULL) {
224 C->log()->elem("assert_null reason='field' klass='%d'",
225 C->log()->identify(field->type()));
226 }
227 // If there is going to be a trap, put it at the next bytecode:
228 set_bci(iter().next_bci());
229 null_assert(peek());
230 set_bci(iter().cur_bci()); // put it back
231 }
232
233 // If reference is volatile, prevent following memory ops from
234 // floating up past the volatile read. Also prevents commoning
235 // another volatile read.
236 if (field->is_volatile()) {
237 // Memory barrier includes bogus read of value to force load BEFORE membar
238 insert_mem_bar(Op_MemBarAcquire, ld);
239 }
240 }
241
// Emit IR for putfield/putstatic: pop the value from the Java stack and
// store it into 'field' of 'obj', inserting the memory barriers required
// for volatile fields and recording write facts used later by do_exits().
//   obj      - base oop node of the field holder
//   field    - resolved field being written
//   is_field - true for an instance field (putfield), false for a static
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write. Also prevents commoning
  // another volatile read.
  // (Op_MemBarRelease gives the store release semantics: accesses issued
  // before this point cannot be reordered below the volatile store.)
  if (is_vol) insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();  // alias slice for this field
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored; long/double occupy two stack slots.
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE) val = dstore_rounding(val);

  // Conservatively release stores of object references.
  const MemNode::MemOrd mo =
    is_vol ?
    // Volatile fields need releasing stores.
    MemNode::release :
    // Non-volatile fields also need releasing stores if they hold an
    // object reference, because the object reference might point to
    // a freshly created object.
    StoreNode::release_if_reference(bt);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    // Compute the static type of the stored oop; fall back to BOTTOM if
    // the field's class has not been loaded yet.
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    // Oop stores go through the GC-aware path (card marks etc. handled there).
    store = store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo);
  } else {
    // Volatile long/double must be written atomically even on 32-bit VMs.
    bool needs_atomic_access = is_vol || AlwaysAtomicAccesses;
    store = store_to_memory(control(), adr, val, bt, adr_type, mo, needs_atomic_access);
  }

  // If reference is volatile, prevent following volatiles ops from
  // floating up before the volatile write.
  if (is_vol) {
    // If not multiple copy atomic, we do the MemBarVolatile before the load.
    if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
      insert_mem_bar(Op_MemBarVolatile); // Use fat membar
    }
    // Remember we wrote a volatile field.
    // For not multiple copy atomic cpu (ppc64) a barrier should be issued
    // in constructors which have such stores. See do_exits() in parse1.cpp.
    if (is_field) {
      set_wrote_volatile(true);
    }
  }

  // Record that this method wrote an instance field (used for barrier
  // decisions on constructor exit).
  if (is_field) {
    set_wrote_fields(true);
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  // Any method can write a @Stable field; insert memory barriers after those also.
  if (is_field && (field->is_final() || field->is_stable())) {
    if (field->is_final()) {
      set_wrote_final(true);
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }

    // Preserve allocation ptr to create precedent edge to it in membar
    // generated on exit from constructor.
    // Can't bind stable with its allocation, only record allocation for final field.
    if (field->is_final() && AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
      set_alloc_with_final(obj);
    }
  }
}
324
325 //=============================================================================
326 void Parse::do_anewarray() {
327 bool will_link;
328 ciKlass* klass = iter().get_klass(will_link);
329
330 // Uncommon Trap when class that array contains is not loaded
331 // we need the loaded class for the rest of graph; do not
332 // initialize the container class (see Java spec)!!!
333 assert(will_link, "anewarray: typeflow responsibility");
334
335 ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
336 // Check that array_klass object is loaded
337 if (!array_klass->is_loaded()) {
338 // Generate uncommon_trap for unloaded array_class
339 uncommon_trap(Deoptimization::Reason_unloaded,
340 Deoptimization::Action_reinterpret,
341 array_klass);
342 return;
361 push(obj);
362 }
363
364 // Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
365 // Also handle the degenerate 1-dimensional case of anewarray.
366 Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
367 Node* length = lengths[0];
368 assert(length != NULL, "");
369 Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
370 if (ndimensions > 1) {
371 jint length_con = find_int_con(length, -1);
372 guarantee(length_con >= 0, "non-constant multianewarray");
373 ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
374 const TypePtr* adr_type = TypeAryPtr::OOPS;
375 const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
376 const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
377 for (jint i = 0; i < length_con; i++) {
378 Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
379 intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
380 Node* eaddr = basic_plus_adr(array, offset);
381 store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, MemNode::unordered);
382 }
383 }
384 return array;
385 }
386
387 void Parse::do_multianewarray() {
388 int ndimensions = iter().get_dimensions();
389
390 // the m-dimensional array
391 bool will_link;
392 ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
393 assert(will_link, "multianewarray: typeflow responsibility");
394
395 // Note: Array classes are always initialized; no is_initialized check.
396
397 kill_dead_locals();
398
399 // get the lengths from the stack (first dimension is on top)
400 Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
401 length[ndimensions] = NULL; // terminating null for make_runtime_call
|
153 if (con != NULL) {
154 push_node(field->layout_type(), con);
155 return;
156 }
157 }
158
159 ciType* field_klass = field->type();
160 bool is_vol = field->is_volatile();
161
162 // Compute address and memory type.
163 int offset = field->offset_in_bytes();
164 const TypePtr* adr_type = C->alias_type(field)->adr_type();
165 Node *adr = basic_plus_adr(obj, obj, offset);
166 BasicType bt = field->layout_type();
167
168 // Build the resultant type of the load
169 const Type *type;
170
171 bool must_assert_null = false;
172
173 C2DecoratorSet decorators = C2_ACCESS_ON_HEAP | C2_ACCESS_FREE_CONTROL;
174 decorators |= is_vol ? C2_MO_VOLATILE : C2_MO_RELAXED;
175
176 bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
177
178 if (is_obj) {
179 if (!field->type()->is_loaded()) {
180 type = TypeInstPtr::BOTTOM;
181 must_assert_null = true;
182 } else if (field->is_static_constant()) {
183 // This can happen if the constant oop is non-perm.
184 ciObject* con = field->constant_value().as_object();
185 // Do not "join" in the previous type; it doesn't add value,
186 // and may yield a vacuous result if the field is of interface type.
187 if (con->is_null_object()) {
188 type = TypePtr::NULL_PTR;
189 } else {
190 type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
191 }
192 assert(type != NULL, "field singleton type must be consistent");
193 } else {
194 type = TypeOopPtr::make_from_klass(field_klass->as_klass());
195 }
196 } else {
197 type = Type::get_const_basic_type(bt);
198 }
199
200 Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);
201
202 // Adjust Java stack
203 if (type2size[bt] == 1)
204 push(ld);
205 else
206 push_pair(ld);
207
208 if (must_assert_null) {
209 // Do not take a trap here. It's possible that the program
210 // will never load the field's class, and will happily see
211 // null values in this field forever. Don't stumble into a
212 // trap for such a program, or we might get a long series
213 // of useless recompilations. (Or, we might load a class
214 // which should not be loaded.) If we ever see a non-null
215 // value, we will then trap and recompile. (The trap will
216 // not need to mention the class index, since the class will
217 // already have been loaded if we ever see a non-null value.)
218 // uncommon_trap(iter().get_field_signature_index());
219 if (PrintOpto && (Verbose || WizardMode)) {
220 method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
221 }
222 if (C->log() != NULL) {
223 C->log()->elem("assert_null reason='field' klass='%d'",
224 C->log()->identify(field->type()));
225 }
226 // If there is going to be a trap, put it at the next bytecode:
227 set_bci(iter().next_bci());
228 null_assert(peek());
229 set_bci(iter().cur_bci()); // put it back
230 }
231 }
232
233 void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
234 bool is_vol = field->is_volatile();
235
236 // Compute address and memory type.
237 int offset = field->offset_in_bytes();
238 const TypePtr* adr_type = C->alias_type(field)->adr_type();
239 Node* adr = basic_plus_adr(obj, obj, offset);
240 BasicType bt = field->layout_type();
241 // Value to be stored
242 Node* val = type2size[bt] == 1 ? pop() : pop_pair();
243
244 C2DecoratorSet decorators = C2_ACCESS_ON_HEAP;
245 decorators |= is_vol ? C2_MO_VOLATILE : C2_MO_RELAXED;
246
247 bool is_obj = bt == T_OBJECT || bt == T_ARRAY;
248
249 // Store the value.
250 const Type* field_type;
251 if (!field->type()->is_loaded()) {
252 field_type = TypeInstPtr::BOTTOM;
253 } else {
254 if (is_obj) {
255 field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
256 } else {
257 field_type = Type::BOTTOM;
258 }
259 }
260 access_store_at(control(), obj, adr, adr_type, val, field_type, bt, decorators);
261
262 if (is_field) {
263 // Remember we wrote a volatile field.
264 // For not multiple copy atomic cpu (ppc64) a barrier should be issued
265 // in constructors which have such stores. See do_exits() in parse1.cpp.
266 if (is_vol) {
267 set_wrote_volatile(true);
268 }
269 set_wrote_fields(true);
270
271 // If the field is final, the rules of Java say we are in <init> or <clinit>.
272 // Note the presence of writes to final non-static fields, so that we
273 // can insert a memory barrier later on to keep the writes from floating
274 // out of the constructor.
275 // Any method can write a @Stable field; insert memory barriers after those also.
276 if (field->is_final()) {
277 set_wrote_final(true);
278 if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
279 // Preserve allocation ptr to create precedent edge to it in membar
280 // generated on exit from constructor.
281 // Can't bind stable with its allocation, only record allocation for final field.
282 set_alloc_with_final(obj);
283 }
284 }
285 if (field->is_stable()) {
286 set_wrote_stable(true);
287 }
288 }
289 }
290
291 //=============================================================================
292 void Parse::do_anewarray() {
293 bool will_link;
294 ciKlass* klass = iter().get_klass(will_link);
295
296 // Uncommon Trap when class that array contains is not loaded
297 // we need the loaded class for the rest of graph; do not
298 // initialize the container class (see Java spec)!!!
299 assert(will_link, "anewarray: typeflow responsibility");
300
301 ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
302 // Check that array_klass object is loaded
303 if (!array_klass->is_loaded()) {
304 // Generate uncommon_trap for unloaded array_class
305 uncommon_trap(Deoptimization::Reason_unloaded,
306 Deoptimization::Action_reinterpret,
307 array_klass);
308 return;
327 push(obj);
328 }
329
330 // Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
331 // Also handle the degenerate 1-dimensional case of anewarray.
332 Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
333 Node* length = lengths[0];
334 assert(length != NULL, "");
335 Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
336 if (ndimensions > 1) {
337 jint length_con = find_int_con(length, -1);
338 guarantee(length_con >= 0, "non-constant multianewarray");
339 ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
340 const TypePtr* adr_type = TypeAryPtr::OOPS;
341 const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
342 const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
343 for (jint i = 0; i < length_con; i++) {
344 Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
345 intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
346 Node* eaddr = basic_plus_adr(array, offset);
347 access_store_at(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, C2_ACCESS_ON_HEAP | C2_ACCESS_ON_ARRAY);
348 }
349 }
350 return array;
351 }
352
353 void Parse::do_multianewarray() {
354 int ndimensions = iter().get_dimensions();
355
356 // the m-dimensional array
357 bool will_link;
358 ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
359 assert(will_link, "multianewarray: typeflow responsibility");
360
361 // Note: Array classes are always initialized; no is_initialized check.
362
363 kill_dead_locals();
364
365 // get the lengths from the stack (first dimension is on top)
366 Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
367 length[ndimensions] = NULL; // terminating null for make_runtime_call
|