    Node* con = make_constant_from_field(field, obj);
    if (con != NULL) {
      push_node(field->layout_type(), con);
      return;
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);

  // Build the resultant type of the load
  const Type* type;

  bool must_assert_null = false;

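  // Describe the access for the GC-agnostic Access API with decorators: the
  // access is to the heap, and a volatile field requests sequentially
  // consistent ordering, so that the barrier set can expand whatever memory
  // barriers the current GC and CPU require (e.g. the extra membar needed
  // for IRIW on CPUs that are not multiple-copy atomic).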
  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;

  if (is_obj) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_static_constant()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      if (con->is_null_object()) {
        type = TypePtr::NULL_PTR;
      } else {
        type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      }
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }

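  // Emit the load through the Access API. access_load_at derives the memory
  // ordering from the decorators above and lets the barrier set wrap the
  // load in whatever read barriers the collector needs.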
  Node* ld = access_load_at(obj, adr, adr_type, type, bt, decorators);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here. It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever. Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations. (Or, we might load a class
    // which should not be loaded.) If we ever see a non-null
    // value, we will then trap and recompile. (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    null_assert(peek());
    set_bci(iter().cur_bci()); // put it back
  }
}

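// Parse a putfield or putstatic. obj is the receiver (for a static field,
// the java mirror of the holder class), and is_field is true for an
// instance field, false for a static one.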
void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();

  DecoratorSet decorators = IN_HEAP;
  decorators |= is_vol ? MO_SEQ_CST : MO_UNORDERED;

  bool is_obj = bt == T_OBJECT || bt == T_ARRAY;

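  // Stores of object references are released conservatively: the shared
  // access layer gives even a non-volatile oop store release semantics,
  // because the reference might point to a freshly created object.
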
  // Store the value.
  const Type* field_type;
  if (!field->type()->is_loaded()) {
    field_type = TypeInstPtr::BOTTOM;
  } else {
    if (is_obj) {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    } else {
      field_type = Type::BOTTOM;
    }
  }
  access_store_at(control(), obj, adr, adr_type, val, field_type, bt, decorators);

  if (is_field) {
    // Remember we wrote a volatile field.
    // On CPUs that are not multiple-copy atomic (e.g. ppc64), a barrier must
    // be issued in constructors containing such stores; see do_exits() in
    // parse1.cpp.
    if (is_vol) {
      set_wrote_volatile(true);
    }
    set_wrote_fields(true);

    // If the field is final, the rules of Java say we are in <init> or <clinit>.
    // Note the presence of writes to final non-static fields, so that we
    // can insert a memory barrier later on to keep the writes from floating
    // out of the constructor.
    // Any method can write a @Stable field; insert memory barriers after those also.
    if (field->is_final()) {
      set_wrote_final(true);
      if (AllocateNode::Ideal_allocation(obj, &_gvn) != NULL) {
        // Preserve the allocation pointer so that a precedence edge to it can
        // be added to the membar generated on exit from the constructor.
        // A @Stable field cannot be bound to its allocation; the allocation is
        // recorded only for final fields.
        set_alloc_with_final(obj);
      }
    }
    if (field->is_stable()) {
      set_wrote_stable(true);
    }
  }
}

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Take an uncommon trap when the element class of the array is not loaded:
  // we need the loaded class for the rest of the graph, but we must not
  // initialize the container class (see the Java spec).
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that the array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate an uncommon trap for the unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  // ...

  push(obj);
}

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
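    // The outer dimension must be a compile-time constant: the expansion
    // emits one recursive sub-array allocation and one element store per
    // outer index.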
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr* elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
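    // Element i of the outer array lives at offset header + i * heap-oop
    // size. The element stores below go through the Access API with
    // IN_HEAP_ARRAY, which marks them as array-element accesses for the
    // barrier set.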
    for (jint i = 0; i < length_con; i++) {
      Node* elem = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node* eaddr = basic_plus_adr(array, offset);
      access_store_at(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT, IN_HEAP | IN_HEAP_ARRAY);
    }
  }
  return array;
}
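
// For example, new Object[2][nonConLen] expands into one allocation of an
// Object[2][] whose two element slots are filled with recursively allocated
// Object[nonConLen] arrays; only the outer dimension has to be constant.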

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // The m-dimensional array.
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note: Array classes are always initialized; no is_initialized check.

  kill_dead_locals();

  // Get the lengths from the stack (first dimension is on top).
  Node** length = NEW_RESOURCE_ARRAY(Node*, ndimensions + 1);
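  // NEW_RESOURCE_ARRAY allocates from the current thread's resource arena,
  // so this temporary buffer needs no explicit freeing. The extra slot is
  // presumably used as a terminator later in the method.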
|