  Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null,
                                      RegionNode* region, int null_path,
                                      int offset);
  Node* load_klass_from_mirror(Node* mirror, bool never_see_null,
                               RegionNode* region, int null_path) {
    int offset = java_lang_Class::klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
  Node* load_array_klass_from_mirror(Node* mirror, bool never_see_null,
                                     RegionNode* region, int null_path) {
    int offset = java_lang_Class::array_klass_offset_in_bytes();
    return load_klass_from_mirror_common(mirror, never_see_null,
                                         region, null_path,
                                         offset);
  }
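  // The two wrappers above differ only in which injected java.lang.Class
  // field they read: klass_offset_in_bytes() locates the mirrored type's own
  // Klass*, while array_klass_offset_in_bytes() locates the cached Klass* of
  // the corresponding array type.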
  Node* generate_access_flags_guard(Node* kls,
                                    int modifier_mask, int modifier_bits,
                                    RegionNode* region);
  Node* generate_interface_guard(Node* kls, RegionNode* region);
  Node* generate_value_guard(Node* kls, RegionNode* region);

  enum ArrayKind {
    AnyArray,
    NonArray,
    ObjectArray,
    NonObjectArray,
    TypeArray,
    ValueArray
  };
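  // Each ArrayKind selects which test generate_array_guard_common emits; the
  // one-line wrappers below fix the kind and share the region plumbing.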

  Node* generate_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, AnyArray);
  }
  Node* generate_non_array_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, NonArray);
  }
  Node* generate_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, ObjectArray);
  }
  Node* generate_non_objArray_guard(Node* kls, RegionNode* region) {
    return generate_array_guard_common(kls, region, NonObjectArray);
  }

//--------------------(inline_native_Class_query helpers)---------------------
// Use this for JVM_ACC_INTERFACE, JVM_ACC_IS_CLONEABLE_FAST, JVM_ACC_HAS_FINALIZER.
// Fall through if (mods & mask) == bits, take the guard otherwise.
Node* LibraryCallKit::generate_access_flags_guard(Node* kls, int modifier_mask, int modifier_bits, RegionNode* region) {
  // Branch around if the given klass has the given modifier bit set.
  // Like generate_guard, adds a new path onto the region.
  Node* modp = basic_plus_adr(kls, in_bytes(Klass::access_flags_offset()));
  Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT, MemNode::unordered);
  Node* mask = intcon(modifier_mask);
  Node* bits = intcon(modifier_bits);
  Node* mbit = _gvn.transform(new AndINode(mods, mask));
  Node* cmp  = _gvn.transform(new CmpINode(mbit, bits));
  Node* bol  = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
  return generate_fair_guard(bol, region);
}
Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region);
}

Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) {
  return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region);
}
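
// Illustration only (not part of this file): any flag in Klass::access_flags
// can be guarded the same way. A hypothetical finalizer check, assuming the
// name generate_finalizer_guard, would follow the same one-line pattern:
//
//   Node* generate_finalizer_guard(Node* kls, RegionNode* region) {
//     // Falls through when JVM_ACC_HAS_FINALIZER is clear; finalizable
//     // klasses take the guard edge into `region` (the slow path).
//     return generate_access_flags_guard(kls, JVM_ACC_HAS_FINALIZER, 0, region);
//   }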

//-------------------------inline_native_Class_query-------------------
bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) {
  const Type* return_type = TypeInt::BOOL;
  Node* prim_return_value = top();  // what happens if it's a primitive class?
  bool never_see_null = !too_many_traps(Deoptimization::Reason_null_check);
  bool expect_prim = false;  // most of these guys expect to work on refs

  enum { _normal_path = 1, _prim_path = 2, PATH_LIMIT };

  Node* mirror = argument(0);
  Node* obj    = top();

  switch (id) {
  case vmIntrinsics::_isInstance:
    // nothing is an instance of a primitive type
    prim_return_value = intcon(0);
    obj = argument(1);
    break;
  case vmIntrinsics::_getModifiers:
    prim_return_value = intcon(JVM_ACC_ABSTRACT | JVM_ACC_FINAL | JVM_ACC_PUBLIC);

//-------------------------inline_native_hashcode--------------------
// ... excerpt resumes inside LibraryCallKit::inline_native_hashcode, in the
// null-check prologue of the static System.identityHashCode case ...
    // System.identityHashCode(null) == 0
    Node* null_ctl = top();
    obj = null_check_oop(obj, &null_ctl);
    result_reg->init_req(_null_path, null_ctl);
    result_val->init_req(_null_path, _gvn.intcon(0));
  }

  // Unconditionally null?  Then return right away.
  if (stopped()) {
    set_control(result_reg->in(_null_path));
    if (!stopped())
      set_result(result_val->in(_null_path));
    return true;
  }

  // We only go to the fast case code if we pass a number of guards.  The
  // paths which do not pass are accumulated in the slow_region.
  RegionNode* slow_region = new RegionNode(1);
  record_for_igvn(slow_region);
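  // Each generate_*_guard call below may add one control edge to slow_region;
  // control that survives every guard stays on the inlined fast path, while
  // slow_region ultimately feeds the out-of-line hashCode() call that handles
  // whatever the guards rejected.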

  const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
  assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here");
  if (is_static && obj_type->can_be_value_type()) {
    Node* obj_klass = load_object_klass(obj);
    generate_value_guard(obj_klass, slow_region);
  }
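  // The guard above routes value types to the slow path: System.identityHashCode
  // can be handed one, and a value type's mark word never carries an identity
  // hash (it holds the always-locked pattern; see the header test below).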

  // If this is a virtual call, we generate a funny guard.  We pull out
  // the vtable entry corresponding to hashCode() from the target object.
  // If the target method which we are calling happens to be the native
  // Object hashCode() method, we pass the guard.  We do not need this
  // guard for non-virtual calls -- the caller is known to be the native
  // Object hashCode().
  if (is_virtual) {
    // After null check, get the object's klass.
    Node* obj_klass = load_object_klass(obj);
    generate_virtual_guard(obj_klass, slow_region);
  }
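  // (generate_virtual_guard, defined earlier in this file, loads the
  //  hashCode() vtable slot from obj_klass and branches to slow_region unless
  //  that slot still points at the native Object.hashCode implementation.)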

  // Get the header out of the object, use LoadMarkNode when available
  Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  // The control of the load must be NULL. Otherwise, the load can move before
  // the null check after castPP removal.
  Node* no_ctrl = NULL;
  Node* header = make_load(no_ctrl, header_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
4375
4376 // Test the header to see if it is unlocked.
4377 Node *lock_mask = _gvn.MakeConX(markWord::biased_lock_mask_in_place);
4378 Node *lmasked_header = _gvn.transform(new AndXNode(header, lock_mask));
4379 Node *unlocked_val = _gvn.MakeConX(markWord::unlocked_value);
4380 Node *chk_unlocked = _gvn.transform(new CmpXNode( lmasked_header, unlocked_val));
4381 Node *test_unlocked = _gvn.transform(new BoolNode( chk_unlocked, BoolTest::ne));
4382
4383 generate_slow_guard(test_unlocked, slow_region);
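  // Background from markWord.hpp: biased_lock_mask_in_place covers the low
  // three mark bits [biased_lock:1 | lock:2] and unlocked_value is 0b01, so
  // any locked or biased object -- and any value type, via the always-locked
  // pattern -- fails the test above and takes the slow path.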

  // Get the hash value and check to see that it has been properly assigned.
  // We depend on hash_mask being at most 32 bits and avoid the use of
  // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
  // vm: see markWord.hpp.
  Node* hash_mask  = _gvn.intcon(markWord::hash_mask);
  Node* hash_shift = _gvn.intcon(markWord::hash_shift);
  Node* hshifted_header = _gvn.transform(new URShiftXNode(header, hash_shift));
  // This hack lets the hash bits live anywhere in the mark object now, as long
  // as the shift drops the relevant bits into the low 32 bits.  Note that
  // Java spec says that HashCode is an int so there's no point in capturing
  // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build).
  hshifted_header = ConvX2I(hshifted_header);
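
  // Sketch of the continuation, following the mainline version of this
  // intrinsic (the excerpt ends here): mask the shifted header down to the
  // hash field and send a still-unassigned hash (markWord::no_hash) to the
  // slow path as well.
  //
  //   Node* hash_val      = _gvn.transform(new AndINode(hshifted_header, hash_mask));
  //   Node* no_hash_val   = _gvn.intcon(markWord::no_hash);
  //   Node* chk_assigned  = _gvn.transform(new CmpINode(hash_val, no_hash_val));
  //   Node* test_assigned = _gvn.transform(new BoolNode(chk_assigned, BoolTest::eq));
  //   generate_slow_guard(test_assigned, slow_region);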