195 }
196
197 if (is_atomic) {
198 if (is_acquire || is_volatile) {
199 kit->insert_mem_bar(Op_MemBarAcquire);
200 }
201 } else if (is_write) {
202 // If not multiple copy atomic, we do the MemBarVolatile before the load.
203 if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
204 kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
205 }
206 } else {
207 if (is_volatile || is_acquire) {
208 kit->insert_mem_bar(Op_MemBarAcquire, _access.raw_access());
209 }
210 }
211 }
212
213 };
214
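// The store_at()/load_at() entry points below share one shape: a stack-allocated
// C2AccessFence emits the leading barriers on construction and the trailing
// barriers (shown above) on destruction, resolve_address() normalizes the address,
// and the GC-specific work is delegated to the *_at_resolved() virtuals.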
215 Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
216 C2AccessFence fence(access);
217 resolve_address(access);
218 return store_at_resolved(access, val);
219 }
220
221 Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
222 C2AccessFence fence(access);
223 resolve_address(access);
224 return load_at_resolved(access, val_type);
225 }
226
227 MemNode::MemOrd C2Access::mem_node_mo() const {
228 bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
229 bool is_read = (_decorators & C2_READ_ACCESS) != 0;
230 if ((_decorators & MO_SEQ_CST) != 0) {
231 if (is_write && is_read) {
232 // For atomic operations
233 return MemNode::seqcst;
234 } else if (is_write) {
276 const TypePtr* adr_type = _addr.type();
277 Node* adr = _addr.node();
278 if (!needs_cpu_membar() && adr_type->isa_instptr()) {
279 assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
280 intptr_t offset = Type::OffsetBot;
281 AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
282 if (offset >= 0) {
283 int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
284 if (offset < s) {
285 // Guaranteed to be a valid access, no need to pin it
286 _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
287 _decorators ^= C2_PINNED_LOAD;
288 }
289 }
290 }
291 }
292 }
293
294 //--------------------------- atomic operations---------------------------------
295
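// Pin the memory state produced by an atomic LoadStore node so that later memory
// users at the same alias index observe its effect; accesses for which
// needs_pinning() is false skip this step.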
296 static void pin_atomic_op(C2AtomicAccess& access) {
297 if (!access.needs_pinning()) {
298 return;
299 }
300 // SCMemProjNodes represent the memory state of a LoadStore. Their
301 // main role is to prevent LoadStore nodes from being optimized away
302 // when their results aren't used.
303 GraphKit* kit = access.kit();
304 Node* load_store = access.raw_access();
305 assert(load_store != NULL, "must pin atomic op");
306 Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
307 kit->set_memory(proj, access.alias_idx());
308 }
309
310 void C2AtomicAccess::set_memory() {
311 Node *mem = _kit->memory(_alias_idx);
312 _memory = mem;
313 }
314
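// Value-returning compare-and-exchange. For oop payloads on 64-bit VMs with
// compressed oops, the oop operands are encoded to narrow oops, the exchange is
// done with a CompareAndExchangeN node, and the result is decoded back to a full
// oop before it is returned; primitive payloads dispatch on access.type() to the
// matching CompareAndExchange{B,S,I,L} node.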
315 Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
316 Node* new_val, const Type* value_type) const {
317 GraphKit* kit = access.kit();
318 MemNode::MemOrd mo = access.mem_node_mo();
319 Node* mem = access.memory();
320
321 Node* adr = access.addr().node();
322 const TypePtr* adr_type = access.addr().type();
323
324 Node* load_store = NULL;
325
326 if (access.is_oop()) {
327 #ifdef _LP64
328 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
329 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
341 break;
342 }
343 case T_SHORT: {
344 load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
345 break;
346 }
347 case T_INT: {
348 load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
349 break;
350 }
351 case T_LONG: {
352 load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
353 break;
354 }
355 default:
356 ShouldNotReachHere();
357 }
358 }
359
360 access.set_raw_access(load_store);
361 pin_atomic_op(access);
362
363 #ifdef _LP64
364 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
365 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
366 }
367 #endif
368
369 return load_store;
370 }
371
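// Boolean compare-and-swap. C2_WEAK_CMPXCHG selects the WeakCompareAndSwap*
// nodes, which are allowed to fail spuriously and only report success or failure;
// otherwise the strong CompareAndSwap* nodes are used.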
372 Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
373 Node* new_val, const Type* value_type) const {
374 GraphKit* kit = access.kit();
375 DecoratorSet decorators = access.decorators();
376 MemNode::MemOrd mo = access.mem_node_mo();
377 Node* mem = access.memory();
378 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
379 Node* load_store = NULL;
380 Node* adr = access.addr().node();
381
421 load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
422 } else {
423 load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
424 }
425 break;
426 }
427 case T_LONG: {
428 if (is_weak_cas) {
429 load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
430 } else {
431 load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
432 }
433 break;
434 }
435 default:
436 ShouldNotReachHere();
437 }
438 }
439
440 access.set_raw_access(load_store);
441 pin_atomic_op(access);
442
443 return load_store;
444 }
445
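// Atomic swap. The oop path mirrors atomic_cmpxchg_val_at_resolved(): with
// compressed oops the new value is encoded, a GetAndSetN node performs the swap,
// and the old value is decoded on the way out; primitives use GetAndSet{B,S,I,L}.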
446 Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
447 GraphKit* kit = access.kit();
448 Node* mem = access.memory();
449 Node* adr = access.addr().node();
450 const TypePtr* adr_type = access.addr().type();
451 Node* load_store = NULL;
452
453 if (access.is_oop()) {
454 #ifdef _LP64
455 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
456 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
457 load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
458 } else
459 #endif
460 {
461       load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
462     }
463   } else {
464 switch (access.type()) {
465 case T_BYTE:
466 load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
467 break;
468 case T_SHORT:
469 load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
470 break;
471 case T_INT:
472 load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
473 break;
474 case T_LONG:
475 load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
476 break;
477 default:
478 ShouldNotReachHere();
479 }
480 }
481
482 access.set_raw_access(load_store);
483 pin_atomic_op(access);
484
485 #ifdef _LP64
486 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
487 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
488 }
489 #endif
490
491 return load_store;
492 }
493
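// Atomic fetch-and-add. Only primitive types are handled (GetAndAdd{B,S,I,L});
// there is no oop variant, since arithmetic on oops is not meaningful.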
494 Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
495 Node* load_store = NULL;
496 GraphKit* kit = access.kit();
497 Node* adr = access.addr().node();
498 const TypePtr* adr_type = access.addr().type();
499 Node* mem = access.memory();
500
501 switch(access.type()) {
502 case T_BYTE:
503 load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
504 break;
505 case T_SHORT:
506 load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
507 break;
508 case T_INT:
509 load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
510 break;
511 case T_LONG:
512 load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
513 break;
514 default:
515 ShouldNotReachHere();
516 }
517
518 access.set_raw_access(load_store);
519 pin_atomic_op(access);
520
521 return load_store;
522 }
523
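// The atomic entry points below follow the same pattern as store_at()/load_at():
// construct the fence, resolve the address, then call the *_resolved() virtual.
// Here the LoadStore pinning happens explicitly inside the *_resolved() functions
// via pin_atomic_op().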
524 Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
525 Node* new_val, const Type* value_type) const {
526 C2AccessFence fence(access);
527 resolve_address(access);
528 return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
529 }
530
531 Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
532 Node* new_val, const Type* value_type) const {
533 C2AccessFence fence(access);
534 resolve_address(access);
535 return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
536 }
537
538 Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
539 C2AccessFence fence(access);
540 resolve_address(access);
541 return atomic_xchg_at_resolved(access, new_val, value_type);
542 }
543
544 Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
545 C2AccessFence fence(access);
546 resolve_address(access);
547 return atomic_add_at_resolved(access, new_val, value_type);
548 }
549
550 void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
551   // Exclude the header but include the array length, so we copy in 8-byte words.
552 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
553 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
554 instanceOopDesc::base_offset_in_bytes();
555 // base_off:
556 // 8 - 32-bit VM
557 // 12 - 64-bit VM, compressed klass
558 // 16 - 64-bit VM, normal klass
559 if (base_off % BytesPerLong != 0) {
560 assert(UseCompressedClassPointers, "");
561 if (is_array) {
562       // Exclude the length so we copy in 8-byte words.
563 base_off += sizeof(int);
564 } else {
565       // Include the klass so we copy in 8-byte words.
195 }
196
197 if (is_atomic) {
198 if (is_acquire || is_volatile) {
199 kit->insert_mem_bar(Op_MemBarAcquire);
200 }
201 } else if (is_write) {
202 // If not multiple copy atomic, we do the MemBarVolatile before the load.
203 if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
204 kit->insert_mem_bar(Op_MemBarVolatile); // Use fat membar
205 }
206 } else {
207 if (is_volatile || is_acquire) {
208 kit->insert_mem_bar(Op_MemBarAcquire, _access.raw_access());
209 }
210 }
211 }
212
213 };
214
215 class C2AtomicAccessFence: public C2AccessFence {
216 C2AtomicAccess& _access;
217
218 public:
219 C2AtomicAccessFence(C2AtomicAccess& access) :
220 C2AccessFence(access), _access(access) { }
221
222 ~C2AtomicAccessFence() {
223     // ~C2AtomicAccessFence runs before ~C2AccessFence, and that ordering matters:
224     // the SCMemProjNode must be added before the trailing memory barriers are
225     // inserted in ~C2AccessFence.
226 if (!_access.needs_pinning()) {
227 return;
228 }
229 // SCMemProjNodes represent the memory state of a LoadStore. Their
230 // main role is to prevent LoadStore nodes from being optimized away
231 // when their results aren't used.
232 GraphKit* kit = _access.kit();
233 Node* load_store = _access.raw_access();
234 assert(load_store != NULL, "must pin atomic op");
235 Node* proj = kit->gvn().transform(new SCMemProjNode(load_store));
236 kit->set_memory(proj, _access.alias_idx());
237 }
238
239 };
240
241 Node* BarrierSetC2::store_at(C2Access& access, C2AccessValue& val) const {
242 C2AccessFence fence(access);
243 resolve_address(access);
244 return store_at_resolved(access, val);
245 }
246
247 Node* BarrierSetC2::load_at(C2Access& access, const Type* val_type) const {
248 C2AccessFence fence(access);
249 resolve_address(access);
250 return load_at_resolved(access, val_type);
251 }
252
253 MemNode::MemOrd C2Access::mem_node_mo() const {
254 bool is_write = (_decorators & C2_WRITE_ACCESS) != 0;
255 bool is_read = (_decorators & C2_READ_ACCESS) != 0;
256 if ((_decorators & MO_SEQ_CST) != 0) {
257 if (is_write && is_read) {
258 // For atomic operations
259 return MemNode::seqcst;
260 } else if (is_write) {
302 const TypePtr* adr_type = _addr.type();
303 Node* adr = _addr.node();
304 if (!needs_cpu_membar() && adr_type->isa_instptr()) {
305 assert(adr_type->meet(TypePtr::NULL_PTR) != adr_type->remove_speculative(), "should be not null");
306 intptr_t offset = Type::OffsetBot;
307 AddPNode::Ideal_base_and_offset(adr, &_kit->gvn(), offset);
308 if (offset >= 0) {
309 int s = Klass::layout_helper_size_in_bytes(adr_type->isa_instptr()->klass()->layout_helper());
310 if (offset < s) {
311 // Guaranteed to be a valid access, no need to pin it
312 _decorators ^= C2_CONTROL_DEPENDENT_LOAD;
313 _decorators ^= C2_PINNED_LOAD;
314 }
315 }
316 }
317 }
318 }
319
320 //--------------------------- atomic operations---------------------------------
321
322 void C2AtomicAccess::set_memory() {
323 Node *mem = _kit->memory(_alias_idx);
324 _memory = mem;
325 }
326
327 Node* BarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicAccess& access, Node* expected_val,
328 Node* new_val, const Type* value_type) const {
329 GraphKit* kit = access.kit();
330 MemNode::MemOrd mo = access.mem_node_mo();
331 Node* mem = access.memory();
332
333 Node* adr = access.addr().node();
334 const TypePtr* adr_type = access.addr().type();
335
336 Node* load_store = NULL;
337
338 if (access.is_oop()) {
339 #ifdef _LP64
340 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
341 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
353 break;
354 }
355 case T_SHORT: {
356 load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
357 break;
358 }
359 case T_INT: {
360 load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
361 break;
362 }
363 case T_LONG: {
364 load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
365 break;
366 }
367 default:
368 ShouldNotReachHere();
369 }
370 }
371
372 access.set_raw_access(load_store);
373
374 #ifdef _LP64
375 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
376 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
377 }
378 #endif
379
380 return load_store;
381 }
382
383 Node* BarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicAccess& access, Node* expected_val,
384 Node* new_val, const Type* value_type) const {
385 GraphKit* kit = access.kit();
386 DecoratorSet decorators = access.decorators();
387 MemNode::MemOrd mo = access.mem_node_mo();
388 Node* mem = access.memory();
389 bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
390 Node* load_store = NULL;
391 Node* adr = access.addr().node();
392
432 load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
433 } else {
434 load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
435 }
436 break;
437 }
438 case T_LONG: {
439 if (is_weak_cas) {
440 load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
441 } else {
442 load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
443 }
444 break;
445 }
446 default:
447 ShouldNotReachHere();
448 }
449 }
450
451 access.set_raw_access(load_store);
452
453 return load_store;
454 }
455
456 Node* BarrierSetC2::atomic_xchg_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
457 GraphKit* kit = access.kit();
458 Node* mem = access.memory();
459 Node* adr = access.addr().node();
460 const TypePtr* adr_type = access.addr().type();
461 Node* load_store = NULL;
462
463 if (access.is_oop()) {
464 #ifdef _LP64
465 if (adr->bottom_type()->is_ptr_to_narrowoop()) {
466 Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
467 load_store = kit->gvn().transform(new GetAndSetNNode(kit->control(), mem, adr, newval_enc, adr_type, value_type->make_narrowoop()));
468 } else
469 #endif
470 {
471       load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
472     }
473   } else {
474 switch (access.type()) {
475 case T_BYTE:
476 load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
477 break;
478 case T_SHORT:
479 load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
480 break;
481 case T_INT:
482 load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
483 break;
484 case T_LONG:
485 load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
486 break;
487 default:
488 ShouldNotReachHere();
489 }
490 }
491
492 access.set_raw_access(load_store);
493
494 #ifdef _LP64
495 if (access.is_oop() && adr->bottom_type()->is_ptr_to_narrowoop()) {
496 return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
497 }
498 #endif
499
500 return load_store;
501 }
502
503 Node* BarrierSetC2::atomic_add_at_resolved(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
504 Node* load_store = NULL;
505 GraphKit* kit = access.kit();
506 Node* adr = access.addr().node();
507 const TypePtr* adr_type = access.addr().type();
508 Node* mem = access.memory();
509
510 switch(access.type()) {
511 case T_BYTE:
512 load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
513 break;
514 case T_SHORT:
515 load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
516 break;
517 case T_INT:
518 load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
519 break;
520 case T_LONG:
521 load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
522 break;
523 default:
524 ShouldNotReachHere();
525 }
526
527 access.set_raw_access(load_store);
528
529 return load_store;
530 }
531
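// These atomic entry points construct a C2AtomicAccessFence rather than a plain
// C2AccessFence, so the SCMemProj pinning happens automatically when the fence is
// destroyed, after the *_resolved() call returns.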
532 Node* BarrierSetC2::atomic_cmpxchg_val_at(C2AtomicAccess& access, Node* expected_val,
533 Node* new_val, const Type* value_type) const {
534 C2AtomicAccessFence fence(access);
535 resolve_address(access);
536 return atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
537 }
538
539 Node* BarrierSetC2::atomic_cmpxchg_bool_at(C2AtomicAccess& access, Node* expected_val,
540 Node* new_val, const Type* value_type) const {
541 C2AtomicAccessFence fence(access);
542 resolve_address(access);
543 return atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
544 }
545
546 Node* BarrierSetC2::atomic_xchg_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
547 C2AtomicAccessFence fence(access);
548 resolve_address(access);
549 return atomic_xchg_at_resolved(access, new_val, value_type);
550 }
551
552 Node* BarrierSetC2::atomic_add_at(C2AtomicAccess& access, Node* new_val, const Type* value_type) const {
553 C2AtomicAccessFence fence(access);
554 resolve_address(access);
555 return atomic_add_at_resolved(access, new_val, value_type);
556 }
557
558 void BarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
559   // Exclude the header but include the array length, so we copy in 8-byte words.
560 // Can't use base_offset_in_bytes(bt) since basic type is unknown.
561 int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
562 instanceOopDesc::base_offset_in_bytes();
563 // base_off:
564 // 8 - 32-bit VM
565 // 12 - 64-bit VM, compressed klass
566 // 16 - 64-bit VM, normal klass
567 if (base_off % BytesPerLong != 0) {
568 assert(UseCompressedClassPointers, "");
569 if (is_array) {
570       // Exclude the length so we copy in 8-byte words.
571 base_off += sizeof(int);
572 } else {
573       // Include the klass so we copy in 8-byte words.