
src/hotspot/share/gc/shared/c2/barrierSetC2.cpp

rev 56016 : 8229422: Taskqueue: Outdated selection of weak memory model platforms
Reviewed-by:

*** 196,206 ****
      // into actual barriers on most machines, but we still need rest of
      // compiler to respect ordering.
      if (is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      } else if (is_volatile) {
!       if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
        } else {
          _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
        }
      }
--- 196,206 ----
      // into actual barriers on most machines, but we still need rest of
      // compiler to respect ordering.
      if (is_release) {
        _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
      } else if (is_volatile) {
!       if (SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
          _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
        } else {
          _leading_membar = kit->insert_mem_bar(Op_MemBarRelease);
        }
      }
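Reviewer note: this hunk replaces the per-platform const bool support_IRIW_for_not_multiple_copy_atomic_cpu with the preprocessor define SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU. A minimal sketch of how such a define could be derived from a single per-platform macro; the derivation below is assumed for illustration, not taken from this webrev:

    // Sketch only: assumes a platform header defines CPU_MULTI_COPY_ATOMIC
    // on multi-copy-atomic CPUs; the name and placement are illustrative.
    #ifdef CPU_MULTI_COPY_ATOMIC
    // Multi-copy-atomic CPUs need no extra IRIW barriers.
    #define SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU false
    #else
    // Non-multi-copy-atomic platforms (e.g. PPC64) order volatile loads
    // with a full barrier instead.
    #define SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU true
    #endif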
*** 216,226 ****
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other. Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there's no problems making a strong assert about mixing users
      // of safe & unsafe memory.
!     if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
        assert(kit != NULL, "unsupported at optimization time");
        _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
      }
    }

--- 216,226 ----
      // Memory barrier to prevent normal and 'unsafe' accesses from
      // bypassing each other. Happens after null checks, so the
      // exception paths do not take memory state from the memory barrier,
      // so there's no problems making a strong assert about mixing users
      // of safe & unsafe memory.
!     if (is_volatile && SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
        assert(kit != NULL, "unsupported at optimization time");
        _leading_membar = kit->insert_mem_bar(Op_MemBarVolatile);
      }
    }

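For context on why a leading MemBarVolatile is emitted here at all: IRIW (Independent Reads of Independent Writes) is the litmus test behind this flag. A self-contained C++ sketch as an analogy; thread and variable names are mine, not HotSpot's:

    #include <atomic>
    #include <thread>
    #include <cstdio>

    // IRIW: two writers store to independent locations; two readers scan
    // them in opposite orders. With seq_cst accesses, the outcome
    // r1==1 && r2==0 && r3==1 && r4==0 is forbidden: all threads must agree
    // on a single order of the two stores. On a non-multi-copy-atomic CPU,
    // weaker loads could observe exactly that outcome, which is why HotSpot
    // emits a full barrier before volatile loads on such platforms.
    std::atomic<int> x{0}, y{0};
    int r1, r2, r3, r4;

    int main() {
      std::thread w1([] { x.store(1, std::memory_order_seq_cst); });
      std::thread w2([] { y.store(1, std::memory_order_seq_cst); });
      std::thread rd1([] {
        r1 = x.load(std::memory_order_seq_cst);
        r2 = y.load(std::memory_order_seq_cst);
      });
      std::thread rd2([] {
        r3 = y.load(std::memory_order_seq_cst);
        r4 = x.load(std::memory_order_seq_cst);
      });
      w1.join(); w2.join(); rd1.join(); rd2.join();
      std::printf("r1=%d r2=%d r3=%d r4=%d\n", r1, r2, r3, r4);
      return 0;
    }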
*** 267,277 ****
          MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else if (is_write) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
!     if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
        assert(kit != NULL, "unsupported at optimization time");
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
        if (_leading_membar != NULL) {
          MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
--- 267,277 ----
          MemBarNode::set_load_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
        }
      }
    } else if (is_write) {
      // If not multiple copy atomic, we do the MemBarVolatile before the load.
!     if (is_volatile && !SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU) {
        assert(kit != NULL, "unsupported at optimization time");
        Node* n = _access.raw_access();
        Node* mb = kit->insert_mem_bar(Op_MemBarVolatile, n); // Use fat membar
        if (_leading_membar != NULL) {
          MemBarNode::set_store_pair(_leading_membar->as_MemBar(), mb->as_MemBar());
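Reading aid (my paraphrase of the barrier shape this branch builds, not authoritative): when the flag is false, a volatile store gets a leading MemBarRelease from the constructor and the trailing fat MemBarVolatile added here; when the flag is true, the fat barrier moves to the load side instead. In std::atomic terms, roughly:

    #include <atomic>

    // Analogy for the IR shape of a volatile store when IRIW is handled
    // at the store side (flag == false): MemBarRelease; StoreX; MemBarVolatile.
    void volatile_store_shape(std::atomic<int>& field, int v) {
      std::atomic_thread_fence(std::memory_order_release); // leading MemBarRelease
      field.store(v, std::memory_order_relaxed);           // the raw store
      std::atomic_thread_fence(std::memory_order_seq_cst); // trailing fat MemBarVolatile
    }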
*** 279,289 ****
      }
    } else {
      if (is_volatile || is_acquire) {
        assert(kit != NULL, "unsupported at optimization time");
        Node* n = _access.raw_access();
!       assert(_leading_membar == NULL || support_IRIW_for_not_multiple_copy_atomic_cpu, "no leading membar expected");
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        mb->as_MemBar()->set_trailing_load();
      }
    }
  }
--- 279,289 ----
      }
    } else {
      if (is_volatile || is_acquire) {
        assert(kit != NULL, "unsupported at optimization time");
        Node* n = _access.raw_access();
!       assert(_leading_membar == NULL || SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU, "no leading membar expected");
        Node* mb = kit->insert_mem_bar(Op_MemBarAcquire, n);
        mb->as_MemBar()->set_trailing_load();
      }
    }
  }
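And the matching load side (again a hedged analogy, not the actual lowering): the trailing MemBarAcquire added here is unconditional for volatile/acquire loads, while the leading fat barrier from the constructor exists only when the flag is true:

    #include <atomic>

    // Analogy for the IR shape of a volatile load. iriw_at_loads stands in
    // for SUPPORT_IRIW_FOR_NOT_MULTI_COPY_ATOMIC_CPU (compile-time in the
    // real code; a runtime parameter here purely for illustration).
    int volatile_load_shape(std::atomic<int>& field, bool iriw_at_loads) {
      if (iriw_at_loads) {
        std::atomic_thread_fence(std::memory_order_seq_cst); // leading MemBarVolatile
      }
      int v = field.load(std::memory_order_relaxed);         // the raw load
      std::atomic_thread_fence(std::memory_order_acquire);   // trailing MemBarAcquire
      return v;
    }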