src/share/vm/runtime/park.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File hotspot Sdiff src/share/vm/runtime

src/share/vm/runtime/park.cpp

Print this page
rev 5732 : [mq]: comments2


 135 
 136 // Override operator new and delete so we can ensure that the
 137 // least significant byte of ParkEvent addresses is 0.
 138 // Beware that excessive address alignment is undesirable
 139 // as it can result in D$ index usage imbalance as
 140 // well as bank access imbalance on Niagara-like platforms,
 141 // although Niagara's hash function should help.
 142 
 143 void * ParkEvent::operator new (size_t sz) throw() {
       // Over-allocate by 256 bytes and round the result up to the next
       // 256-byte boundary, guaranteeing the low 8 bits of every ParkEvent
       // address are zero (see the alignment rationale in the comment above).
       // The original, unaligned pointer from AllocateHeap is discarded and
       // can never be passed back to free; that is acceptable only because
       // ParkEvents are immortal and operator delete is unreachable.
 144   return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
 145 }
 146 
 147 void ParkEvent::operator delete (void * a) {
 148   // ParkEvents are type-stable and immortal ...
       // Deallocation is deliberately forbidden: operator new hands out an
       // address offset from the true allocation base, so there is nothing
       // valid to free here.  Any call into this path is a fatal VM error.
 149   ShouldNotReachHere();
 150 }
 151 
 152 
 153 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 154 // allocate() and release() code for use by Parkers.  The Parker:: forms
 155 // will eventually be removed as we consolide and shift over to ParkEvents
 156 // for both builtin synchronization and JSR166 operations.
 157 
 158 volatile int Parker::ListLock = 0 ;           // presumably a spin lock guarding free-list updates -- acquire/release sites not visible in this chunk; verify
 159 Parker * volatile Parker::FreeList = NULL ;   // global list of recycled, unassociated Parkers (detached via cmpxchg in Allocate)
 160 
 161 Parker * Parker::Allocate (JavaThread * t) {
 162   guarantee (t != NULL, "invariant") ;
 163   Parker * p ;
 164 
 165   // Start by trying to recycle an existing but unassociated
 166   // Parker from the global free list.
 167   for (;;) {
 168     p = FreeList ;
 169     if (p  == NULL) break ;
 170     // 1: Detach
 171     // Tantamount to p = Swap (&FreeList, NULL)
 172     if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
 173        continue ;
 174     }
 175 




 135 
 136 // Override operator new and delete so we can ensure that the
 137 // least significant byte of ParkEvent addresses is 0.
 138 // Beware that excessive address alignment is undesirable
 139 // as it can result in D$ index usage imbalance as
 140 // well as bank access imbalance on Niagara-like platforms,
 141 // although Niagara's hash function should help.
 142 
 143 void * ParkEvent::operator new (size_t sz) throw() {
       // Over-allocate by 256 bytes and round the result up to the next
       // 256-byte boundary, guaranteeing the low 8 bits of every ParkEvent
       // address are zero (see the alignment rationale in the comment above).
       // The original, unaligned pointer from AllocateHeap is discarded and
       // can never be passed back to free; that is acceptable only because
       // ParkEvents are immortal and operator delete is unreachable.
 144   return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
 145 }
 146 
 147 void ParkEvent::operator delete (void * a) {
 148   // ParkEvents are type-stable and immortal ...
       // Deallocation is deliberately forbidden: operator new hands out an
       // address offset from the true allocation base, so there is nothing
       // valid to free here.  Any call into this path is a fatal VM error.
 149   ShouldNotReachHere();
 150 }
 151 
 152 
 153 // 6399321 As a temporary measure we copied & modified the ParkEvent::
 154 // allocate() and release() code for use by Parkers.  The Parker:: forms
 155 // will eventually be removed as we consolidate and shift over to ParkEvents
 156 // for both builtin synchronization and JSR166 operations.
 157 
 158 volatile int Parker::ListLock = 0 ;           // presumably a spin lock guarding free-list updates -- acquire/release sites not visible in this chunk; verify
 159 Parker * volatile Parker::FreeList = NULL ;   // global list of recycled, unassociated Parkers (detached via cmpxchg in Allocate)
 160 
 161 Parker * Parker::Allocate (JavaThread * t) {
 162   guarantee (t != NULL, "invariant") ;
 163   Parker * p ;
 164 
 165   // Start by trying to recycle an existing but unassociated
 166   // Parker from the global free list.
 167   for (;;) {
 168     p = FreeList ;
 169     if (p  == NULL) break ;
 170     // 1: Detach
 171     // Tantamount to p = Swap (&FreeList, NULL)
 172     if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
 173        continue ;
 174     }
 175 


src/share/vm/runtime/park.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File