433 }
434
435 // -----------------------------------------------------------------------------
436 // Hash Code handling
437 //
438 // Performance concern:
439 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
440 // OrderAccess::Dummy variable. This store is unnecessary for correctness.
441 // Many threads STing into a common location causes considerable cache migration
442 // or "sloshing" on large SMP systems. As such, I avoid using OrderAccess::storestore()
443 // until it's repaired. In some cases OrderAccess::fence() -- which incurs local
444 // latency on the executing processor -- is a better choice as it scales on SMP
445 // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
446 // discussion of coherency costs. Note that all our current reference platforms
447 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
448 //
449 // As a general policy we use "volatile" to control compiler-based reordering
450 // and explicit fences (barriers) to control for architectural reordering performed
451 // by the CPU(s) or platform.
452
453 static int MBFence (int x) { OrderAccess::fence(); return x; }
454
455 struct SharedGlobals {
456 // These are highly shared mostly-read variables.
457 // To avoid false-sharing they need to be the sole occupants of a $ line.
458 double padPrefix [8];       // padding: pushes the fields below onto their own cache line
459 volatile int stwRandom ;    // read-mostly random value; "stw" suggests it is refreshed at stop-the-world pauses -- TODO confirm at the writer
460 volatile int stwCycle ;     // read-mostly cycle counter; presumably advanced at stop-the-world pauses -- TODO confirm at the writer
461
462 // Hot RW variables -- Sequester to avoid false-sharing
463 double padSuffix [16];      // padding: separates the hot hcSequence from the read-mostly fields above
464 volatile int hcSequence ;   // frequently written sequence counter ("hc" = hash code, per the section header above)
465 double padFinal [8] ;       // padding: keeps hcSequence from sharing a cache line with whatever follows GVars
466 } ;
467
468 static SharedGlobals GVars ;    // single file-scope instance of the padded shared-state struct
469 static int MonitorScavengeThreshold = 1000000 ;     // threshold that triggers a monitor scavenge -- units (monitor count?) not visible here; confirm at the reader
470 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
471
472 static markOop ReadStableMark (oop obj) {
473 markOop mark = obj->mark() ;
474 if (!mark->is_being_inflated()) {
|
433 }
434
435 // -----------------------------------------------------------------------------
436 // Hash Code handling
437 //
438 // Performance concern:
439 // OrderAccess::storestore() calls release() which STs 0 into the global volatile
440 // OrderAccess::Dummy variable. This store is unnecessary for correctness.
441 // Many threads STing into a common location causes considerable cache migration
442 // or "sloshing" on large SMP systems. As such, I avoid using OrderAccess::storestore()
443 // until it's repaired. In some cases OrderAccess::fence() -- which incurs local
444 // latency on the executing processor -- is a better choice as it scales on SMP
445 // systems. See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
446 // discussion of coherency costs. Note that all our current reference platforms
447 // provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
448 //
449 // As a general policy we use "volatile" to control compiler-based reordering
450 // and explicit fences (barriers) to control for architectural reordering performed
451 // by the CPU(s) or platform.
452
453 struct SharedGlobals {
454 // These are highly shared mostly-read variables.
455 // To avoid false-sharing they need to be the sole occupants of a $ line.
456 double padPrefix [8];       // padding: pushes the fields below onto their own cache line
457 volatile int stwRandom ;    // read-mostly random value; "stw" suggests it is refreshed at stop-the-world pauses -- TODO confirm at the writer
458 volatile int stwCycle ;     // read-mostly cycle counter; presumably advanced at stop-the-world pauses -- TODO confirm at the writer
459
460 // Hot RW variables -- Sequester to avoid false-sharing
461 double padSuffix [16];      // padding: separates the hot hcSequence from the read-mostly fields above
462 volatile int hcSequence ;   // frequently written sequence counter ("hc" = hash code, per the section header above)
463 double padFinal [8] ;       // padding: keeps hcSequence from sharing a cache line with whatever follows GVars
464 } ;
465
466 static SharedGlobals GVars ;    // single file-scope instance of the padded shared-state struct
467 static int MonitorScavengeThreshold = 1000000 ;     // threshold that triggers a monitor scavenge -- units (monitor count?) not visible here; confirm at the reader
468 static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending
469
470 static markOop ReadStableMark (oop obj) {
471 markOop mark = obj->mark() ;
472 if (!mark->is_being_inflated()) {
|