
src/share/vm/gc/g1/g1CollectedHeap.cpp

rev 13328 : [mq]: webrev.0b
rev 13329 : [mq]: webrev.1
rev 13330 : imported patch webrev.2
rev 13331 : imported patch webrev.3b
rev 13332 : [mq]: webrev.4


1243       // how reference processing currently works in G1.
1244 
1245       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1246       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1247 
1248       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1249       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1250 
1251       ref_processor_stw()->enable_discovery();
1252       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1253 
1254       // Do collection work
1255       {
1256         HandleMark hm;  // Discard invalid handles created during gc
1257         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1258       }
1259 
1260       assert(num_free_regions() == 0, "we should not have added any free regions");
1261       rebuild_region_sets(false /* free_list_only */);
1262 


1263       // Enqueue any discovered reference objects that have
1264       // not been removed from the discovered lists.
1265       ref_processor_stw()->enqueue_discovered_references();


1266 
1267 #if defined(COMPILER2) || INCLUDE_JVMCI
1268       DerivedPointerTable::update_pointers();
1269 #endif
1270 
1271       MemoryService::track_memory_usage();
1272 
1273       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1274       ref_processor_stw()->verify_no_references_recorded();
1275 
1276       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1277       ClassLoaderDataGraph::purge();
1278       MetaspaceAux::verify_metrics();
1279 
1280       // Note: since we've just done a full GC, concurrent
1281       // marking is no longer active. Therefore we need not
1282       // re-enable reference discovery for the CM ref processor.
1283       // That will be done at the start of the next marking cycle.
1284       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1285       ref_processor_cm()->verify_no_references_recorded();


1650   // should not be holding on to any GC alloc regions. The method
1651   // below will make sure of that and do any remaining clean up.
1652   _allocator->abandon_gc_alloc_regions();
1653 
1654   // Instead of tearing down / rebuilding the free lists here, we
1655   // could instead use the remove_all_pending() method on free_list to
1656   // remove only the ones that we need to remove.
1657   tear_down_region_sets(true /* free_list_only */);
1658   shrink_helper(shrink_bytes);
1659   rebuild_region_sets(true /* free_list_only */);
1660 
1661   _hrm.verify_optional();
1662   _verifier->verify_region_sets_optional();
1663 }
1664 
1665 // Public methods.
1666 
1667 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1668   CollectedHeap(),
1669   _collector_policy(collector_policy),
1670   _g1_policy(create_g1_policy()),


1671   _collection_set(this, _g1_policy),
1672   _dirty_card_queue_set(false),
1673   _is_alive_closure_cm(this),
1674   _is_alive_closure_stw(this),
1675   _ref_processor_cm(NULL),
1676   _ref_processor_stw(NULL),
1677   _bot(NULL),
1678   _hot_card_cache(NULL),
1679   _g1_rem_set(NULL),
1680   _cg1r(NULL),
1681   _g1mm(NULL),
1682   _preserved_marks_set(true /* in_c_heap */),
1683   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1684   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1685   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1686   _humongous_reclaim_candidates(),
1687   _has_humongous_reclaim_candidates(false),
1688   _archive_allocator(NULL),
1689   _free_regions_coming(false),
1690   _gc_time_stamp(0),
1691   _summary_bytes_used(0),
1692   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1693   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1694   _expand_heap_after_alloc_failure(true),
1695   _old_marking_cycles_started(0),
1696   _old_marking_cycles_completed(0),
1697   _in_cset_fast_test(),
1698   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1699   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
1700 
1701   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1702                           /* are_GC_task_threads */true,
1703                           /* are_ConcurrentGC_threads */false);
1704   _workers->initialize_workers();
1705   _verifier = new G1HeapVerifier(this);
1706 
1707   _allocator = G1Allocator::create_allocator(this);
1708 
1709   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1710 
1711   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1712 
1713   // Override the default _filler_array_max_size so that no humongous filler
1714   // objects are created.
1715   _filler_array_max_size = _humongous_object_threshold_in_words;
1716 
1717   uint n_queues = ParallelGCThreads;
1718   _task_queues = new RefToScanQueueSet(n_queues);
1719 


1998   //     (depending on the value of ParallelRefProcEnabled
1999   //     and ParallelGCThreads).
2000   //   * A full GC disables reference discovery by the CM
2001   //     ref processor and abandons any entries on its
2002   //     discovered lists.
2003   //
2004   // * For the STW processor:
2005   //   * Non-MT discovery is enabled at the start of a full GC.
2006   //   * Processing and enqueueing during a full GC are non-MT.
2007   //   * During a full GC, references are processed after marking.
2008   //
2009   //   * Discovery (may or may not be MT) is enabled at the start
2010   //     of an incremental evacuation pause.
2011   //   * References are processed near the end of an STW evacuation pause.
2012   //   * For both types of GC:
2013   //     * Discovery is atomic - i.e. not concurrent.
2014   //     * Reference discovery will not need a barrier.
2015 
2016   MemRegion mr = reserved_region();
2017 


2018   // Concurrent Mark ref processor
2019   _ref_processor_cm =
2020     new ReferenceProcessor(mr,    // span
2021                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2022                                 // mt processing
2023                            ParallelGCThreads,
2024                                 // degree of mt processing
2025                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2026                                 // mt discovery
2027                            MAX2(ParallelGCThreads, ConcGCThreads),
2028                                 // degree of mt discovery
2029                            false,
2030                                 // Reference discovery is not atomic
2031                            &_is_alive_closure_cm);
2032                                 // is alive closure
2033                                 // (for efficiency/performance)
2034 
2035   // STW ref processor
2036   _ref_processor_stw =
2037     new ReferenceProcessor(mr,    // span
2038                            ParallelRefProcEnabled && (ParallelGCThreads > 1),
2039                                 // mt processing
2040                            ParallelGCThreads,
2041                                 // degree of mt processing
2042                            (ParallelGCThreads > 1),
2043                                 // mt discovery
2044                            ParallelGCThreads,
2045                                 // degree of mt discovery
2046                            true,
2047                                 // Reference discovery is atomic
2048                            &_is_alive_closure_stw);
2049                                 // is alive closure
2050                                 // (for efficiency/performance)
2051 }
2052 
2053 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2054   return _collector_policy;
2055 }
2056 
2057 size_t G1CollectedHeap::capacity() const {
2058   return _hrm.length() * HeapRegion::GrainBytes;


4296 
4297   // Even when parallel reference processing is enabled, the processing
4298   // of JNI refs is serial, performed by the current thread
4299   // rather than by a worker. The following PSS will be used for processing
4300   // JNI refs.
4301 
4302   // Use only a single queue for this PSS.
4303   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4304   pss->set_ref_processor(NULL);
4305   assert(pss->queue_is_empty(), "pre-condition");
4306 
4307   // Keep alive closure.
4308   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4309 
4310   // Serial Complete GC closure
4311   G1STWDrainQueueClosure drain_queue(this, pss);
4312 
4313   // Setup the soft refs policy...
4314   rp->setup_policy(false);
4315 


4316   ReferenceProcessorStats stats;
4317   if (!rp->processing_is_mt()) {
4318     // Serial reference processing...
4319     stats = rp->process_discovered_references(&is_alive,
4320                                               &keep_alive,
4321                                               &drain_queue,
4322                                               NULL,
4323                                               _gc_timer_stw);
4324   } else {
4325     uint no_of_gc_workers = workers()->active_workers();
4326 
4327     // Parallel reference processing
4328     assert(no_of_gc_workers <= rp->max_num_q(),
4329            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4330            no_of_gc_workers,  rp->max_num_q());
4331 
4332     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4333     stats = rp->process_discovered_references(&is_alive,
4334                                               &keep_alive,
4335                                               &drain_queue,
4336                                               &par_task_executor,
4337                                               _gc_timer_stw);
4338   }
4339 
4340   _gc_tracer_stw->report_gc_reference_stats(stats);
4341 
4342   // We have completed copying any necessary live referent objects.
4343   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4344 
4345   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4346   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4347 }
4348 
4349 // Weak Reference processing during an evacuation pause (part 2).
4350 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4351   double ref_enq_start = os::elapsedTime();
4352 
4353   ReferenceProcessor* rp = _ref_processor_stw;
4354   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4355 


4356   // Now enqueue any remaining on the discovered lists on to
4357   // the pending list.
4358   if (!rp->processing_is_mt()) {
4359     // Serial reference processing...
4360     rp->enqueue_discovered_references();
4361   } else {
4362     // Parallel reference enqueueing
4363 
4364     uint n_workers = workers()->active_workers();
4365 
4366     assert(n_workers <= rp->max_num_q(),
4367            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4368            n_workers,  rp->max_num_q());
4369 
4370     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4371     rp->enqueue_discovered_references(&par_task_executor);
4372   }
4373 
4374   rp->verify_no_references_recorded();
4375   assert(!rp->discovery_enabled(), "should have been disabled");
4376 
4377   // FIXME
4378   // CM's reference processing also cleans up the string and symbol tables.
4379   // Should we do that here also? We could, but it is a serial operation
4380   // and could significantly increase the pause time.
4381 
4382   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4383   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4384 }
4385 
4386 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4387   double merge_pss_time_start = os::elapsedTime();
4388   per_thread_states->flush();
4389   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4390 }
4391 




1243       // how reference processing currently works in G1.
1244 
1245       // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1246       ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1247 
1248       // Temporarily clear the STW ref processor's _is_alive_non_header field.
1249       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1250 
1251       ref_processor_stw()->enable_discovery();
1252       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1253 
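The two Mutator objects above are RAII guards: each saves the processor's current setting on construction and restores it on scope exit, so the full-GC configuration cannot leak past this block. A minimal sketch of the pattern, with hypothetical accessor names (the real guards are declared in referenceProcessor.hpp):

    // Illustrative RAII guard: force a setting for the enclosing scope,
    // restore the saved value when the scope ends. The accessor names
    // are assumptions for this sketch.
    class ScopedMTDiscovery : public StackObj {
      ReferenceProcessor* _rp;
      bool                _saved;
     public:
      ScopedMTDiscovery(ReferenceProcessor* rp, bool mt)
          : _rp(rp), _saved(rp->discovery_is_mt()) {
        _rp->set_mt_discovery(mt);      // e.g. false: single-threaded
      }
      ~ScopedMTDiscovery() {
        _rp->set_mt_discovery(_saved);  // restore the previous mode
      }
    };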
1254       // Do collection work
1255       {
1256         HandleMark hm;  // Discard invalid handles created during gc
1257         G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1258       }
1259 
1260       assert(num_free_regions() == 0, "we should not have added any free regions");
1261       rebuild_region_sets(false /* free_list_only */);
1262 
1263       ReferenceProcessorPhaseTimes pt(NULL, ref_processor_stw()->num_q());
1264 
1265       // Enqueue any discovered reference objects that have
1266       // not been removed from the discovered lists.
1267       ref_processor_stw()->enqueue_discovered_references(NULL, &pt);
1268 
1269       pt.print_enqueue_phase();
1270 
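This is the heart of the patch in the full-GC path: enqueue_discovered_references() now takes a ReferenceProcessorPhaseTimes so per-phase timings can be logged; here a local instance is created (no GCTimer, sized by num_q()) and its enqueue phase printed immediately. A sketch of the accumulate-then-print idea only, using a hypothetical type (not the actual ReferenceProcessorPhaseTimes API):

    // Hypothetical accumulator: record the enqueue phase's wall time,
    // print it on request via unified logging.
    class EnqueuePhaseTimeSketch {
      double _enqueue_ms;
     public:
      EnqueuePhaseTimeSketch() : _enqueue_ms(0.0) {}
      void record(double start_s) {
        _enqueue_ms = (os::elapsedTime() - start_s) * 1000.0;
      }
      void print() const {
        log_debug(gc, phases, ref)("Reference Enqueuing: %.1fms", _enqueue_ms);
      }
    };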
1271 #if defined(COMPILER2) || INCLUDE_JVMCI
1272       DerivedPointerTable::update_pointers();
1273 #endif
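DerivedPointerTable::update_pointers() recomputes interior (derived) pointers held in compiled frames after objects have moved. Conceptually, the table captures base/derived pairs at the safepoint and replays the offset against the relocated base; an illustrative computation, not the real data structure:

    // Illustrative only: recompute a derived (interior) pointer once its
    // base object has been relocated.
    static intptr_t rederive(intptr_t old_base, intptr_t old_derived,
                             intptr_t new_base) {
      intptr_t offset = old_derived - old_base;  // captured at the safepoint
      return new_base + offset;                  // valid after relocation
    }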
1274 
1275       MemoryService::track_memory_usage();
1276 
1277       assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1278       ref_processor_stw()->verify_no_references_recorded();
1279 
1280       // Delete metaspaces for unloaded class loaders and clean up loader_data graph
1281       ClassLoaderDataGraph::purge();
1282       MetaspaceAux::verify_metrics();
1283 
1284       // Note: since we've just done a full GC, concurrent
1285       // marking is no longer active. Therefore we need not
1286       // re-enable reference discovery for the CM ref processor.
1287       // That will be done at the start of the next marking cycle.
1288       assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1289       ref_processor_cm()->verify_no_references_recorded();


1654   // should not be holding on to any GC alloc regions. The method
1655   // below will make sure of that and do any remaining clean up.
1656   _allocator->abandon_gc_alloc_regions();
1657 
1658   // Instead of tearing down / rebuilding the free lists here, we
1659   // could instead use the remove_all_pending() method on free_list to
1660   // remove only the ones that we need to remove.
1661   tear_down_region_sets(true /* free_list_only */);
1662   shrink_helper(shrink_bytes);
1663   rebuild_region_sets(true /* free_list_only */);
1664 
1665   _hrm.verify_optional();
1666   _verifier->verify_region_sets_optional();
1667 }
1668 
1669 // Public methods.
1670 
1671 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* collector_policy) :
1672   CollectedHeap(),
1673   _collector_policy(collector_policy),
1674   _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1675   _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1676   _g1_policy(create_g1_policy(_gc_timer_stw)),
1677   _collection_set(this, _g1_policy),
1678   _dirty_card_queue_set(false),
1679   _is_alive_closure_cm(this),
1680   _is_alive_closure_stw(this),
1681   _ref_processor_cm(NULL),
1682   _ref_processor_stw(NULL),
1683   _bot(NULL),
1684   _hot_card_cache(NULL),
1685   _g1_rem_set(NULL),
1686   _cg1r(NULL),
1687   _g1mm(NULL),
1688   _preserved_marks_set(true /* in_c_heap */),
1689   _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1690   _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1691   _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1692   _humongous_reclaim_candidates(),
1693   _has_humongous_reclaim_candidates(false),
1694   _archive_allocator(NULL),
1695   _free_regions_coming(false),
1696   _gc_time_stamp(0),
1697   _summary_bytes_used(0),
1698   _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
1699   _old_evac_stats("Old", OldPLABSize, PLABWeight),
1700   _expand_heap_after_alloc_failure(true),
1701   _old_marking_cycles_started(0),
1702   _old_marking_cycles_completed(0),
1703   _in_cset_fast_test() {
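Note the reordering above: _gc_timer_stw and _gc_tracer_stw now precede _g1_policy so that create_g1_policy(_gc_timer_stw) receives an initialized timer. C++ initializes members in declaration order, not initializer-list order, so this is only safe if the matching .hpp change also declares the timer before _g1_policy; the list order shown here then mirrors it. A standalone illustration of that rule:

    // Members are built in declaration order (a, then b) regardless of
    // how the initializer list is written; reversing the list would only
    // earn a -Wreorder warning, not change the order.
    struct InitOrder {
      int a;                            // declared first, built first
      int b;                            // may therefore safely consume a
      InitOrder() : a(1), b(a + 1) {}   // OK: a is initialized before b
    };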


1704 
1705   _workers = new WorkGang("GC Thread", ParallelGCThreads,
1706                           /* are_GC_task_threads */true,
1707                           /* are_ConcurrentGC_threads */false);
1708   _workers->initialize_workers();
1709   _verifier = new G1HeapVerifier(this);
1710 
1711   _allocator = G1Allocator::create_allocator(this);
1712 
1713   _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1714 
1715   _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1716 
1717   // Override the default _filler_array_max_size so that no humongous filler
1718   // objects are created.
1719   _filler_array_max_size = _humongous_object_threshold_in_words;
1720 
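A humongous object is one too large to be satisfied from a normal region, and humongous_threshold_for(HeapRegion::GrainWords) derives the cutoff from the region size; capping _filler_array_max_size at the same value guarantees filler arrays always fit in an ordinary region. A sketch of the check, assuming the threshold is half a region (which is what the helper's name and G1's documented behavior suggest):

    // Sketch: an allocation is humongous when it exceeds the threshold,
    // assumed here to be half a region (see humongous_threshold_for()).
    static bool is_humongous_sketch(size_t word_size, size_t region_words) {
      size_t threshold_words = region_words / 2;
      return word_size > threshold_words;
    }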
1721   uint n_queues = ParallelGCThreads;
1722   _task_queues = new RefToScanQueueSet(n_queues);
1723 


2002   //     (depending on the value of ParallelRefProcEnabled
2003   //     and ParallelGCThreads).
2004   //   * A full GC disables reference discovery by the CM
2005   //     ref processor and abandons any entries on its
2006   //     discovered lists.
2007   //
2008   // * For the STW processor:
2009   //   * Non-MT discovery is enabled at the start of a full GC.
2010   //   * Processing and enqueueing during a full GC are non-MT.
2011   //   * During a full GC, references are processed after marking.
2012   //
2013   //   * Discovery (may or may not be MT) is enabled at the start
2014   //     of an incremental evacuation pause.
2015   //   * References are processed near the end of an STW evacuation pause.
2016   //   * For both types of GC:
2017   //     * Discovery is atomic - i.e. not concurrent.
2018   //     * Reference discovery will not need a barrier.
2019 
2020   MemRegion mr = reserved_region();
2021 
2022   bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
2023 
2024   // Concurrent Mark ref processor
2025   _ref_processor_cm =
2026     new ReferenceProcessor(mr,    // span
2027                            mt_processing,
2028                                 // mt processing
2029                            ParallelGCThreads,
2030                                 // degree of mt processing
2031                            (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2032                                 // mt discovery
2033                            MAX2(ParallelGCThreads, ConcGCThreads),
2034                                 // degree of mt discovery
2035                            false,
2036                                 // Reference discovery is not atomic
2037                            &_is_alive_closure_cm);
2038                                 // is alive closure
2039                                 // (for efficiency/performance)
2040 
2041   // STW ref processor
2042   _ref_processor_stw =
2043     new ReferenceProcessor(mr,    // span
2044                            mt_processing,
2045                                 // mt processing
2046                            ParallelGCThreads,
2047                                 // degree of mt processing
2048                            (ParallelGCThreads > 1),
2049                                 // mt discovery
2050                            ParallelGCThreads,
2051                                 // degree of mt discovery
2052                            true,
2053                                 // Reference discovery is atomic
2054                            &_is_alive_closure_stw);
2055                                 // is alive closure
2056                                 // (for efficiency/performance)
2057 }
2058 
2059 CollectorPolicy* G1CollectedHeap::collector_policy() const {
2060   return _collector_policy;
2061 }
2062 
2063 size_t G1CollectedHeap::capacity() const {
2064   return _hrm.length() * HeapRegion::GrainBytes;


4302 
4303   // Even when parallel reference processing is enabled, the processing
4304   // of JNI refs is serial, performed by the current thread
4305   // rather than by a worker. The following PSS will be used for processing
4306   // JNI refs.
4307 
4308   // Use only a single queue for this PSS.
4309   G1ParScanThreadState*          pss = per_thread_states->state_for_worker(0);
4310   pss->set_ref_processor(NULL);
4311   assert(pss->queue_is_empty(), "pre-condition");
4312 
4313   // Keep alive closure.
4314   G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
4315 
4316   // Serial Complete GC closure
4317   G1STWDrainQueueClosure drain_queue(this, pss);
4318 
4319   // Setup the soft refs policy...
4320   rp->setup_policy(false);
4321 
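process_discovered_references() is driven by three closures: is_alive answers whether a referent is still reachable, keep_alive marks/copies a referent that must survive, and the complete-GC closure (drain_queue here) drains whatever work keep_alive generated. A heavily simplified, hypothetical sketch of the serial walk over one discovered list; the real implementation also handles clearing, soft-ref policy, and list splicing, and next_discovered() stands in for the real discovered-field accessor:

    void process_list_sketch(oop list, BoolObjectClosure* is_alive,
                             OopClosure* keep_alive, VoidClosure* complete_gc) {
      for (oop ref = list; ref != NULL; ref = next_discovered(ref)) {
        oop referent = java_lang_ref_Reference::referent(ref);
        if (referent != NULL && is_alive->do_object_b(referent)) {
          // live referent: mark/copy it (the real code passes the
          // referent field's address, not a local copy)
          keep_alive->do_oop(&referent);
        }
        // dead referents: clear or keep for enqueueing, per policy (elided)
      }
      complete_gc->do_void();  // drain any deferred copying work
    }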
4322   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
4323 
4324   ReferenceProcessorStats stats;
4325   if (!rp->processing_is_mt()) {
4326     // Serial reference processing...
4327     stats = rp->process_discovered_references(&is_alive,
4328                                               &keep_alive,
4329                                               &drain_queue,
4330                                               NULL,
4331                                               pt);
4332   } else {
4333     uint no_of_gc_workers = workers()->active_workers();
4334 
4335     // Parallel reference processing
4336     assert(no_of_gc_workers <= rp->max_num_q(),
4337            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4338            no_of_gc_workers,  rp->max_num_q());
4339 
4340     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
4341     stats = rp->process_discovered_references(&is_alive,
4342                                               &keep_alive,
4343                                               &drain_queue,
4344                                               &par_task_executor,
4345                                               pt);
4346   }
4347 
4348   _gc_tracer_stw->report_gc_reference_stats(stats);
4349 
4350   // We have completed copying any necessary live referent objects.
4351   assert(pss->queue_is_empty(), "both queue and overflow should be empty");
4352 
4353   double ref_proc_time = os::elapsedTime() - ref_proc_start;
4354   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
4355 }
4356 
4357 // Weak Reference processing during an evacuation pause (part 2).
4358 void G1CollectedHeap::enqueue_discovered_references(G1ParScanThreadStateSet* per_thread_states) {
4359   double ref_enq_start = os::elapsedTime();
4360 
4361   ReferenceProcessor* rp = _ref_processor_stw;
4362   assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
4363 
4364   ReferenceProcessorPhaseTimes* pt = g1_policy()->phase_times()->ref_phase_times();
4365 
4366   // Now enqueue any remaining on the discovered lists on to
4367   // the pending list.
4368   if (!rp->processing_is_mt()) {
4369     // Serial reference processing...
4370     rp->enqueue_discovered_references(NULL, pt);
4371   } else {
4372     // Parallel reference enqueueing
4373 
4374     uint n_workers = workers()->active_workers();
4375 
4376     assert(n_workers <= rp->max_num_q(),
4377            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
4378            n_workers,  rp->max_num_q());
4379 
4380     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
4381     rp->enqueue_discovered_references(&par_task_executor, pt);
4382   }
4383 
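The parallel branch uses HotSpot's standard executor idiom: the work is wrapped in a gang task and the executor schedules it on the WorkGang, one discovered-list index per worker. A hypothetical skeleton of such a task; AbstractGangTask is the real base class, everything else is illustrative:

    class EnqueueRefsSketchTask : public AbstractGangTask {
      ReferenceProcessor* _rp;
     public:
      EnqueueRefsSketchTask(ReferenceProcessor* rp)
        : AbstractGangTask("Enqueue Refs Sketch"), _rp(rp) {}
      virtual void work(uint worker_id) {
        // each worker would link the references on discovered list
        // 'worker_id' onto the shared pending list (helper elided)
      }
    };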
4384   rp->verify_no_references_recorded();
4385   assert(!rp->discovery_enabled(), "should have been disabled");
4386 
4387   // FIXME
4388   // CM's reference processing also cleans up the string and symbol tables.
4389   // Should we do that here also? We could, but it is a serial operation
4390   // and could significantly increase the pause time.
4391 
4392   double ref_enq_time = os::elapsedTime() - ref_enq_start;
4393   g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
4394 }
4395 
4396 void G1CollectedHeap::merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states) {
4397   double merge_pss_time_start = os::elapsedTime();
4398   per_thread_states->flush();
4399   g1_policy()->phase_times()->record_merge_pss_time_ms((os::elapsedTime() - merge_pss_time_start) * 1000.0);
4400 }
4401 

