/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 .
*/
ScBroadcastAreaSlot::~ScBroadcastAreaSlot()
{
    // Drain the table entry by entry so the unordered_map never hashes a
    // dangling area pointer during its own destruction.
    ScBroadcastAreas::iterator aIt = aBroadcastAreaTbl.begin();
    while (aIt != aBroadcastAreaTbl.end())
    {
        ScBroadcastArea* pArea = (*aIt).mpArea;
        // Erase first, delete afterwards: once erased, the hash can no
        // longer touch the (possibly freed) area.
        aIt = aBroadcastAreaTbl.erase(aIt);
        if (!pArea->DecRef())
            delete pArea;
    }
}
// Queries the document's hard-recalc state; while recalc is still OFF and
// this slot's area table has hit its capacity limit, a user-visible warning
// is raised via the DocShell.
// NOTE(review): this definition is truncated in this chunk — the closing
// braces, the state transition after the warning, and the return statement
// are missing. Reconstruct from the canonical source before building.
ScDocument::HardRecalcState ScBroadcastAreaSlot::CheckHardRecalcStateCondition() const
{
ScDocument::HardRecalcState eState = rDoc.GetHardRecalcState(); if (eState == ScDocument::HardRecalcState::OFF)
{ if (aBroadcastAreaTbl.size() >= aBroadcastAreaTbl.max_size())
{ // this is more hypothetical now, check existed for old SV_PTRARR_SORT
ScDocShell* pShell = rDoc.GetDocumentShell();
OSL_ENSURE( pShell, "Missing DocShell :-/" );
if ( pShell )
// Surface the condition to the user instead of failing silently.
pShell->SetError(SCWARN_CORE_HARD_RECALC);
bool ScBroadcastAreaSlot::StartListeningArea( const ScRange& rRange, bool bGroupListening, SvtListener* pListener, ScBroadcastArea*& rpArea )
{ bool bNewArea = false;
OSL_ENSURE(pListener, "StartListeningArea: pListener Null");
assert(!rDoc.IsDelayedFormulaGrouping()); // otherwise the group size might be incorrect if (CheckHardRecalcStateCondition() == ScDocument::HardRecalcState::ETERNAL) returnfalse; if ( !rpArea )
{ // Even if most times the area doesn't exist yet and immediately trying // to new and insert it would save an attempt to find it, on massive // operations like identical large [HV]LOOKUP() areas the new/delete // would add quite some penalty for all but the first formula cell.
ScBroadcastAreas::const_iterator aIter( FindBroadcastArea( rRange, bGroupListening)); if (aIter != aBroadcastAreaTbl.end())
rpArea = (*aIter).mpArea; else
{
rpArea = new ScBroadcastArea( rRange);
rpArea->SetGroupListening(bGroupListening); if (aBroadcastAreaTbl.insert( rpArea).second)
{
rpArea->IncRef();
bNewArea = true;
} else
{
OSL_FAIL("StartListeningArea: area not found and not inserted in slot?!?"); delete rpArea;
rpArea = nullptr;
}
} if (rpArea)
pListener->StartListening( rpArea->GetBroadcaster());
} else
{ if (aBroadcastAreaTbl.insert( rpArea).second)
rpArea->IncRef();
} return bNewArea;
}
// If rpArea != NULL then no listeners are stopped, only the area is removed // and the reference count decremented. void ScBroadcastAreaSlot::EndListeningArea( const ScRange& rRange, bool bGroupListening, SvtListener* pListener, ScBroadcastArea*& rpArea )
{
OSL_ENSURE(pListener, "EndListeningArea: pListener Null"); if ( !rpArea )
{
ScBroadcastAreas::iterator aIter( FindBroadcastArea( rRange, bGroupListening)); if (aIter == aBroadcastAreaTbl.end() || isMarkedErased( aIter)) return;
rpArea = (*aIter).mpArea;
pListener->EndListening( rpArea->GetBroadcaster() ); if ( !rpArea->GetBroadcaster().HasListeners() )
{ // if nobody is listening we can dispose it if (rpArea->GetRef() == 1)
rpArea = nullptr; // will be deleted by erase
EraseArea( aIter);
}
} else
{ if (rpArea && !rpArea->GetBroadcaster().HasListeners())
{
ScBroadcastAreas::iterator aIter( FindBroadcastArea( rRange, bGroupListening)); if (aIter == aBroadcastAreaTbl.end() || isMarkedErased( aIter)) return;
OSL_ENSURE( (*aIter).mpArea == rpArea, "EndListeningArea: area pointer mismatch"); if (rpArea->GetRef() == 1)
rpArea = nullptr; // will be deleted by erase
EraseArea( aIter);
}
}
}
// NOTE(review): orphan fragment — the beginning of the enclosing broadcast
// function is missing from this chunk; `rAreaRange`, `rRange` and
// `bIsBroadcasted` are declared in the lost part, and the `continue` targets
// a loop that is not visible here. Restore from the canonical source.
// Take the intersection of the area range and the broadcast range.
ScRange aIntersection = rAreaRange.Intersection(rRange); if (!aIntersection.IsValid()) continue;
// A Notify() during broadcast may call EndListeningArea() and thus dispose // an area if it was the last listener, which would invalidate an iterator // pointing to it, hence the real erase is done afterwards.
FinallyEraseAreas();
return bIsBroadcasted;
}
// Broadcasts rHint to the areas of this slot that cover the hint's position.
// NOTE(review): this definition is truncated — the iteration over
// aBroadcastAreaTbl that declares and sets `bIsBroadcasted` is missing, and
// "returnfalse" lacks a space (compile error). Both must be restored from
// the canonical source; do not attempt to build this as-is.
bool ScBroadcastAreaSlot::AreaBroadcast( const ScHint& rHint)
{ if (aBroadcastAreaTbl.empty()) returnfalse;
// A Notify() during broadcast may call EndListeningArea() and thus dispose // an area if it was the last listener, which would invalidate an iterator // pointing to it, hence the real erase is done afterwards.
FinallyEraseAreas();
return bIsBroadcasted;
}
void ScBroadcastAreaSlot::DelBroadcastAreasInRange( const ScRange& rRange )
{ if (aBroadcastAreaTbl.empty()) return; for (ScBroadcastAreas::iterator aIter( aBroadcastAreaTbl.begin());
aIter != aBroadcastAreaTbl.end(); /* increment in body */ )
{ const ScRange& rAreaRange = (*aIter).mpArea->GetRange(); if (rRange.Contains( rAreaRange))
{
ScBroadcastArea* pArea = (*aIter).mpArea;
aIter = aBroadcastAreaTbl.erase(aIter); // erase before modifying if (!pArea->DecRef())
{ if (pBASM->IsInBulkBroadcast())
pBASM->RemoveBulkArea( pArea); delete pArea;
}
} else
++aIter;
}
}
// Remove the area rIter points at. While a broadcast iteration is running
// the entry is only marked, and the real erase is deferred to
// FinallyEraseAreas() so the iteration's iterators stay valid.
void ScBroadcastAreaSlot::EraseArea( ScBroadcastAreas::iterator& rIter )
{
    if (mbInBroadcastIteration)
    {
        (*rIter).mbErasure = true;  // mark for erasure
        mbHasErasedArea = true;     // at least one area is marked for erasure.
        pBASM->PushAreaToBeErased( this, rIter);
        return;
    }
    ScBroadcastArea* pArea = (*rIter).mpArea;
    aBroadcastAreaTbl.erase( rIter);
    if (!pArea->DecRef())
    {
        if (pBASM->IsInBulkBroadcast())
            pBASM->RemoveBulkGroupArea(pArea);
        delete pArea;
    }
}
// NOTE(review): orphan fragment — these filter switches belong inside a loop
// of a function whose header is missing from this chunk (`aIter`, `eGroup`,
// `eType`, `rRange` and the `continue` targets are declared in the lost
// part). Restore the enclosing function from the canonical source.
ScBroadcastArea* pArea = (*aIter).mpArea; const ScRange& rAreaRange = pArea->GetRange(); switch (eGroup)
{ case sc::ListenerGroupType::Group: if (!pArea->IsGroupListening()) continue; break; case sc::ListenerGroupType::Both: default:
;
}
switch (eType)
{ case sc::AreaOverlapType::Inside: if (!rRange.Contains(rAreaRange)) // The range needs to be fully inside specified range. continue; break; case sc::AreaOverlapType::InsideOrOverlap: if (!rRange.Intersects(rAreaRange)) // The range needs to be partially overlapping or fully inside. continue; break; case sc::AreaOverlapType::OneRowInside: if (rAreaRange.aStart.Row() != rAreaRange.aEnd.Row() || !rRange.Contains(rAreaRange)) // The range needs to be one single row and fully inside // specified range. continue; break; case sc::AreaOverlapType::OneColumnInside: if (rAreaRange.aStart.Col() != rAreaRange.aEnd.Col() || !rRange.Contains(rAreaRange)) // The range needs to be one single column and fully inside // specified range. continue; break;
}
ScBroadcastAreaSlotMachine::ScBroadcastAreaSlotMachine(
    ScDocument& rDocument ) :
    rDoc( rDocument ),
    pUpdateChain( nullptr ),
    pEOUpdateChain( nullptr ),
    nInBulkBroadcast( 0 )
{
    // initSlotDistribution ---------
    // Logarithmic or any other distribution. The upper and leftmost sheet
    // part usually is more populated and referenced and gets fine grained
    // resolution, larger data in larger hunks. Just like with cells, slots
    // are organized in columns: slot 0 covers the first nSliceRow x nSliceCol
    // cells, slot 1 the next nSliceRow x nSliceCol cells below, etc. After a
    // while the row slice doubles (more cells share one slot); the resulting
    // cell ranges are recorded in ScSlotData. The same happens column-wise:
    // after a number of nSliceCol-wide columns the column slice doubles too.
    // ComputeSlotOffset(), ComputeAreaPoints() and ComputeNextSlot() perform
    // the corresponding calculations.
    SCSIZE nSlotCount = 0;
    // This should be SCCOL, but that's only 16bit and would overflow when doubling 16k columns.
    sal_Int32 nColFrom = 0;
    sal_Int32 nColTo = 1024;
    SCSIZE nSliceCol = 16;
    while (nColTo <= rDoc.GetMaxColCount())
    {
        SCROW nRowFrom = 0;
        SCROW nRowTo = 32*1024;
        SCSIZE nSliceRow = 128;
        SCSIZE nSlotsCol = 0;
        SCSIZE nSlotsStartCol = nSlotCount;
        // Must be sorted by row1,row2!
        while (nRowTo <= rDoc.GetMaxRowCount())
        {
            maSlotDistribution.emplace_back(nRowFrom, nRowTo, nSliceRow, nSlotsCol,
                    nColFrom, nColTo, nSliceCol, nSlotsStartCol);
            nSlotsCol += (nRowTo - nRowFrom) / nSliceRow;
            nRowFrom = nRowTo;
            nRowTo *= 2;
            nSliceRow *= 2;
        }
        // Remember the per-column slot count in mnBcaSlotsCol so that
        // ComputeNextSlot() can step one column to the right cheaply.
        if (nColFrom == 0)
            mnBcaSlotsCol = nSlotsCol;
        assert(nSlotsCol == mnBcaSlotsCol);
        nSlotCount += (nColTo - nColFrom) / nSliceCol * nSlotsCol;
        nColFrom = nColTo;
        nColTo *= 2;
        nSliceCol *= 2;
    }
    mnBcaSlots = nSlotCount;
#ifdef DBG_UTIL
    DoChecks();
#endif
}
ScBroadcastAreaSlotMachine::~ScBroadcastAreaSlotMachine()
{
    aTableSlotsMap.clear();
    pBCAlways.reset();
    // Areas still queued for erasure at this point indicate a serious error
    // in handling, but during destruction nothing can be done about it.
    SAL_WARN_IF( !maAreasToBeErased.empty(), "sc.core",
            "ScBroadcastAreaSlotMachine::dtor: maAreasToBeErased not empty");
}
#ifdef DBG_UTIL
// Basic checks that the slot-distribution calculations work correctly.
// Debug-only; verifies ComputeSlotOffset()/ComputeAreaPoints()/
// ComputeNextSlot() against the distribution built in the ctor.
void ScBroadcastAreaSlotMachine::DoChecks()
{
    // Copy&paste from the ctor.
    constexpr SCSIZE nSliceRow = 128;
    constexpr SCSIZE nSliceCol = 16;
    // First and second column are in the same slice and so get the same slot.
    compare( ComputeSlotOffset( ScAddress( 0, 0, 0 )),
             ComputeSlotOffset( ScAddress( 1, 0, 0 )), __LINE__);
    // Each nSliceRow rows are offset by one slot (at the start of the logarithmic distribution).
    compare( ComputeSlotOffset( ScAddress( 0, 0, 0 )),
             ComputeSlotOffset( ScAddress( 0, nSliceRow, 0 )) - 1, __LINE__ );
    compare( ComputeSlotOffset( ScAddress( nSliceCol - 1, 0, 0 )),
             ComputeSlotOffset( ScAddress( nSliceCol, 0, 0 )) - mnBcaSlotsCol, __LINE__ );
    // Check that last cell is the last slot.
    compare( ComputeSlotOffset( ScAddress( rDoc.GetMaxColCount() - 1, rDoc.GetMaxRowCount() - 1, 0 )),
             mnBcaSlots - 1, __LINE__ );
    // Check that adjacent rows in the same column but in different
    // distribution areas differ by one slot.
    for( size_t i = 0; i < maSlotDistribution.size() - 1; ++i )
    {
        const ScSlotData& s1 = maSlotDistribution[ i ];
        const ScSlotData& s2 = maSlotDistribution[ i + 1 ];
        if( s1.nStartCol == s2.nStartCol )
        {
            assert( s1.nStopRow == s2.nStartRow );
            compare( ComputeSlotOffset( ScAddress( s1.nStartCol, s1.nStopRow - 1, 0 )),
                     ComputeSlotOffset( ScAddress( s1.nStartCol, s1.nStopRow, 0 )) - 1, __LINE__ );
        }
    }
    // Check that adjacent columns in the same row but in different
    // distribution areas differ by mnBcaSlotsCol.
    for( size_t i = 0; i < maSlotDistribution.size() - 1; ++i )
    {
        const ScSlotData& s1 = maSlotDistribution[ i ];
        for( size_t j = i + 1; j < maSlotDistribution.size(); ++j )
        {
            // FIX: was maSlotDistribution[ i + 1 ], which compared s1 against
            // the same element on every inner iteration instead of element j.
            const ScSlotData& s2 = maSlotDistribution[ j ];
            if( s1.nStartRow == s2.nStartRow && s1.nStopCol == s2.nStartCol )
            {
                // FIX: removed assert(s1.nStopRow == s2.nStartRow) — it
                // contradicts the nStartRow-equality guard just above and was
                // a copy/paste leftover from the row loop.
                compare( ComputeSlotOffset( ScAddress( s1.nStopCol - 1, s1.nStartRow, 0 )),
                         ComputeSlotOffset( ScAddress( s1.nStopCol, s1.nStartRow, 0 )) - mnBcaSlotsCol, __LINE__ );
            }
        }
    }
    // Iterate all slots; consecutive slots must differ by exactly one.
    ScRange range( ScAddress( 0, 0, 0 ), ScAddress( rDoc.MaxCol(), rDoc.MaxRow(), 0 ));
    SCSIZE nStart, nEnd, nRowBreak;
    ComputeAreaPoints( range, nStart, nEnd, nRowBreak );
    assert( nStart == 0 );
    assert( nEnd == mnBcaSlots - 1 );
    SCSIZE nOff = nStart;
    SCSIZE nBreak = nOff + nRowBreak;
    std::unique_ptr<ScBroadcastAreaSlot*[]> slots( new ScBroadcastAreaSlot*[ mnBcaSlots ] ); // dummy, not accessed
    ScBroadcastAreaSlot** ppSlots = slots.get();
    ScBroadcastAreaSlot** pp = ppSlots;
    while ( nOff <= nEnd )
    {
        SCSIZE previous = nOff;
        ComputeNextSlot( nOff, nBreak, pp, nStart, ppSlots, nRowBreak, mnBcaSlotsCol);
        compare( nOff, previous + 1, __LINE__ );
    }
    // Iterate slots in the last row (each will differ by mnBcaSlotsCol).
    // FIX: the end row was rDoc.MaxRow() - 1, producing an inverted (invalid)
    // range whose start row exceeded its end row; a single-row range at
    // MaxRow() is intended.
    range = ScRange( ScAddress( 0, rDoc.MaxRow(), 0 ),
                     ScAddress( rDoc.MaxCol(), rDoc.MaxRow(), 0 ));
    ComputeAreaPoints( range, nStart, nEnd, nRowBreak );
    assert( nStart == mnBcaSlotsCol - 1 );
    assert( nEnd == mnBcaSlots - 1 );
    nOff = nStart;
    nBreak = nOff + nRowBreak;
    ppSlots = slots.get();
    pp = ppSlots;
    while ( nOff <= nEnd )
    {
        SCSIZE previous = nOff;
        ComputeNextSlot( nOff, nBreak, pp, nStart, ppSlots, nRowBreak, mnBcaSlotsCol);
        compare( nOff, previous + mnBcaSlotsCol, __LINE__ );
    }
}
#endif
void ScBroadcastAreaSlotMachine::StartListeningArea(
        const ScRange& rRange, bool bGroupListening, SvtListener* pListener )
{
    if ( rRange == BCA_LISTEN_ALWAYS )
    {
        // The catch-all broadcaster, created lazily.
        if ( !pBCAlways )
            pBCAlways.reset( new SvtBroadcaster );
        pListener->StartListening( *pBCAlways );
        return;
    }
    // A new area needs to be inserted to the corresponding slots, for 3D
    // ranges for all sheets; do not slice into per sheet areas or the
    // !bDone will break too early (i.e. after the first sheet) if
    // subsequent listeners are to be added.
    ScBroadcastArea* pArea = nullptr;
    bool bDone = false;
    for (SCTAB nTab = rRange.aStart.Tab(); !bDone && nTab <= rRange.aEnd.Tab(); ++nTab)
    {
        TableSlotsMap::iterator iTab( aTableSlotsMap.find( nTab));
        if (iTab == aTableSlotsMap.end())
            iTab = aTableSlotsMap.emplace( std::piecewise_construct,
                    std::forward_as_tuple(nTab), std::forward_as_tuple(mnBcaSlots)).first;
        ScBroadcastAreaSlot** ppSlots = (*iTab).second.getSlots();
        SCSIZE nStart, nEnd, nRowBreak;
        ComputeAreaPoints( rRange, nStart, nEnd, nRowBreak );
        SCSIZE nOff = nStart;
        SCSIZE nBreak = nOff + nRowBreak;
        ScBroadcastAreaSlot** pp = ppSlots + nOff;
        while ( !bDone && nOff <= nEnd )
        {
            if ( !*pp )
                *pp = new ScBroadcastAreaSlot( rDoc, this );
            if (!pArea)
            {
                // If the call to StartListeningArea didn't create the
                // ScBroadcastArea, listeners were added to an already
                // existing identical area that doesn't need to be inserted
                // to slots again.
                if (!(*pp)->StartListeningArea( rRange, bGroupListening, pListener, pArea))
                    bDone = true;
            }
            else
                (*pp)->InsertListeningArea( pArea);
            ComputeNextSlot( nOff, nBreak, pp, nStart, ppSlots, nRowBreak, mnBcaSlotsCol);
        }
    }
}
// NOTE(review): orphan fragment — tail of an update function (presumably the
// broadcast-area update/insert path; `pArea` and the enclosing loop are in
// the missing part). Restore the full function from the canonical source.
// Delete if not inserted to any slot. RemoveBulkArea(pArea) was // already executed in UpdateRemove(). if (!pArea->GetRef()) delete pArea;
}
pEOUpdateChain = nullptr;
}
// Broadcasts the collected bulk group-area spans to their listeners.
// NOTE(review): this definition is truncated — the function's tail (returning
// bBroadcasted and any cleanup of m_BulkGroupAreas) is missing, and the text
// contains two mangled tokens: "returnfalse" (missing space) and "constauto"
// (missing space). Restore from the canonical source before building.
bool ScBroadcastAreaSlotMachine::BulkBroadcastGroupAreas()
{ if (m_BulkGroupAreas.empty()) returnfalse;
sc::BulkDataHint aHint( rDoc );
bool bBroadcasted = false; for (constauto& [pArea, rSpans] : m_BulkGroupAreas)
{
assert(pArea);
SvtBroadcaster& rBC = pArea->GetBroadcaster(); if (!rBC.HasListeners())
{ /* FIXME: find the cause where the last listener is removed and
* this area is still listed here. */
SAL_WARN("sc.core","ScBroadcastAreaSlotMachine::BulkBroadcastGroupAreas - pArea has no listeners and should had been removed already");
} else
{
aHint.setSpans(&rSpans);
rBC.Broadcast(aHint);
bBroadcasted = true;
}
}
// Perform the erases that were deferred for pSlot while it was iterating a
// broadcast. Must only run once the slot's iteration has finished.
void ScBroadcastAreaSlotMachine::FinallyEraseAreas( ScBroadcastAreaSlot* pSlot )
{
    SAL_WARN_IF( pSlot->IsInBroadcastIteration(), "sc.core",
            "ScBroadcastAreaSlotMachine::FinallyEraseAreas: during iteration? NO!");
    if (pSlot->IsInBroadcastIteration())
        return;
    // maAreasToBeErased is a simple vector so erasing an element may
    // invalidate iterators and would be inefficient anyway. Instead, copy
    // elements to be preserved (usually none!) to a temporary vector and swap.
    AreasToBeErased aRemaining;
    for (auto& rEntry : maAreasToBeErased)
    {
        if (rEntry.first == pSlot)
            pSlot->EraseArea( rEntry.second);
        else
            aRemaining.push_back( rEntry);
    }
    maAreasToBeErased.swap( aRemaining);
}
/* NOTE(review): the trailing German website-disclaimer text that appeared
 * here ("Die Informationen auf dieser Webseite wurden ...") is extraction
 * residue from a web page, not part of this translation unit; it would not
 * compile and carries no source content. Flagged for removal. */