1
|
/**
|
2
|
* Copyright (C) 2019 EDIT
|
3
|
* European Distributed Institute of Taxonomy
|
4
|
* http://www.e-taxonomy.eu
|
5
|
*
|
6
|
* The contents of this file are subject to the Mozilla Public License Version 1.1
|
7
|
* See LICENSE.TXT at the top of this package for the full license terms.
|
8
|
*/
|
9
|
package eu.etaxonomy.cdm.api.service.description;
|
10
|
|
11
|
import java.util.ArrayList;
|
12
|
import java.util.HashSet;
|
13
|
import java.util.Iterator;
|
14
|
import java.util.List;
|
15
|
import java.util.Map;
|
16
|
import java.util.Set;
|
17
|
import java.util.stream.Collectors;
|
18
|
|
19
|
import org.apache.logging.log4j.LogManager;
|
20
|
import org.apache.logging.log4j.Logger;
|
21
|
import org.hibernate.FlushMode;
|
22
|
import org.hibernate.Session;
|
23
|
import org.springframework.transaction.PlatformTransactionManager;
|
24
|
import org.springframework.transaction.TransactionDefinition;
|
25
|
import org.springframework.transaction.TransactionStatus;
|
26
|
import org.springframework.transaction.support.DefaultTransactionDefinition;
|
27
|
|
28
|
import eu.etaxonomy.cdm.api.application.ICdmRepository;
|
29
|
import eu.etaxonomy.cdm.api.service.DeleteResult;
|
30
|
import eu.etaxonomy.cdm.api.service.IClassificationService;
|
31
|
import eu.etaxonomy.cdm.api.service.IDescriptionElementService;
|
32
|
import eu.etaxonomy.cdm.api.service.IDescriptionService;
|
33
|
import eu.etaxonomy.cdm.api.service.IDescriptiveDataSetService;
|
34
|
import eu.etaxonomy.cdm.api.service.ITaxonNodeService;
|
35
|
import eu.etaxonomy.cdm.api.service.ITaxonService;
|
36
|
import eu.etaxonomy.cdm.api.service.ITermService;
|
37
|
import eu.etaxonomy.cdm.common.DynamicBatch;
|
38
|
import eu.etaxonomy.cdm.common.JvmLimitsException;
|
39
|
import eu.etaxonomy.cdm.common.monitor.IProgressMonitor;
|
40
|
import eu.etaxonomy.cdm.common.monitor.NullProgressMonitor;
|
41
|
import eu.etaxonomy.cdm.common.monitor.SubProgressMonitor;
|
42
|
import eu.etaxonomy.cdm.filter.TaxonNodeFilter;
|
43
|
import eu.etaxonomy.cdm.filter.TaxonNodeFilter.ORDER;
|
44
|
import eu.etaxonomy.cdm.model.common.CdmBase;
|
45
|
import eu.etaxonomy.cdm.model.description.CategoricalData;
|
46
|
import eu.etaxonomy.cdm.model.description.DescriptionBase;
|
47
|
import eu.etaxonomy.cdm.model.description.DescriptionElementBase;
|
48
|
import eu.etaxonomy.cdm.model.description.DescriptionElementSource;
|
49
|
import eu.etaxonomy.cdm.model.description.Distribution;
|
50
|
import eu.etaxonomy.cdm.model.description.TaxonDescription;
|
51
|
import eu.etaxonomy.cdm.model.media.Media;
|
52
|
import eu.etaxonomy.cdm.model.reference.OriginalSourceType;
|
53
|
import eu.etaxonomy.cdm.model.taxon.Taxon;
|
54
|
import eu.etaxonomy.cdm.model.taxon.TaxonBase;
|
55
|
import eu.etaxonomy.cdm.model.taxon.TaxonNode;
|
56
|
import eu.etaxonomy.cdm.model.term.DefinedTermBase;
|
57
|
import eu.etaxonomy.cdm.persistence.query.OrderHint;
|
58
|
|
59
|
/**
 * A common base class to run aggregation tasks on descriptive data.
 *
 * Usable for all types of descriptive data like structured descriptive data
 * ({@link CategoricalData} and QuantitativeData), {@link Distribution},
 * {@link Media}, etc.
 *
 * @author a.mueller
 * @since 03.11.2019
 */
|
69
|
public abstract class DescriptionAggregationBase<T extends DescriptionAggregationBase<T, CONFIG>, CONFIG extends DescriptionAggregationConfigurationBase<T>> {
|
70
|
|
71
|
    public static final Logger logger = LogManager.getLogger(DescriptionAggregationBase.class);

    //default minimum free heap (in bytes) the dynamic batch tries to keep available
    private static final long BATCH_MIN_FREE_HEAP = 150 * 1024 * 1024; //150 MB
    /**
     * ratio of the initially free heap which should not be used
     * during the batch processing. This amount of the heap is reserved
     * for the flushing of the session and to the index
     */
    private static final double BATCH_FREE_HEAP_RATIO = 0.9;
//    private static final int BATCH_SIZE_BY_AREA = 1000;
//    private static final int BATCH_SIZE_BY_RANK = 500;
    //initial number of taxa handled per batch; may shrink adaptively (see aggregate())
    private static final int BATCH_SIZE_BY_TAXON = 200;

    //repository and configuration of the current run; set in init()
    private ICdmRepository repository;
    private CONFIG config;
    //collects inserted/updated/deleted objects and exceptions of the current run
    private DeleteResult result;

    //may be overridden via setBatchMinFreeHeap() before invoking
    private long batchMinFreeHeap = BATCH_MIN_FREE_HEAP;
|
89
|
|
90
|
|
91
|
    /**
     * Entry point: initializes this aggregation task with the given configuration
     * and repository and runs it.
     *
     * @param config the aggregation configuration (a null monitor is replaced by a null-object)
     * @param repository the repository providing all required services
     * @return the {@link DeleteResult} collecting inserted/updated/deleted objects and exceptions
     */
    public final DeleteResult invoke(CONFIG config, ICdmRepository repository){
        init(config, repository);
        return doInvoke();
    }
|
95
|
|
96
|
    /**
     * Runs the full aggregation workflow: counts and lists the taxon nodes matching the
     * configured filter, calls the {@link #preAggregate(IProgressMonitor)} and
     * {@link #verifyConfiguration(IProgressMonitor)} hooks and finally delegates to
     * {@link #aggregate(List, IProgressMonitor)}. Progress is reported to the configured monitor.
     * Exceptions from the hooks and the aggregation are routed through
     * {@link #handleException(Exception, String)}; any other exception is added to the result.
     *
     * @return the result of this run, including any collected exceptions
     */
    protected DeleteResult doInvoke() {

        try {
            double start = System.currentTimeMillis();
            IProgressMonitor monitor = getConfig().getMonitor();

            // only for debugging:
            //logger.setLevel(Level.TRACE); // TRACE will slow down a lot since it forces loading all term representations
            //LogUtils.setLevel("org.hibernate.SQL", Level.DEBUG);
            logger.info("Hibernate JDBC Batch size: " + getSession().getSessionFactory().getSessionFactoryOptions().getJdbcBatchSize());

            TaxonNodeFilter filter = getConfig().getTaxonNodeFilter();
            filter.setOrder(ORDER.TREEINDEX_DESC); //DESC guarantees that child taxa are aggregated before parent
            filter.setIncludeRootNodes(false); //root nodes do not make sense for aggregation

            monitor.beginTask("Accumulating " + pluralDataType(), 100);
            Long countTaxonNodes = getTaxonNodeService().count(filter);
            int aggregationWorkTicks = countTaxonNodes.intValue();
            logger.info(aggregationWorkTicks + " taxa to aggregate");
            int getIdListTicks = 1;
            int preAccumulateTicks = 1;
            monitor.worked(5);
            // 95 remaining ticks are split between id-list loading, pre-aggregation and the taxa themselves
            SubProgressMonitor subMonitor = SubProgressMonitor.NewStarted(monitor,
                    95, "Accumulating " + pluralDataType(), aggregationWorkTicks + getIdListTicks + preAccumulateTicks);

            subMonitor.subTask("Get taxon node ID list");
            List<Integer> taxonNodeIdList = getTaxonNodeService().idList(filter);

            subMonitor.worked(getIdListTicks);

            try {
                preAggregate(subMonitor);
            } catch (Exception e) {
                return handleException(e, "Unhandled error during pre-aggregation");
            }

            try {
                verifyConfiguration(subMonitor);
            } catch (Exception e) {
                return handleException(e, "Unhandled error during configuration check");
            }

            subMonitor.worked(preAccumulateTicks);
            subMonitor.subTask("Accumulating "+pluralDataType()+" per taxon for taxon filter " + filter.toString());

            double startAccumulate = System.currentTimeMillis();

            //TODO AM move to invokeOnSingleTaxon()
            IProgressMonitor aggregateMonitor = new SubProgressMonitor(subMonitor, aggregationWorkTicks);
            try {
                aggregate(taxonNodeIdList, aggregateMonitor);
            } catch (Exception e) {
                return handleException(e, "Unhandled error during aggregation");
            }

            double end = System.currentTimeMillis();
            logger.info("Time elapsed for accumulate only(): " + (end - startAccumulate) / (1000) + "s");
            logger.info("Time elapsed for invoking task(): " + (end - start) / (1000) + "s");

            done();
        } catch (Exception e) {
            getResult().addException(new RuntimeException("Unhandled error during doInvoke", e));
        }
        return getResult();
    }
|
161
|
|
162
|
private DeleteResult handleException(Exception e, String unhandledMessage) {
|
163
|
Exception ex;
|
164
|
if (e instanceof AggregationException){
|
165
|
ex = e;
|
166
|
}else{
|
167
|
ex = new RuntimeException(unhandledMessage + ": " + e.getMessage() , e);
|
168
|
e.printStackTrace();
|
169
|
}
|
170
|
getResult().addException(ex);
|
171
|
getResult().setError();
|
172
|
done();
|
173
|
return getResult();
|
174
|
}
|
175
|
|
176
|
    /**
     * Iterates over all taxon node ids in adaptively sized batches, loads the corresponding
     * taxon nodes (ordered by tree index descending so children come before parents) and
     * accumulates data for each taxon via {@link #accumulateSingleTaxon(TaxonNode)}.
     * <p>
     * Each batch runs in its own transaction which is committed at the batch end to keep
     * the persistence context small; the batch size is reduced when the JVM monitor reports
     * high garbage-collection activity.
     *
     * @param taxonNodeIdList ids of the taxon nodes to process (NOTE: see FIXME below about taxon vs. taxon node ids)
     * @param subMonitor progress monitor, advanced by one tick per taxon
     * @throws JvmLimitsException if the dynamic batch detects that JVM limits are exceeded
     */
    protected void aggregate(List<Integer> taxonNodeIdList, IProgressMonitor subMonitor) throws JvmLimitsException {

        DynamicBatch batch = new DynamicBatch(BATCH_SIZE_BY_TAXON, batchMinFreeHeap);
        batch.setRequiredFreeHeap(BATCH_FREE_HEAP_RATIO);
        //TODO AM from aggByRank batch.setMaxAllowedGcIncreases(10);

        TransactionStatus txStatus = startTransaction(false);
        initTransaction();

        // visit all accepted taxa
        // subMonitor.beginTask("Work on taxa.", taxonNodeIdList.size());
        subMonitor.subTask("Accumulating bottom up " + taxonNodeIdList.size() + " taxa.");

        //TODO FIXME this was a Taxon not a TaxonNode id list
        Iterator<Integer> taxonIdIterator = taxonNodeIdList.iterator();

        while (taxonIdIterator.hasNext() || batch.hasUnprocessedItems()) {
            if(getConfig().getMonitor().isCanceled()){
                break;
            }

            if(txStatus == null) {
                // transaction has been committed at the end of this batch, start a new one
                txStatus = startTransaction(false);
                initTransaction();
            }

            //load taxa for this batch
            List<Integer> taxonIds = batch.nextItems(taxonIdIterator);
            // logger.debug("accumulateByArea() - taxon " + taxonPager.getFirstRecord() + " to " + taxonPager.getLastRecord() + " of " + taxonPager.getCount() + "]");

            //TODO AM adapt init-strat to taxonnode if it stays a taxon node list
            List<OrderHint> orderHints = new ArrayList<>();
            orderHints.add(OrderHint.BY_TREE_INDEX_DESC);
            List<TaxonNode> taxonNodes = getTaxonNodeService().loadByIds(taxonIds, orderHints, descriptionInitStrategy());

            // iterate over the taxa and accumulate areas
            // start processing the new batch

            for(TaxonNode taxonNode : taxonNodes) {
                if(getConfig().getMonitor().isCanceled()){
                    break;
                }
                subMonitor.subTask("Accumulating " + taxonNode.getTaxon().getTitleCache());

                accumulateSingleTaxon(taxonNode);
                batch.incrementCounter();

                subMonitor.worked(1);

                //TODO handle canceled better if needed
                if(subMonitor.isCanceled()){
                    return;
                }

                if(!batch.isWithinJvmLimits()) {
                    break; // flushAndClear and start with new batch
                }
            } // next taxon

            // flushAndClear();

            // commit for every batch, otherwise the persistent context
            // may grow too much and eats up all the heap
            commitTransaction(txStatus);
            txStatus = null;

            // flushing the session and to the index (flushAndClear() ) can impose a
            // massive heap consumption. therefore we explicitly do a check after the
            // flush to detect these situations and to reduce the batch size.
            if(getConfig().isAdaptBatchSize() && batch.getJvmMonitor().getGCRateSiceLastCheck() > 0.05) {
                batch.reduceSize(0.5);
            }

        } // next batch of taxa

    }
|
254
|
|
255
|
    /**
     * Base class for temporary aggregation results for a single taxon, covering both
     * within-taxon and child-to-parent aggregation. Should be extended by implementing
     * aggregation classes to carry their type-specific intermediate data.
     */
    protected class ResultHolder{
        //descriptions are identifiable and therefore are not deleted automatically by removing them from taxon or specimen;
        //here we store all descriptions that need to be deleted after aggregation as they are not needed anymore
        Set<DescriptionBase<?>> descriptionsToDelete = new HashSet<>();
    }
|
264
|
|
265
|
    /**
     * Aggregates the data for a single taxon (node): finds or creates the target
     * description, runs all configured {@link AggregationMode}s into a temporary
     * {@link ResultHolder}, merges the result back into the target description and
     * finally cleans up empty/obsolete descriptions.
     *
     * @param taxonNode the taxon node whose taxon is aggregated
     * @throws IllegalArgumentException if an unsupported aggregation mode is configured
     */
    protected void accumulateSingleTaxon(TaxonNode taxonNode){

        Taxon taxon = CdmBase.deproxy(taxonNode.getTaxon());
        if(logger.isDebugEnabled()){logger.debug("accumulate - taxon :" + taxonToString(taxon));}

        //description
        TaxonDescription targetDescription = getAggregatedDescription(taxon);

        //temporary result
        ResultHolder resultHolder = createResultHolder();
        for (AggregationMode mode : getConfig().getAggregationModes()){
            if (mode == AggregationMode.ToParent){
                aggregateToParentTaxon(taxonNode, resultHolder, new HashSet<>()); //no excludedDescriptions because aggregating from children
            } else if (mode == AggregationMode.WithinTaxon){
                //exclude the target description so previously aggregated data is not re-aggregated
                Set<TaxonDescription> excludedDescriptions = new HashSet<>();
                excludedDescriptions.add(targetDescription);
                aggregateWithinSingleTaxon(taxon, resultHolder, excludedDescriptions);
            }else{
                throw new IllegalArgumentException("Mode " + mode + " not yet supported");
            }
        }

        //persist; record as update when the description existed before, otherwise as insert
        boolean updated = mergeAggregationResultIntoTargetDescription(targetDescription, resultHolder);
        if (updated){
            if (targetDescription.isPersited()){
                getResult().addUpdatedUuid(targetDescription);
            }else{
                getResult().addInsertedUuid(targetDescription);
            }
        }
        removeDescriptionIfEmpty(targetDescription, resultHolder);
        deleteDescriptionsToDelete(resultHolder);
    }
|
299
|
|
300
|
    /**
     * Removes the descriptions collected in {@link ResultHolder#descriptionsToDelete}
     * from persistent data where possible. Only already persisted descriptions are
     * deleted via the description service; successfully deleted ones are recorded
     * in the result.
     */
    private void deleteDescriptionsToDelete(DescriptionAggregationBase<T, CONFIG>.ResultHolder resultHolder) {
        for (DescriptionBase<?> descriptionToDelete : resultHolder.descriptionsToDelete){
            if (descriptionToDelete.isPersited()){
                getSession().flush(); // move to service method #9801
                DeleteResult descriptionDeleteResult = repository.getDescriptionService().deleteDescription(descriptionToDelete);
                //TODO handle result somehow if not OK, but careful, descriptions may be linked >1x and therefore maybe deleted only after last link was removed
                if (descriptionDeleteResult.getDeletedObjects().contains(descriptionToDelete) && descriptionToDelete.isPersited()){
                    this.getResult().addDeletedObject(descriptionToDelete);
                }
                // this.getResult().includeResult(descriptionDeleteResult, true);
            }
        }
    }
|
316
|
|
317
|
/**
|
318
|
* If target description is empty the description is added to the descriptions to be deleted
|
319
|
* in the result holder.
|
320
|
*/
|
321
|
protected void removeDescriptionIfEmpty(TaxonDescription description, ResultHolder resultHolder) {
|
322
|
if (description.getElements().isEmpty()){
|
323
|
description.getTaxon().removeDescription(description);
|
324
|
resultHolder.descriptionsToDelete.add(description);
|
325
|
}
|
326
|
}
|
327
|
|
328
|
/**
|
329
|
* Removes description elements not needed anymore from their description and
|
330
|
* updates the {@link DeleteResult}.
|
331
|
*/
|
332
|
protected boolean handleDescriptionElementsToRemove(TaxonDescription targetDescription,
|
333
|
Set<? extends DescriptionElementBase> elementsToRemove) {
|
334
|
boolean updated = false;
|
335
|
//remove all elements not needed anymore
|
336
|
for(DescriptionElementBase elementToRemove : elementsToRemove){
|
337
|
targetDescription.removeElement(elementToRemove);
|
338
|
//AM: do we really want to add each element to the deleteResult?
|
339
|
//this.getResult().addDeletedObject(elementToRemove);
|
340
|
updated |= elementToRemove.isPersited();
|
341
|
}
|
342
|
return updated;
|
343
|
}
|
344
|
|
345
|
    /**
     * Adds the temporary aggregated data (resultHolder) to the target description.
     * Tries to reuse existing data if possible.
     *
     * @return {@code true} if the target description was changed
     */
    protected abstract boolean mergeAggregationResultIntoTargetDescription(TaxonDescription targetDescription,
            ResultHolder resultHolder);
|
351
|
|
352
|
    /**
     * Aggregates data from the children of the given taxon node into the result holder.
     *
     * @param excludedDescriptions descriptions that must not be used as aggregation source
     */
    protected abstract void aggregateToParentTaxon(TaxonNode taxonNode, ResultHolder resultHolder,
            Set<TaxonDescription> excludedDescriptions);

    /**
     * Aggregates data from the taxon's own (non-aggregated) descriptions into the result holder.
     *
     * @param excludedDescriptions descriptions that must not be used as aggregation source
     *        (e.g. the aggregation target description itself)
     */
    protected abstract void aggregateWithinSingleTaxon(Taxon taxon, ResultHolder resultHolder,
            Set<TaxonDescription> excludedDescriptions);
|
357
|
|
358
|
    /**
     * Creates a {@link ResultHolder} object to temporarily store the aggregation
     * result (within taxon and from child to parent) for a single taxon.
     */
    protected abstract ResultHolder createResultHolder();
|
363
|
|
364
|
    /**
     * Either finds an existing aggregation description for the given taxon
     * (the first one matching {@link #hasDescriptionType(TaxonDescription)})
     * or creates a new one. An existing description may be cleared first,
     * depending on the configuration.
     */
    private TaxonDescription getAggregatedDescription(Taxon taxon) {

        // find existing one
        for (TaxonDescription description : taxon.getDescriptions()) {
            if (hasDescriptionType(description)){
                if (logger.isDebugEnabled()){logger.debug("reusing existing aggregated description for " + taxonToString(taxon));}
                setDescriptionTitle(description, taxon); //maybe we want to redefine the title
                if (getConfig().isDoClearExistingDescription()){
                    clearDescription(description);
                }

                return description;
            }
        }

        // create a new one
        TaxonDescription newDescription = createNewDescription(taxon);
        return newDescription;
    }
|
386
|
|
387
|
/**
|
388
|
* Removes all description elements of the according type from the
|
389
|
* (aggregation) description.
|
390
|
*/
|
391
|
private void clearDescription(TaxonDescription aggregationDescription) {
|
392
|
Set<DescriptionElementBase> deleteCandidates = new HashSet<>();
|
393
|
for (DescriptionElementBase descriptionElement : aggregationDescription.getElements()) {
|
394
|
|
395
|
if(isRelevantDescriptionElement(descriptionElement)) {
|
396
|
deleteCandidates.add(descriptionElement);
|
397
|
}
|
398
|
}
|
399
|
if(deleteCandidates.size() > 0){
|
400
|
for(DescriptionElementBase descriptionElement : deleteCandidates) {
|
401
|
aggregationDescription.removeElement(descriptionElement);
|
402
|
getDescriptionElementService().delete(descriptionElement);
|
403
|
if (descriptionElement.isPersited()){
|
404
|
getResult().addDeletedObject(descriptionElement);
|
405
|
}
|
406
|
}
|
407
|
getDescriptionService().saveOrUpdate(aggregationDescription);
|
408
|
}
|
409
|
}
|
410
|
|
411
|
    /**
     * Merges the newly aggregated elements (one per key term) into the target description.
     * Elements of the given class that exist in the target but have no counterpart in the
     * new data are removed; elements with a counterpart are merged in place via
     * {@link #mergeDescriptionElement(DescriptionElementBase, DescriptionElementBase)};
     * new elements without an existing counterpart are added.
     *
     * @param targetDescription the description receiving the aggregation result
     * @param newElementsMap new aggregated elements, keyed by term
     *        (keys are matched against each existing element's feature — assumes
     *        the key term is the element's feature; TODO confirm for all subclasses)
     * @param debClass the concrete element class handled by this merge
     * @return {@code true} if the target description was changed
     */
    protected <S extends DescriptionElementBase, TE extends DefinedTermBase<?>> boolean mergeDescriptionElements(
            TaxonDescription targetDescription, Map<TE, S> newElementsMap, Class<S> debClass) {

        boolean updated = false;

        //init elements to remove
        Set<DescriptionElementBase> elementsToRemove = new HashSet<>(
                targetDescription.getElements().stream()
                    .filter(el->el.isInstanceOf(debClass))
                    .collect(Collectors.toSet()));

        //for each character in "characters of new elements"
        for (TE keyTerm : newElementsMap.keySet()) {
            S newElement = newElementsMap.get(keyTerm);

            //if elements for this character exist in old data, remember any of them to keep
            //(in clean data there should be only max. 1)
            DescriptionElementBase elementToStay = null;
            for (DescriptionElementBase existingDeb : elementsToRemove) {
                if(existingDeb.getFeature().equals(keyTerm)){
                    elementToStay = existingDeb;
                    break;
                }
            }

            //if there is no element for this character in old data, add the new element for this character to the target description (otherwise reuse old element)
            if (elementToStay == null){
                targetDescription.addElement(newElement);
                updated = true;
            }else{
                elementsToRemove.remove(elementToStay);
                updated |= mergeDescriptionElement(elementToStay, newElement);
            }
        }

        updated |= handleDescriptionElementsToRemove(targetDescription, elementsToRemove);
        return updated;
    }
|
449
|
|
450
|
    /**
     * Merges a new (temporary) description element into an existing one.
     *
     * @return {@code true} if the target element was changed
     */
    protected abstract <S extends DescriptionElementBase>
            boolean mergeDescriptionElement(S targetElement, S newElement);
|
455
|
|
456
|
    /**
     * Synchronizes the sources of the given description element with the new set of sources:
     * sources already present (by shallow comparison) are kept, missing ones are added as
     * clones, and existing sources without counterpart in {@code newSources} are removed.
     *
     * @param deb the element whose sources are updated
     * @param newSources the desired set of sources after the merge
     * @return {@code true} if a source was added or an already persisted source was removed
     */
    protected boolean mergeSourcesForDescriptionElements(DescriptionElementBase deb,
            Set<DescriptionElementSource> newSources) {

        boolean updated = false;
        Set<DescriptionElementSource> toDeleteSources = new HashSet<>(deb.getSources());
        for(DescriptionElementSource newSource : newSources) {
            boolean contained = false;
            for(DescriptionElementSource existingSource: deb.getSources()) {
                if(existingSource.equalsByShallowCompare(newSource)) {
                    contained = true;
                    toDeleteSources.remove(existingSource);
                    break;
                }
            }
            if(!contained) {
                try {
                    //clone so the new source is not shared between elements
                    deb.addSource(newSource.clone());
                    updated = true;
                } catch (CloneNotSupportedException e) {
                    // should never happen
                    throw new RuntimeException(e);
                }
            }
        }
        for (DescriptionElementSource toDeleteSource : toDeleteSources){
            deb.removeSource(toDeleteSource);
            updated |= toDeleteSource.isPersited();
        }
        return updated;
    }
|
486
|
|
487
|
    /**
     * Creates a new (empty) aggregation description for the given taxon.
     */
    protected abstract TaxonDescription createNewDescription(Taxon taxon);

    /**
     * Returns {@code true} if the given description is the aggregation description
     * handled by this aggregation type (used to find a reusable target description).
     */
    protected abstract boolean hasDescriptionType(TaxonDescription description);

    /**
     * Sets (or re-defines) the title of the aggregation description for the given taxon.
     */
    protected abstract void setDescriptionTitle(TaxonDescription description, Taxon taxon);

    /**
     * Returns {@code true} if the given element belongs to this aggregation type
     * and should therefore be cleared before re-aggregation.
     */
    protected abstract boolean isRelevantDescriptionElement(DescriptionElementBase deb);
|
494
|
|
495
|
protected String taxonToString(TaxonBase<?> taxon) {
|
496
|
if(logger.isTraceEnabled()) {
|
497
|
return taxon.getTitleCache();
|
498
|
} else {
|
499
|
return taxon.toString();
|
500
|
}
|
501
|
}
|
502
|
|
503
|
    /**
     * Property init strategy used when loading the taxon nodes of a batch
     * (see {@code loadByIds} call in {@link #aggregate(List, IProgressMonitor)}).
     */
    protected abstract List<String> descriptionInitStrategy();

    /**
     * Hook called once before the aggregation loop starts (e.g. for preparatory computations).
     */
    protected abstract void preAggregate(IProgressMonitor monitor);

    /**
     * Hook to validate the configuration before aggregation starts.
     */
    protected abstract void verifyConfiguration(IProgressMonitor monitor);

    /**
     * hook for initializing objects when a new transaction starts
     */
    protected abstract void initTransaction();

    /**
     * Plural name of the aggregated data type, used in progress messages.
     */
    protected abstract String pluralDataType();
|
515
|
|
516
|
    /**
     * Stores configuration and repository for this run, replaces a missing monitor
     * by a {@link NullProgressMonitor} and creates a fresh result object.
     */
    private void init(CONFIG config, ICdmRepository repository) {
        this.repository = repository;
        this.config = config;
        if(config.getMonitor() == null){
            config.setMonitor(new NullProgressMonitor());
        }
        result = new DeleteResult();
    }
|
524
|
|
525
|
protected void addSourcesDeduplicated(DescriptionElementBase targetDeb, Set<DescriptionElementSource> sourcesToAdd) {
|
526
|
for(DescriptionElementSource source : sourcesToAdd) {
|
527
|
boolean contained = false;
|
528
|
if (!hasValidSourceType(source)&& !isAggregationSource(source)){ //only aggregate sources of defined source types
|
529
|
continue;
|
530
|
}
|
531
|
for(DescriptionElementSource existingSource: targetDeb.getSources()) {
|
532
|
if(existingSource.equalsByShallowCompare(source)) {
|
533
|
contained = true;
|
534
|
break;
|
535
|
}
|
536
|
}
|
537
|
if(!contained) {
|
538
|
try {
|
539
|
targetDeb.addSource(source.clone());
|
540
|
} catch (CloneNotSupportedException e) {
|
541
|
// should never happen
|
542
|
throw new RuntimeException(e);
|
543
|
}
|
544
|
}
|
545
|
}
|
546
|
}
|
547
|
|
548
|
    /**
     * Returns {@code true} if the source's type is one of the configured
     * aggregating source types.
     */
    private boolean hasValidSourceType(DescriptionElementSource source) {
        return getConfig().getAggregatingSourceTypes().contains(source.getType());
    }
|
551
|
|
552
|
    /**
     * Returns {@code true} if the source is of type {@link OriginalSourceType#Aggregation}
     * and references a CDM source object.
     */
    private boolean isAggregationSource(DescriptionElementSource source) {
        return source.getType().equals(OriginalSourceType.Aggregation) && source.getCdmSource() != null;
    }
|
555
|
|
556
|
// ******************** GETTER / SETTER *************************/
|
557
|
|
558
|
    /** Description service of the underlying repository. */
    protected IDescriptionService getDescriptionService(){
        return repository.getDescriptionService();
    }

    /** Description element service of the underlying repository. */
    protected IDescriptionElementService getDescriptionElementService(){
        return repository.getDescriptionElementService();
    }

    /** Descriptive data set service of the underlying repository. */
    protected IDescriptiveDataSetService getDescriptiveDatasetService() {
        return repository.getDescriptiveDataSetService();
    }

    /** Taxon service of the underlying repository. */
    protected ITaxonService getTaxonService() {
        return repository.getTaxonService();
    }

    /** Taxon node service of the underlying repository. */
    protected ITaxonNodeService getTaxonNodeService() {
        return repository.getTaxonNodeService();
    }

    /** Term service of the underlying repository. */
    protected ITermService getTermService() {
        return repository.getTermService();
    }

    /** Classification service of the underlying repository. */
    protected IClassificationService getClassificationService() {
        return repository.getClassificationService();
    }

    /** Transaction manager of the underlying repository. */
    protected PlatformTransactionManager getTransactionManager(){
        return repository.getTransactionManager();
    }
|
589
|
|
590
|
// TODO merge with CdmRepository#startTransaction() into common base class
|
591
|
protected void commitTransaction(TransactionStatus txStatus){
|
592
|
logger.debug("commiting transaction ...");
|
593
|
repository.commitTransaction(txStatus);
|
594
|
return;
|
595
|
}
|
596
|
|
597
|
    /**
     * Starts a new transaction with default propagation/isolation settings and
     * switches the Hibernate session to {@link FlushMode#COMMIT} so changes are
     * only flushed on commit (or explicit flush).
     *
     * @param readOnly whether the transaction is read-only
     * @return the status of the newly started transaction
     */
    protected TransactionStatus startTransaction(Boolean readOnly) {

        DefaultTransactionDefinition defaultTxDef = new DefaultTransactionDefinition();
        defaultTxDef.setReadOnly(readOnly);
        TransactionDefinition txDef = defaultTxDef;

        // Log some transaction-related debug information.
        if (logger.isTraceEnabled()) {
            logger.trace("Transaction name = " + txDef.getName());
            logger.trace("Transaction facets:");
            logger.trace("Propagation behavior = " + txDef.getPropagationBehavior());
            logger.trace("Isolation level = " + txDef.getIsolationLevel());
            logger.trace("Timeout = " + txDef.getTimeout());
            logger.trace("Read Only = " + txDef.isReadOnly());
            // org.springframework.orm.hibernate5.HibernateTransactionManager
            // provides more transaction/session-related debug information.
        }

        TransactionStatus txStatus = getTransactionManager().getTransaction(txDef);
        getSession().setFlushMode(FlushMode.COMMIT);

        return txStatus;
    }
|
620
|
|
621
|
    /** Current Hibernate session, obtained via the description service. */
    protected Session getSession() {
        return getDescriptionService().getSession();
    }
|
624
|
|
625
|
    /** The repository this aggregation runs against (set in {@code init}). */
    protected ICdmRepository getRepository() {
        return repository;
    }

    /** The configuration of the current run (set in {@code init}). */
    protected CONFIG getConfig() {
        return config;
    }

    /** The result object collecting changes and exceptions of the current run. */
    protected DeleteResult getResult() {
        return result;
    }
|
636
|
|
637
|
    /** Notifies the configured progress monitor that the task has finished. */
    protected void done(){
        getConfig().getMonitor().done();
    }
|
640
|
|
641
|
    /**
     * Overrides the default minimum free heap (in bytes) required by the
     * dynamic batch during aggregation; must be set before invoking.
     */
    public void setBatchMinFreeHeap(long batchMinFreeHeap) {
        this.batchMinFreeHeap = batchMinFreeHeap;
    }
|
644
|
|
645
|
}
|