1
|
// $Id$
|
2
|
/**
|
3
|
* Copyright (C) 2013 EDIT
|
4
|
* European Distributed Institute of Taxonomy
|
5
|
* http://www.e-taxonomy.eu
|
6
|
*
|
7
|
* The contents of this file are subject to the Mozilla Public License Version 1.1
|
8
|
* See LICENSE.TXT at the top of this package for the full license terms.
|
9
|
*/
|
10
|
package eu.etaxonomy.cdm.api.service.description;
|
11
|
|
12
|
import java.util.ArrayList;
|
13
|
import java.util.Arrays;
|
14
|
import java.util.HashMap;
|
15
|
import java.util.HashSet;
|
16
|
import java.util.Iterator;
|
17
|
import java.util.List;
|
18
|
import java.util.Map;
|
19
|
import java.util.Set;
|
20
|
import java.util.UUID;
|
21
|
|
22
|
import org.apache.log4j.Logger;
|
23
|
import org.hibernate.FlushMode;
|
24
|
import org.hibernate.HibernateException;
|
25
|
import org.hibernate.Session;
|
26
|
import org.hibernate.engine.spi.SessionFactoryImplementor;
|
27
|
import org.hibernate.search.Search;
|
28
|
import org.springframework.beans.factory.annotation.Autowired;
|
29
|
import org.springframework.orm.hibernate5.HibernateTransactionManager;
|
30
|
import org.springframework.stereotype.Service;
|
31
|
import org.springframework.transaction.TransactionDefinition;
|
32
|
import org.springframework.transaction.TransactionStatus;
|
33
|
import org.springframework.transaction.support.DefaultTransactionDefinition;
|
34
|
|
35
|
import eu.etaxonomy.cdm.api.service.IClassificationService;
|
36
|
import eu.etaxonomy.cdm.api.service.IDescriptionService;
|
37
|
import eu.etaxonomy.cdm.api.service.INameService;
|
38
|
import eu.etaxonomy.cdm.api.service.ITaxonService;
|
39
|
import eu.etaxonomy.cdm.api.service.ITermService;
|
40
|
import eu.etaxonomy.cdm.common.monitor.IProgressMonitor;
|
41
|
import eu.etaxonomy.cdm.common.monitor.NullProgressMonitor;
|
42
|
import eu.etaxonomy.cdm.common.monitor.SubProgressMonitor;
|
43
|
import eu.etaxonomy.cdm.model.common.DefinedTermBase;
|
44
|
import eu.etaxonomy.cdm.model.common.Extension;
|
45
|
import eu.etaxonomy.cdm.model.common.ExtensionType;
|
46
|
import eu.etaxonomy.cdm.model.common.Marker;
|
47
|
import eu.etaxonomy.cdm.model.common.MarkerType;
|
48
|
import eu.etaxonomy.cdm.model.common.OrderedTermBase;
|
49
|
import eu.etaxonomy.cdm.model.description.DescriptionElementBase;
|
50
|
import eu.etaxonomy.cdm.model.description.DescriptionElementSource;
|
51
|
import eu.etaxonomy.cdm.model.description.Distribution;
|
52
|
import eu.etaxonomy.cdm.model.description.PresenceAbsenceTerm;
|
53
|
import eu.etaxonomy.cdm.model.description.TaxonDescription;
|
54
|
import eu.etaxonomy.cdm.model.location.NamedArea;
|
55
|
import eu.etaxonomy.cdm.model.name.Rank;
|
56
|
import eu.etaxonomy.cdm.model.taxon.Classification;
|
57
|
import eu.etaxonomy.cdm.model.taxon.Taxon;
|
58
|
import eu.etaxonomy.cdm.model.taxon.TaxonBase;
|
59
|
import eu.etaxonomy.cdm.persistence.dto.ClassificationLookupDTO;
|
60
|
import eu.etaxonomy.cdm.persistence.query.OrderHint;
|
61
|
|
62
|
/**
|
63
|
*
|
64
|
* <h2>GENERAL NOTES </h2>
|
65
|
* <em>TODO: These notes are directly taken from original Transmission Engine Occurrence
|
66
|
* version 14 written in Visual Basic and still need to be
|
67
|
* adapted to the java version of the transmission engine!</em>
|
68
|
*
|
69
|
* <h3>summaryStatus</h3>
|
70
|
*
|
71
|
* Each distribution record has a summaryStatus, which is a summary of the status codes
|
72
|
* as stored in the fields of emOccurrence native, introduced, cultivated, ...
|
73
|
* The summaryStatus seems to be equivalent to the CDM DistributionStatus
|
74
|
*
|
75
|
* <h3>map generation</h3>
|
76
|
*
|
77
|
* When generating maps from the accumulated distribution information some special cases have to be handled:
|
78
|
* <ol>
|
79
|
* <li>if an entered or imported status information exists for the same area for which calculated (accumulated)
|
80
|
* data is available, the calculated data has to be given preference over other data.
|
81
|
* </li>
|
82
|
* <li>If there is an area with a sub area and both areas have the same calculated status only the subarea
|
83
|
* status should be shown in the map, whereas the super area should be ignored.
|
84
|
* </li>
|
85
|
* </ol>
|
86
|
*
|
87
|
* @author Anton Güntsch (author of original Transmission Engine Occurrence version 14 written in Visual Basic)
|
88
|
* @author Andreas Kohlbecker (2013, porting Transmission Engine Occurrence to Java)
|
89
|
* @date Feb 22, 2013
|
90
|
*/
|
91
|
@Service
|
92
|
public class TransmissionEngineDistribution { //TODO extends IoBase?
|
93
|
|
94
|
/**
 * Prefix of the {@link Extension} value that encodes the priority of a status term,
 * e.g. "transmissionEngineDistribution.priority:5".
 * @see #getPriorityFor(DefinedTermBase)
 */
public static final String EXTENSION_VALUE_PREFIX = "transmissionEngineDistribution.priority:";

public static final Logger logger = Logger.getLogger(TransmissionEngineDistribution.class);

/**
 * only used for performance testing: when true, the accumulation loops stop
 * after the first batch. (Field name contains a typo; kept as-is since it is
 * referenced throughout this class.)
 */
final boolean ONLY_FISRT_BATCH = false;


// Property paths to eagerly initialize when loading taxa, so that the
// accumulation loops can read descriptions, elements, areas, status and
// sources without per-object lazy loading.
protected static final List<String> TAXONDESCRIPTION_INIT_STRATEGY = Arrays.asList(new String [] {
        "description.markers.markerType",
        "description.elements.markers.markerType",
        "description.elements.area",
        "description.elements.status",
        "description.elements.sources.citation.authorship",
//            "description.elements.sources.nameUsedInSource",
//            "description.elements.multilanguageText",
//            "name.status.type",
});


/**
 * A map which contains the status terms as key and the priority as value
 * The map will contain both, the PresenceTerms and the AbsenceTerms
 */
private Map<PresenceAbsenceTerm, Integer> statusPriorityMap = null;

@Autowired
private IDescriptionService descriptionService;

@Autowired
private ITermService termService;

@Autowired
private ITaxonService taxonService;

@Autowired
private IClassificationService classificationService;

// NOTE(review): name looks like a typo for "nameService"; it is only
// referenced from commented-out code in this chunk — consider renaming.
@Autowired
private INameService mameService;

@Autowired
private HibernateTransactionManager transactionManager;

// Status terms ignored during by-area accumulation; lazily initialized in getByAreaIgnoreStatusList().
private List<PresenceAbsenceTerm> byAreaIgnoreStatusList = null;

// Status terms ignored during by-rank accumulation; lazily initialized in getByRankIgnoreStatusList().
private List<PresenceAbsenceTerm> byRankIgnoreStatusList = null;

// Cache of super area -> sub areas; presumably populated by getSubAreasFor()
// (defined elsewhere in this class) — TODO confirm.
private final Map<NamedArea, Set<NamedArea>> subAreaMap = new HashMap<NamedArea, Set<NamedArea>>();

// Reusable empty order-hint list for the various service list() calls.
private final List<OrderHint> emptyOrderHints = new ArrayList<OrderHint>(0);
|
147
|
|
148
|
|
149
|
/**
|
150
|
* byAreaIgnoreStatusList contains by default:
|
151
|
* <ul>
|
152
|
* <li>AbsenceTerm.CULTIVATED_REPORTED_IN_ERROR()</li>
|
153
|
* <li>AbsenceTerm.INTRODUCED_REPORTED_IN_ERROR()</li>
|
154
|
* <li>AbsenceTerm.INTRODUCED_FORMERLY_INTRODUCED()</li>
|
155
|
* <li>AbsenceTerm.NATIVE_REPORTED_IN_ERROR()</li>
|
156
|
* <li>AbsenceTerm.NATIVE_FORMERLY_NATIVE()</li>
|
157
|
* </ul>
|
158
|
*
|
159
|
* @return the byAreaIgnoreStatusList
|
160
|
*/
|
161
|
public List<PresenceAbsenceTerm> getByAreaIgnoreStatusList() {
|
162
|
if(byAreaIgnoreStatusList == null ){
|
163
|
byAreaIgnoreStatusList = Arrays.asList(
|
164
|
new PresenceAbsenceTerm[] {
|
165
|
PresenceAbsenceTerm.CULTIVATED_REPORTED_IN_ERROR(),
|
166
|
PresenceAbsenceTerm.INTRODUCED_REPORTED_IN_ERROR(),
|
167
|
PresenceAbsenceTerm.NATIVE_REPORTED_IN_ERROR(),
|
168
|
PresenceAbsenceTerm.INTRODUCED_FORMERLY_INTRODUCED(),
|
169
|
PresenceAbsenceTerm.NATIVE_FORMERLY_NATIVE()
|
170
|
// TODO what about PresenceAbsenceTerm.ABSENT() also ignore?
|
171
|
});
|
172
|
}
|
173
|
return byAreaIgnoreStatusList;
|
174
|
}
|
175
|
|
176
|
/**
 * Replaces the default list of status terms ignored during by-area
 * accumulation (see {@link #getByAreaIgnoreStatusList()}).
 *
 * @param byAreaIgnoreStatusList the byAreaIgnoreStatusList to set
 */
public void setByAreaIgnoreStatusList(List<PresenceAbsenceTerm> byAreaIgnoreStatusList) {
    this.byAreaIgnoreStatusList = byAreaIgnoreStatusList;
}
|
182
|
|
183
|
/**
|
184
|
* byRankIgnoreStatusList contains by default
|
185
|
* <ul>
|
186
|
* <li>PresenceTerm.ENDEMIC_FOR_THE_RELEVANT_AREA()</li>
|
187
|
* </ul>
|
188
|
*
|
189
|
* @return the byRankIgnoreStatusList
|
190
|
*/
|
191
|
public List<PresenceAbsenceTerm> getByRankIgnoreStatusList() {
|
192
|
|
193
|
if (byRankIgnoreStatusList == null) {
|
194
|
byRankIgnoreStatusList = Arrays.asList(
|
195
|
new PresenceAbsenceTerm[] {
|
196
|
PresenceAbsenceTerm.ENDEMIC_FOR_THE_RELEVANT_AREA()
|
197
|
});
|
198
|
}
|
199
|
return byRankIgnoreStatusList;
|
200
|
}
|
201
|
|
202
|
/**
 * Replaces the default list of status terms ignored during by-rank
 * accumulation (see {@link #getByRankIgnoreStatusList()}).
 *
 * @param byRankIgnoreStatusList the byRankIgnoreStatusList to set
 */
public void setByRankIgnoreStatusList(List<PresenceAbsenceTerm> byRankIgnoreStatusList) {
    this.byRankIgnoreStatusList = byRankIgnoreStatusList;
}
|
208
|
|
209
|
/**
 * Default constructor. All collaborators are injected by Spring via the
 * {@code @Autowired} fields, so no constructor arguments are needed.
 */
public TransmissionEngineDistribution() {
}
|
215
|
|
216
|
/**
|
217
|
* initializes the map which contains the status terms as key and the priority as value
|
218
|
* The map will contain both, the PresenceTerms and the AbsenceTerms
|
219
|
*/
|
220
|
private void initializeStatusPriorityMap() {
|
221
|
|
222
|
statusPriorityMap = new HashMap<PresenceAbsenceTerm, Integer>();
|
223
|
Integer priority;
|
224
|
|
225
|
// PresenceTerms
|
226
|
for(PresenceAbsenceTerm term : termService.list(PresenceAbsenceTerm.class, null, null, null, null)){
|
227
|
priority = getPriorityFor(term);
|
228
|
if(priority != null){
|
229
|
statusPriorityMap.put(term, priority);
|
230
|
}
|
231
|
}
|
232
|
}
|
233
|
|
234
|
/**
|
235
|
* Compares the PresenceAbsenceTermBase terms contained in <code>a.status</code> and <code>b.status</code> after
|
236
|
* the priority as stored in the statusPriorityMap. The StatusAndSources object with
|
237
|
* the higher priority is returned. In the case of <code>a == b</code> the sources of b will be added to the sources
|
238
|
* of a.
|
239
|
*
|
240
|
* If either a or b or the status are null b or a is returned.
|
241
|
*
|
242
|
* @see initializeStatusPriorityMap()
|
243
|
*
|
244
|
* @param a
|
245
|
* @param b
|
246
|
* @param sourcesForWinnerB
|
247
|
* In the case when <code>b</code> is preferred over <code>a</code> these Set of sources will be added to the sources of <code>b</code>
|
248
|
* @return
|
249
|
*/
|
250
|
private StatusAndSources choosePreferred(StatusAndSources a, StatusAndSources b, Set<DescriptionElementSource> sourcesForWinnerB){
|
251
|
|
252
|
if (statusPriorityMap == null) {
|
253
|
initializeStatusPriorityMap();
|
254
|
}
|
255
|
|
256
|
if (b == null || b.status == null) {
|
257
|
return a;
|
258
|
}
|
259
|
if (a == null || a.status == null) {
|
260
|
return b;
|
261
|
}
|
262
|
|
263
|
if (statusPriorityMap.get(a.status) == null) {
|
264
|
logger.warn("No priority found in map for " + a.status.getLabel());
|
265
|
return b;
|
266
|
}
|
267
|
if (statusPriorityMap.get(b.status) == null) {
|
268
|
logger.warn("No priority found in map for " + b.status.getLabel());
|
269
|
return a;
|
270
|
}
|
271
|
if(statusPriorityMap.get(a.status) < statusPriorityMap.get(b.status)){
|
272
|
if(sourcesForWinnerB != null) {
|
273
|
b.addSources(sourcesForWinnerB);
|
274
|
}
|
275
|
return b;
|
276
|
} else if (statusPriorityMap.get(a.status) == statusPriorityMap.get(b.status)){
|
277
|
a.addSources(b.sources);
|
278
|
return a;
|
279
|
} else {
|
280
|
return a;
|
281
|
}
|
282
|
}
|
283
|
|
284
|
/**
|
285
|
* reads the priority for the given status term from the extensions.
|
286
|
*
|
287
|
* @param term
|
288
|
* @return the priority value
|
289
|
*/
|
290
|
private Integer getPriorityFor(DefinedTermBase<?> term) {
|
291
|
Set<Extension> extensions = term.getExtensions();
|
292
|
for(Extension extension : extensions){
|
293
|
if(!extension.getType().equals(ExtensionType.ORDER())) {
|
294
|
continue;
|
295
|
}
|
296
|
int pos = extension.getValue().indexOf(EXTENSION_VALUE_PREFIX);
|
297
|
if(pos == 0){ // if starts with EXTENSION_VALUE_PREFIX
|
298
|
try {
|
299
|
Integer priority = Integer.valueOf(extension.getValue().substring(EXTENSION_VALUE_PREFIX.length()));
|
300
|
return priority;
|
301
|
} catch (NumberFormatException e) {
|
302
|
logger.warn("Invalid number format in Extension:" + extension.getValue());
|
303
|
}
|
304
|
}
|
305
|
}
|
306
|
logger.warn("no priority defined for '" + term.getLabel() + "'");
|
307
|
return null;
|
308
|
}
|
309
|
|
310
|
/**
 * runs both steps
 * <ul>
 * <li>Step 1: Accumulate occurrence records by area</li>
 * <li>Step 2: Accumulate by ranks starting from lower rank to upper rank,
 * the status of all children are accumulated on each rank starting from
 * lower rank to upper rank.</li>
 * </ul>
 *
 * @param mode
 *      which of the two steps to run (byAreas, byRanks, or byAreasAndRanks)
 * @param superAreas
 *      the areas to which the subordinate areas should be projected.
 * @param lowerRank
 * @param upperRank
 * @param classification
 *      limit the accumulation process to a specific classification;
 *      <code>null</code> means all classifications (per-classification
 *      limiting is noted as not yet implemented)
 * @param monitor
 *      the progress monitor to use for reporting progress to the
 *      user. It is the caller's responsibility to call done() on the
 *      given monitor. Accepts null, indicating that no progress
 *      should be reported and that the operation cannot be cancelled.
 */
public void accumulate(AggregationMode mode, List<NamedArea> superAreas, Rank lowerRank, Rank upperRank,
        Classification classification, IProgressMonitor monitor) {

    if (monitor == null) {
        monitor = new NullProgressMonitor();
    }


    // only for debugging:
    //logger.setLevel(Level.TRACE); // TRACE will slow down a lot since it forces loading all term representations
    //Logger.getLogger("org.hibernate.SQL").setLevel(Level.DEBUG);

    logger.info("Hibernate JDBC Batch size: "
            + ((SessionFactoryImplementor) getSession().getSessionFactory()).getSettings().getJdbcBatchSize());

    // collect the classifications to process: either the single one given,
    // or every classification in the store
    Set<Classification> classifications = new HashSet<Classification>();
    if(classification == null) {
        classifications.addAll(classificationService.listClassifications(null, null, null, null));
    } else {
        classifications.add(classification);
    }

    // 200 monitor ticks per step, so 400 when both steps run
    int aggregationWorkTicks = mode.equals(AggregationMode.byAreasAndRanks) ? 400 : 200;

    // take start time for performance testing
    // NOTE: use ONLY_FISRT_BATCH = true to measure only one batch
    double start = System.currentTimeMillis();

    monitor.beginTask("Accumulating distributions", (classifications.size() * aggregationWorkTicks) + 1 );
    updatePriorities();
    monitor.worked(1);

    // the inclusive rank range [lowerRank .. upperRank] to operate on
    List<Rank> ranks = rankInterval(lowerRank, upperRank);

    for(Classification _classification : classifications) {

        ClassificationLookupDTO classificationLookupDao = classificationService.classificationLookup(_classification);
        classificationLookupDao.filterInclude(ranks);

        double end1 = System.currentTimeMillis();
        logger.info("Time elapsed for classificationLookup() : " + (end1 - start) / (1000) + "s");
        double start2 = System.currentTimeMillis();

        monitor.subTask("Accumulating distributions to super areas for " + _classification.getTitleCache());
        if (mode.equals(AggregationMode.byAreas) || mode.equals(AggregationMode.byAreasAndRanks)) {
            // Step 1: project sub-area distributions onto the super areas
            accumulateByArea(superAreas, classificationLookupDao, new SubProgressMonitor(monitor, 200), true);
        }
        monitor.subTask("Accumulating distributions to higher ranks for " + _classification.getTitleCache());

        double end2 = System.currentTimeMillis();
        logger.info("Time elapsed for accumulateByArea() : " + (end2 - start2) / (1000) + "s");

        double start3 = System.currentTimeMillis();
        if (mode.equals(AggregationMode.byRanks) || mode.equals(AggregationMode.byAreasAndRanks)) {
            // Step 2: aggregate child-taxon distributions up the rank interval;
            // existing computed descriptions are cleared only when step 1 did not run
            accumulateByRank(ranks, classificationLookupDao, new SubProgressMonitor(monitor, 200), mode.equals(AggregationMode.byRanks));
        }

        double end3 = System.currentTimeMillis();
        logger.info("Time elapsed for accumulateByRank() : " + (end3 - start3) / (1000) + "s");
        logger.info("Time elapsed for accumulate(): " + (end3 - start) / (1000) + "s");

        if(ONLY_FISRT_BATCH) {
            // performance-testing mode: stop after the first classification
            monitor.done();
            break;
        }
    }
}
|
400
|
|
401
|
|
402
|
/**
 * Step 1: Accumulate occurrence records by area
 * <ul>
 * <li>areas are projected to super areas e.g.: HS &lt;-- HS(A), HS(G), HS(S)</li>
 * <li>super areas do initially not have a status set ==&gt; Prerequisite to check in CDM</li>
 * <li>areas having a status contained in {@link #getByAreaIgnoreStatusList()} are ignored</li>
 * <li>areas have a priority value, the status of the area with highest priority determines the status of the super area</li>
 * <li>the source references of the accumulated distributions are also accumulated into the new distribution,</li>
 * <li>this has been especially implemented for the EuroMed Checklist Vol2 and might not be a general requirement</li>
 * </ul>
 *
 * Works in batches of 1000 taxa; each batch runs in its own transaction and
 * the session is flushed and cleared after every batch.
 *
 * @param superAreas
 *      the areas to which the subordinate areas should be projected
 * @param classificationLookupDao
 *      lookup of the taxon ids (already filtered by rank) to process
 * @param subMonitor
 *      progress monitor; one tick per taxon
 * @param doClearDescriptions
 *      when true, existing computed descriptions are cleared before
 *      new distribution elements are added (see findComputedDescription)
 */
protected void accumulateByArea(List<NamedArea> superAreas, ClassificationLookupDTO classificationLookupDao, IProgressMonitor subMonitor, boolean doClearDescriptions) {

    int batchSize = 1000;

    TransactionStatus txStatus = startTransaction(false);

    // reload superAreas TODO is it faster to getSession().merge(object) ??
    Set<UUID> superAreaUuids = new HashSet<UUID>(superAreas.size());
    for (NamedArea superArea : superAreas){
        superAreaUuids.add(superArea.getUuid());
    }

    // visit all accepted taxa
    subMonitor.beginTask("Accumulating by area ", classificationLookupDao.getTaxonIds().size());
    Iterator<Integer> taxonIdIterator = classificationLookupDao.getTaxonIds().iterator();

    while (taxonIdIterator.hasNext()) {

        if(txStatus == null) {
            // transaction has been committed at the end of this batch, start a new one
            txStatus = startTransaction(false);
        }

        // the session is cleared after each batch, so load the superAreaList for each batch
        List<NamedArea> superAreaList = (List)termService.find(superAreaUuids);

        // load taxa for this batch
        List<TaxonBase> taxa = null;
        Set<Integer> taxonIds = new HashSet<Integer>(batchSize);
        while(taxonIdIterator.hasNext() && taxonIds.size() < batchSize ) {
            taxonIds.add(taxonIdIterator.next());
        }

        // logger.debug("accumulateByArea() - taxon " + taxonPager.getFirstRecord() + " to " + taxonPager.getLastRecord() + " of " + taxonPager.getCount() + "]");

        taxa = taxonService.listByIds(taxonIds, null, null, emptyOrderHints, TAXONDESCRIPTION_INIT_STRATEGY);

        // iterate over the taxa and accumulate areas
        for(TaxonBase taxonBase : taxa) {
            if(logger.isDebugEnabled()){
                logger.debug("accumulateByArea() - taxon :" + taxonToString(taxonBase));
            }

            Taxon taxon = (Taxon)taxonBase;
            TaxonDescription description = findComputedDescription(taxon, doClearDescriptions);
            List<Distribution> distributions = distributionsFor(taxon);

            // Step through superAreas for accumulation of subAreas
            for (NamedArea superArea : superAreaList){

                // accumulate all sub area status
                StatusAndSources accumulatedStatusAndSources = null;
                // TODO consider using the TermHierarchyLookup (only in local branch a.kohlbecker)
                Set<NamedArea> subAreas = getSubAreasFor(superArea);
                for(NamedArea subArea : subAreas){
                    if(logger.isTraceEnabled()){
                        logger.trace("accumulateByArea() - \t\t" + termToString(subArea));
                    }
                    // step through all distributions for the given subArea
                    for(Distribution distribution : distributions){
                        if(distribution.getArea() != null && distribution.getArea().equals(subArea) && distribution.getStatus() != null) {
                            PresenceAbsenceTerm status = distribution.getStatus();
                            if(logger.isTraceEnabled()){
                                logger.trace("accumulateByArea() - \t\t" + termToString(subArea) + ": " + termToString(status));
                            }
                            // skip status values contained in byAreaIgnoreStatusList
                            if (getByAreaIgnoreStatusList().contains(status)){
                                continue;
                            }
                            // keep the higher-priority status, merging sources (see choosePreferred)
                            StatusAndSources subStatusAndSources = new StatusAndSources(status, distribution.getSources());
                            accumulatedStatusAndSources = choosePreferred(accumulatedStatusAndSources, subStatusAndSources, null);
                        }
                    }
                } // next sub area
                if (accumulatedStatusAndSources != null) {
                    if(logger.isDebugEnabled()){
                        logger.debug("accumulateByArea() - \t >> " + termToString(superArea) + ": " + termToString(accumulatedStatusAndSources.status));
                    }
                    // store new distribution element for superArea in taxon description,
                    // marked COMPUTED so it can be found/cleared on later runs
                    Distribution newDistribitionElement = Distribution.NewInstance(superArea, accumulatedStatusAndSources.status);
                    newDistribitionElement.getSources().addAll(accumulatedStatusAndSources.sources);
                    newDistribitionElement.addMarker(Marker.NewInstance(MarkerType.COMPUTED(), true));
                    description.addElement(newDistribitionElement);
                }

            } // next super area ....

            descriptionService.saveOrUpdate(description);
            taxonService.saveOrUpdate(taxon);
            subMonitor.worked(1);

        } // next taxon

        flushAndClear();

        // commit for every batch, otherwise the persistent context
        // may grow too much and eats up all the heap
        commitTransaction(txStatus);
        txStatus = null;

        if(ONLY_FISRT_BATCH) {
            // performance-testing mode: stop after the first batch
            break;
        }

    } // next batch of taxa

    subMonitor.done();
}
|
526
|
|
527
|
/**
 * Step 2: Accumulate by ranks starting from lower rank to upper rank, the status of all children
 * are accumulated on each rank starting from lower rank to upper rank.
 * <ul>
 * <li>aggregate distribution of included taxa of the next lower rank for any rank level starting from the lower rank (e.g. sub species)
 *    up to upper rank (e.g. Genus)</li>
 * <li>the accumulation is done for each distribution area found in the included taxa</li>
 * <li>areas of subtaxa with status endemic are ignored</li>
 * <li>the status with the highest priority determines the value for the accumulated distribution</li>
 * <li>the source reference of the accumulated distributions are also accumulated into the new distribution,
 *    this has been especially implemented for the EuroMed Checklist Vol2 and might not be a general requirement</li>
 * </ul>
 *
 * Works in batches of 500 taxa per rank; each batch runs in its own
 * transaction and the session is flushed and cleared after every batch.
 *
 * @param rankInterval ranks to process, ordered from lower to upper
 * @param classificationLookupDao lookup providing taxon ids per rank and the child-taxon map
 * @param subMonitor progress monitor; ticksPerRank ticks per rank
 * @param doClearDescriptions when true, existing computed descriptions are cleared first
 */
protected void accumulateByRank(List<Rank> rankInterval, ClassificationLookupDTO classificationLookupDao, IProgressMonitor subMonitor, boolean doClearDescriptions) {

    int batchSize = 500;

    TransactionStatus txStatus = startTransaction(false);

    // the loadRankSpecificRootNodes() method not only finds
    // taxa of the specified rank but also taxa of lower ranks
    // if no taxon of the specified rank exists, so we need to
    // remember which taxa have been processed already
    Set<Integer> taxaProcessedIds = new HashSet<Integer>();
    List<TaxonBase> taxa = null;
    List<TaxonBase> childTaxa = null;

    List<Rank> ranks = rankInterval;

    int ticksPerRank = 100;
    subMonitor.beginTask("Accumulating by rank", ranks.size() * ticksPerRank);

    for (Rank rank : ranks) {

        if(logger.isDebugEnabled()){
            logger.debug("accumulateByRank() - at Rank '" + termToString(rank) + "'");
        }

        SubProgressMonitor taxonSubMonitor = null;
        Set<Integer> taxonIdsPerRank = classificationLookupDao.getTaxonIdByRank().get(rank);
        if(taxonIdsPerRank == null || taxonIdsPerRank.isEmpty()) {
            // no taxa at this rank in the classification
            continue;
        }
        Iterator<Integer> taxonIdIterator = taxonIdsPerRank.iterator();
        while (taxonIdIterator.hasNext()) {

            if(txStatus == null) {
                // transaction has been committed at the end of this batch, start a new one
                txStatus = startTransaction(false);
            }

            // load taxa for this batch
            Set<Integer> taxonIds = new HashSet<Integer>(batchSize);
            while(taxonIdIterator.hasNext() && taxonIds.size() < batchSize ) {
                taxonIds.add(taxonIdIterator.next());
            }

            taxa = taxonService.listByIds(taxonIds, null, null, emptyOrderHints, null);

            if(taxonSubMonitor == null) {
                // sub-monitor is created lazily so that taxa.size() is known
                taxonSubMonitor = new SubProgressMonitor(subMonitor, ticksPerRank);
                taxonSubMonitor.beginTask("Accumulating by rank " + termToString(rank), taxa.size());
            }

            // if(logger.isDebugEnabled()){
            //     logger.debug("accumulateByRank() - taxon " + taxonPager.getFirstRecord() + " to " + taxonPager.getLastRecord() + " of " + taxonPager.getCount() + "]");
            // }

            for(TaxonBase taxonBase : taxa) {

                Taxon taxon = (Taxon)taxonBase;
                if (taxaProcessedIds.contains(taxon.getId())) {
                    if(logger.isDebugEnabled()){
                        logger.debug("accumulateByRank() - skipping already processed taxon :" + taxonToString(taxon));
                    }
                    continue;
                }
                taxaProcessedIds.add(taxon.getId());
                if(logger.isDebugEnabled()){
                    logger.debug("accumulateByRank() [" + rank.getLabel() + "] - taxon :" + taxonToString(taxon));
                }

                // Step through direct taxonomic children for accumulation
                Map<NamedArea, StatusAndSources> accumulatedStatusMap = new HashMap<NamedArea, StatusAndSources>();

                Set<Integer> childTaxonIds = classificationLookupDao.getChildTaxonMap().get(taxon.getId());
                if(childTaxonIds != null && !childTaxonIds.isEmpty()) {
                    childTaxa = taxonService.listByIds(childTaxonIds, null, null, emptyOrderHints, TAXONDESCRIPTION_INIT_STRATEGY);

                    for (TaxonBase childTaxonBase : childTaxa){

                        Taxon childTaxon = (Taxon) childTaxonBase;
                        // children are only read here, never modified
                        getSession().setReadOnly(childTaxon, true);
                        if(logger.isTraceEnabled()){
                            logger.trace("   subtaxon :" + taxonToString(childTaxon));
                        }

                        for(Distribution distribution : distributionsFor(childTaxon) ) {
                            PresenceAbsenceTerm status = distribution.getStatus();
                            NamedArea area = distribution.getArea();
                            // skip missing status and statuses in the by-rank ignore list (e.g. endemic)
                            if (status == null || getByRankIgnoreStatusList().contains(status)){
                                continue;
                            }

                            // per area, keep the higher-priority status and merge sources
                            StatusAndSources subStatusAndSources = new StatusAndSources(status, distribution.getSources());
                            accumulatedStatusMap.put(area, choosePreferred(accumulatedStatusMap.get(area), subStatusAndSources, null));
                        }
                    }

                    if(accumulatedStatusMap.size() > 0) {
                        TaxonDescription description = findComputedDescription(taxon, doClearDescriptions);
                        for (NamedArea area : accumulatedStatusMap.keySet()) {
                            // reuse an existing computed distribution for this area+status if present
                            Distribution distribition = findDistribution(description, area, accumulatedStatusMap.get(area).status);
                            if(distribition == null) {
                                // create a new distribution element
                                distribition = Distribution.NewInstance(area, accumulatedStatusMap.get(area).status);
                                distribition.addMarker(Marker.NewInstance(MarkerType.COMPUTED(), true));
                            }
                            addSourcesDeduplicated(distribition.getSources(), accumulatedStatusMap.get(area).sources);

                            description.addElement(distribition);
                        }
                        taxonService.saveOrUpdate(taxon);
                        descriptionService.saveOrUpdate(description);
                    }

                }
                taxonSubMonitor.worked(1); // one taxon worked

            } // next taxon ....

            flushAndClear();

            // commit for every batch, otherwise the persistent context
            // may grow too much and eats up all the heap
            commitTransaction(txStatus);
            txStatus = null;

            if(ONLY_FISRT_BATCH) {
                break;
            }
        } // next batch

        if(taxonSubMonitor != null) { // TODO taxonSubpager, this check should not be needed
            taxonSubMonitor.done();
        }
        subMonitor.worked(1);

        if(ONLY_FISRT_BATCH) {
            break;
        }
    } // next Rank

    subMonitor.done();
}
|
682
|
|
683
|
/**
|
684
|
* @param description
|
685
|
* @param area
|
686
|
* @param status
|
687
|
* @return
|
688
|
*/
|
689
|
private Distribution findDistribution(TaxonDescription description, NamedArea area, PresenceAbsenceTerm status) {
|
690
|
for(DescriptionElementBase item : description.getElements()) {
|
691
|
if(!(item instanceof Distribution)) {
|
692
|
continue;
|
693
|
}
|
694
|
Distribution distribution = ((Distribution)item);
|
695
|
if(distribution.getArea().equals(area) && distribution.getStatus().equals(status)) {
|
696
|
return distribution;
|
697
|
}
|
698
|
}
|
699
|
return null;
|
700
|
}
|
701
|
|
702
|
/**
|
703
|
* @param lowerRank
|
704
|
* @param upperRank
|
705
|
* @return
|
706
|
*/
|
707
|
private List<Rank> rankInterval(Rank lowerRank, Rank upperRank) {
|
708
|
|
709
|
TransactionStatus txStatus = startTransaction(false);
|
710
|
Rank currentRank = lowerRank;
|
711
|
List<Rank> ranks = new ArrayList<Rank>();
|
712
|
ranks.add(currentRank);
|
713
|
while (!currentRank.isHigher(upperRank)) {
|
714
|
currentRank = findNextHigherRank(currentRank);
|
715
|
ranks.add(currentRank);
|
716
|
}
|
717
|
commitTransaction(txStatus);
|
718
|
txStatus = null;
|
719
|
return ranks;
|
720
|
}
|
721
|
|
722
|
/**
 * Shortcut for the Hibernate session exposed by the description service.
 *
 * @return the current Hibernate session
 */
private Session getSession() {
    return descriptionService.getSession();
}
|
728
|
|
729
|
/**
|
730
|
*
|
731
|
*/
|
732
|
private void flushAndClear() {
|
733
|
logger.debug("flushing and clearing session ...");
|
734
|
getSession().flush();
|
735
|
try {
|
736
|
Search.getFullTextSession(getSession()).flushToIndexes();
|
737
|
} catch (HibernateException e) {
|
738
|
/* IGNORE - Hibernate Search Event listeners not configured ... */
|
739
|
if(!e.getMessage().startsWith("Hibernate Search Event listeners not configured")){
|
740
|
throw e;
|
741
|
}
|
742
|
}
|
743
|
getSession().clear();
|
744
|
}
|
745
|
|
746
|
|
747
|
// TODO merge with CdmApplicationDefaultConfiguration#startTransaction() into common base class
|
748
|
public TransactionStatus startTransaction(Boolean readOnly) {
|
749
|
|
750
|
DefaultTransactionDefinition defaultTxDef = new DefaultTransactionDefinition();
|
751
|
defaultTxDef.setReadOnly(readOnly);
|
752
|
TransactionDefinition txDef = defaultTxDef;
|
753
|
|
754
|
// Log some transaction-related debug information.
|
755
|
if (logger.isTraceEnabled()) {
|
756
|
logger.trace("Transaction name = " + txDef.getName());
|
757
|
logger.trace("Transaction facets:");
|
758
|
logger.trace("Propagation behavior = " + txDef.getPropagationBehavior());
|
759
|
logger.trace("Isolation level = " + txDef.getIsolationLevel());
|
760
|
logger.trace("Timeout = " + txDef.getTimeout());
|
761
|
logger.trace("Read Only = " + txDef.isReadOnly());
|
762
|
// org.springframework.orm.hibernate5.HibernateTransactionManager
|
763
|
// provides more transaction/session-related debug information.
|
764
|
}
|
765
|
|
766
|
TransactionStatus txStatus = transactionManager.getTransaction(txDef);
|
767
|
|
768
|
getSession().setFlushMode(FlushMode.COMMIT);
|
769
|
|
770
|
return txStatus;
|
771
|
}
|
772
|
|
773
|
// TODO merge with CdmApplicationDefaultConfiguration#startTransaction() into common base class
|
774
|
public void commitTransaction(TransactionStatus txStatus){
|
775
|
logger.debug("commiting transaction ...");
|
776
|
transactionManager.commit(txStatus);
|
777
|
return;
|
778
|
}
|
779
|
|
780
|
/**
|
781
|
* returns the next higher rank
|
782
|
*
|
783
|
* TODO better implement OrderedTermBase.getNextHigherTerm() and OrderedTermBase.getNextLowerTerm()?
|
784
|
*
|
785
|
* @param rank
|
786
|
* @return
|
787
|
*/
|
788
|
private Rank findNextHigherRank(Rank rank) {
|
789
|
rank = (Rank) termService.load(rank.getUuid());
|
790
|
return rank.getNextHigherTerm();
|
791
|
// OrderedTermVocabulary<Rank> rankVocabulary = mameService.getRankVocabulary();;
|
792
|
// return rankVocabulary.getNextHigherTerm(rank);
|
793
|
}
|
794
|
|
795
|
/**
|
796
|
* Either finds an existing taxon description of the given taxon or creates a new one.
|
797
|
* If the doClear is set all existing description elements will be cleared.
|
798
|
*
|
799
|
* @param taxon
|
800
|
* @param doClear will remove all existing Distributions if the taxon already
|
801
|
* has a MarkerType.COMPUTED() TaxonDescription
|
802
|
* @return
|
803
|
*/
|
804
|
private TaxonDescription findComputedDescription(Taxon taxon, boolean doClear) {
|
805
|
|
806
|
String descriptionTitle = this.getClass().getSimpleName();
|
807
|
|
808
|
// find existing one
|
809
|
for (TaxonDescription description : taxon.getDescriptions()) {
|
810
|
if (description.hasMarker(MarkerType.COMPUTED(), true)) {
|
811
|
logger.debug("reusing description for " + taxon.getTitleCache());
|
812
|
if (doClear) {
|
813
|
int deleteCount = 0;
|
814
|
Set<DescriptionElementBase> deleteCandidates = new HashSet<DescriptionElementBase>();
|
815
|
for (DescriptionElementBase descriptionElement : description.getElements()) {
|
816
|
if(descriptionElement instanceof Distribution) {
|
817
|
deleteCandidates.add(descriptionElement);
|
818
|
}
|
819
|
}
|
820
|
if(deleteCandidates.size() > 0){
|
821
|
for(DescriptionElementBase descriptionElement : deleteCandidates) {
|
822
|
description.removeElement(descriptionElement);
|
823
|
descriptionService.deleteDescriptionElement(descriptionElement);
|
824
|
descriptionElement = null;
|
825
|
deleteCount++;
|
826
|
}
|
827
|
descriptionService.saveOrUpdate(description);
|
828
|
logger.debug("\t" + deleteCount +" distributions cleared");
|
829
|
}
|
830
|
|
831
|
}
|
832
|
return description;
|
833
|
}
|
834
|
}
|
835
|
|
836
|
// create a new one
|
837
|
logger.debug("creating new description for " + taxon.getTitleCache());
|
838
|
TaxonDescription description = TaxonDescription.NewInstance(taxon);
|
839
|
description.setTitleCache(descriptionTitle, true);
|
840
|
description.addMarker(Marker.NewInstance(MarkerType.COMPUTED(), true));
|
841
|
return description;
|
842
|
}
|
843
|
|
844
|
/**
|
845
|
* @param superArea
|
846
|
* @return
|
847
|
*/
|
848
|
private Set<NamedArea> getSubAreasFor(NamedArea superArea) {
|
849
|
|
850
|
if(!subAreaMap.containsKey(superArea)) {
|
851
|
if(logger.isDebugEnabled()){
|
852
|
logger.debug("loading included areas for " + superArea.getLabel());
|
853
|
}
|
854
|
subAreaMap.put(superArea, superArea.getIncludes());
|
855
|
}
|
856
|
return subAreaMap.get(superArea);
|
857
|
}
|
858
|
|
859
|
/**
|
860
|
* @param taxon
|
861
|
* @return
|
862
|
*/
|
863
|
private List<Distribution> distributionsFor(Taxon taxon) {
|
864
|
List<Distribution> distributions = new ArrayList<Distribution>();
|
865
|
for(TaxonDescription description: taxon.getDescriptions()) {
|
866
|
for(DescriptionElementBase deb : description.getElements()) {
|
867
|
if(deb instanceof Distribution) {
|
868
|
distributions.add((Distribution)deb);
|
869
|
}
|
870
|
}
|
871
|
}
|
872
|
return distributions;
|
873
|
}
|
874
|
|
875
|
/**
|
876
|
* @param taxon
|
877
|
* @param logger2
|
878
|
* @return
|
879
|
*/
|
880
|
private String taxonToString(TaxonBase taxon) {
|
881
|
if(logger.isTraceEnabled()) {
|
882
|
return taxon.getTitleCache();
|
883
|
} else {
|
884
|
return taxon.toString();
|
885
|
}
|
886
|
}
|
887
|
|
888
|
/**
|
889
|
* @param taxon
|
890
|
* @param logger2
|
891
|
* @return
|
892
|
*/
|
893
|
private String termToString(OrderedTermBase<?> term) {
|
894
|
if(logger.isTraceEnabled()) {
|
895
|
return term.getLabel() + " [" + term.getIdInVocabulary() + "]";
|
896
|
} else {
|
897
|
return term.getIdInVocabulary();
|
898
|
}
|
899
|
}
|
900
|
|
901
|
/**
|
902
|
* Sets the priorities for presence and absence terms, the priorities are stored in extensions.
|
903
|
* This method will start a new transaction and commits it after the work is done.
|
904
|
*/
|
905
|
public void updatePriorities() {
|
906
|
|
907
|
TransactionStatus txStatus = startTransaction(false);
|
908
|
|
909
|
Map<PresenceAbsenceTerm, Integer> priorityMap = new HashMap<PresenceAbsenceTerm, Integer>();
|
910
|
|
911
|
priorityMap.put(PresenceAbsenceTerm.CULTIVATED_REPORTED_IN_ERROR(), 1);
|
912
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_UNCERTAIN_DEGREE_OF_NATURALISATION(), 2);
|
913
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_FORMERLY_INTRODUCED(), 3);
|
914
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_REPORTED_IN_ERROR(), 20);
|
915
|
priorityMap.put(PresenceAbsenceTerm.NATIVE_REPORTED_IN_ERROR(), 30);
|
916
|
priorityMap.put(PresenceAbsenceTerm.CULTIVATED(), 45);
|
917
|
priorityMap.put(PresenceAbsenceTerm.NATIVE_FORMERLY_NATIVE(), 40);
|
918
|
priorityMap.put(PresenceAbsenceTerm.NATIVE_PRESENCE_QUESTIONABLE(), 60);
|
919
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_PRESENCE_QUESTIONABLE(), 50);
|
920
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_DOUBTFULLY_INTRODUCED(), 80);
|
921
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED(), 90);
|
922
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_ADVENTITIOUS(), 100);
|
923
|
priorityMap.put(PresenceAbsenceTerm.INTRODUCED_NATURALIZED(), 110);
|
924
|
priorityMap.put(PresenceAbsenceTerm.NATIVE_DOUBTFULLY_NATIVE(), 120); // null
|
925
|
priorityMap.put(PresenceAbsenceTerm.NATIVE(), 130); // null
|
926
|
priorityMap.put(PresenceAbsenceTerm.ENDEMIC_FOR_THE_RELEVANT_AREA(), 999);
|
927
|
|
928
|
for(PresenceAbsenceTerm term : priorityMap.keySet()) {
|
929
|
// load the term
|
930
|
term = (PresenceAbsenceTerm) termService.load(term.getUuid());
|
931
|
// find the extension
|
932
|
Extension priorityExtension = null;
|
933
|
Set<Extension> extensions = term.getExtensions();
|
934
|
for(Extension extension : extensions){
|
935
|
if (!extension.getType().equals(ExtensionType.ORDER())) {
|
936
|
continue;
|
937
|
}
|
938
|
int pos = extension.getValue().indexOf(EXTENSION_VALUE_PREFIX);
|
939
|
if(pos == 0){ // if starts with EXTENSION_VALUE_PREFIX
|
940
|
priorityExtension = extension;
|
941
|
break;
|
942
|
}
|
943
|
}
|
944
|
if(priorityExtension == null) {
|
945
|
priorityExtension = Extension.NewInstance(term, null, ExtensionType.ORDER());
|
946
|
}
|
947
|
priorityExtension.setValue(EXTENSION_VALUE_PREFIX + priorityMap.get(term));
|
948
|
|
949
|
// save the term
|
950
|
termService.saveOrUpdate(term);
|
951
|
if (logger.isDebugEnabled()) {
|
952
|
logger.debug("Priority updated for " + term.getLabel());
|
953
|
}
|
954
|
}
|
955
|
|
956
|
commitTransaction(txStatus);
|
957
|
}
|
958
|
|
959
|
public static void addSourcesDeduplicated(Set<DescriptionElementSource> target, Set<DescriptionElementSource> sources) {
|
960
|
for(DescriptionElementSource source : sources) {
|
961
|
boolean contained = false;
|
962
|
for(DescriptionElementSource existingSource: target) {
|
963
|
if(existingSource.equalsByShallowCompare(source)) {
|
964
|
contained = true;
|
965
|
break;
|
966
|
}
|
967
|
}
|
968
|
if(!contained) {
|
969
|
try {
|
970
|
target.add((DescriptionElementSource)source.clone());
|
971
|
} catch (CloneNotSupportedException e) {
|
972
|
// should never happen
|
973
|
throw new RuntimeException(e);
|
974
|
}
|
975
|
}
|
976
|
}
|
977
|
}
|
978
|
|
979
|
/**
 * The possible modes for the aggregation: along the area hierarchy, along the
 * rank hierarchy, or both.
 */
public enum AggregationMode {
    // aggregate along the area hierarchy only
    byAreas,
    // aggregate along the rank hierarchy only
    byRanks,
    // aggregate along both the area and the rank hierarchy
    byAreasAndRanks

}
|
985
|
|
986
|
/**
 * Value holder pairing a {@link PresenceAbsenceTerm} status with the set of
 * sources supporting it. Sources are deduplicated on every addition via
 * {@link #addSourcesDeduplicated(Set, Set)}.
 */
private class StatusAndSources {

    // the status this holder represents; immutable after construction
    private final PresenceAbsenceTerm status;

    // deduplicated sources supporting the status; grows via addSources()
    private final Set<DescriptionElementSource> sources = new HashSet<>();

    /**
     * Creates a holder for the given status, seeding it with deduplicated
     * clones of the given sources.
     *
     * @param status the presence/absence status
     * @param sources the initial sources for the status
     */
    public StatusAndSources(PresenceAbsenceTerm status, Set<DescriptionElementSource> sources) {
        this.status = status;
        addSourcesDeduplicated(this.sources, sources);
    }

    /**
     * Adds the given sources, skipping any that are already contained
     * (shallow comparison).
     *
     * @param sources the sources to add
     */
    public void addSources(Set<DescriptionElementSource> sources) {
        addSourcesDeduplicated(this.sources, sources);
    }

}
|
1005
|
}
|