/**
 * Copyright (C) 2017 EDIT
 * European Distributed Institute of Taxonomy
 * http://www.e-taxonomy.eu
 *
 * The contents of this file are subject to the Mozilla Public License Version 1.1
 * See LICENSE.TXT at the top of this package for the full license terms.
 */
9 package eu
.etaxonomy
.cdm
.io
.cdmLightWord
;
import java.io.File;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;

import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import eu.etaxonomy.cdm.api.service.geo.IDistributionService;
import eu.etaxonomy.cdm.api.service.name.TypeDesignationSetComparator;
import eu.etaxonomy.cdm.api.service.name.TypeDesignationSetContainer;
import eu.etaxonomy.cdm.api.service.name.TypeDesignationSetFormatter;
import eu.etaxonomy.cdm.common.CdmUtils;
import eu.etaxonomy.cdm.common.monitor.IProgressMonitor;
import eu.etaxonomy.cdm.compare.name.TypeComparator;
import eu.etaxonomy.cdm.compare.taxon.HomotypicGroupTaxonComparator;
import eu.etaxonomy.cdm.filter.TaxonNodeFilter;
import eu.etaxonomy.cdm.format.description.distribution.CondensedDistribution;
import eu.etaxonomy.cdm.format.reference.OriginalSourceFormatter;
import eu.etaxonomy.cdm.hibernate.HibernateProxyHelper;
import eu.etaxonomy.cdm.io.cdmLight.OrderHelper;
import eu.etaxonomy.cdm.io.common.CdmExportBase;
import eu.etaxonomy.cdm.io.common.ExportResult.ExportResultState;
import eu.etaxonomy.cdm.io.common.TaxonNodeOutStreamPartitioner;
import eu.etaxonomy.cdm.io.common.XmlExportState;
import eu.etaxonomy.cdm.io.common.mapping.out.IExportTransformer;
import eu.etaxonomy.cdm.model.agent.AgentBase;
import eu.etaxonomy.cdm.model.agent.Person;
import eu.etaxonomy.cdm.model.agent.Team;
import eu.etaxonomy.cdm.model.agent.TeamOrPersonBase;
import eu.etaxonomy.cdm.model.common.Annotation;
import eu.etaxonomy.cdm.model.common.AnnotationType;
import eu.etaxonomy.cdm.model.common.CdmBase;
import eu.etaxonomy.cdm.model.common.ICdmBase;
import eu.etaxonomy.cdm.model.common.IIdentifiableEntity;
import eu.etaxonomy.cdm.model.common.IdentifiableEntity;
import eu.etaxonomy.cdm.model.common.IdentifiableSource;
import eu.etaxonomy.cdm.model.common.Identifier;
import eu.etaxonomy.cdm.model.common.Language;
import eu.etaxonomy.cdm.model.common.LanguageString;
import eu.etaxonomy.cdm.model.description.CommonTaxonName;
import eu.etaxonomy.cdm.model.description.DescriptionBase;
import eu.etaxonomy.cdm.model.description.DescriptionElementBase;
import eu.etaxonomy.cdm.model.description.DescriptionElementSource;
import eu.etaxonomy.cdm.model.description.Distribution;
import eu.etaxonomy.cdm.model.description.Feature;
import eu.etaxonomy.cdm.model.description.IndividualsAssociation;
import eu.etaxonomy.cdm.model.description.TaxonDescription;
import eu.etaxonomy.cdm.model.description.TaxonInteraction;
import eu.etaxonomy.cdm.model.description.TaxonNameDescription;
import eu.etaxonomy.cdm.model.description.TextData;
import eu.etaxonomy.cdm.model.location.NamedArea;
import eu.etaxonomy.cdm.model.media.ExternalLink;
import eu.etaxonomy.cdm.model.media.Media;
import eu.etaxonomy.cdm.model.media.MediaRepresentation;
import eu.etaxonomy.cdm.model.media.MediaRepresentationPart;
import eu.etaxonomy.cdm.model.name.HomotypicalGroup;
import eu.etaxonomy.cdm.model.name.NameRelationship;
import eu.etaxonomy.cdm.model.name.NameRelationshipType;
import eu.etaxonomy.cdm.model.name.NameTypeDesignation;
import eu.etaxonomy.cdm.model.name.NomenclaturalSource;
import eu.etaxonomy.cdm.model.name.NomenclaturalStatus;
import eu.etaxonomy.cdm.model.name.Rank;
import eu.etaxonomy.cdm.model.name.SpecimenTypeDesignation;
import eu.etaxonomy.cdm.model.name.TaxonName;
import eu.etaxonomy.cdm.model.name.TextualTypeDesignation;
import eu.etaxonomy.cdm.model.name.TypeDesignationBase;
import eu.etaxonomy.cdm.model.occurrence.DerivedUnit;
import eu.etaxonomy.cdm.model.occurrence.FieldUnit;
import eu.etaxonomy.cdm.model.occurrence.GatheringEvent;
import eu.etaxonomy.cdm.model.occurrence.MediaSpecimen;
import eu.etaxonomy.cdm.model.occurrence.SpecimenOrObservationBase;
import eu.etaxonomy.cdm.model.reference.NamedSource;
import eu.etaxonomy.cdm.model.reference.OriginalSourceType;
import eu.etaxonomy.cdm.model.reference.Reference;
import eu.etaxonomy.cdm.model.reference.ReferenceType;
import eu.etaxonomy.cdm.model.taxon.Classification;
import eu.etaxonomy.cdm.model.taxon.Synonym;
import eu.etaxonomy.cdm.model.taxon.Taxon;
import eu.etaxonomy.cdm.model.taxon.TaxonBase;
import eu.etaxonomy.cdm.model.taxon.TaxonNode;
import eu.etaxonomy.cdm.model.taxon.TaxonRelationship;
import eu.etaxonomy.cdm.model.term.IdentifierType;
import eu.etaxonomy.cdm.model.term.TermTree;
import eu.etaxonomy.cdm.persistence.dto.TaxonNodeDto;
import eu.etaxonomy.cdm.persistence.dto.TaxonNodeDtoByRankAndNameComparator;
import eu.etaxonomy.cdm.strategy.cache.HTMLTagRules;
import eu.etaxonomy.cdm.strategy.cache.TagEnum;
import eu.etaxonomy.cdm.strategy.cache.TaggedText;
import eu.etaxonomy.cdm.strategy.exceptions.UnknownCdmTypeException;
116 public class WordClassificationExport
117 extends CdmExportBase
<WordClassificationExportConfigurator
, WordClassificationExportState
, IExportTransformer
, File
>{
119 private static final long serialVersionUID
= 5373475508269756045L;
122 private IDistributionService geoService
;
/**
 * Creates a new export instance; the io name used in logging/reporting is
 * derived from the concrete class name.
 * NOTE(review): the original closing brace (and possibly further
 * initialization statements) were lost in extraction — verify against VCS.
 */
public WordClassificationExport() {
    this.ioName = this.getClass().getSimpleName();
}
129 public long countSteps(WordClassificationExportState state
) {
130 TaxonNodeFilter filter
= state
.getConfig().getTaxonNodeFilter();
131 return getTaxonNodeService().count(filter
);
135 protected void doInvoke(WordClassificationExportState state
) {
138 IProgressMonitor monitor
= state
.getConfig().getProgressMonitor();
139 WordClassificationExportConfigurator config
= state
.getConfig();
140 if (config
.getTaxonNodeFilter().hasClassificationFilter()) {
141 Classification classification
= getClassificationService()
142 .load(config
.getTaxonNodeFilter().getClassificationFilter().get(0).getUuid());
143 state
.setRootId(classification
.getRootNode().getUuid());
145 } else if (config
.getTaxonNodeFilter().hasSubtreeFilter()) {
146 state
.setRootId(config
.getTaxonNodeFilter().getSubtreeFilter().get(0).getUuid());
148 @SuppressWarnings("unchecked")
149 TaxonNodeOutStreamPartitioner
<XmlExportState
> partitioner
= TaxonNodeOutStreamPartitioner
.NewInstance(this,
150 state
, state
.getConfig().getTaxonNodeFilter(), 100, monitor
, null);
152 // handleMetaData(state);
153 monitor
.subTask("Start partitioning");
155 TaxonNode node
= partitioner
.next();
156 while (node
!= null) {
157 handleTaxonNode(state
, node
);
158 node
= partitioner
.next();
160 // get rootNode and create helperObjects
161 if (state
.getRootId() != null) {
162 List
<TaxonNodeDto
> childrenOfRoot
= state
.getNodeChildrenMap().get(state
.getRootId());
164 Comparator
<TaxonNodeDto
> comp
= state
.getConfig().getComparator();
166 comp
= new TaxonNodeDtoByRankAndNameComparator();
168 if (childrenOfRoot
!= null) {
169 Collections
.sort(childrenOfRoot
, comp
);
170 OrderHelper helper
= new OrderHelper(state
.getRootId());
171 helper
.setOrderIndex(state
.getActualOrderIndexAndUpdate());
172 state
.getOrderHelperMap().put(state
.getRootId(), helper
);
174 for (TaxonNodeDto child
: childrenOfRoot
) {
175 OrderHelper childHelper
= new OrderHelper(child
.getTaxonUuid());
176 helper
.addChild(childHelper
);
177 childHelper
.setOrderIndex(state
.getActualOrderIndexAndUpdate());
178 childHelper
.addChildren(
179 createOrderHelper(state
.getNodeChildrenMap().get(child
.getUuid()), state
));
183 state
.getNodeChildrenMap().clear();
184 for (OrderHelper order
: state
.getOrderHelperMap().values()) {
185 setOrderIndex(state
, order
);
189 state
.getProcessor().createFinalResult(state
);
190 } catch (Exception e
) {
191 state
.getResult().addException(e
,
192 "An unexpected error occurred in main method doInvoke() " + e
.getMessage());
197 private void setOrderIndex(WordClassificationExportState state
, OrderHelper order
) {
199 if (order
.getTaxonUuid() != null
200 && state
.getProcessor().hasRecord(WordClassificationExportTable
.TAXON
, order
.getTaxonUuid().toString())) {
201 String
[] csvLine
= state
.getProcessor().getRecord(WordClassificationExportTable
.TAXON
,
202 order
.getTaxonUuid().toString());
203 csvLine
[WordClassificationExportTable
.TAXON
.getIndex(WordClassificationExportTable
.SORT_INDEX
)] = String
204 .valueOf(order
.getOrderIndex());
207 if (order
.getChildren() == null) {
210 for (OrderHelper helper
: order
.getChildren()) {
211 setOrderIndex(state
, helper
);
215 private List
<OrderHelper
> createOrderHelper(List
<TaxonNodeDto
> nodes
, WordClassificationExportState state
) {
216 List
<TaxonNodeDto
> children
= nodes
;
217 // alreadySortedNodes.add(parentUuid);
218 if (children
== null) {
221 Comparator
<TaxonNodeDto
> comp
= state
.getConfig().getComparator();
223 comp
= new TaxonNodeDtoByRankAndNameComparator();
225 Collections
.sort(children
, comp
);
226 // TODO: nochmal checken!!!
227 OrderHelper helperChild
;
228 List
<OrderHelper
> childrenHelper
= new ArrayList
<>();
229 for (TaxonNodeDto child
: children
) {
230 helperChild
= new OrderHelper(child
.getTaxonUuid());
231 helperChild
.setOrderIndex(state
.getActualOrderIndexAndUpdate());
233 if (state
.getNodeChildrenMap().get(child
.getUuid()) != null) {
234 children
= state
.getNodeChildrenMap().get(child
.getUuid());
235 helperChild
.addChildren(createOrderHelper(children
, state
));
237 childrenHelper
.add(helperChild
);
239 return childrenHelper
;
242 private void handleTaxonNode(WordClassificationExportState state
, TaxonNode taxonNode
) {
244 if (taxonNode
== null) {
245 String message
= "TaxonNode for given taxon node UUID not found. ";
247 state
.getResult().addWarning(message
);
250 TaxonNode root
= taxonNode
;
251 List
<TaxonNodeDto
> childNodes
;
252 if (root
.hasChildNodes()) {
253 childNodes
= new ArrayList
<>();
254 for (TaxonNode child
: root
.getChildNodes()) {
256 childNodes
.add(new TaxonNodeDto(child
));
259 state
.getNodeChildrenMap().put(root
.getUuid(), childNodes
);
261 // add root to node map
264 TaxonNodeDto rootDto
= new TaxonNodeDto(root
);
265 UUID parentUuid
= root
.getParent() != null ? root
.getParent().getUuid()
266 : state
.getClassificationUUID(root
);
267 List
<TaxonNodeDto
> children
= state
.getNodeChildrenMap().get(parentUuid
);
268 if (children
!= null && !children
.contains(rootDto
)) {
269 state
.getNodeChildrenMap().get(parentUuid
).add(rootDto
);
270 } else if (state
.getNodeChildrenMap().get(parentUuid
) == null) {
271 List
<TaxonNodeDto
> rootList
= new ArrayList
<>();
272 rootList
.add(rootDto
);
273 state
.getNodeChildrenMap().put(parentUuid
, rootList
);
276 if (root
.hasTaxon()) {
277 handleTaxon(state
, root
);
280 } catch (Exception e
) {
281 state
.getResult().addException(e
, "An unexpected error occurred when handling taxonNode "
282 + taxonNode
.getUuid() + ": " + e
.getMessage() + e
.getStackTrace());
287 private void handleTaxon(WordClassificationExportState state
, TaxonNode taxonNode
) {
290 if (taxonNode
== null) {
291 state
.getResult().addError("The taxonNode was null.", "handleTaxon");
292 state
.getResult().setState(ExportResultState
.INCOMPLETE_WITH_ERROR
);
295 if (taxonNode
.getTaxon() == null) {
296 state
.getResult().addError("There was a taxon node without a taxon: " + taxonNode
.getUuid(),
298 state
.getResult().setState(ExportResultState
.INCOMPLETE_WITH_ERROR
);
300 Taxon taxon
= CdmBase
.deproxy(taxonNode
.getTaxon());
303 TaxonName name
= taxon
.getName();
304 handleName(state
, name
, taxon
, true);
305 HomotypicalGroup homotypicGroup
= taxon
.getHomotypicGroup();
307 int homotypicGroupIndex
= 0;
308 handleHomotypicalGroup(state
, homotypicGroup
, taxon
, homotypicGroupIndex
);
309 homotypicGroupIndex
++;
310 for (Synonym syn
: taxon
.getSynonymsInGroup(homotypicGroup
)) {
311 handleSynonym(state
, syn
, index
);
314 List
<HomotypicalGroup
> heterotypicHomotypicGroups
= taxon
.getHeterotypicSynonymyGroups();
315 for (HomotypicalGroup group
: heterotypicHomotypicGroups
){
316 handleHomotypicalGroup(state
, group
, taxon
, homotypicGroupIndex
);
317 for (Synonym syn
: taxon
.getSynonymsInGroup(group
)) {
318 handleSynonym(state
, syn
, index
);
321 homotypicGroupIndex
++;
325 for (Taxon tax
: taxon
.getAllProParteSynonyms()) {
326 handleProPartePartialMisapplied(state
, tax
, taxon
, true, false, index
);
331 for (Taxon tax
: taxon
.getAllMisappliedNames()) {
332 handleProPartePartialMisapplied(state
, tax
, taxon
, false, true, index
);
336 // state.getProcessor().put(table, taxon, csvLine);
337 handleDescriptions(state
, taxon
);
338 } catch (Exception e
) {
339 state
.getResult().addException(e
,
340 "An unexpected problem occurred when trying to export taxon with id " + taxon
.getId() + " " + taxon
.getTitleCache());
341 state
.getResult().setState(ExportResultState
.INCOMPLETE_WITH_ERROR
);
345 } catch (Exception e
) {
346 state
.getResult().addException(e
, "An unexpected error occurred when handling the taxon node of "
347 + cdmBaseStr(taxonNode
.getTaxon()) + ", titleCache:"+ taxonNode
.getTaxon().getTitleCache()+": " + e
.getMessage());
351 private void handleDescriptions(WordClassificationExportState state
, CdmBase cdmBase
) {
352 String titleCache
= null;
355 if (cdmBase
instanceof Taxon
) {
356 Taxon taxon
= HibernateProxyHelper
.deproxy(cdmBase
, Taxon
.class);
357 titleCache
= taxon
.getTitleCache();
358 Set
<TaxonDescription
> descriptions
= taxon
.getDescriptions();
359 List
<DescriptionElementBase
> simpleFacts
= new ArrayList
<>();
360 List
<DescriptionElementBase
> specimenFacts
= new ArrayList
<>();
361 List
<DescriptionElementBase
> distributionFacts
= new ArrayList
<>();
362 List
<DescriptionElementBase
> taxonInteractionsFacts
= new ArrayList
<>();
363 List
<DescriptionElementBase
> commonNameFacts
= new ArrayList
<>();
364 List
<DescriptionElementBase
> usageFacts
= new ArrayList
<>();
365 for (TaxonDescription description
: descriptions
) {
366 if (description
.getElements() != null) {
367 for (DescriptionElementBase element
: description
.getElements()) {
368 element
= CdmBase
.deproxy(element
);
369 handleAnnotations(element
);
370 if (element
.getFeature().equals(Feature
.COMMON_NAME())) {
371 commonNameFacts
.add(element
);
372 } else if (element
.getFeature().equals(Feature
.DISTRIBUTION())) {
373 distributionFacts
.add(element
);
374 } else if (element
instanceof IndividualsAssociation
375 || isSpecimenFeature(element
.getFeature())) {
376 specimenFacts
.add(element
);
377 } else if (element
.getFeature().isSupportsTaxonInteraction()) {
378 taxonInteractionsFacts
.add(element
);
380 simpleFacts
.add(element
);
385 if (!commonNameFacts
.isEmpty()) {
386 handleCommonNameFacts(state
, taxon
, commonNameFacts
);
388 if (!distributionFacts
.isEmpty()) {
389 handleDistributionFacts(state
, taxon
, distributionFacts
);
391 if (!specimenFacts
.isEmpty()) {
392 handleSpecimenFacts(state
, taxon
, specimenFacts
);
394 if (!simpleFacts
.isEmpty()) {
395 handleSimpleFacts(state
, taxon
, simpleFacts
);
397 if (!taxonInteractionsFacts
.isEmpty()) {
398 handleTaxonInteractionsFacts(state
, taxon
, taxonInteractionsFacts
);
400 } else if (cdmBase
instanceof TaxonName
) {
401 TaxonName name
= CdmBase
.deproxy(cdmBase
, TaxonName
.class);
402 titleCache
= name
.getTitleCache();
403 Set
<TaxonNameDescription
> descriptions
= name
.getDescriptions();
404 List
<DescriptionElementBase
> simpleFacts
= new ArrayList
<>();
405 for (TaxonNameDescription description
: descriptions
) {
406 if (description
.getElements() != null) {
407 for (DescriptionElementBase element
: description
.getElements()) {
408 simpleFacts
.add(element
);
412 if (!simpleFacts
.isEmpty()) {
413 handleSimpleFacts(state
, name
, simpleFacts
);
416 } catch (Exception e
) {
417 state
.getResult().addException(e
, "An unexpected error occurred when handling description of "
418 + cdmBaseStr(cdmBase
) + (titleCache
!= null?
(" " +titleCache
) : "")+": " + e
.getMessage());
422 private void handleAnnotations(DescriptionElementBase element
) {
423 // TODO Auto-generated method stub
426 private void handleMetaData(WordClassificationExportState state
) {
427 WordClassificationExportTable table
= WordClassificationExportTable
.METADATA
;
428 String
[] csvLine
= new String
[table
.getSize()];
429 // csvLine[table.getIndex(CdmLightExportTable.INSTANCE_ID)] = state.getConfig().getInctanceId();
430 // csvLine[table.getIndex(CdmLightExportTable.INSTANCE_NAME)] = state.getConfig().getInstanceName();
431 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_BASE_URL
)] = state
.getConfig().getBase_url();
432 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_CONTRIBUTOR
)] = state
.getConfig().getContributor();
433 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_CREATOR
)] = state
.getConfig().getCreator();
434 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_DESCRIPTION
)] = state
.getConfig().getDescription();
435 // csvLine[table.getIndex(WordClassificationExportTable.DATASET_DOWNLOAD_LINK)] = state.getConfig().getDataset_download_link();
436 // csvLine[table.getIndex(WordClassificationExportTable.DATASET_KEYWORDS)] = state.getConfig().getKeywords();
437 // csvLine[table.getIndex(WordClassificationExportTable.DATASET_LANDINGPAGE)] = state.getConfig().getDataSet_landing_page();
439 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_LANGUAGE
)] = state
.getConfig().getLanguage() != null? state
.getConfig().getLanguage().getLabel(): null;
440 // csvLine[table.getIndex(WordClassificationExportTable.DATASET_LICENCE)] = state.getConfig().getLicence();
441 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_LOCATION
)] = state
.getConfig().getLocation();
442 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_RECOMMENDED_CITATTION
)] = state
.getConfig().getRecommended_citation();
443 csvLine
[table
.getIndex(WordClassificationExportTable
.DATASET_TITLE
)] = state
.getConfig().getTitle();
444 state
.getProcessor().put(table
, "", csvLine
);
447 private boolean isSpecimenFeature(Feature feature
) {
448 // TODO allow user defined specimen features
449 if (feature
== null) {
451 } else if (feature
.isSupportsIndividualAssociation()) {
454 return feature
.equals(Feature
.SPECIMEN()) || feature
.equals(Feature
.INDIVIDUALS_ASSOCIATION())
455 || feature
.equals(Feature
.MATERIALS_EXAMINED()) || feature
.equals(Feature
.OBSERVATION())
456 || feature
.equals(Feature
.OCCURRENCE());
460 private void handleSimpleFacts(WordClassificationExportState state
, CdmBase cdmBase
,
461 List
<DescriptionElementBase
> simpleFacts
) {
462 String titleCache
= null;
464 WordClassificationExportTable table
;
465 if (cdmBase
instanceof TaxonName
) {
466 titleCache
= ((TaxonName
)cdmBase
).getTitleCache();
467 table
= WordClassificationExportTable
.NAME_FACT
;
469 if (cdmBase
instanceof Taxon
){
470 titleCache
= ((Taxon
)cdmBase
).getTitleCache();
472 table
= WordClassificationExportTable
.SIMPLE_FACT
;
474 WordClassificationExportTable tableMedia
= WordClassificationExportTable
.MEDIA
;
475 for (DescriptionElementBase element
: simpleFacts
) {
476 if (element
.getModifyingText().isEmpty() && !element
.getMedia().isEmpty()) {
477 handleSimpleMediaFact(state
, cdmBase
, tableMedia
, element
);
479 handleSingleSimpleFact(state
, cdmBase
, table
, element
);
482 } catch (Exception e
) {
483 state
.getResult().addException(e
, "An unexpected error occurred when handling simple facts for "
484 + cdmBaseStr(cdmBase
) + (titleCache
!= null?
(" " +titleCache
) : "")+ ": " + e
.getMessage());
488 private void handleTaxonInteractionsFacts(WordClassificationExportState state
, CdmBase cdmBase
,
489 List
<DescriptionElementBase
> taxonInteractionsFacts
) {
490 WordClassificationExportTable table
= WordClassificationExportTable
.TAXON_INTERACTION_FACT
;
491 String titleCache
= null;
492 if (cdmBase
instanceof TaxonBase
){
493 titleCache
= ((TaxonBase
)cdmBase
).getTitleCache();
495 for (DescriptionElementBase element
: taxonInteractionsFacts
) {
499 String
[] csvLine
= new String
[table
.getSize()];
501 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
502 handleSource(state
, element
, table
);
503 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, cdmBase
);
504 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON2_FK
)] = getId(state
,
505 ((TaxonInteraction
) element
).getTaxon2());
506 csvLine
[table
.getIndex(WordClassificationExportTable
.DESCRIPTION
)] = createMultilanguageString(
507 ((TaxonInteraction
) element
).getDescription());
508 state
.getProcessor().put(table
, element
, csvLine
);
510 } catch (Exception e
) {
511 state
.getResult().addException(e
, "An unexpected error occurred when handling taxon interaction"
512 + cdmBaseStr(element
) + (titleCache
!= null?
(" " +titleCache
) : "")+ ": " + e
.getMessage());
517 private void handleSimpleMediaFact(WordClassificationExportState state
, CdmBase cdmBase
, WordClassificationExportTable table
,
518 DescriptionElementBase element
) {
521 handleSource(state
, element
, WordClassificationExportTable
.MEDIA
);
523 if (element
instanceof TextData
) {
524 TextData textData
= (TextData
) element
;
525 csvLine
= new String
[table
.getSize()];
526 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
527 if (cdmBase
instanceof Taxon
) {
528 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, cdmBase
);
529 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_FK
)] = "";
530 } else if (cdmBase
instanceof TaxonName
) {
531 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = "";
532 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_FK
)] = getId(state
, cdmBase
);
535 String mediaUris
= "";
536 for (Media media
: textData
.getMedia()) {
537 String mediaString
= extractMediaUris(media
.getRepresentations().iterator());
538 if (!StringUtils
.isBlank(mediaString
)) {
539 mediaUris
+= mediaString
+ ";";
541 state
.getResult().addWarning("Empty Media object for " + cdmBase
.getUserFriendlyTypeName() + " "
542 + cdmBase
.getUuid() + " (media: " + media
.getUuid() + ")");
545 csvLine
[table
.getIndex(WordClassificationExportTable
.MEDIA_URI
)] = mediaUris
;
548 } catch (Exception e
) {
549 state
.getResult().addException(e
, "An unexpected error occurred when handling single simple fact "
550 + cdmBaseStr(element
) + ": " + e
.getMessage());
555 private void handleSingleSimpleFact(WordClassificationExportState state
, CdmBase cdmBase
, WordClassificationExportTable table
,
556 DescriptionElementBase element
) {
559 handleSource(state
, element
, WordClassificationExportTable
.SIMPLE_FACT
);
561 if (element
instanceof TextData
) {
562 TextData textData
= (TextData
) element
;
563 csvLine
= new String
[table
.getSize()];
564 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
565 if (cdmBase
instanceof Taxon
) {
566 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, cdmBase
);
567 } else if (cdmBase
instanceof TaxonName
) {
568 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_FK
)] = getId(state
, cdmBase
);
570 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_CATEGORY
)] = textData
.getFeature().getLabel();
572 String mediaUris
= "";
573 for (Media media
: textData
.getMedia()) {
574 String mediaString
= extractMediaUris(media
.getRepresentations().iterator());
575 if (!StringUtils
.isBlank(mediaString
)) {
576 mediaUris
+= mediaString
+ ";";
578 state
.getResult().addWarning("Empty Media object for uuid: " + cdmBase
.getUuid()
579 + " uuid of media: " + media
.getUuid());
582 csvLine
[table
.getIndex(WordClassificationExportTable
.MEDIA_URI
)] = mediaUris
;
583 if (textData
.getFeature().equals(Feature
.CITATION())) {
584 state
.getProcessor().put(table
, textData
, csvLine
);
585 } else if (!textData
.getMultilanguageText().isEmpty()) {
586 for (Language language
: textData
.getMultilanguageText().keySet()) {
587 String
[] csvLineLanguage
= csvLine
.clone();
588 LanguageString langString
= textData
.getLanguageText(language
);
589 String text
= langString
.getText();
590 if (state
.getConfig().isFilterIntextReferences()) {
591 text
= filterIntextReferences(langString
.getText());
593 csvLineLanguage
[table
.getIndex(WordClassificationExportTable
.FACT_TEXT
)] = text
;
594 csvLineLanguage
[table
.getIndex(WordClassificationExportTable
.LANGUAGE
)] = language
.getLabel();
595 state
.getProcessor().put(table
, textData
, csvLineLanguage
);
598 state
.getProcessor().put(table
, textData
, csvLine
);
601 } catch (Exception e
) {
602 state
.getResult().addException(e
, "An unexpected error occurred when handling single simple fact "
603 + cdmBaseStr(element
) + ": " + e
.getMessage());
607 private String
filterIntextReferences(String text
) {
609 * (<cdm:reference cdmId='fbd19251-efee-4ded-b780-915000f66d41'
610 * intextId='1352d42c-e201-4155-a02a-55360d3b563e'>Ridley in Fl. Malay
611 * Pen. 3 (1924) 22</cdm:reference>)
613 String newText
= text
.replaceAll("<cdm:reference cdmId='[a-z0-9\\-]*' intextId='[a-z0-9\\-]*'>", "");
614 newText
= newText
.replaceAll("</cdm:reference>", "");
616 newText
= newText
.replaceAll("<cdm:key cdmId='[a-z0-9\\-]*' intextId='[a-z0-9\\-]*'>", "");
617 newText
= newText
.replaceAll("</cdm:key>", "");
621 private void handleSpecimenFacts(WordClassificationExportState state
, Taxon taxon
,
622 List
<DescriptionElementBase
> specimenFacts
) {
623 WordClassificationExportTable table
= WordClassificationExportTable
.SPECIMEN_FACT
;
625 for (DescriptionElementBase element
: specimenFacts
) {
627 String
[] csvLine
= new String
[table
.getSize()];
628 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
629 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, taxon
);
630 handleSource(state
, element
, table
);
631 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_NOTES
)] = createAnnotationsString(
632 element
.getAnnotations());
634 if (element
instanceof IndividualsAssociation
) {
636 IndividualsAssociation indAssociation
= (IndividualsAssociation
) element
;
637 if (indAssociation
.getAssociatedSpecimenOrObservation() == null) {
639 .addWarning("There is an individual association with no specimen associated (Taxon "
640 + taxon
.getTitleCache() + "(" + taxon
.getUuid() + "). Could not be exported.");
643 if (!state
.getSpecimenStore()
644 .contains((indAssociation
.getAssociatedSpecimenOrObservation().getUuid()))) {
645 SpecimenOrObservationBase
<?
> specimenBase
= HibernateProxyHelper
.deproxy(
646 indAssociation
.getAssociatedSpecimenOrObservation(),
647 SpecimenOrObservationBase
.class);
649 handleSpecimen(state
, specimenBase
);
650 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_FK
)] = getId(state
,
651 indAssociation
.getAssociatedSpecimenOrObservation());
654 } else if (element
instanceof TextData
) {
655 TextData textData
= HibernateProxyHelper
.deproxy(element
, TextData
.class);
656 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_DESCRIPTION
)] = createMultilanguageString(
657 textData
.getMultilanguageText());
659 state
.getProcessor().put(table
, element
, csvLine
);
660 } catch (Exception e
) {
661 state
.getResult().addException(e
, "An unexpected error occurred when handling single specimen fact "
662 + cdmBaseStr(element
) + ": " + e
.getMessage());
667 private String
createMultilanguageString(Map
<Language
, LanguageString
> multilanguageText
) {
669 int index
= multilanguageText
.size();
670 for (LanguageString langString
: multilanguageText
.values()) {
671 text
+= langString
.getText();
680 private String
createAnnotationsString(Set
<Annotation
> annotations
) {
681 StringBuffer strBuff
= new StringBuffer();
683 for (Annotation ann
: annotations
) {
684 if (ann
.getAnnotationType() == null || !ann
.getAnnotationType().equals(AnnotationType
.TECHNICAL())) {
685 strBuff
.append(ann
.getText());
686 strBuff
.append("; ");
690 if (strBuff
.length() > 2) {
691 return strBuff
.substring(0, strBuff
.length() - 2);
697 private void handleSource(WordClassificationExportState state
, DescriptionElementBase element
,
698 WordClassificationExportTable factsTable
) {
699 WordClassificationExportTable table
= WordClassificationExportTable
.FACT_SOURCES
;
701 Set
<DescriptionElementSource
> sources
= element
.getSources();
703 for (DescriptionElementSource source
: sources
) {
704 if (!(source
.getType().equals(OriginalSourceType
.Import
)
705 && state
.getConfig().isExcludeImportSources())) {
706 String
[] csvLine
= new String
[table
.getSize()];
707 Reference ref
= source
.getCitation();
708 if ((ref
== null) && (source
.getNameUsedInSource() == null)) {
712 if (!state
.getReferenceStore().contains(ref
.getUuid())) {
713 handleReference(state
, ref
);
716 csvLine
[table
.getIndex(WordClassificationExportTable
.REFERENCE_FK
)] = getId(state
, ref
);
718 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_FK
)] = getId(state
, element
);
720 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_IN_SOURCE_FK
)] = getId(state
,
721 source
.getNameUsedInSource());
722 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_TYPE
)] = factsTable
.getTableName();
723 if (StringUtils
.isBlank(csvLine
[table
.getIndex(WordClassificationExportTable
.REFERENCE_FK
)])
724 && StringUtils
.isBlank(csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_IN_SOURCE_FK
)])) {
727 state
.getProcessor().put(table
, source
, csvLine
);
731 } catch (Exception e
) {
732 state
.getResult().addException(e
, "An unexpected error occurred when handling single source "
733 + cdmBaseStr(element
) + ": " + e
.getMessage());
738 private void handleDistributionFacts(WordClassificationExportState state
, Taxon taxon
,
739 List
<DescriptionElementBase
> distributionFacts
) {
741 WordClassificationExportTable table
= WordClassificationExportTable
.GEOGRAPHIC_AREA_FACT
;
742 Set
<Distribution
> distributions
= new HashSet
<>();
743 for (DescriptionElementBase element
: distributionFacts
) {
745 if (element
instanceof Distribution
) {
746 String
[] csvLine
= new String
[table
.getSize()];
747 Distribution distribution
= (Distribution
) element
;
748 distributions
.add(distribution
);
749 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
750 handleSource(state
, element
, table
);
751 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, taxon
);
752 if (distribution
.getArea() != null) {
753 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_LABEL
)] = distribution
.getArea().getLabel();
755 if (distribution
.getStatus() != null) {
756 csvLine
[table
.getIndex(WordClassificationExportTable
.STATUS_LABEL
)] = distribution
.getStatus().getLabel();
758 state
.getProcessor().put(table
, distribution
, csvLine
);
761 .addError("The distribution description for the taxon " + taxon
.getUuid()
762 + " is not of type distribution. Could not be exported. UUID of the description element: "
763 + element
.getUuid());
765 } catch (Exception e
) {
766 state
.getResult().addException(e
, "An unexpected error occurred when handling single distribution "
767 + cdmBaseStr(element
) + ": " + e
.getMessage());
770 if(state
.getConfig().isCreateCondensedDistributionString()){
771 List
<Language
> langs
= new ArrayList
<>();
772 langs
.add(Language
.ENGLISH());
774 TermTree
<NamedArea
> areaTree
= null; //TODO
775 CondensedDistribution conDis
= geoService
.getCondensedDistribution(
776 //TODO add CondensedDistributionConfiguration to export configuration
777 distributions
, areaTree
, true, null, state
.getConfig().getCondensedDistributionConfiguration(), langs
);
778 WordClassificationExportTable tableCondensed
=
779 WordClassificationExportTable
.SIMPLE_FACT
;
780 String
[] csvLine
= new String
[tableCondensed
.getSize()];
781 //the computed fact has no uuid, TODO: remember the uuid for later reference assignment
782 UUID randomUuid
= UUID
.randomUUID();
783 csvLine
[tableCondensed
.getIndex(WordClassificationExportTable
.FACT_ID
)] =
784 randomUuid
.toString();
785 csvLine
[tableCondensed
.getIndex(WordClassificationExportTable
.TAXON_FK
)] =
787 csvLine
[tableCondensed
.getIndex(WordClassificationExportTable
.FACT_TEXT
)] =
789 csvLine
[tableCondensed
.getIndex(WordClassificationExportTable
.LANGUAGE
)] =Language
.ENGLISH().toString();
791 csvLine
[tableCondensed
.getIndex(WordClassificationExportTable
.FACT_CATEGORY
)] =
792 "CondensedDistribution";
794 state
.getProcessor().put(tableCondensed
, taxon
, csvLine
);
798 private void handleCommonNameFacts(WordClassificationExportState state
, Taxon taxon
,
799 List
<DescriptionElementBase
> commonNameFacts
) {
800 WordClassificationExportTable table
= WordClassificationExportTable
.COMMON_NAME_FACT
;
802 for (DescriptionElementBase element
: commonNameFacts
) {
804 if (element
instanceof CommonTaxonName
) {
805 String
[] csvLine
= new String
[table
.getSize()];
806 CommonTaxonName commonName
= (CommonTaxonName
) element
;
807 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
808 handleSource(state
, element
, table
);
809 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, taxon
);
810 if (commonName
.getName() != null) {
811 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_TEXT
)] = commonName
.getName();
813 if (commonName
.getLanguage() != null) {
814 csvLine
[table
.getIndex(WordClassificationExportTable
.LANGUAGE
)] = commonName
.getLanguage().getLabel();
816 if (commonName
.getArea() != null) {
817 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_LABEL
)] = commonName
.getArea().getLabel();
819 state
.getProcessor().put(table
, commonName
, csvLine
);
820 } else if (element
instanceof TextData
){
821 String
[] csvLine
= new String
[table
.getSize()];
822 TextData commonName
= (TextData
) element
;
823 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_ID
)] = getId(state
, element
);
824 handleSource(state
, element
, table
);
825 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, taxon
);
826 if (commonName
.getMultilanguageText() != null) {
827 csvLine
[table
.getIndex(WordClassificationExportTable
.FACT_TEXT
)] = createMultilanguageString(commonName
.getMultilanguageText());
829 state
.getProcessor().put(table
, commonName
, csvLine
);
832 .addError("The common name description for the taxon " + taxon
.getUuid()
833 + " is not of type common name. Could not be exported. UUID of the description element: "
834 + element
.getUuid());
836 } catch (Exception e
) {
837 state
.getResult().addException(e
, "An unexpected error occurred when handling single common name "
838 + cdmBaseStr(element
) + " - "+taxon
.getTitleCache()+ ": " + e
.getMessage());
843 private String
getTitleCache(IIdentifiableEntity identEntity
) {
844 if (identEntity
== null) {
848 return identEntity
.getTitleCache();
851 private String
getId(WordClassificationExportState state
, ICdmBase cdmBase
) {
852 if (cdmBase
== null) {
855 // TODO make configurable
856 return cdmBase
.getUuid().toString();
859 private void handleSynonym(WordClassificationExportState state
, Synonym synonym
, int index
) {
861 if (isUnpublished(state
.getConfig(), synonym
)) {
864 TaxonName name
= synonym
.getName();
865 handleName(state
, name
, synonym
.getAcceptedTaxon());
867 WordClassificationExportTable table
= WordClassificationExportTable
.SYNONYM
;
868 String
[] csvLine
= new String
[table
.getSize()];
870 csvLine
[table
.getIndex(WordClassificationExportTable
.SYNONYM_ID
)] = getId(state
, synonym
);
871 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, synonym
.getAcceptedTaxon());
872 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_FK
)] = getId(state
, name
);
873 if (synonym
.getSec() != null && !state
.getReferenceStore().contains(synonym
.getSec().getUuid())) {
874 handleReference(state
, synonym
.getSec());
876 csvLine
[table
.getIndex(WordClassificationExportTable
.APPENDED_PHRASE
)] = synonym
.getAppendedPhrase();
877 csvLine
[table
.getIndex(WordClassificationExportTable
.SYN_SEC_REFERENCE_FK
)] = getId(state
, synonym
.getSec());
878 csvLine
[table
.getIndex(WordClassificationExportTable
.SYN_SEC_REFERENCE
)] = getTitleCache(synonym
.getSec());
879 csvLine
[table
.getIndex(WordClassificationExportTable
.PUBLISHED
)] = synonym
.isPublish() ?
"1" : "0";
880 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_PRO_PARTE
)] = "0";
881 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_PARTIAL
)] = "0";
882 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_MISAPPLIED
)] = "0";
883 csvLine
[table
.getIndex(WordClassificationExportTable
.SORT_INDEX
)] = String
.valueOf(index
);
884 state
.getProcessor().put(table
, synonym
, csvLine
);
885 } catch (Exception e
) {
886 state
.getResult().addException(e
, "An unexpected error occurred when handling synonym "
887 + cdmBaseStr(synonym
) + ": " + e
.getMessage());
892 * Handles misapplied names (including pro parte and partial as well as pro
893 * parte and partial synonyms
895 private void handleProPartePartialMisapplied(WordClassificationExportState state
, Taxon taxon
, Taxon accepted
, boolean isProParte
, boolean isMisapplied
, int index
) {
897 Taxon ppSyonym
= taxon
;
898 if (isUnpublished(state
.getConfig(), ppSyonym
)) {
901 TaxonName name
= ppSyonym
.getName();
902 handleName(state
, name
, accepted
);
904 WordClassificationExportTable table
= WordClassificationExportTable
.SYNONYM
;
905 String
[] csvLine
= new String
[table
.getSize()];
907 csvLine
[table
.getIndex(WordClassificationExportTable
.SYNONYM_ID
)] = getId(state
, ppSyonym
);
908 csvLine
[table
.getIndex(WordClassificationExportTable
.TAXON_FK
)] = getId(state
, accepted
);
909 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_FK
)] = getId(state
, name
);
911 Reference secRef
= ppSyonym
.getSec();
913 if (secRef
!= null && !state
.getReferenceStore().contains(secRef
.getUuid())) {
914 handleReference(state
, secRef
);
916 csvLine
[table
.getIndex(WordClassificationExportTable
.SEC_REFERENCE_FK
)] = getId(state
, secRef
);
917 csvLine
[table
.getIndex(WordClassificationExportTable
.SEC_REFERENCE
)] = getTitleCache(secRef
);
918 Set
<TaxonRelationship
> rels
= accepted
.getTaxonRelations(ppSyonym
);
919 TaxonRelationship rel
= null;
920 boolean isPartial
= false;
921 if (rels
.size() == 1){
922 rel
= rels
.iterator().next();
924 }else if (rels
.size() > 1){
925 Iterator
<TaxonRelationship
> iterator
= rels
.iterator();
926 while (iterator
.hasNext()){
927 rel
= iterator
.next();
928 if (isProParte
&& rel
.getType().isAnySynonym()){
930 } else if (isMisapplied
&& rel
.getType().isAnyMisappliedName()){
938 Reference synSecRef
= rel
.getCitation();
939 if (synSecRef
!= null && !state
.getReferenceStore().contains(synSecRef
.getUuid())) {
940 handleReference(state
, synSecRef
);
942 csvLine
[table
.getIndex(WordClassificationExportTable
.SYN_SEC_REFERENCE_FK
)] = getId(state
, synSecRef
);
943 csvLine
[table
.getIndex(WordClassificationExportTable
.SYN_SEC_REFERENCE
)] = getTitleCache(synSecRef
);
944 isProParte
= rel
.getType().isProParte();
945 isPartial
= rel
.getType().isPartial();
948 state
.getResult().addWarning("An unexpected error occurred when handling "
949 + "pro parte/partial synonym or misapplied name " + cdmBaseStr(taxon
) );
954 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_PRO_PARTE
)] = isProParte ?
"1" : "0";
955 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_PARTIAL
)] = isPartial ?
"1" : "0";
956 csvLine
[table
.getIndex(WordClassificationExportTable
.IS_MISAPPLIED
)] = isMisapplied ?
"1" : "0";
957 csvLine
[table
.getIndex(WordClassificationExportTable
.SORT_INDEX
)] = String
.valueOf(index
);
958 state
.getProcessor().put(table
, ppSyonym
, csvLine
);
959 } catch (Exception e
) {
960 state
.getResult().addException(e
, "An unexpected error occurred when handling "
961 + "pro parte/partial synonym or misapplied name " + cdmBaseStr(taxon
) + ": " + e
.getMessage());
966 private void handleName(WordClassificationExportState state
, TaxonName name
, Taxon acceptedTaxon
){
967 handleName(state
, name
, acceptedTaxon
, false);
970 private void handleName(WordClassificationExportState state
, TaxonName name
, Taxon acceptedTaxon
, boolean acceptedName
) {
971 if (name
== null || state
.getNameStore().containsKey(name
.getId())) {
975 Rank rank
= name
.getRank();
976 WordClassificationExportTable table
= WordClassificationExportTable
.SCIENTIFIC_NAME
;
977 name
= HibernateProxyHelper
.deproxy(name
);
978 state
.getNameStore().put(name
.getId(), name
.getUuid());
979 String
[] csvLine
= new String
[table
.getSize()];
981 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_ID
)] = getId(state
, name
);
982 if (name
.getLsid() != null) {
983 csvLine
[table
.getIndex(WordClassificationExportTable
.LSID
)] = name
.getLsid().getLsid();
985 csvLine
[table
.getIndex(WordClassificationExportTable
.LSID
)] = "";
988 handleIdentifier(state
, name
);
989 handleDescriptions(state
, name
);
991 csvLine
[table
.getIndex(WordClassificationExportTable
.RANK
)] = getTitleCache(rank
);
993 csvLine
[table
.getIndex(WordClassificationExportTable
.RANK_SEQUENCE
)] = String
.valueOf(rank
.getOrderIndex());
994 if (rank
.isInfraGeneric()) {
996 csvLine
[table
.getIndex(WordClassificationExportTable
.INFRAGENERIC_RANK
)] = name
.getRank()
997 .getInfraGenericMarker();
998 } catch (UnknownCdmTypeException e
) {
999 state
.getResult().addError("Infrageneric marker expected but not available for rank "
1000 + name
.getRank().getTitleCache());
1003 if (rank
.isInfraSpecific()) {
1004 csvLine
[table
.getIndex(WordClassificationExportTable
.INFRASPECIFIC_RANK
)] = name
.getRank().getAbbreviation();
1007 csvLine
[table
.getIndex(WordClassificationExportTable
.RANK_SEQUENCE
)] = "";
1009 if (name
.isProtectedTitleCache()) {
1010 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_NAME_WITH_AUTHORS
)] = name
.getTitleCache();
1012 // TODO: adapt the tropicos titlecache creation
1013 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_NAME_WITH_AUTHORS
)] = name
.getTitleCache();
1017 if (!state
.getConfig().isAddHTML()) {
1018 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_NAME_WITH_REF
)] = name
.getFullTitleCache();
1020 List
<TaggedText
> taggedFullTitleCache
= name
.getTaggedFullTitle();
1021 List
<TaggedText
> taggedName
= name
.getTaggedName();
1023 String fullTitleWithHtml
= createNameWithItalics(taggedFullTitleCache
);
1024 // TODO: adapt the tropicos titlecache creation
1025 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_NAME_WITH_REF
)] = fullTitleWithHtml
.trim();
1028 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_NAME_NO_AUTHORS
)] = name
.getNameCache();
1029 csvLine
[table
.getIndex(WordClassificationExportTable
.GENUS_UNINOMIAL
)] = name
.getGenusOrUninomial();
1031 csvLine
[table
.getIndex(WordClassificationExportTable
.INFRAGENERIC_EPITHET
)] = name
.getInfraGenericEpithet();
1032 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIFIC_EPITHET
)] = name
.getSpecificEpithet();
1034 csvLine
[table
.getIndex(WordClassificationExportTable
.INFRASPECIFIC_EPITHET
)] = name
.getInfraSpecificEpithet();
1036 csvLine
[table
.getIndex(WordClassificationExportTable
.APPENDED_PHRASE
)] = name
.getAppendedPhrase();
1038 csvLine
[table
.getIndex(WordClassificationExportTable
.BAS_AUTHORTEAM_FK
)] = getId(state
, name
.getBasionymAuthorship());
1039 if (name
.getBasionymAuthorship() != null) {
1040 if (state
.getAuthorFromStore(name
.getBasionymAuthorship().getId()) == null) {
1041 handleAuthor(state
, name
.getBasionymAuthorship());
1044 csvLine
[table
.getIndex(WordClassificationExportTable
.BAS_EX_AUTHORTEAM_FK
)] = getId(state
,
1045 name
.getExBasionymAuthorship());
1046 if (name
.getExBasionymAuthorship() != null) {
1047 if (state
.getAuthorFromStore(name
.getExBasionymAuthorship().getId()) == null) {
1048 handleAuthor(state
, name
.getExBasionymAuthorship());
1052 csvLine
[table
.getIndex(WordClassificationExportTable
.COMB_AUTHORTEAM_FK
)] = getId(state
,
1053 name
.getCombinationAuthorship());
1054 if (name
.getCombinationAuthorship() != null) {
1055 if (state
.getAuthorFromStore(name
.getCombinationAuthorship().getId()) == null) {
1056 handleAuthor(state
, name
.getCombinationAuthorship());
1059 csvLine
[table
.getIndex(WordClassificationExportTable
.COMB_EX_AUTHORTEAM_FK
)] = getId(state
,
1060 name
.getExCombinationAuthorship());
1061 if (name
.getExCombinationAuthorship() != null) {
1062 if (state
.getAuthorFromStore(name
.getExCombinationAuthorship().getId()) == null) {
1063 handleAuthor(state
, name
.getExCombinationAuthorship());
1068 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_TEAM_STRING
)] = name
.getAuthorshipCache();
1070 Reference nomRef
= name
.getNomenclaturalReference();
1072 NomenclaturalSource nomenclaturalSource
= name
.getNomenclaturalSource();
1073 if (nomenclaturalSource
!= null &&nomenclaturalSource
.getNameUsedInSource() != null){
1074 handleName(state
, nomenclaturalSource
.getNameUsedInSource(), null);
1075 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_USED_IN_SOURCE
)] = getId(state
, nomenclaturalSource
.getNameUsedInSource());
1078 if (nomRef
!= null) {
1079 if (!state
.getReferenceStore().contains(nomRef
.getUuid())) {
1080 handleReference(state
, nomRef
);
1082 csvLine
[table
.getIndex(WordClassificationExportTable
.REFERENCE_FK
)] = getId(state
, nomRef
);
1083 csvLine
[table
.getIndex(WordClassificationExportTable
.PUBLICATION_TYPE
)] = nomRef
.getType().name();
1084 if (nomRef
.getVolume() != null) {
1085 csvLine
[table
.getIndex(WordClassificationExportTable
.VOLUME_ISSUE
)] = nomRef
.getVolume();
1086 csvLine
[table
.getIndex(WordClassificationExportTable
.COLLATION
)] = createCollatation(name
);
1088 if (nomRef
.getDatePublished() != null) {
1089 csvLine
[table
.getIndex(WordClassificationExportTable
.DATE_PUBLISHED
)] = nomRef
.getTimePeriodPublishedString();
1090 csvLine
[table
.getIndex(WordClassificationExportTable
.YEAR_PUBLISHED
)] = nomRef
.getDatePublished().getYear();
1091 csvLine
[table
.getIndex(WordClassificationExportTable
.VERBATIM_DATE
)] = nomRef
.getDatePublished()
1094 if (name
.getNomenclaturalMicroReference() != null) {
1095 csvLine
[table
.getIndex(WordClassificationExportTable
.DETAIL
)] = name
.getNomenclaturalMicroReference();
1097 nomRef
= HibernateProxyHelper
.deproxy(nomRef
);
1098 if (nomRef
.getInReference() != null) {
1099 Reference inReference
= nomRef
.getInReference();
1100 if (inReference
.getDatePublished() != null && nomRef
.getDatePublished() == null) {
1101 csvLine
[table
.getIndex(WordClassificationExportTable
.DATE_PUBLISHED
)] = inReference
1102 .getDatePublishedString();
1103 csvLine
[table
.getIndex(WordClassificationExportTable
.YEAR_PUBLISHED
)] = inReference
.getDatePublished()
1106 if (nomRef
.getVolume() == null && inReference
.getVolume() != null) {
1107 csvLine
[table
.getIndex(WordClassificationExportTable
.VOLUME_ISSUE
)] = inReference
.getVolume();
1108 csvLine
[table
.getIndex(WordClassificationExportTable
.COLLATION
)] = createCollatation(name
);
1110 if (inReference
.getInReference() != null) {
1111 inReference
= inReference
.getInReference();
1113 if (inReference
.getAbbrevTitle() == null) {
1114 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_TITLE
)] = CdmUtils
1115 .Nz(inReference
.getTitle());
1117 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_TITLE
)] = CdmUtils
1118 .Nz(inReference
.getAbbrevTitle());
1120 if (inReference
.getTitle() == null) {
1121 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_TITLE
)] = CdmUtils
1122 .Nz(inReference
.getAbbrevTitle()!= null? inReference
.getAbbrevTitle(): inReference
.getTitleCache());
1124 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_TITLE
)] = CdmUtils
.Nz(inReference
.getTitle());
1127 TeamOrPersonBase
<?
> author
= inReference
.getAuthorship();
1129 && (nomRef
.isOfType(ReferenceType
.BookSection
) || nomRef
.isOfType(ReferenceType
.Section
))) {
1130 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_REF_AUTHOR
)] = author
.isProtectedTitleCache()
1131 ? author
.getTitleCache() : CdmUtils
.Nz(author
.getNomenclaturalTitleCache());
1132 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_REF_AUTHOR
)] = CdmUtils
1133 .Nz(author
.getTitleCache());
1135 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_REF_AUTHOR
)] = "";
1136 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_REF_AUTHOR
)] = "";
1139 if (nomRef
.getAbbrevTitle() == null) {
1140 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_TITLE
)] = CdmUtils
1141 .Nz(nomRef
.getTitle()!= null? nomRef
.getTitle():nomRef
.getAbbrevTitleCache());
1143 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_TITLE
)] = CdmUtils
1144 .Nz(nomRef
.getAbbrevTitle());
1146 if (nomRef
.getTitle() == null) {
1147 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_TITLE
)] = CdmUtils
1148 .Nz(nomRef
.getAbbrevTitle()!= null? nomRef
.getAbbrevTitle(): nomRef
.getTitleCache());
1150 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_TITLE
)] = CdmUtils
.Nz(nomRef
.getTitle());
1152 TeamOrPersonBase
<?
> author
= nomRef
.getAuthorship();
1153 if (author
!= null) {
1154 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_REF_AUTHOR
)] = author
.isProtectedTitleCache()
1155 ? author
.getTitleCache() : CdmUtils
.Nz(author
.getNomenclaturalTitleCache());
1156 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_REF_AUTHOR
)] = CdmUtils
1157 .Nz(author
.getTitleCache());
1159 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_REF_AUTHOR
)] = "";
1160 csvLine
[table
.getIndex(WordClassificationExportTable
.FULL_REF_AUTHOR
)] = "";
1165 csvLine
[table
.getIndex(WordClassificationExportTable
.PUBLICATION_TYPE
)] = "";
1175 String protologueUriString
= extractProtologueURIs(state
, name
);
1177 csvLine
[table
.getIndex(WordClassificationExportTable
.PROTOLOGUE_URI
)] = protologueUriString
;
1178 Collection
<TypeDesignationBase
> specimenTypeDesignations
= new ArrayList
<>();
1179 List
<TextualTypeDesignation
> textualTypeDesignations
= new ArrayList
<>();
1180 for (TypeDesignationBase
<?
> typeDesignation
: name
.getTypeDesignations()) {
1181 if (typeDesignation
.isInstanceOf(TextualTypeDesignation
.class)) {
1183 if (((TextualTypeDesignation
) typeDesignation
).isVerbatim() ){
1184 Set
<IdentifiableSource
> sources
= typeDesignation
.getSources();
1185 boolean isProtologue
= false;
1186 if (sources
!= null && !sources
.isEmpty()){
1187 IdentifiableSource source
= sources
.iterator().next();
1188 if (name
.getNomenclaturalReference() != null){
1189 isProtologue
= source
.getCitation() != null? source
.getCitation().getUuid().equals(name
.getNomenclaturalReference().getUuid()): false;
1193 csvLine
[table
.getIndex(WordClassificationExportTable
.PROTOLOGUE_TYPE_STATEMENT
)] = ((TextualTypeDesignation
) typeDesignation
)
1194 .getPreferredText(Language
.DEFAULT());
1196 textualTypeDesignations
.add((TextualTypeDesignation
) typeDesignation
);
1200 textualTypeDesignations
.add((TextualTypeDesignation
) typeDesignation
);
1202 } else if (typeDesignation
.isInstanceOf(SpecimenTypeDesignation
.class)) {
1203 SpecimenTypeDesignation specimenType
= HibernateProxyHelper
.deproxy(typeDesignation
, SpecimenTypeDesignation
.class);
1204 specimenTypeDesignations
.add(specimenType
);
1205 handleSpecimenType(state
, specimenType
);
1208 }else if (typeDesignation
instanceof NameTypeDesignation
){
1209 specimenTypeDesignations
.add(HibernateProxyHelper
.deproxy(typeDesignation
, NameTypeDesignation
.class));
1212 TypeDesignationSetContainer manager
= new TypeDesignationSetContainer(specimenTypeDesignations
, name
, TypeDesignationSetComparator
.ORDER_BY
.TYPE_STATUS
);
1213 HTMLTagRules rules
= new HTMLTagRules();
1214 rules
.addRule(TagEnum
.name
, "i");
1216 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_SPECIMEN
)] = manager
.print(false, false, false, rules
);
1218 StringBuilder stringbuilder
= new StringBuilder();
1220 for (TextualTypeDesignation typeDesignation
: textualTypeDesignations
) {
1221 stringbuilder
.append(typeDesignation
.getPreferredText(Language
.DEFAULT()));
1222 if (typeDesignation
.getSources() != null && !typeDesignation
.getSources().isEmpty() ){
1223 stringbuilder
.append( " [");
1225 for (IdentifiableSource source
: typeDesignation
.getSources()){
1226 if (source
.getCitation() != null){
1227 stringbuilder
.append(OriginalSourceFormatter
.INSTANCE
.format(source
));
1229 if (index
< typeDesignation
.getSources().size()) {
1230 stringbuilder
.append( ", ");
1234 stringbuilder
.append( "]");
1236 if (i
< textualTypeDesignations
.size()) {
1237 stringbuilder
.append( "; ");
1239 stringbuilder
.append(".");
1243 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_STATEMENT
)] = stringbuilder
.toString();
1246 if (name
.getStatus() == null || name
.getStatus().isEmpty()) {
1247 csvLine
[table
.getIndex(WordClassificationExportTable
.NOM_STATUS
)] = "";
1248 csvLine
[table
.getIndex(WordClassificationExportTable
.NOM_STATUS_ABBREV
)] = "";
1251 String statusStringAbbrev
= extractStatusString(state
, name
, true);
1252 String statusString
= extractStatusString(state
, name
, false);
1254 csvLine
[table
.getIndex(WordClassificationExportTable
.NOM_STATUS
)] = statusString
.trim();
1255 csvLine
[table
.getIndex(WordClassificationExportTable
.NOM_STATUS_ABBREV
)] = statusStringAbbrev
.trim();
1258 HomotypicalGroup group
= HibernateProxyHelper
.deproxy(name
.getHomotypicalGroup(), HomotypicalGroup
.class);
1260 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_FK
)] = getId(state
, group
);
1261 List
<TaxonName
> typifiedNames
= new ArrayList
<>();
1262 if (acceptedTaxon
!= null){
1263 HomotypicGroupTaxonComparator comparator
= new HomotypicGroupTaxonComparator(acceptedTaxon
);
1264 List
<Synonym
> synonymsInGroup
= null;
1265 if (group
.equals(acceptedTaxon
.getHomotypicGroup())){
1266 synonymsInGroup
= acceptedTaxon
.getHomotypicSynonymsByHomotypicGroup(comparator
);
1267 typifiedNames
.add(name
);
1269 synonymsInGroup
= acceptedTaxon
.getSynonymsInGroup(group
, comparator
);
1272 synonymsInGroup
.stream().forEach(synonym
-> typifiedNames
.add(HibernateProxyHelper
.deproxy(synonym
.getName(), TaxonName
.class)));
1275 typifiedNames
.addAll(group
.getTypifiedNames());
1279 Integer seqNumber
= typifiedNames
.indexOf(name
);
1280 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_SEQ
)] = String
.valueOf(seqNumber
);
1281 state
.getProcessor().put(table
, name
, csvLine
);
1282 handleNameRelationships(state
, name
);
1284 } catch (Exception e
) {
1285 state
.getResult().addException(e
,
1286 "An unexpected error occurred when handling the name " + cdmBaseStr(name
) + ": " + name
.getTitleCache() + ": " + e
.getMessage());
1288 e
.printStackTrace();
1293 * @param specimenType
1295 private void handleSpecimenType_(WordClassificationExportState state
, SpecimenTypeDesignation specimenType
) {
1296 if (specimenType
.getTypeSpecimen() != null){
1297 DerivedUnit specimen
= specimenType
.getTypeSpecimen();
1298 if(specimen
!= null && !state
.getSpecimenStore().contains( specimen
.getUuid())){
1299 handleSpecimen(state
, specimen
);
1302 WordClassificationExportTable table
= WordClassificationExportTable
.TYPE_DESIGNATION
;
1303 String
[] csvLine
= new String
[table
.getSize()];
1304 //TYPE_ID, SPECIMEN_FK, TYPE_VERBATIM_CITATION, TYPE_STATUS, TYPE_DESIGNATED_BY_STRING, TYPE_DESIGNATED_BY_REF_FK};
1305 //Specimen_Fk und den Typusangaben (Art des Typus [holo, lecto, etc.], Quelle, Designation-Quelle, +
1306 Set
<TaxonName
> typifiedNames
= specimenType
.getTypifiedNames();
1307 for (TaxonName name
: typifiedNames
){
1308 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_STATUS
)] = specimenType
.getTypeStatus() != null? specimenType
.getTypeStatus().getDescription(): "";
1309 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_ID
)] = getId(state
, specimenType
);
1310 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPIFIED_NAME_FK
)] = getId(state
, name
);
1311 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_FK
)] = getId(state
, specimenType
.getTypeSpecimen());
1312 if (specimenType
.getSources() != null && !specimenType
.getSources().isEmpty()){
1313 String sourceString
= "";
1315 for (IdentifiableSource source
: specimenType
.getSources()){
1316 if (source
.getCitation()!= null){
1317 sourceString
= sourceString
.concat(source
.getCitation().getCitation());
1320 if (index
!= specimenType
.getSources().size()){
1321 sourceString
.concat(", ");
1324 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_INFORMATION_REF_STRING
)] = sourceString
;
1326 if (specimenType
.getDesignationSource() != null && specimenType
.getDesignationSource().getCitation() != null && !state
.getReferenceStore().contains(specimenType
.getDesignationSource().getCitation().getUuid())){
1327 handleReference(state
, specimenType
.getDesignationSource().getCitation());
1328 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_DESIGNATED_BY_REF_FK
)] = specimenType
.getDesignationSource() != null ?
getId(state
, specimenType
.getDesignationSource().getCitation()): "";
1331 state
.getProcessor().put(table
, specimenType
, csvLine
);
1337 * @param specimenType
1339 private void handleSpecimenType(WordClassificationExportState state
, SpecimenTypeDesignation specimenType
) {
1340 if (specimenType
.getTypeSpecimen() != null){
1341 DerivedUnit specimen
= specimenType
.getTypeSpecimen();
1342 if(specimen
!= null && !state
.getSpecimenStore().contains( specimen
.getUuid())){
1343 handleSpecimen(state
, specimen
);
1346 WordClassificationExportTable table
= WordClassificationExportTable
.TYPE_DESIGNATION
;
1347 String
[] csvLine
= new String
[table
.getSize()];
1349 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_STATUS
)] = specimenType
.getTypeStatus() != null? specimenType
.getTypeStatus().getDescription(): "";
1350 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_ID
)] = getId(state
, specimenType
);
1351 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_FK
)] = getId(state
, specimenType
.getTypeSpecimen());
1352 if (specimenType
.getSources() != null && !specimenType
.getSources().isEmpty()){
1353 String sourceString
= "";
1355 List
<IdentifiableSource
> sources
= new ArrayList
<>(specimenType
.getSources());
1356 Comparator
<IdentifiableSource
> compareByYear
= new Comparator
<IdentifiableSource
>() {
1358 public int compare(IdentifiableSource o1
, IdentifiableSource o2
) {
1362 if (o1
.getCitation() == null && o2
.getCitation() != null){
1365 if (o2
.getCitation() == null && o1
.getCitation() != null){
1368 if (o1
.getCitation().equals(o2
.getCitation())){
1371 if (o1
.getCitation().getDatePublished() == null && o2
.getCitation().getDatePublished() != null){
1374 if (o1
.getCitation().getDatePublished() != null && o2
.getCitation().getDatePublished() == null){
1377 if (o1
.getCitation().getDatePublished().getYear() == null && o2
.getCitation().getDatePublished().getYear() != null){
1380 if (o1
.getCitation().getDatePublished().getYear() != null && o2
.getCitation().getDatePublished().getYear() == null){
1383 return o1
.getCitation().getDatePublished().getYear().compareTo(o2
.getCitation().getDatePublished().getYear());
1386 Collections
.sort(sources
, compareByYear
);
1387 for (IdentifiableSource source
: sources
){
1388 if (source
.getCitation()!= null){
1389 sourceString
= sourceString
.concat(source
.getCitation().getCitation());
1390 handleReference(state
, source
.getCitation());
1393 if (index
<= specimenType
.getSources().size()){
1394 sourceString
= sourceString
.concat("; ");
1398 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_INFORMATION_REF_STRING
)] = sourceString
;
1399 if (sources
.get(0).getCitation() != null ){
1400 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_INFORMATION_REF_FK
)] = getId(state
, sources
.get(0).getCitation());
1403 if (specimenType
.getDesignationSource() != null && specimenType
.getDesignationSource().getCitation() != null && !state
.getReferenceStore().contains(specimenType
.getDesignationSource().getCitation().getUuid())){
1404 handleReference(state
, specimenType
.getDesignationSource().getCitation());
1405 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_DESIGNATED_BY_REF_FK
)] = specimenType
.getDesignationSource() != null ?
getId(state
, specimenType
.getDesignationSource().getCitation()): "";
1409 Set
<TaxonName
> typifiedNames
= specimenType
.getTypifiedNames();
1411 if (typifiedNames
.size() > 1){
1412 state
.getResult().addWarning("Please check the specimen type "
1413 + cdmBaseStr(specimenType
) + " there are more then one typified name.");
1415 if (typifiedNames
.iterator().hasNext()){
1416 TaxonName name
= typifiedNames
.iterator().next();
1417 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPIFIED_NAME_FK
)] = getId(state
, name
);
1419 state
.getProcessor().put(table
, specimenType
, csvLine
);
1428 private String
createNameWithItalics(List
<TaggedText
> taggedName
) {
1430 String fullTitleWithHtml
= "";
1431 for (TaggedText taggedText
: taggedName
){
1432 if (taggedText
.getType().equals(TagEnum
.name
)){
1433 fullTitleWithHtml
+= "<i>" + taggedText
.getText() + "</i> ";
1434 }else if (taggedText
.getType().equals(TagEnum
.separator
)){
1435 fullTitleWithHtml
= fullTitleWithHtml
.trim() + taggedText
.getText() ;
1437 fullTitleWithHtml
+= taggedText
.getText() + " ";
1440 return fullTitleWithHtml
;
1443 private void handleNameRelationships(WordClassificationExportState state
, TaxonName name
) {
1444 Set
<NameRelationship
> rels
= name
.getRelationsFromThisName();
1445 WordClassificationExportTable table
= WordClassificationExportTable
.NAME_RELATIONSHIP
;
1446 String
[] csvLine
= new String
[table
.getSize()];
1448 for (NameRelationship rel
: rels
) {
1449 NameRelationshipType type
= rel
.getType();
1450 TaxonName name2
= rel
.getToName();
1451 name2
= HibernateProxyHelper
.deproxy(name2
, TaxonName
.class);
1452 if (!state
.getNameStore().containsKey(name2
.getId())) {
1453 handleName(state
, name2
, null);
1456 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME_REL_TYPE
)] = type
.getLabel();
1457 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME1_FK
)] = getId(state
, name
);
1458 csvLine
[table
.getIndex(WordClassificationExportTable
.NAME2_FK
)] = getId(state
, name2
);
1459 state
.getProcessor().put(table
, name
, csvLine
);
1462 rels
= name
.getRelationsToThisName();
1464 csvLine
= new String
[table
.getSize()];
1466 for (NameRelationship rel
: rels
) {
1467 NameRelationshipType type
= rel
.getType();
1468 TaxonName name2
= rel
.getFromName();
1469 name2
= HibernateProxyHelper
.deproxy(name2
, TaxonName
.class);
1470 if (!state
.getNameStore().containsKey(name2
.getId())) {
1471 handleName(state
, name2
, null);
1478 private String
createCollatation(TaxonName name
) {
1479 String collation
= "";
1480 if (name
.getNomenclaturalReference() != null) {
1481 Reference ref
= name
.getNomenclaturalReference();
1482 collation
= getVolume(ref
);
1484 if (name
.getNomenclaturalMicroReference() != null) {
1485 if (!StringUtils
.isBlank(collation
)) {
1488 collation
+= name
.getNomenclaturalMicroReference();
1494 private String
getVolume(Reference reference
) {
1495 if (reference
.getVolume() != null) {
1496 return reference
.getVolume();
1497 } else if (reference
.getInReference() != null) {
1498 if (reference
.getInReference().getVolume() != null) {
1499 return reference
.getInReference().getVolume();
1505 private void handleIdentifier(WordClassificationExportState state
, CdmBase cdmBase
) {
1506 WordClassificationExportTable table
= WordClassificationExportTable
.IDENTIFIER
;
1509 if (cdmBase
instanceof TaxonName
){
1510 TaxonName name
= (TaxonName
)cdmBase
;
1513 List
<Identifier
> identifiers
= name
.getIdentifiers();
1515 //first check which kind of identifiers are available and then sort and create table entries
1516 Map
<IdentifierType
, Set
<Identifier
>> identifierTypes
= new HashMap
<>();
1517 for (Identifier identifier
: identifiers
){
1518 IdentifierType type
= identifier
.getType();
1519 if (identifierTypes
.containsKey(type
)){
1520 identifierTypes
.get(type
).add(identifier
);
1522 Set
<Identifier
> tempList
= new HashSet
<>();
1523 tempList
.add(identifier
);
1524 identifierTypes
.put(type
, tempList
);
1528 for (IdentifierType type
:identifierTypes
.keySet()){
1529 Set
<Identifier
> identifiersByType
= identifierTypes
.get(type
);
1530 csvLine
= new String
[table
.getSize()];
1531 csvLine
[table
.getIndex(WordClassificationExportTable
.FK
)] = getId(state
, name
);
1532 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TABLE
)] = "ScientificName";
1533 csvLine
[table
.getIndex(WordClassificationExportTable
.IDENTIFIER_TYPE
)] = type
.getLabel();
1534 csvLine
[table
.getIndex(WordClassificationExportTable
.EXTERNAL_NAME_IDENTIFIER
)] = extractIdentifier(
1536 state
.getProcessor().put(table
, name
.getUuid() + ", " + type
.getLabel(), csvLine
);
1540 // Set<String> IPNIidentifiers = name.getIdentifiers(DefinedTerm.IDENTIFIER_NAME_IPNI());
1541 // Set<String> tropicosIdentifiers = name.getIdentifiers(DefinedTerm.IDENTIFIER_NAME_TROPICOS());
1542 // Set<String> WFOIdentifiers = name.getIdentifiers(DefinedTerm.uuidWfoNameIdentifier);
1543 // if (!IPNIidentifiers.isEmpty()) {
1544 // csvLine = new String[table.getSize()];
1545 // csvLine[table.getIndex(CdmLightExportTable.FK)] = getId(state, name);
1546 // csvLine[table.getIndex(CdmLightExportTable.REF_TABLE)] = "ScientificName";
1547 // csvLine[table.getIndex(CdmLightExportTable.IDENTIFIER_TYPE)] = IPNI_NAME_IDENTIFIER;
1548 // csvLine[table.getIndex(CdmLightExportTable.EXTERNAL_NAME_IDENTIFIER)] = extractIdentifier(
1549 // IPNIidentifiers);
1550 // state.getProcessor().put(table, name.getUuid() + ", " + IPNI_NAME_IDENTIFIER, csvLine);
1552 // if (!tropicosIdentifiers.isEmpty()) {
1553 // csvLine = new String[table.getSize()];
1554 // csvLine[table.getIndex(CdmLightExportTable.FK)] = getId(state, name);
1555 // csvLine[table.getIndex(CdmLightExportTable.REF_TABLE)] = "ScientificName";
1556 // csvLine[table.getIndex(CdmLightExportTable.IDENTIFIER_TYPE)] = TROPICOS_NAME_IDENTIFIER;
1557 // csvLine[table.getIndex(CdmLightExportTable.EXTERNAL_NAME_IDENTIFIER)] = extractIdentifier(
1558 // tropicosIdentifiers);
1559 // state.getProcessor().put(table, name.getUuid() + ", " + IPNI_NAME_IDENTIFIER, csvLine);
1561 // if (!WFOIdentifiers.isEmpty()) {
1562 // csvLine = new String[table.getSize()];
1563 // csvLine[table.getIndex(CdmLightExportTable.FK)] = getId(state, name);
1564 // csvLine[table.getIndex(CdmLightExportTable.REF_TABLE)] = "ScientificName";
1565 // csvLine[table.getIndex(CdmLightExportTable.IDENTIFIER_TYPE)] = WFO_NAME_IDENTIFIER;
1566 // csvLine[table.getIndex(CdmLightExportTable.EXTERNAL_NAME_IDENTIFIER)] = extractIdentifier(
1568 // state.getProcessor().put(table, name.getUuid() + ", " + WFO_NAME_IDENTIFIER, csvLine);
1570 }catch(Exception e
){
1571 state
.getResult().addWarning("Please check the identifiers for "
1572 + cdmBaseStr(cdmBase
) + " maybe there is an empty identifier");
1577 if (cdmBase
instanceof IdentifiableEntity
){
1578 IdentifiableEntity
<?
> identifiableEntity
= (IdentifiableEntity
<?
>) cdmBase
;
1579 List
<Identifier
> identifiers
= identifiableEntity
.getIdentifiers();
1580 String tableName
= null;
1581 if (cdmBase
instanceof Reference
){
1582 tableName
= "Reference";
1583 }else if (cdmBase
instanceof SpecimenOrObservationBase
){
1584 tableName
= "Specimen";
1585 }else if (cdmBase
instanceof Taxon
){
1586 tableName
= "Taxon";
1587 }else if (cdmBase
instanceof Synonym
){
1588 tableName
= "Synonym";
1589 }else if (cdmBase
instanceof TeamOrPersonBase
){
1590 tableName
= "PersonOrTeam";
1593 for (Identifier identifier
: identifiers
){
1594 if (identifier
.getType() == null && identifier
.getIdentifier() == null){
1595 state
.getResult().addWarning("Please check the identifiers for "
1596 + cdmBaseStr(cdmBase
) + " there is an empty identifier");
1600 csvLine
= new String
[table
.getSize()];
1601 csvLine
[table
.getIndex(WordClassificationExportTable
.FK
)] = getId(state
, cdmBase
);
1603 if (tableName
!= null){
1604 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TABLE
)] = tableName
;
1605 csvLine
[table
.getIndex(WordClassificationExportTable
.IDENTIFIER_TYPE
)] = identifier
.getType() != null? identifier
.getType().getLabel():null;
1606 csvLine
[table
.getIndex(WordClassificationExportTable
.EXTERNAL_NAME_IDENTIFIER
)] = identifier
.getIdentifier();
1607 state
.getProcessor().put(table
, cdmBase
.getUuid() + (identifier
.getType() != null? identifier
.getType().getLabel():null), csvLine
);
1610 if (cdmBase
instanceof Reference
){
1611 Reference ref
= (Reference
)cdmBase
;
1612 if (ref
.getDoi() != null){
1613 csvLine
= new String
[table
.getSize()];
1614 csvLine
[table
.getIndex(WordClassificationExportTable
.FK
)] = getId(state
, cdmBase
);
1615 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TABLE
)] = tableName
;
1616 csvLine
[table
.getIndex(WordClassificationExportTable
.IDENTIFIER_TYPE
)] = "DOI";
1617 csvLine
[table
.getIndex(WordClassificationExportTable
.EXTERNAL_NAME_IDENTIFIER
)] = ref
.getDoiString();
1618 state
.getProcessor().put(table
, cdmBase
.getUuid() + "DOI", csvLine
);
1622 if (cdmBase
instanceof TeamOrPersonBase
){
1623 TeamOrPersonBase
<?
> person
= HibernateProxyHelper
.deproxy(cdmBase
, TeamOrPersonBase
.class);
1624 if (person
instanceof Person
&& ((Person
)person
).getOrcid() != null){
1625 csvLine
= new String
[table
.getSize()];
1626 csvLine
[table
.getIndex(WordClassificationExportTable
.FK
)] = getId(state
, cdmBase
);
1627 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TABLE
)] = tableName
;
1628 csvLine
[table
.getIndex(WordClassificationExportTable
.IDENTIFIER_TYPE
)] = "ORCID";
1629 csvLine
[table
.getIndex(WordClassificationExportTable
.EXTERNAL_NAME_IDENTIFIER
)]= ((Person
)person
).getOrcid().asURI();
1630 state
.getProcessor().put(table
, cdmBase
.getUuid() + "ORCID", csvLine
);
1635 } catch (Exception e
) {
1636 state
.getResult().addException(e
, "An unexpected error occurred when handling identifiers for "
1637 + cdmBaseStr(cdmBase
) + ": " + e
.getMessage());
1638 e
.printStackTrace();
1642 private String
extractIdentifier(Set
<Identifier
> identifierSet
) {
1644 String identifierString
= "";
1645 for (Identifier identifier
: identifierSet
) {
1646 if (!StringUtils
.isBlank(identifierString
)) {
1647 identifierString
+= ", ";
1649 identifierString
+= identifier
.getIdentifier();
1651 return identifierString
;
1654 private String
extractProtologueURIs(WordClassificationExportState state
, TaxonName name
) {
1655 if (name
.getNomenclaturalSource() != null){
1656 Set
<ExternalLink
> links
= name
.getNomenclaturalSource().getLinks();
1657 return extractLinkUris(links
.iterator());
1663 private String
extractMediaURIs(WordClassificationExportState state
, Set
<?
extends DescriptionBase
<?
>> descriptionsSet
,
1666 String mediaUriString
= "";
1667 Set
<DescriptionElementBase
> elements
= new HashSet
<>();
1668 for (DescriptionBase
<?
> description
: descriptionsSet
) {
1670 if (!description
.getElements().isEmpty()) {
1671 elements
= description
.getElements();
1673 for (DescriptionElementBase element
: elements
) {
1674 Feature entityFeature
= HibernateProxyHelper
.deproxy(element
.getFeature());
1675 if (entityFeature
.equals(feature
)) {
1676 if (!element
.getMedia().isEmpty()) {
1677 List
<Media
> media
= element
.getMedia();
1678 for (Media mediaElement
: media
) {
1679 Iterator
<MediaRepresentation
> it
= mediaElement
.getRepresentations().iterator();
1680 mediaUriString
= extractMediaUris(it
);
1686 } catch (Exception e
) {
1687 state
.getResult().addException(e
, "An unexpected error occurred when extracting media URIs for "
1688 + cdmBaseStr(description
) + ": " + e
.getMessage());
1691 return mediaUriString
;
1694 private void handleAuthor(WordClassificationExportState state
, TeamOrPersonBase
<?
> author
) {
1696 if (state
.getAuthorFromStore(author
.getId()) != null) {
1699 state
.addAuthorToStore(author
);
1700 handleIdentifier(state
, author
);
1701 WordClassificationExportTable table
= WordClassificationExportTable
.NOMENCLATURAL_AUTHOR
;
1702 String
[] csvLine
= new String
[table
.getSize()];
1703 WordClassificationExportTable tableAuthorRel
= WordClassificationExportTable
.NOMENCLATURAL_AUTHOR_TEAM_RELATION
;
1704 String
[] csvLineRel
= new String
[tableAuthorRel
.getSize()];
1705 String
[] csvLineMember
= new String
[table
.getSize()];
1706 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_ID
)] = getId(state
, author
);
1707 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_AUTHOR
)] = author
.isProtectedTitleCache()
1708 ? author
.getTitleCache() : author
.getNomenclaturalTitleCache();
1709 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_TITLE
)] = author
.getTitleCache();
1710 author
= HibernateProxyHelper
.deproxy(author
);
1711 if (author
instanceof Person
) {
1712 Person authorPerson
= (Person
) author
;
1713 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_GIVEN_NAME
)] = authorPerson
.getGivenName();
1714 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_FAMILY_NAME
)] = authorPerson
.getFamilyName();
1715 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_PREFIX
)] = authorPerson
.getPrefix();
1716 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_SUFFIX
)] = authorPerson
.getSuffix();
1718 // create an entry in rel table and all members in author table,
1719 // check whether the team members already in author table
1721 Team authorTeam
= (Team
) author
;
1723 for (Person member
: authorTeam
.getTeamMembers()) {
1724 csvLineRel
= new String
[tableAuthorRel
.getSize()];
1725 csvLineRel
[tableAuthorRel
.getIndex(WordClassificationExportTable
.AUTHOR_TEAM_FK
)] = getId(state
, authorTeam
);
1726 csvLineRel
[tableAuthorRel
.getIndex(WordClassificationExportTable
.AUTHOR_FK
)] = getId(state
, member
);
1727 csvLineRel
[tableAuthorRel
.getIndex(WordClassificationExportTable
.AUTHOR_TEAM_SEQ_NUMBER
)] = String
1729 state
.getProcessor().put(tableAuthorRel
, authorTeam
.getId() + ":" + member
.getId(), csvLineRel
);
1731 if (state
.getAuthorFromStore(member
.getId()) == null) {
1732 state
.addAuthorToStore(member
);
1733 csvLineMember
= new String
[table
.getSize()];
1734 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_ID
)] = getId(state
, member
);
1735 csvLineMember
[table
.getIndex(WordClassificationExportTable
.ABBREV_AUTHOR
)] = member
1736 .isProtectedTitleCache() ? member
.getTitleCache() : member
.getNomenclaturalTitleCache();
1737 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_TITLE
)] = member
.getTitleCache();
1738 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_GIVEN_NAME
)] = member
.getGivenName();
1739 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_FAMILY_NAME
)] = member
.getFamilyName();
1740 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_PREFIX
)] = member
.getPrefix();
1741 csvLineMember
[table
.getIndex(WordClassificationExportTable
.AUTHOR_SUFFIX
)] = member
.getSuffix();
1742 state
.getProcessor().put(table
, member
, csvLineMember
);
1747 state
.getProcessor().put(table
, author
, csvLine
);
1748 } catch (Exception e
) {
1749 state
.getResult().addException(e
,
1750 "An unexpected error occurred when handling author " + cdmBaseStr(author
) + ": " + e
.getMessage());
1754 private String
extractStatusString(WordClassificationExportState state
, TaxonName name
, boolean abbrev
) {
1756 Set
<NomenclaturalStatus
> status
= name
.getStatus();
1757 if (status
.isEmpty()) {
1760 String statusString
= "";
1761 for (NomenclaturalStatus nameStatus
: status
) {
1762 if (nameStatus
!= null) {
1764 if (nameStatus
.getType() != null) {
1765 statusString
+= nameStatus
.getType().getIdInVocabulary();
1768 if (nameStatus
.getType() != null) {
1769 statusString
+= nameStatus
.getType().getTitleCache();
1774 if (nameStatus
.getRuleConsidered() != null
1775 && !StringUtils
.isBlank(nameStatus
.getRuleConsidered())) {
1776 statusString
+= ": " + nameStatus
.getRuleConsidered();
1778 if (nameStatus
.getCitation() != null) {
1779 String shortCitation
= OriginalSourceFormatter
.INSTANCE
.format(nameStatus
.getCitation(), null);
1780 statusString
+= " (" + shortCitation
+ ")";
1782 // if (nameStatus.getCitationMicroReference() != null
1783 // && !StringUtils.isBlank(nameStatus.getCitationMicroReference())) {
1784 // statusString += " " + nameStatus.getCitationMicroReference();
1787 statusString
+= " ";
1790 return statusString
;
1791 } catch (Exception e
) {
1792 state
.getResult().addException(e
, "An unexpected error occurred when extracting status string for "
1793 + cdmBaseStr(name
) + ": " + e
.getMessage());
1798 private void handleHomotypicalGroup(WordClassificationExportState state
, HomotypicalGroup group
, Taxon acceptedTaxon
, int sortIndex
) {
1800 state
.addHomotypicalGroupToStore(group
);
1801 WordClassificationExportTable table
= WordClassificationExportTable
.HOMOTYPIC_GROUP
;
1802 String
[] csvLine
= new String
[table
.getSize()];
1803 csvLine
[table
.getIndex(WordClassificationExportTable
.SORT_INDEX
)] = String
.valueOf(sortIndex
);
1804 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_ID
)] = getId(state
, group
);
1806 List
<TaxonName
> typifiedNames
= new ArrayList
<>();
1807 if (acceptedTaxon
!= null){
1808 List
<Synonym
> synonymsInGroup
= acceptedTaxon
.getSynonymsInGroup(group
);
1809 if (group
.equals(acceptedTaxon
.getHomotypicGroup())){
1810 typifiedNames
.add(acceptedTaxon
.getName());
1812 synonymsInGroup
.stream().forEach(synonym
-> typifiedNames
.add(CdmBase
.deproxy(synonym
.getName())));
1816 TaxonName firstname
= null;
1817 for (TaxonName name
: typifiedNames
){
1818 Iterator
<Taxon
> taxa
= name
.getTaxa().iterator();
1819 while(taxa
.hasNext()){
1820 Taxon taxon
= taxa
.next();
1821 if(!(taxon
.isMisapplication() || taxon
.isProparteSynonym())){
1828 // Collections.sort(typifiedNames, new HomotypicalGroupNameComparator(firstname, true));
1829 String typifiedNamesString
= "";
1830 String typifiedNamesWithSecString
= "";
1831 String typifiedNamesWithoutAccepted
= "";
1832 String typifiedNamesWithoutAcceptedWithSec
= "";
1834 for (TaxonName name
: typifiedNames
) {
1835 // Concatenated output string for homotypic group (names and
1836 // citations) + status + some name relations (e.g. “non”)
1837 // TODO: nameRelations, which and how to display
1838 Set
<TaxonBase
> taxonBases
= name
.getTaxonBases();
1839 TaxonBase
<?
> taxonBase
;
1842 String nameString
= name
.getFullTitleCache();
1843 String doubtful
= "";
1845 if (state
.getConfig().isAddHTML()){
1846 nameString
= createNameWithItalics(name
.getTaggedFullTitle()) ;
1849 Set
<NameRelationship
> related
= name
.getNameRelations();
1850 List
<NameRelationship
> relatedList
= new ArrayList
<>(related
);
1852 Collections
.sort(relatedList
, new Comparator
<NameRelationship
>() {
1854 public int compare(NameRelationship nr1
, NameRelationship nr2
) {
1855 return nr1
.getType().compareTo(nr2
.getType());
1860 List
<NameRelationship
> nonNames
= new ArrayList
<>();
1861 List
<NameRelationship
> otherRelationships
= new ArrayList
<>();
1863 for (NameRelationship rel
: relatedList
){
1864 //no inverse relations
1865 if (rel
.getFromName().equals(name
)){
1866 // alle Homonyme und inverse blocking names
1867 if (rel
.getType().equals(NameRelationshipType
.LATER_HOMONYM())
1868 || rel
.getType().equals(NameRelationshipType
.TREATED_AS_LATER_HOMONYM())
1869 || (rel
.getType().equals(NameRelationshipType
.BLOCKING_NAME_FOR()))
1870 || (rel
.getType().equals(NameRelationshipType
.UNSPECIFIC_NON()))){
1872 }else if (!rel
.getType().isBasionymRelation()){
1873 otherRelationships
.add(rel
);
1878 String nonRelNames
= "";
1879 String relNames
= "";
1881 if (nonNames
.size() > 0){
1882 nonRelNames
+= " [";
1884 for (NameRelationship relName
: nonNames
){
1885 String label
= "non ";
1886 TaxonName relatedName
= null;
1887 if (relName
.getFromName().equals(name
)){
1888 relatedName
= relName
.getToName();
1889 nonRelNames
+= label
+ relatedName
.getTitleCache() + " ";
1892 // label = relName.getType().getInverseLabel() + " ";
1893 // relatedName = relName.getFromName();
1894 // nonRelNames += label + relatedName.getTitleCache() + " ";
1900 if (nonNames
.size() > 0){
1901 nonRelNames
= StringUtils
.strip(nonRelNames
, null);
1902 nonRelNames
+= "] ";
1905 if (otherRelationships
.size() > 0){
1908 for (NameRelationship rel
: otherRelationships
){
1910 TaxonName relatedName
= null;
1911 if (rel
.getFromName().equals(name
)){
1912 label
= rel
.getType().getLabel() + " ";
1913 relatedName
= rel
.getToName();
1914 if (state
.getConfig().isAddHTML()){
1915 relNames
+= label
+ createNameWithItalics(relatedName
.getTaggedName())+ " ";
1917 relNames
+= label
+ relatedName
.getTitleCache();
1921 // label = rel.getType().getInverseLabel() + " ";
1922 // relatedName = rel.getFromName();
1927 if (otherRelationships
.size() > 0){
1928 relNames
= StringUtils
.stripEnd(relNames
, null);
1932 String synonymSign
= "";
1934 if (name
.isInvalid()){
1935 synonymSign
= "\u2212 ";
1937 synonymSign
= "\u2261 ";
1940 if (name
.isInvalid() ){
1941 synonymSign
= "\u2212 ";
1943 synonymSign
= "\u003D ";
1946 boolean isAccepted
= false;
1948 if (taxonBases
.size() == 1){
1949 taxonBase
= HibernateProxyHelper
.deproxy(taxonBases
.iterator().next());
1951 if (taxonBase
.getSec() != null){
1952 sec
= OriginalSourceFormatter
.INSTANCE_WITH_YEAR_BRACKETS
.format(taxonBase
.getSecSource());
1954 if (taxonBase
.isDoubtful()){
1959 if (taxonBase
instanceof Synonym
){
1960 if (isNotBlank(sec
)){
1961 sec
= " syn. sec. " + sec
+ " ";
1966 typifiedNamesWithoutAccepted
+= synonymSign
+ doubtful
+ nameString
+ nonRelNames
+ relNames
;
1967 typifiedNamesWithoutAcceptedWithSec
+= synonymSign
+ doubtful
+ nameString
+ sec
+ nonRelNames
+ relNames
;
1970 if (!(((Taxon
)taxonBase
).isProparteSynonym() || ((Taxon
)taxonBase
).isMisapplication())){
1973 synonymSign
= "\u003D ";
1977 if (taxonBase
.getAppendedPhrase() != null){
1978 if (state
.getConfig().isAddHTML()){
1979 String taxonString
= createNameWithItalics(taxonBase
.getTaggedTitle()) ;
1980 taxonString
= taxonString
.replace("sec "+sec
, "");
1981 String nameCacheWithItalics
= createNameWithItalics(name
.getTaggedName());
1982 nameString
= nameString
.replace(nameCacheWithItalics
, taxonString
);
1986 //there are names used more than once?
1987 for (TaxonBase
<?
> tb
: taxonBases
){
1988 if (tb
.getSec() != null){
1989 sec
= OriginalSourceFormatter
.INSTANCE_WITH_YEAR_BRACKETS
.format(tb
.getSecSource());
1991 if (tb
.isDoubtful()){
1996 if (tb
instanceof Synonym
){
1997 if (StringUtils
.isNotBlank(sec
)){
1998 sec
= " syn. sec. " + sec
+ " ";
2006 if (!(((Taxon
)tb
).isProparteSynonym() || ((Taxon
)tb
).isMisapplication())){
2010 synonymSign
= "\u003D ";
2016 typifiedNamesWithoutAccepted
+= synonymSign
+ doubtful
+ nameString
+ "; ";
2017 typifiedNamesWithoutAcceptedWithSec
+= synonymSign
+ doubtful
+ nameString
+ sec
;
2018 typifiedNamesWithoutAcceptedWithSec
= typifiedNamesWithoutAcceptedWithSec
.trim() + "; ";
2021 typifiedNamesString
+= synonymSign
+ doubtful
+ nameString
+ nonRelNames
+ relNames
;
2022 typifiedNamesWithSecString
+= synonymSign
+ doubtful
+ nameString
+ sec
+ nonRelNames
+ relNames
;
2025 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_STRING
)] = typifiedNamesString
.trim();
2027 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_WITH_SEC_STRING
)] = typifiedNamesWithSecString
.trim();
2029 if (typifiedNamesWithoutAccepted
!= null && firstname
!= null) {
2030 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_WITHOUT_ACCEPTED
)] = typifiedNamesWithoutAccepted
.trim();
2032 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_WITHOUT_ACCEPTED
)] = "";
2035 if (typifiedNamesWithoutAcceptedWithSec
!= null && firstname
!= null) {
2036 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_WITHOUT_ACCEPTEDWITHSEC
)] = typifiedNamesWithoutAcceptedWithSec
.trim();
2038 csvLine
[table
.getIndex(WordClassificationExportTable
.HOMOTYPIC_GROUP_WITHOUT_ACCEPTEDWITHSEC
)] = "";
2043 Set
<TypeDesignationBase
<?
>> typeDesigantionSet
= group
.getTypeDesignations();
2044 List
<TypeDesignationBase
<?
>> designationList
= new ArrayList
<>();
2045 designationList
.addAll(typeDesigantionSet
);
2046 Collections
.sort(designationList
, new TypeComparator());
2048 List
<TaggedText
> list
= new ArrayList
<>();
2049 if (!designationList
.isEmpty()) {
2050 TypeDesignationSetContainer manager
= new TypeDesignationSetContainer(group
);
2051 list
.addAll(new TypeDesignationSetFormatter(true, false, false).toTaggedText(manager
));
2053 String typeTextDesignations
= "";
2054 //The typeDesignationManager does not handle the textual typeDesignations
2055 for (TypeDesignationBase
<?
> typeDes
: designationList
) {
2056 if (typeDes
instanceof TextualTypeDesignation
) {
2057 typeTextDesignations
= typeTextDesignations
+ ((TextualTypeDesignation
)typeDes
).getText(Language
.getDefaultLanguage());
2058 String typeDesStateRefs
= "";
2059 if (typeDes
.getDesignationSource() != null ){
2060 typeDesStateRefs
= "[";
2061 NamedSource source
= typeDes
.getDesignationSource();
2062 if (source
.getCitation() != null){
2063 typeDesStateRefs
+= "fide " + OriginalSourceFormatter
.INSTANCE
.format(source
.getCitation(), null);
2065 typeDesStateRefs
+= "]";
2066 }else if (typeDes
.getSources() != null && !typeDes
.getSources().isEmpty()){
2067 typeDesStateRefs
= "[";
2068 for (IdentifiableSource source
: typeDes
.getSources()) {
2069 if (source
.getCitation() != null){
2070 typeDesStateRefs
+= "fide " +OriginalSourceFormatter
.INSTANCE
.format(source
.getCitation(), null);
2074 typeDesStateRefs
+= "]";
2077 typeTextDesignations
= typeTextDesignations
+ typeDesStateRefs
+"; ";
2079 }else if (typeDes
instanceof SpecimenTypeDesignation
){
2080 DerivedUnit specimen
= ((SpecimenTypeDesignation
)typeDes
).getTypeSpecimen();
2081 if(specimen
!= null && !state
.getSpecimenStore().contains( specimen
.getUuid())){
2082 handleSpecimen(state
, specimen
);
2086 if (typeTextDesignations
.equals("; ")) {
2087 typeTextDesignations
= "";
2089 if (StringUtils
.isNotBlank(typeTextDesignations
)) {
2090 typeTextDesignations
= typeTextDesignations
.substring(0, typeTextDesignations
.length()-2);
2092 String specimenTypeString
= !list
.isEmpty()?
createTypeDesignationString(list
, true, typifiedNames
.get(0).isSpecies() || typifiedNames
.get(0).isInfraSpecific()):"";
2094 if (StringUtils
.isNotBlank(specimenTypeString
)) {
2095 if (!specimenTypeString
.endsWith(".")) {
2096 specimenTypeString
= specimenTypeString
+ ".";
2098 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_STRING
)] = specimenTypeString
;
2101 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_STRING
)] = "";
2103 if (StringUtils
.isNotBlank(typeTextDesignations
)) {
2104 if (!typeTextDesignations
.endsWith(".")) {
2105 typeTextDesignations
= typeTextDesignations
+ ".";
2107 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_CACHE
)] = typeTextDesignations
;
2110 csvLine
[table
.getIndex(WordClassificationExportTable
.TYPE_CACHE
)] = "";
2112 state
.getProcessor().put(table
, String
.valueOf(group
.getId()), csvLine
);
2113 } catch (Exception e
) {
2114 state
.getResult().addException(e
, "An unexpected error occurred when handling homotypic group "
2115 + cdmBaseStr(group
) + ": " + e
.getMessage());
2119 private String
createTypeDesignationString(List
<TaggedText
> list
, boolean isHomotypicGroup
, boolean isSpecimenTypeDesignation
) {
2120 StringBuffer homotypicalGroupTypeDesignationString
= new StringBuffer();
2122 for (TaggedText text
: list
) {
2123 if (text
== null || text
.getText() == null){
2124 continue; //just in case
2126 if ((text
.getText().equalsIgnoreCase("Type:") //should not happen anymore
2127 || text
.getText().equalsIgnoreCase("Nametype:") //should not happen anymore
2128 || (text
.getType().equals(TagEnum
.name
) && !isHomotypicGroup
))) {
2130 }else if (text
.getType().equals(TagEnum
.reference
)) {
2131 homotypicalGroupTypeDesignationString
.append(text
.getText());
2132 }else if (text
.getType().equals(TagEnum
.name
)){
2133 if (!isSpecimenTypeDesignation
){
2134 homotypicalGroupTypeDesignationString
2135 .append("<i>"+text
.getText()+"</i> ");
2137 }else if (text
.getType().equals(TagEnum
.typeDesignation
) ) {
2138 if(isSpecimenTypeDesignation
){
2139 homotypicalGroupTypeDesignationString
2140 .append(text
.getText().replace(").", "").replace("(", "").replace(")", ""));
2142 homotypicalGroupTypeDesignationString
2143 .append(text
.getText());
2147 homotypicalGroupTypeDesignationString
.append(text
.getText());
2151 String typeDesignations
= homotypicalGroupTypeDesignationString
.toString();
2152 typeDesignations
= typeDesignations
.trim();
2154 if (typeDesignations
.endsWith(";")){
2155 typeDesignations
= typeDesignations
.substring(0, typeDesignations
.length()-1);
2157 typeDesignations
+= ".";
2158 typeDesignations
= typeDesignations
.replace("..", ".");
2159 typeDesignations
= typeDesignations
.replace(". .", ".");
2160 typeDesignations
= typeDesignations
.replace("; \u2261", " \u2261 ");
2162 if (typeDesignations
.trim().equals(".")) {
2163 typeDesignations
= null;
2166 return typeDesignations
;
2169 private String
getTropicosTitleCache(WordClassificationExportState state
, TaxonName name
) {
2171 String basionymStart
= "(";
2172 String basionymEnd
= ") ";
2173 String exAuthorSeperator
= " ex ";
2174 TeamOrPersonBase
<?
> combinationAuthor
= name
.getCombinationAuthorship();
2175 TeamOrPersonBase
<?
> exCombinationAuthor
= name
.getExCombinationAuthorship();
2176 TeamOrPersonBase
<?
> basionymAuthor
= name
.getBasionymAuthorship();
2177 TeamOrPersonBase
<?
> exBasionymAuthor
= name
.getExBasionymAuthorship();
2179 String combinationAuthorString
= "";
2180 if (combinationAuthor
!= null) {
2181 combinationAuthor
= HibernateProxyHelper
.deproxy(combinationAuthor
);
2182 if (combinationAuthor
instanceof Team
) {
2183 combinationAuthorString
= createTropicosTeamTitle(combinationAuthor
);
2185 Person person
= HibernateProxyHelper
.deproxy(combinationAuthor
, Person
.class);
2186 combinationAuthorString
= createTropicosAuthorString(person
);
2189 String exCombinationAuthorString
= "";
2190 if (exCombinationAuthor
!= null) {
2191 exCombinationAuthor
= HibernateProxyHelper
.deproxy(exCombinationAuthor
);
2192 if (exCombinationAuthor
instanceof Team
) {
2193 exCombinationAuthorString
= createTropicosTeamTitle(exCombinationAuthor
);
2195 Person person
= HibernateProxyHelper
.deproxy(exCombinationAuthor
, Person
.class);
2196 exCombinationAuthorString
= createTropicosAuthorString(person
);
2200 String basionymAuthorString
= "";
2201 if (basionymAuthor
!= null) {
2202 basionymAuthor
= HibernateProxyHelper
.deproxy(basionymAuthor
);
2203 if (basionymAuthor
instanceof Team
) {
2204 basionymAuthorString
= createTropicosTeamTitle(basionymAuthor
);
2206 Person person
= HibernateProxyHelper
.deproxy(basionymAuthor
, Person
.class);
2207 basionymAuthorString
= createTropicosAuthorString(person
);
2211 String exBasionymAuthorString
= "";
2213 if (exBasionymAuthor
!= null) {
2214 exBasionymAuthor
= HibernateProxyHelper
.deproxy(exBasionymAuthor
);
2215 if (exBasionymAuthor
instanceof Team
) {
2216 exBasionymAuthorString
= createTropicosTeamTitle(exBasionymAuthor
);
2219 Person person
= HibernateProxyHelper
.deproxy(exBasionymAuthor
, Person
.class);
2220 exBasionymAuthorString
= createTropicosAuthorString(person
);
2223 String completeAuthorString
= name
.getNameCache() + " ";
2225 completeAuthorString
+= (!CdmUtils
.isBlank(exBasionymAuthorString
)
2226 || !CdmUtils
.isBlank(basionymAuthorString
)) ? basionymStart
: "";
2227 completeAuthorString
+= (!CdmUtils
.isBlank(exBasionymAuthorString
))
2228 ?
(CdmUtils
.Nz(exBasionymAuthorString
) + exAuthorSeperator
) : "";
2229 completeAuthorString
+= (!CdmUtils
.isBlank(basionymAuthorString
)) ? CdmUtils
.Nz(basionymAuthorString
) : "";
2230 completeAuthorString
+= (!CdmUtils
.isBlank(exBasionymAuthorString
)
2231 || !CdmUtils
.isBlank(basionymAuthorString
)) ? basionymEnd
: "";
2232 completeAuthorString
+= (!CdmUtils
.isBlank(exCombinationAuthorString
))
2233 ?
(CdmUtils
.Nz(exCombinationAuthorString
) + exAuthorSeperator
) : "";
2234 completeAuthorString
+= (!CdmUtils
.isBlank(combinationAuthorString
)) ? CdmUtils
.Nz(combinationAuthorString
)
2237 return completeAuthorString
;
2238 } catch (Exception e
) {
2239 state
.getResult().addException(e
, "An unexpected error occurred when handling tropicos title cache for "
2240 + cdmBaseStr(name
) + ": " + e
.getMessage());
2245 private String
createTropicosTeamTitle(TeamOrPersonBase
<?
> combinationAuthor
) {
2246 String combinationAuthorString
;
2247 Team team
= HibernateProxyHelper
.deproxy(combinationAuthor
, Team
.class);
2248 Team tempTeam
= Team
.NewInstance();
2249 for (Person teamMember
: team
.getTeamMembers()) {
2250 combinationAuthorString
= createTropicosAuthorString(teamMember
);
2251 Person tempPerson
= Person
.NewTitledInstance(combinationAuthorString
);
2252 tempTeam
.addTeamMember(tempPerson
);
2254 combinationAuthorString
= tempTeam
.generateTitle();
2255 return combinationAuthorString
;
2258 private String
createTropicosAuthorString(Person teamMember
) {
2259 String nomAuthorString
= "";
2260 String
[] splittedAuthorString
= null;
2261 if (teamMember
== null) {
2262 return nomAuthorString
;
2265 if (teamMember
.getGivenName() != null) {
2266 String givenNameString
= teamMember
.getGivenName().replaceAll("\\.", "\\. ");
2267 splittedAuthorString
= givenNameString
.split("\\s");
2268 for (String split
: splittedAuthorString
) {
2269 if (!StringUtils
.isBlank(split
)) {
2270 nomAuthorString
+= split
.substring(0, 1);
2271 nomAuthorString
+= ".";
2275 if (teamMember
.getFamilyName() != null) {
2276 String familyNameString
= teamMember
.getFamilyName().replaceAll("\\.", "\\. ");
2277 splittedAuthorString
= familyNameString
.split("\\s");
2278 for (String split
: splittedAuthorString
) {
2279 nomAuthorString
+= " " + split
;
2282 if (isBlank(nomAuthorString
.trim())) {
2283 if (teamMember
.getTitleCache() != null) {
2284 String titleCacheString
= teamMember
.getTitleCache().replaceAll("\\.", "\\. ");
2285 splittedAuthorString
= titleCacheString
.split("\\s");
2287 splittedAuthorString
= new String
[0];
2291 for (String split
: splittedAuthorString
) {
2292 if (index
< splittedAuthorString
.length
- 1 && (split
.length() == 1 || split
.endsWith("."))) {
2293 nomAuthorString
+= split
;
2295 nomAuthorString
= nomAuthorString
+ " " + split
;
2300 return nomAuthorString
.trim();
2303 private void handleReference(WordClassificationExportState state
, Reference reference
) {
2305 state
.addReferenceToStore(reference
);
2306 WordClassificationExportTable table
= WordClassificationExportTable
.REFERENCE
;
2307 reference
= HibernateProxyHelper
.deproxy(reference
);
2309 handleIdentifier(state
, reference
);
2310 String
[] csvLine
= new String
[table
.getSize()];
2311 csvLine
[table
.getIndex(WordClassificationExportTable
.REFERENCE_ID
)] = getId(state
, reference
);
2312 // TODO short citations correctly
2313 String shortCitation
= OriginalSourceFormatter
.INSTANCE_WITH_YEAR_BRACKETS
.format(reference
, null); // Should be Author(year) like in Taxon.sec
2314 csvLine
[table
.getIndex(WordClassificationExportTable
.BIBLIO_SHORT_CITATION
)] = shortCitation
;
2315 // TODO get preferred title
2316 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TITLE
)] = reference
.isProtectedTitleCache()
2317 ? reference
.getTitleCache() : reference
.getTitle();
2318 csvLine
[table
.getIndex(WordClassificationExportTable
.ABBREV_REF_TITLE
)] = reference
.isProtectedAbbrevTitleCache()
2319 ? reference
.getAbbrevTitleCache() : reference
.getAbbrevTitle();
2320 csvLine
[table
.getIndex(WordClassificationExportTable
.DATE_PUBLISHED
)] = reference
.getDatePublishedString();
2322 csvLine
[table
.getIndex(WordClassificationExportTable
.EDITION
)] = reference
.getEdition();
2323 csvLine
[table
.getIndex(WordClassificationExportTable
.EDITOR
)] = reference
.getEditor();
2324 csvLine
[table
.getIndex(WordClassificationExportTable
.ISBN
)] = reference
.getIsbn();
2325 csvLine
[table
.getIndex(WordClassificationExportTable
.ISSN
)] = reference
.getIssn();
2326 csvLine
[table
.getIndex(WordClassificationExportTable
.ORGANISATION
)] = reference
.getOrganization();
2327 csvLine
[table
.getIndex(WordClassificationExportTable
.PAGES
)] = reference
.getPages();
2328 csvLine
[table
.getIndex(WordClassificationExportTable
.PLACE_PUBLISHED
)] = reference
.getPlacePublished();
2329 csvLine
[table
.getIndex(WordClassificationExportTable
.PUBLISHER
)] = reference
.getPublisher();
2330 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_ABSTRACT
)] = reference
.getReferenceAbstract();
2331 csvLine
[table
.getIndex(WordClassificationExportTable
.SERIES_PART
)] = reference
.getSeriesPart();
2332 csvLine
[table
.getIndex(WordClassificationExportTable
.VOLUME
)] = reference
.getVolume();
2333 csvLine
[table
.getIndex(WordClassificationExportTable
.YEAR
)] = reference
.getYear();
2335 if (reference
.getAuthorship() != null) {
2336 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHORSHIP_TITLE
)] = createFullAuthorship(reference
);
2337 csvLine
[table
.getIndex(WordClassificationExportTable
.AUTHOR_FK
)] = getId(state
, reference
.getAuthorship());
2340 csvLine
[table
.getIndex(WordClassificationExportTable
.IN_REFERENCE
)] = getId(state
, reference
.getInReference());
2341 if (reference
.getInReference() != null
2342 && !state
.getReferenceStore().contains(reference
.getInReference().getUuid())) {
2343 handleReference(state
, reference
.getInReference());
2345 if (reference
.getInstitution() != null) {
2346 csvLine
[table
.getIndex(WordClassificationExportTable
.INSTITUTION
)] = reference
.getInstitution().getTitleCache();
2348 if (reference
.getLsid() != null) {
2349 csvLine
[table
.getIndex(WordClassificationExportTable
.LSID
)] = reference
.getLsid().getLsid();
2351 if (reference
.getSchool() != null) {
2352 csvLine
[table
.getIndex(WordClassificationExportTable
.SCHOOL
)] = reference
.getSchool().getTitleCache();
2354 if (reference
.getUri() != null) {
2355 csvLine
[table
.getIndex(WordClassificationExportTable
.URI
)] = reference
.getUri().toString();
2357 csvLine
[table
.getIndex(WordClassificationExportTable
.REF_TYPE
)] = reference
.getType().getKey();
2359 state
.getProcessor().put(table
, reference
, csvLine
);
2360 } catch (Exception e
) {
2361 state
.getResult().addException(e
, "An unexpected error occurred when handling reference "
2362 + cdmBaseStr(reference
) + ": " + e
.getMessage());
2366 private String
createFullAuthorship(Reference reference
) {
2367 TeamOrPersonBase
<?
> authorship
= reference
.getAuthorship();
2368 String fullAuthorship
= "";
2369 if (authorship
== null) {
2372 authorship
= HibernateProxyHelper
.deproxy(authorship
);
2373 if (authorship
instanceof Person
) {
2374 fullAuthorship
= ((Person
) authorship
).getTitleCache();
2376 } else if (authorship
instanceof Team
) {
2378 Team authorTeam
= (Team
)authorship
;
2379 fullAuthorship
= authorTeam
.cacheStrategy().getTitleCache(authorTeam
);
2381 return fullAuthorship
;
2384 private void handleSpecimen(WordClassificationExportState state
, SpecimenOrObservationBase
<?
> specimen
) {
2386 state
.addSpecimenToStore(specimen
);
2387 WordClassificationExportTable table
= WordClassificationExportTable
.SPECIMEN
;
2388 String specimenId
= getId(state
, specimen
);
2389 String
[] csvLine
= new String
[table
.getSize()];
2392 * SpecimenCitation = “El Salvador, Municipio La Libertad, San
2393 * Diego, El Amatal, 14.4.1993, González 159” [Auch ohne Punkt] ->
2394 * FieldUnit TitleCache HerbariumAbbrev = “B” [wie gehabt]
2399 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_ID
)] = specimenId
;
2400 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_CITATION
)] = specimen
.getTitleCache();
2401 Collection
<FieldUnit
> fieldUnits
= this.getOccurrenceService().findFieldUnits(specimen
.getUuid(), null);
2402 if (fieldUnits
.size() == 1) {
2403 Iterator
<FieldUnit
> iterator
= fieldUnits
.iterator();
2404 if (iterator
.hasNext()){
2405 FieldUnit fieldUnit
= iterator
.next();
2406 csvLine
[table
.getIndex(WordClassificationExportTable
.FIELDUNIT_CITATION
)] = fieldUnit
.getTitleCache();
2409 if (specimen
.isInstanceOf(DerivedUnit
.class)){
2410 DerivedUnit derivedUnit
= (DerivedUnit
) specimen
;
2411 if (!StringUtils
.isBlank(derivedUnit
.getBarcode())){
2412 csvLine
[table
.getIndex(WordClassificationExportTable
.BARCODE
)] = derivedUnit
.getBarcode();
2414 if (!StringUtils
.isBlank(derivedUnit
.getAccessionNumber())){
2415 csvLine
[table
.getIndex(WordClassificationExportTable
.ACCESSION_NUMBER
)] = derivedUnit
.getAccessionNumber();
2417 if (!StringUtils
.isBlank(derivedUnit
.getCatalogNumber())){
2418 csvLine
[table
.getIndex(WordClassificationExportTable
.CATALOGUE_NUMBER
)] = derivedUnit
.getCatalogNumber();
2422 csvLine
[table
.getIndex(WordClassificationExportTable
.PREFERREDSTABLE_ID
)] = specimen
.getPreferredStableUri() != null? specimen
.getPreferredStableUri().toString(): null;
2423 csvLine
[table
.getIndex(WordClassificationExportTable
.SPECIMEN_IMAGE_URIS
)] = extractMediaURIs(state
,
2424 specimen
.getDescriptions(), Feature
.IMAGE());
2425 if (specimen
instanceof DerivedUnit
) {
2426 DerivedUnit derivedUnit
= HibernateProxyHelper
.deproxy(specimen
, DerivedUnit
.class);
2427 if (derivedUnit
.getCollection() != null) {
2428 csvLine
[table
.getIndex(WordClassificationExportTable
.HERBARIUM_ABBREV
)] = derivedUnit
.getCollection()
2432 if (specimen
instanceof MediaSpecimen
) {
2433 MediaSpecimen mediaSpecimen
= (MediaSpecimen
) specimen
;
2434 Iterator
<MediaRepresentation
> it
= mediaSpecimen
.getMediaSpecimen().getRepresentations().iterator();
2435 String mediaUris
= extractMediaUris(it
);
2436 csvLine
[table
.getIndex(WordClassificationExportTable
.MEDIA_SPECIMEN_URL
)] = mediaUris
;
2440 if (derivedUnit
.getDerivedFrom() != null) {
2441 for (SpecimenOrObservationBase
<?
> original
: derivedUnit
.getDerivedFrom().getOriginals()) {
2442 // TODO: What to do if there are more then one
2444 if (original
instanceof FieldUnit
) {
2445 FieldUnit fieldUnit
= (FieldUnit
) original
;
2446 csvLine
[table
.getIndex(WordClassificationExportTable
.COLLECTOR_NUMBER
)] = fieldUnit
.getFieldNumber();
2448 GatheringEvent gathering
= fieldUnit
.getGatheringEvent();
2449 if (gathering
!= null) {
2450 if (gathering
.getLocality() != null) {
2451 csvLine
[table
.getIndex(WordClassificationExportTable
.LOCALITY
)] = gathering
.getLocality()
2454 if (gathering
.getCountry() != null) {
2455 csvLine
[table
.getIndex(WordClassificationExportTable
.COUNTRY
)] = gathering
.getCountry()
2458 csvLine
[table
.getIndex(WordClassificationExportTable
.COLLECTOR_STRING
)] = createCollectorString(
2459 state
, gathering
, fieldUnit
);
2461 if (gathering
.getGatheringDate() != null) {
2462 csvLine
[table
.getIndex(WordClassificationExportTable
.COLLECTION_DATE
)] = gathering
2463 .getGatheringDate().toString();
2465 if (!gathering
.getCollectingAreas().isEmpty()) {
2467 csvLine
[table
.getIndex(WordClassificationExportTable
.FURTHER_AREAS
)] = "0";
2468 for (NamedArea area
: gathering
.getCollectingAreas()) {
2470 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_CATEGORY1
)] = area
.getLevel() != null?area
2471 .getLevel().getLabel():"";
2472 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_NAME1
)] = area
.getLabel();
2475 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_CATEGORY2
)] = area
.getLevel() != null?area
2476 .getLevel().getLabel():"";
2477 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_NAME2
)] = area
.getLabel();
2480 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_CATEGORY3
)] = area
.getLevel() != null?area
2481 .getLevel().getLabel():"";
2482 csvLine
[table
.getIndex(WordClassificationExportTable
.AREA_NAME3
)] = area
.getLabel();
2485 csvLine
[table
.getIndex(WordClassificationExportTable
.FURTHER_AREAS
)] = "1";
2495 state
.getResult().addWarning("The specimen with uuid " + specimen
.getUuid()
2496 + " is not an DerivedUnit.");
2500 state
.getProcessor().put(table
, specimen
, csvLine
);
2501 } catch (Exception e
) {
2502 state
.getResult().addException(e
, "An unexpected error occurred when handling specimen "
2503 + cdmBaseStr(specimen
) + ": " + e
.getMessage());
2507 private String
extractMediaUris(Iterator
<MediaRepresentation
> it
) {
2509 String mediaUriString
= "";
2510 boolean first
= true;
2511 while (it
.hasNext()) {
2512 MediaRepresentation rep
= it
.next();
2513 List
<MediaRepresentationPart
> parts
= rep
.getParts();
2514 for (MediaRepresentationPart part
: parts
) {
2516 if (part
.getUri() != null) {
2517 mediaUriString
+= part
.getUri().toString();
2521 if (part
.getUri() != null) {
2522 mediaUriString
+= ", " + part
.getUri().toString();
2528 return mediaUriString
;
2531 private String
extractLinkUris(Iterator
<ExternalLink
> it
) {
2533 String linkUriString
= "";
2534 boolean first
= true;
2535 while (it
.hasNext()) {
2536 ExternalLink link
= it
.next();
2538 if (link
.getUri() != null) {
2539 linkUriString
+= link
.getUri().toString();
2543 if (link
.getUri() != null) {
2544 linkUriString
+= ", " + link
.getUri().toString();
2548 return linkUriString
;
2551 private String
createCollectorString(WordClassificationExportState state
, GatheringEvent gathering
, FieldUnit fieldUnit
) {
2553 String collectorString
= "";
2554 AgentBase
<?
> collectorA
= CdmBase
.deproxy(gathering
.getCollector());
2555 if (gathering
.getCollector() != null) {
2556 if (collectorA
instanceof TeamOrPersonBase
&& state
.getConfig().isHighLightPrimaryCollector()) {
2558 Person primaryCollector
= fieldUnit
.getPrimaryCollector();
2559 if (collectorA
instanceof Team
) {
2560 Team collectorTeam
= (Team
) collectorA
;
2561 boolean isFirst
= true;
2562 for (Person member
: collectorTeam
.getTeamMembers()) {
2564 collectorString
+= "; ";
2566 if (member
.equals(primaryCollector
)) {
2568 collectorString
+= "<b>" + member
.getTitleCache() + "</b>";
2570 collectorString
+= member
.getTitleCache();
2575 collectorString
= collectorA
.getTitleCache();
2578 return collectorString
;
2579 } catch (Exception e
) {
2580 state
.getResult().addException(e
, "An unexpected error occurred when creating collector string for "
2581 + cdmBaseStr(fieldUnit
) + ": " + e
.getMessage());
2587 * Returns a string representation of the {@link CdmBase cdmBase} object for
2590 private String
cdmBaseStr(CdmBase cdmBase
) {
2591 if (cdmBase
== null) {
2592 return "-no object available-";
2594 return cdmBase
.getClass().getSimpleName() + ": " + cdmBase
.getUuid();
2599 protected boolean doCheck(WordClassificationExportState state
) {
2604 protected boolean isIgnore(WordClassificationExportState state
) {