// $Id$
/**
 * Copyright (C) 2009 EDIT
 * European Distributed Institute of Taxonomy
 * http://www.e-taxonomy.eu
 *
 * The contents of this file are subject to the Mozilla Public License Version 1.1
 * See LICENSE.TXT at the top of this package for the full license terms.
 */
/**
 * NOTE(review): the opening lines of this comment were lost in a bad merge/paste;
 * reconstructed the enclosing javadoc, original content kept verbatim.
 * <ul>
 * <li>http://www.niso.org/kst/reports/standards?step=2&gid=&project_key=
 * d5320409c5160be4697dc046613f71b9a773cd9e</li>
 * </ul>
 *
 * @author a.kohlbecker
 * @date 24.08.2010
 *
 */
public class MobotOpenUrlServiceWrapper extends ServiceWrapperBase<OpenUrlReference> {\r
\r
- private String urlVersion = "Z39.88-2004";\r
-\r
- public MobotOpenUrlServiceWrapper(){\r
- addSchemaAdapter(new MobotOpenUrlResponseSchemaAdapter());\r
- }\r
-\r
- /**\r
- * BHL uses the response format as specified in the\r
- * http://code.google.com/p/\r
- * bhl-bits/source/browse/trunk/portal/OpenUrlUtilities\r
- * /OpenUrlResponse.cs?r=17 there seems to be no xml schema available\r
- * though.\r
- * @param query the MobotOpenUrlQuery object\r
- * @return\r
- */\r
- public List<OpenUrlReference> doResolve(MobotOpenUrlQuery query) {\r
-\r
- List<NameValuePair> pairs = new ArrayList<NameValuePair>();\r
-\r
- // find the appropriate schemadapter using the schemaShortName\r
- if(query.schemaShortName == null){\r
- query.schemaShortName = "MOBOT.OpenUrl.Utilities.OpenUrlResponse";\r
- }\r
- SchemaAdapterBase<OpenUrlReference> schemaAdapter = schemaAdapterMap.get(query.schemaShortName);\r
- if (schemaAdapter == null) {\r
- logger.error("No SchemaAdapter found for " + query.schemaShortName);\r
- }\r
-\r
- addNewPairNN(pairs, "format", "xml");\r
- addNewPairNN(pairs, "url_ver", urlVersion);\r
- /* info:ofi/fmt:kev:mtx:book or info:ofi/fmt:kev:mtx:journal */\r
- addNewPairNN(pairs, "rft_val_fmt", "info:ofi/fmt:kev:mtx:" + query.refType);\r
- /* Book title */\r
- addNewPairNN(pairs, "rft.btitle", query.bookTitle);\r
- /* Journal title */\r
- addNewPairNN(pairs, "rft.jtitle", query.journalTitle);\r
- /* Author name ("last, first" or "corporation") */\r
- addNewPairNN(pairs, "rft.au", query.authorName);\r
- /* Author last name */\r
- addNewPairNN(pairs, "rft.aulast", query.authorLastName);\r
- /* Author first name */\r
- addNewPairNN(pairs, "rft.aufirst", query.authorFirstName);\r
- /* Author name (corporation) */\r
- addNewPairNN(pairs, "rft.aucorp", query.authorNameCorporation);\r
- /* Publication details */\r
- addNewPairNN(pairs, "rft.publisher", query.publicationDetails);\r
- /* Publisher name */\r
- addNewPairNN(pairs, "rft.pub", query.publisherName);\r
- /* Publication place */\r
- addNewPairNN(pairs, "rft.place", query.publicationPlace);\r
- /* Publication date (YYYY or YYYY-MM or YYYY-MM-DD) */\r
- addNewPairNN(pairs, "rft.date", query.publicationDate);\r
- /* ISSN */\r
- addNewPairNN(pairs, "rft.issn", query.ISSN);\r
- /* ISBN */\r
- addNewPairNN(pairs, "rft.isbn", query.ISBN);\r
- /* CODEN */\r
- addNewPairNN(pairs, "rft.coden", query.CODEN);\r
- /* Abbreviation = abbreviated Title */\r
- addNewPairNN(pairs, "rft.stitle", query.abbreviation);\r
- /* Volume */\r
- addNewPairNN(pairs, "rft.volume", query.volume);\r
- /* Issue */\r
- addNewPairNN(pairs, "rft.issue", query.issue);\r
- /* Start page */\r
- if(query.startPage != null){\r
- Integer page = parsePageNumber(query.startPage); \r
- addNewPairNN(pairs, "rft.spage", page.toString()); \r
- }\r
- /* BHL title ID (where XXXX is the ID value)*/ \r
- addNewPairNN(pairs, "rft_id" , query.bhlTitleURI);\r
- /* BHL page ID (where XXXX is the ID value)*/\r
- addNewPairNN(pairs, "rft_id", query.bhlPageURI);\r
- \r
- /* OCLC number (where XXXX is the ID value)*/ \r
- if(query.oclcNumber != null){\r
- pairs.add(new BasicNameValuePair("rft_id", "info:oclcnum/" +query.oclcNumber)); \r
- }\r
- /* Lib. of Congress ID (where XXXX is the ID value)*/ \r
- if(query.libofCongressID != null){\r
- pairs.add(new BasicNameValuePair("rft_id", "info:lccn/" +query.libofCongressID)); \r
- }\r
- \r
- Map<String, String> requestHeaders = new HashMap<String, String>();\r
- requestHeaders.put("Accept-Charset", "UTF-8"); \r
-\r
- try {\r
- URI requestUri = createUri(null, pairs);\r
-\r
- InputStream stream = executeHttpGet(requestUri, requestHeaders);\r
+ private String urlVersion = "Z39.88-2004";\r
+\r
+ public MobotOpenUrlServiceWrapper(){\r
+ addSchemaAdapter(new MobotOpenUrlResponseSchemaAdapter());\r
+ }\r
+\r
+ /**\r
+ * BHL uses the response format as specified in the\r
+ * http://code.google.com/p/\r
+ * bhl-bits/source/browse/trunk/portal/OpenUrlUtilities\r
+ * /OpenUrlResponse.cs?r=17 there seems to be no xml schema available\r
+ * though.\r
+ * @param query the MobotOpenUrlQuery object\r
+ * @return\r
+ */\r
+ public List<OpenUrlReference> doResolve(MobotOpenUrlQuery query) {\r
+\r
+ List<NameValuePair> pairs = new ArrayList<NameValuePair>();\r
+\r
+ // find the appropriate schemadapter using the schemaShortName\r
+ if(query.schemaShortName == null){\r
+ query.schemaShortName = "MOBOT.OpenUrl.Utilities.OpenUrlResponse";\r
+ }\r
+ SchemaAdapterBase<OpenUrlReference> schemaAdapter = schemaAdapterMap.get(query.schemaShortName);\r
+ if (schemaAdapter == null) {\r
+ logger.error("No SchemaAdapter found for " + query.schemaShortName);\r
+ }\r
+\r
+ addNewPairNN(pairs, "format", "xml");\r
+ addNewPairNN(pairs, "url_ver", urlVersion);\r
+ /* info:ofi/fmt:kev:mtx:book or info:ofi/fmt:kev:mtx:journal */\r
+ addNewPairNN(pairs, "rft_val_fmt", "info:ofi/fmt:kev:mtx:" + query.refType);\r
+ /* Book title */\r
+ addNewPairNN(pairs, "rft.btitle", query.bookTitle);\r
+ /* Journal title */\r
+ addNewPairNN(pairs, "rft.jtitle", query.journalTitle);\r
+ /* Author name ("last, first" or "corporation") */\r
+ addNewPairNN(pairs, "rft.au", query.authorName);\r
+ /* Author last name */\r
+ addNewPairNN(pairs, "rft.aulast", query.authorLastName);\r
+ /* Author first name */\r
+ addNewPairNN(pairs, "rft.aufirst", query.authorFirstName);\r
+ /* Author name (corporation) */\r
+ addNewPairNN(pairs, "rft.aucorp", query.authorNameCorporation);\r
+ /* Publication details */\r
+ addNewPairNN(pairs, "rft.publisher", query.publicationDetails);\r
+ /* Publisher name */\r
+ addNewPairNN(pairs, "rft.pub", query.publisherName);\r
+ /* Publication place */\r
+ addNewPairNN(pairs, "rft.place", query.publicationPlace);\r
+ /* Publication date (YYYY or YYYY-MM or YYYY-MM-DD) */\r
+ addNewPairNN(pairs, "rft.date", query.publicationDate);\r
+ /* ISSN */\r
+ addNewPairNN(pairs, "rft.issn", query.ISSN);\r
+ /* ISBN */\r
+ addNewPairNN(pairs, "rft.isbn", query.ISBN);\r
+ /* CODEN */\r
+ addNewPairNN(pairs, "rft.coden", query.CODEN);\r
+ /* Abbreviation = abbreviated Title */\r
+ addNewPairNN(pairs, "rft.stitle", query.abbreviation);\r
+ /* Volume */\r
+ addNewPairNN(pairs, "rft.volume", query.volume);\r
+ /* Issue */\r
+ addNewPairNN(pairs, "rft.issue", query.issue);\r
+ /* Start page */\r
+ if(query.startPage != null){\r
+ Integer page = parsePageNumber(query.startPage);\r
+ addNewPairNN(pairs, "rft.spage", page.toString());\r
+ }\r
+ /* BHL title ID (where XXXX is the ID value)*/\r
+ addNewPairNN(pairs, "rft_id" , query.bhlTitleURI);\r
+ /* BHL page ID (where XXXX is the ID value)*/\r
+ addNewPairNN(pairs, "rft_id", query.bhlPageURI);\r
+\r
+ /* OCLC number (where XXXX is the ID value)*/\r
+ if(query.oclcNumber != null){\r
+ pairs.add(new BasicNameValuePair("rft_id", "info:oclcnum/" +query.oclcNumber));\r
+ }\r
+ /* Lib. of Congress ID (where XXXX is the ID value)*/\r
+ if(query.libofCongressID != null){\r
+ pairs.add(new BasicNameValuePair("rft_id", "info:lccn/" +query.libofCongressID));\r
+ }\r
+\r
+ Map<String, String> requestHeaders = new HashMap<String, String>();\r
+ requestHeaders.put("Accept-Charset", "UTF-8");\r
+\r
+ try {\r
+ URI requestUri = createUri(null, pairs);\r
+\r
+ InputStream stream = executeHttpGet(requestUri, requestHeaders);\r
// String search = "utf-16";\r
// String replace = "UTF-8";\r
//// stream = StreamUtils.streamReplace(stream, search, replace);\r
- // fix the "org.xml.sax.SAXParseException: An invalid XML character (Unicode: 0x1) was found" problem\r
+ // fix the "org.xml.sax.SAXParseException: An invalid XML character (Unicode: 0x1) was found" problem\r
// stream = StreamUtils.streamReplaceAll(stream, "[\\x00-\\x10]", " ");\r
- \r
- List<OpenUrlReference> referenceList = schemaAdapter.getCmdEntities(stream);\r
- // TODO : we need to set ReferenceType here unless we know that the field Genre returns the reference type\r
- for(OpenUrlReference ref : referenceList){\r
- ref.setReferenceType(query.refType);\r
- }\r
- return referenceList;\r
-\r
- } catch (IOException e) {\r
- // thrown by doHttpGet\r
- logger.error(e);\r
- } catch (URISyntaxException e) {\r
- // thrown by createUri\r
- logger.error(e);\r
- } catch (HttpException e) {\r
- // thrown by executeHttpGet\r
- logger.error(e);\r
- }\r
-\r
- return null;\r
-\r
- }\r
-\r
- private Integer parsePageNumber(String startPage) {\r
- String pageNumbers = startPage.replaceAll("(?i)page|pages|p|p\\.|pp\\.|pp", "");\r
- String[] pageNumbersTokens = pageNumbers.split("[,-]", 1);\r
- Integer page = null;\r
- try {\r
- if(pageNumbersTokens[0] != null){\r
- pageNumbersTokens[0] = pageNumbersTokens[0].trim();\r
- } else {\r
- throw new NumberFormatException();\r
- }\r
- page = Integer.valueOf(pageNumbersTokens[0]);\r
- } catch (NumberFormatException e) {\r
- logger.warn("First page number token of " + startPage + " is not a Number", e);\r
- throw e;\r
- }\r
- return page;\r
- }\r
- \r
- \r
- public List<OpenUrlReference> doPage(OpenUrlReference reference, int forward) throws IllegalArgumentException{\r
- \r
- Integer pageNumber = null;\r
- try{\r
- if(reference.getPages() != null){\r
- pageNumber = parsePageNumber(reference.getPages());\r
- }\r
- }catch(NumberFormatException e){\r
- String errorMessage = "Reference has no page number or the field 'pages' is not parsable";\r
- logger.warn(errorMessage);\r
- throw new IllegalArgumentException(errorMessage);\r
- }\r
- \r
- MobotOpenUrlQuery query = new MobotOpenUrlQuery();\r
- query.bhlTitleURI = reference.getTitleUri();\r
- pageNumber += forward;\r
- query.startPage = pageNumber.toString();\r
- query.refType = reference.getReferenceType();\r
- return doResolve(query);\r
- }\r
- \r
- public enum ReferenceType{\r
- book, journal;\r
- \r
- public static ReferenceType getReferenceType(Reference reference){\r
- if(eu.etaxonomy.cdm.model.reference.ReferenceType.Book.equals(reference.getType())){\r
- return book;\r
- }\r
- else if(eu.etaxonomy.cdm.model.reference.ReferenceType.Journal.equals(reference.getType())){\r
- return journal;\r
- }\r
- else {\r
- return null;\r
- }\r
- }\r
- }\r
+\r
+ List<OpenUrlReference> referenceList = schemaAdapter.getCmdEntities(stream);\r
+ // TODO : we need to set ReferenceType here unless we know that the field Genre returns the reference type\r
+ for(OpenUrlReference ref : referenceList){\r
+ ref.setReferenceType(query.refType);\r
+ }\r
+ return referenceList;\r
+\r
+ } catch (IOException e) {\r
+ // thrown by doHttpGet\r
+ logger.error(e);\r
+ } catch (URISyntaxException e) {\r
+ // thrown by createUri\r
+ logger.error(e);\r
+ } catch (HttpException e) {\r
+ // thrown by executeHttpGet\r
+ logger.error(e);\r
+ }\r
+\r
+ return null;\r
+\r
+ }\r
+\r
+ private Integer parsePageNumber(String startPage) {\r
+ String pageNumbers = startPage.replaceAll("(?i)page|pages|p|p\\.|pp\\.|pp", "");\r
+ String[] pageNumbersTokens = pageNumbers.split("[,-]", 1);\r
+ Integer page = null;\r
+ try {\r
+ if(pageNumbersTokens[0] != null){\r
+ pageNumbersTokens[0] = pageNumbersTokens[0].trim();\r
+ } else {\r
+ throw new NumberFormatException();\r
+ }\r
+ page = Integer.valueOf(pageNumbersTokens[0]);\r
+ } catch (NumberFormatException e) {\r
+ logger.warn("First page number token of " + startPage + " is not a Number", e);\r
+ throw e;\r
+ }\r
+ return page;\r
+ }\r
+\r
+\r
+ /**\r
+ * @param reference\r
+ * the OpenUrlReference instance as a starting point for paging.\r
+ * @param forward\r
+ * integer indicating the number of pages to page forward. An\r
+ * negative integer will page backwards\r
+ * @return\r
+ * @throws IllegalArgumentException\r
+ * if the requested page number is not existent or if the field\r
+ * or if OpenUrlReference.pages is not parsable\r
+ */\r
+ public List<OpenUrlReference> doPage(OpenUrlReference reference, int forward) throws IllegalArgumentException{\r
+\r
+ Integer pageNumber = null;\r
+ try{\r
+ if(reference.getPages() != null){\r
+ pageNumber = parsePageNumber(reference.getPages());\r
+ }\r
+ }catch(NumberFormatException e){\r
+ String errorMessage = "Reference has no page number or the field 'pages' is not parsable";\r
+ logger.warn(errorMessage);\r
+ throw new IllegalArgumentException(errorMessage);\r
+ }\r
+\r
+ MobotOpenUrlQuery query = new MobotOpenUrlQuery();\r
+ query.bhlTitleURI = reference.getTitleUri();\r
+ pageNumber += forward;\r
+ query.startPage = pageNumber.toString();\r
+ query.refType = reference.getReferenceType();\r
+ return doResolve(query);\r
+ }\r
+\r
+ public enum ReferenceType{\r
+ book, journal;\r
+\r
+ public static ReferenceType getReferenceType(Reference reference){\r
+ if(eu.etaxonomy.cdm.model.reference.ReferenceType.Book.equals(reference.getType())){\r
+ return book;\r
+ }\r
+ else if(eu.etaxonomy.cdm.model.reference.ReferenceType.Journal.equals(reference.getType())){\r
+ return journal;\r
+ }\r
+ else {\r
+ return null;\r
+ }\r
+ }\r
+ }\r
\r
}\r
/**
 * NOTE(review): opening lines of this class comment were lost in a bad
 * merge/paste; repaired into a well-formed javadoc.
 */
public class SruServiceWrapperTest {\r
- public static final Logger logger = Logger.getLogger(SruServiceWrapperTest.class);\r
+ public static final Logger logger = Logger.getLogger(SruServiceWrapperTest.class);\r
\r
- private static final String baseUrl = "http://gso.gbv.de/sru/DB=1.83/";\r
+ private static final String baseUrl = "http://gso.gbv.de/sru/DB=1.83/";\r
\r
\r
- private SruServiceWrapper sruServiceWrapper;\r
+ private SruServiceWrapper sruServiceWrapper;\r
\r
- private static boolean internetIsAvailable = true;\r
+ private static boolean internetIsAvailable = true;\r
\r
- @BeforeClass\r
- public static void setUpClass() throws Exception {\r
- internetIsAvailable = true;\r
- }\r
+ @BeforeClass\r
+ public static void setUpClass() throws Exception {\r
+ internetIsAvailable = true;\r
+ }\r
\r
- /**\r
- * @throws java.lang.Exception\r
- */\r
- @Before\r
- public void setUp() throws Exception {\r
- sruServiceWrapper = new SruServiceWrapper();\r
- sruServiceWrapper.setBaseUrl(baseUrl);\r
- sruServiceWrapper.addSchemaAdapter(new DublinCoreSchemaAdapter());\r
- }\r
+ /**\r
+ * @throws java.lang.Exception\r
+ */\r
+ @Before\r
+ public void setUp() throws Exception {\r
+ sruServiceWrapper = new SruServiceWrapper();\r
+ sruServiceWrapper.setBaseUrl(baseUrl);\r
+ sruServiceWrapper.addSchemaAdapter(new DublinCoreSchemaAdapter());\r
+ }\r
\r
// ******************************* TESTS ******************************************************/\r
\r
- @Test\r
- @Ignore // ignoring since Global References Index to Biodiversity has problems\r
- public void testDoSearchRetrieve(){\r
+ @Test\r
+ @Ignore // ignoring since Global References Index to Biodiversity has problems\r
+ public void testDoSearchRetrieve(){\r
\r
- List<Reference> refList_1 = sruServiceWrapper.doSearchRetrieve("pica.tit=\"Linnaei Species Plantarum\"", "dc");\r
- // -> http://gso.gbv.de/sru/DB=2.1/?version=1.1&operation=searchRetrieve&query=pica.tit%3D%22Species+Plantarum%22&recordSchema=dc\r
+ List<Reference> refList_1 = sruServiceWrapper.doSearchRetrieve("pica.tit=\"Linnaei Species Plantarum\"", "dc");\r
+ // -> http://gso.gbv.de/sru/DB=2.1/?version=1.1&operation=searchRetrieve&query=pica.tit%3D%22Species+Plantarum%22&recordSchema=dc\r
\r
- if (testInternetConnectivity(refList_1)){\r
+ if (testInternetConnectivity(refList_1)){\r
\r
- Assert.assertEquals("There should be exactly 5 results for 'Linnaei Species Plantarum'", 5, refList_1.size());\r
- Reference reference_1 = refList_1.get(0);\r
- logger.info(reference_1.toString());\r
- //title cache\r
- Assert.assertEquals("Title of first entry should be 'Caroli Linnaei species plantarum'", "Caroli Linnaei species plantarum", reference_1.getTitleCache());\r
+ Assert.assertEquals("There should be exactly 5 results for 'Linnaei Species Plantarum'", 5, refList_1.size());\r
+ Reference reference_1 = refList_1.get(0);\r
+ logger.info(reference_1.toString());\r
+ //title cache\r
+ Assert.assertEquals("Title of first entry should be 'Caroli Linnaei species plantarum'", "Caroli Linnaei species plantarum", reference_1.getTitleCache());\r
\r
- //--------------------------\r
+ //--------------------------\r
\r
- List<Reference> refList_2 = sruServiceWrapper.doSearchRetrieve("pica.all = \"Species+plantarum\" and pica.dst = \"8305\"", "dc");\r
- // -> http://gso.gbv.de/sru/DB=2.1/?version=1.1&operation=searchRetrieve&query=pica.tit%3D%22Species+Plantarum%22&recordSchema=dc\r
+ List<Reference> refList_2 = sruServiceWrapper.doSearchRetrieve("pica.all = \"Species+plantarum\" and pica.dst = \"8305\"", "dc");\r
+ // -> http://gso.gbv.de/sru/DB=2.1/?version=1.1&operation=searchRetrieve&query=pica.tit%3D%22Species+Plantarum%22&recordSchema=dc\r
\r
- Assert.assertTrue("There should be at least 1 result for 'species+plantarum' and digitized", refList_2.size() > 0);\r
- Reference reference_2 = refList_2.get(0);\r
- logger.info(reference_2.toString());\r
- }\r
- }\r
+ Assert.assertTrue("There should be at least 1 result for 'species+plantarum' and digitized", refList_2.size() > 0);\r
+ Reference reference_2 = refList_2.get(0);\r
+ logger.info(reference_2.toString());\r
+ }\r
+ }\r
\r
- private boolean testInternetConnectivity(List<?> list) {\r
- if (list == null || list.isEmpty()){\r
- boolean result = internetIsAvailable && UriUtils.isInternetAvailable(null);\r
- internetIsAvailable = result;\r
- return result;\r
+ private boolean testInternetConnectivity(List<?> list) {\r
+ if (list == null || list.isEmpty()){\r
+ boolean result = internetIsAvailable && UriUtils.isInternetAvailable(null);\r
+ internetIsAvailable = result;\r
+ return result;\r
\r
- }\r
- return true;\r
- }\r
+ }\r
+ return true;\r
+ }\r
\r
\r
}\r