diff --git a/conf/solr/schema.xml b/conf/solr/schema.xml
index 5dde750573d..b323d0c74af 100644
--- a/conf/solr/schema.xml
+++ b/conf/solr/schema.xml
@@ -205,6 +205,7 @@
+    <field name="datasetType" type="string" stored="true" indexed="true" multiValued="false"/>
@@ -426,6 +427,7 @@
+    <copyField source="datasetType" dest="_text_" maxChars="3000"/>
diff --git a/doc/release-notes/10517-datasetType.md b/doc/release-notes/10517-datasetType.md
new file mode 100644
index 00000000000..2e3aff940c7
--- /dev/null
+++ b/doc/release-notes/10517-datasetType.md
@@ -0,0 +1,10 @@
+### Initial Support for Dataset Types
+
+Out of the box, all datasets have the type "dataset", but superusers can add additional types. At this time, the type can only be set at creation time via API. The types "dataset", "software", and "workflow" will be sent to DataCite when the dataset is published.
+
+For details, see #10517. Please note that this feature is highly experimental and is expected to evolve.
+
+Upgrade instructions
+--------------------
+
+Update your Solr schema.xml file to pick up the "datasetType" additions and do a full reindex.
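+
+After updating schema.xml, a full reindex can be kicked off via the in-place reindex API (a sketch, assuming a standard installation):
+
+```shell
+curl http://localhost:8080/api/admin/index
+```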
diff --git a/doc/sphinx-guides/source/_static/api/dataset-create-software.json b/doc/sphinx-guides/source/_static/api/dataset-create-software.json
new file mode 100644
index 00000000000..4c649bff0aa
--- /dev/null
+++ b/doc/sphinx-guides/source/_static/api/dataset-create-software.json
@@ -0,0 +1,82 @@
+{
+ "datasetType": "software",
+ "datasetVersion": {
+ "license": {
+ "name": "CC0 1.0",
+ "uri": "http://creativecommons.org/publicdomain/zero/1.0"
+ },
+ "metadataBlocks": {
+ "citation": {
+ "fields": [
+ {
+ "value": "pyDataverse",
+ "typeClass": "primitive",
+ "multiple": false,
+ "typeName": "title"
+ },
+ {
+ "value": [
+ {
+ "authorName": {
+ "value": "Range, Jan",
+ "typeClass": "primitive",
+ "multiple": false,
+ "typeName": "authorName"
+ },
+ "authorAffiliation": {
+ "value": "University of Stuttgart",
+ "typeClass": "primitive",
+ "multiple": false,
+ "typeName": "authorAffiliation"
+ }
+ }
+ ],
+ "typeClass": "compound",
+ "multiple": true,
+ "typeName": "author"
+ },
+ {
+ "value": [
+ { "datasetContactEmail" : {
+ "typeClass": "primitive",
+ "multiple": false,
+ "typeName": "datasetContactEmail",
+ "value" : "jan@mailinator.com"
+ },
+ "datasetContactName" : {
+ "typeClass": "primitive",
+ "multiple": false,
+ "typeName": "datasetContactName",
+ "value": "Range, Jan"
+ }
+ }],
+ "typeClass": "compound",
+ "multiple": true,
+ "typeName": "datasetContact"
+ },
+ {
+ "value": [ {
+ "dsDescriptionValue":{
+ "value": "A Python module for Dataverse.",
+ "multiple":false,
+ "typeClass": "primitive",
+ "typeName": "dsDescriptionValue"
+ }}],
+ "typeClass": "compound",
+ "multiple": true,
+ "typeName": "dsDescription"
+ },
+ {
+ "value": [
+ "Computer and Information Science"
+ ],
+ "typeClass": "controlledVocabulary",
+ "multiple": true,
+ "typeName": "subject"
+ }
+ ],
+ "displayName": "Citation Metadata"
+ }
+ }
+ }
+}
diff --git a/doc/sphinx-guides/source/_static/api/dataset-create-software.jsonld b/doc/sphinx-guides/source/_static/api/dataset-create-software.jsonld
new file mode 100644
index 00000000000..6f072967dc8
--- /dev/null
+++ b/doc/sphinx-guides/source/_static/api/dataset-create-software.jsonld
@@ -0,0 +1,16 @@
+{
+ "http://purl.org/dc/terms/title": "Darwin's Finches",
+ "http://purl.org/dc/terms/subject": "Medicine, Health and Life Sciences",
+ "http://purl.org/dc/terms/creator": {
+ "https://dataverse.org/schema/citation/authorName": "Finch, Fiona",
+ "https://dataverse.org/schema/citation/authorAffiliation": "Birds Inc."
+ },
+ "https://dataverse.org/schema/citation/datasetContact": {
+ "https://dataverse.org/schema/citation/datasetContactEmail": "finch@mailinator.com",
+ "https://dataverse.org/schema/citation/datasetContactName": "Finch, Fiona"
+ },
+ "https://dataverse.org/schema/citation/dsDescription": {
+ "https://dataverse.org/schema/citation/dsDescriptionValue": "Darwin's finches (also known as the Galápagos finches) are a group of about fifteen species of passerine birds."
+ },
+ "https://dataverse.org/schema/core#datasetType": "software"
+}
diff --git a/doc/sphinx-guides/source/api/native-api.rst b/doc/sphinx-guides/source/api/native-api.rst
index b16ea55bd25..ad1d217b9a1 100644
--- a/doc/sphinx-guides/source/api/native-api.rst
+++ b/doc/sphinx-guides/source/api/native-api.rst
@@ -744,6 +744,8 @@ To create a dataset, you must supply a JSON file that contains at least the foll
- Description Text
- Subject
+.. _api-create-dataset-incomplete:
+
Submit Incomplete Dataset
^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -801,6 +803,8 @@ The following is an example HTTP call with deactivated validation:
**Note:** You may learn about an instance's support for deposition of incomplete datasets via :ref:`info-incomplete-metadata`.
+.. _api-create-dataset:
+
Submit Dataset
^^^^^^^^^^^^^^
@@ -830,6 +834,19 @@ You should expect an HTTP 200 ("OK") response and JSON indicating the database I
.. note:: Only a Dataverse installation account with superuser permissions is allowed to include files when creating a dataset via this API. Adding files this way only adds their file metadata to the database, you will need to manually add the physical files to the file system.
+.. _api-create-dataset-with-type:
+
+Create a Dataset with a Dataset Type (Software, etc.)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, datasets are given the type "dataset", but if your installation has added additional types (see :ref:`api-add-dataset-type`), you can specify one of them.
+
+Follow :ref:`api-create-dataset` as normal but include a line like ``"datasetType": "software"`` in your JSON. You can check which types are supported by your installation using the :ref:`api-list-dataset-types` API endpoint.
+
+Here is an example JSON file for reference: :download:`dataset-create-software.json <../_static/api/dataset-create-software.json>`.
+
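+A sketch of the full call, using the example file above (``$DATAVERSE_ID`` is the alias or database id of the collection you are creating the dataset in):
+
+.. code-block:: bash
+
+  export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+  export SERVER_URL=https://demo.dataverse.org
+  export DATAVERSE_ID=root
+
+  curl -H "X-Dataverse-key:$API_TOKEN" -X POST "$SERVER_URL/api/dataverses/$DATAVERSE_ID/datasets" --upload-file dataset-create-software.json
+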
+See also :ref:`dataset-types`.
+
.. _api-import-dataset:
Import a Dataset into a Dataverse Collection
@@ -872,6 +889,18 @@ Before calling the API, make sure the data files referenced by the ``POST``\ ed
* This API endpoint does not support importing *files'* persistent identifiers.
* A Dataverse installation can import datasets with a valid PID that uses a different protocol or authority than said server is configured for. However, the server will not update the PID metadata on subsequent update and publish actions.
+.. _import-dataset-with-type:
+
+Import a Dataset with a Dataset Type (Software, etc.)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+By default, datasets are given the type "dataset", but if your installation has added additional types (see :ref:`api-add-dataset-type`), you can specify one of them.
+
+The same native JSON file as above under :ref:`api-create-dataset-with-type` can be used when importing a dataset.
+
+Using a JSON file that specifies the dataset type, as in the sketch below, is the only difference. Otherwise, follow :ref:`api-import-dataset` as normal.
+
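+For example (a sketch; the PID must be one you are allowed to import, as described under :ref:`api-import-dataset`):
+
+.. code-block:: bash
+
+  export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+  export SERVER_URL=https://demo.dataverse.org
+  export DATAVERSE_ID=root
+  export PERSISTENT_IDENTIFIER=doi:10.5072/FK2/J8SJZB
+
+  curl -H "X-Dataverse-key:$API_TOKEN" -X POST "$SERVER_URL/api/dataverses/$DATAVERSE_ID/datasets/:import?pid=$PERSISTENT_IDENTIFIER&release=no" --upload-file dataset-create-software.json
+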
+See also :ref:`dataset-types`.
Import a Dataset into a Dataverse Installation with a DDI file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -3039,6 +3068,98 @@ The API can also be used to reset the dataset to use the default/inherited value
The default will always be the same provider as for the dataset PID if that provider can generate new PIDs, and will be the PID Provider set for the collection or the global default otherwise.
+.. _api-dataset-types:
+
+Dataset Types
+~~~~~~~~~~~~~
+
+See :ref:`dataset-types` in the User Guide for an overview of the feature.
+
+.. note:: See :ref:`curl-examples-and-environment-variables` if you are unfamiliar with the use of ``export`` below.
+
+.. _api-list-dataset-types:
+
+List Dataset Types
+^^^^^^^^^^^^^^^^^^
+
+Show which dataset types are available.
+
+.. code-block:: bash
+
+ export SERVER_URL=https://demo.dataverse.org
+
+ curl "$SERVER_URL/api/datasets/datasetTypes"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl "https://demo.dataverse.org/api/datasets/datasetTypes"
+
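+Each type is returned with its database id and name. On a fresh installation, only the default "dataset" type exists, so the ``data`` portion of the response would look something like ``[{"id":1,"name":"dataset"}]``.
+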
+.. _api-list-dataset-type:
+
+Get Dataset Type
+^^^^^^^^^^^^^^^^
+
+Show a dataset type by passing either its database id (e.g. "2") or its name (e.g. "software").
+
+.. code-block:: bash
+
+ export SERVER_URL=https://demo.dataverse.org
+ export TYPE=software
+
+ curl "$SERVER_URL/api/datasets/datasetTypes/$TYPE"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl "https://demo.dataverse.org/api/datasets/datasetTypes/software"
+
+.. _api-add-dataset-type:
+
+Add Dataset Type
+^^^^^^^^^^^^^^^^
+
+Note: Before you add any types of your own, there should be a single type called "dataset". If you add "software" or "workflow", these types will be sent to DataCite (if you use DataCite). Otherwise, the only functionality you currently gain from adding types is an entry in the "Dataset Type" facet. Be advised that if you add a type other than "software" or "workflow", you will need to add your new type to your Bundle.properties file for it to appear in Title Case rather than lower case in the "Dataset Type" facet.
+
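+For example, for a hypothetical custom type called "instrument", the facet would show "instrument" until a line like the following is added to Bundle.properties:
+
+.. code-block:: properties
+
+  instrument=Instrument
+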
+With all that said, we'll add a "software" type in the example below. This API endpoint is superuser only. The "name" of a type cannot be only digits.
+
+.. code-block:: bash
+
+ export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ export SERVER_URL=https://demo.dataverse.org
+ export JSON='{"name": "software"}'
+
+ curl -H "X-Dataverse-key:$API_TOKEN" -H "Content-Type: application/json" "$SERVER_URL/api/datasets/datasetTypes" -X POST -d "$JSON"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl -H "X-Dataverse-key:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -H "Content-Type: application/json" "https://demo.dataverse.org/api/datasets/datasetTypes" -X POST -d '{"name": "software"}'
+
+.. _api-delete-dataset-type:
+
+Delete Dataset Type
+^^^^^^^^^^^^^^^^^^^
+
+Superuser only.
+
+.. code-block:: bash
+
+ export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ export SERVER_URL=https://demo.dataverse.org
+ export TYPE_ID=3
+
+ curl -H "X-Dataverse-key:$API_TOKEN" -X DELETE "$SERVER_URL/api/datasets/datasetTypes/$TYPE_ID"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl -H "X-Dataverse-key:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" -X DELETE "https://demo.dataverse.org/api/datasets/datasetTypes/3"
+
Files
-----
@@ -5295,6 +5416,51 @@ Delete Database Setting
Delete the setting under ``name``::
DELETE http://$SERVER/api/admin/settings/$name
+
+.. _list-all-feature-flags:
+
+List All Feature Flags
+~~~~~~~~~~~~~~~~~~~~~~
+
+Experimental and preview features are sometimes hidden behind feature flags. See :ref:`feature-flags` in the Installation Guide for a list of flags and how to configure them.
+
+This API endpoint lists all feature flags, reporting each one as "enabled" or "disabled".
+
+.. note:: See :ref:`curl-examples-and-environment-variables` if you are unfamiliar with the use of ``export`` below.
+
+.. code-block:: bash
+
+ export SERVER_URL=http://localhost:8080
+
+ curl "$SERVER_URL/api/admin/featureFlags"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl "http://localhost:8080/api/admin/featureFlags"
+
+.. _show-feature-flag-status:
+
+Show Feature Flag Status
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+This endpoint reports "enabled" as true or false for a single feature flag. (For all flags, see :ref:`list-all-feature-flags`.)
+
+.. note:: See :ref:`curl-examples-and-environment-variables` if you are unfamiliar with the use of ``export`` below.
+
+.. code-block:: bash
+
+ export SERVER_URL=http://localhost:8080
+ export FLAG=DATASET_TYPES
+
+ curl "$SERVER_URL/api/admin/featureFlags/$FLAG"
+
+The fully expanded example above (without environment variables) looks like this:
+
+.. code-block:: bash
+
+ curl "http://localhost:8080/api/admin/featureFlags/DATASET_TYPES"
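+
+The ``data`` portion of the response contains a single ``enabled`` boolean, e.g. ``{"enabled":true}``.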
Manage Banner Messages
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/sphinx-guides/source/developers/dataset-semantic-metadata-api.rst b/doc/sphinx-guides/source/developers/dataset-semantic-metadata-api.rst
index ded62288eb2..4f374bdc039 100644
--- a/doc/sphinx-guides/source/developers/dataset-semantic-metadata-api.rst
+++ b/doc/sphinx-guides/source/developers/dataset-semantic-metadata-api.rst
@@ -83,6 +83,7 @@ Note, this example uses the term URI directly rather than adding an ``@context``
You should expect a 200 ("OK") response indicating whether a draft Dataset version was created or an existing draft was updated.
+.. _api-semantic-create-dataset:
Create a Dataset
----------------
@@ -105,4 +106,16 @@ With curl, this is done by adding the following header:
curl -H X-Dataverse-key:$API_TOKEN -H 'Content-Type: application/ld+json' -X POST $SERVER_URL/api/dataverses/$DATAVERSE_ID/datasets --upload-file dataset-create.jsonld
An example jsonld file is available at :download:`dataset-create.jsonld <../_static/api/dataset-create.jsonld>` (:download:`dataset-create_en.jsonld <../_static/api/dataset-create.jsonld>` is a version that sets the metadata language (see :ref:`:MetadataLanguages`) to English (en).)
-
+
+.. _api-semantic-create-dataset-with-type:
+
+Create a Dataset with a Dataset Type
+------------------------------------
+
+By default, datasets are given the type "dataset", but if your installation has added additional types (see :ref:`api-add-dataset-type`), you can specify one of them.
+
+An example JSON-LD file is available at :download:`dataset-create-software.jsonld <../_static/api/dataset-create-software.jsonld>`.
+
+You can use this file with the normal :ref:`api-semantic-create-dataset` endpoint above.
+
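+For example (a sketch, mirroring the call shown above):
+
+.. code-block:: bash
+
+  curl -H "X-Dataverse-key:$API_TOKEN" -H 'Content-Type: application/ld+json' -X POST "$SERVER_URL/api/dataverses/$DATAVERSE_ID/datasets" --upload-file dataset-create-software.jsonld
+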
+See also :ref:`dataset-types`.
diff --git a/doc/sphinx-guides/source/installation/config.rst b/doc/sphinx-guides/source/installation/config.rst
index ae3b07fecfe..b2d9bd3d342 100644
--- a/doc/sphinx-guides/source/installation/config.rst
+++ b/doc/sphinx-guides/source/installation/config.rst
@@ -3302,10 +3302,11 @@ please find all known feature flags below. Any of these flags can be activated u
- Removes the reason field in the `Publish/Return To Author` dialog that was added as a required field in v6.2 and makes the reason an optional parameter in the :ref:`return-a-dataset` API call.
- ``Off``
-
**Note:** Feature flags can be set via any `supported MicroProfile Config API source`_, e.g. the environment variable
``DATAVERSE_FEATURE_XXX`` (e.g. ``DATAVERSE_FEATURE_API_SESSION_AUTH=1``). These environment variables can be set in your shell before starting Payara. If you are using :doc:`Docker for development `, you can set them in the `docker compose `_ file.
+To check the status of feature flags via API, see :ref:`list-all-feature-flags` in the API Guide.
+
.. _:ApplicationServerSettings:
Application Server Settings
diff --git a/doc/sphinx-guides/source/user/dataset-management.rst b/doc/sphinx-guides/source/user/dataset-management.rst
index 6852b60575b..2e5d84748a8 100755
--- a/doc/sphinx-guides/source/user/dataset-management.rst
+++ b/doc/sphinx-guides/source/user/dataset-management.rst
@@ -785,6 +785,27 @@ If you deaccession the most recently published version of the dataset but not al
**Important Note**: A tombstone landing page with the basic citation metadata will always be accessible to the public if they use the persistent URL (Handle or DOI) provided in the citation for that dataset. Users will not be able to see any of the files or additional metadata that were previously available prior to deaccession.
+.. _dataset-types:
+
+Dataset Types
+=============
+
+Out of the box, all datasets have a dataset type of "dataset". Superusers can add additional types such as "software" or "workflow" using the :ref:`api-add-dataset-type` API endpoint.
+
+Once more than one type appears in search results, a facet called "Dataset Type" will appear, allowing you to filter down to a certain type.
+
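+For example, once a type such as "software" is in use, you can narrow Search API results to that type with a filter query. Note that the filter value is the lower case type name, even though the facet displays it in Title Case (e.g. "Software"). A sketch:
+
+.. code-block:: bash
+
+  curl "https://demo.dataverse.org/api/search?q=*&fq=datasetType:software"
+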
+If your installation is configured to use DataCite as a persistent ID (PID) provider, the appropriate resource type ("Dataset", "Software", or "Workflow") will be sent to DataCite when a dataset with one of those three types is published.
+
+Currently, the dataset type can only be specified via API and only when the dataset is created. For details, see the following sections of the API guide:
+
+- :ref:`api-create-dataset-with-type` (Native API)
+- :ref:`api-semantic-create-dataset-with-type` (Semantic API)
+- :ref:`import-dataset-with-type`
+
+Dataset types can be listed, added, or deleted via API. See :ref:`api-dataset-types` in the API Guide for more.
+
+Development of the dataset types feature is ongoing. Please see https://github.com/IQSS/dataverse/issues/10489 for details.
+
.. |image1| image:: ./img/DatasetDiagram.png
:class: img-responsive
.. |image3| image:: ./img/data_publishing_version_workflow.png
diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataset.java b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
index 98766dca447..52cb7d6f2dc 100644
--- a/src/main/java/edu/harvard/iq/dataverse/Dataset.java
+++ b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
@@ -1,6 +1,7 @@
package edu.harvard.iq.dataverse;
import edu.harvard.iq.dataverse.dataset.DatasetThumbnail;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.harvest.client.HarvestingClient;
import edu.harvard.iq.dataverse.license.License;
@@ -129,6 +130,10 @@ public class Dataset extends DvObjectContainer {
*/
private boolean useGenericThumbnail;
+ @ManyToOne
+ @JoinColumn(name="datasettype_id", nullable = false)
+ private DatasetType datasetType;
+
@OneToOne(cascade = {CascadeType.MERGE, CascadeType.PERSIST})
@JoinColumn(name = "guestbook_id", unique = false, nullable = true, insertable = true, updatable = true)
private Guestbook guestbook;
@@ -741,7 +746,15 @@ public boolean isUseGenericThumbnail() {
public void setUseGenericThumbnail(boolean useGenericThumbnail) {
this.useGenericThumbnail = useGenericThumbnail;
}
-
+
+ public DatasetType getDatasetType() {
+ return datasetType;
+ }
+
+ public void setDatasetType(DatasetType datasetType) {
+ this.datasetType = datasetType;
+ }
+
public List<DatasetMetrics> getDatasetMetrics() {
return datasetMetrics;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/EjbDataverseEngine.java b/src/main/java/edu/harvard/iq/dataverse/EjbDataverseEngine.java
index c8537f2a424..0561fed8a97 100644
--- a/src/main/java/edu/harvard/iq/dataverse/EjbDataverseEngine.java
+++ b/src/main/java/edu/harvard/iq/dataverse/EjbDataverseEngine.java
@@ -12,6 +12,7 @@
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.confirmemail.ConfirmEmailServiceBean;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleServiceBean;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
@@ -127,7 +128,10 @@ public class EjbDataverseEngine {
@EJB
MetadataBlockServiceBean metadataBlockService;
-
+
+ @EJB
+ DatasetTypeServiceBean datasetTypeService;
+
@EJB
DataverseLinkingServiceBean dvLinking;
@@ -603,6 +607,11 @@ public MetadataBlockServiceBean metadataBlocks() {
return metadataBlockService;
}
+ @Override
+ public DatasetTypeServiceBean datasetTypes() {
+ return datasetTypeService;
+ }
+
@Override
public void beginCommandSequence() {
this.commandsCalled = new Stack();
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/AbstractApiBean.java b/src/main/java/edu/harvard/iq/dataverse/api/AbstractApiBean.java
index 19df6d8c1c7..3257a3cc7ac 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/AbstractApiBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/AbstractApiBean.java
@@ -11,6 +11,7 @@
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.confirmemail.ConfirmEmailServiceBean;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleServiceBean;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
@@ -164,6 +165,9 @@ String getWrappedMessageWhenJson() {
@EJB
protected LicenseServiceBean licenseSvc;
+ @EJB
+ protected DatasetTypeServiceBean datasetTypeSvc;
+
@EJB
protected UserServiceBean userSvc;
@@ -247,7 +251,7 @@ public enum Format {
private final LazyRef<JsonParser> jsonParserRef = new LazyRef<>(new Callable<JsonParser>() {
@Override
public JsonParser call() throws Exception {
- return new JsonParser(datasetFieldSvc, metadataBlockSvc,settingsSvc, licenseSvc);
+ return new JsonParser(datasetFieldSvc, metadataBlockSvc,settingsSvc, licenseSvc, datasetTypeSvc);
}
});
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Admin.java b/src/main/java/edu/harvard/iq/dataverse/api/Admin.java
index 550ad1b3043..54e5eaf7b84 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Admin.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Admin.java
@@ -100,6 +100,7 @@
import edu.harvard.iq.dataverse.engine.command.impl.RegisterDvObjectCommand;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.pidproviders.handle.HandlePidProvider;
+import edu.harvard.iq.dataverse.settings.FeatureFlags;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.userdata.UserListMaker;
import edu.harvard.iq.dataverse.userdata.UserListResult;
@@ -127,6 +128,7 @@
import jakarta.ws.rs.WebApplicationException;
import jakarta.ws.rs.core.StreamingOutput;
import java.nio.file.Paths;
+import java.util.TreeMap;
/**
* Where the secure, setup API calls live.
@@ -2516,4 +2518,27 @@ public Response downloadTmpFile(@Context ContainerRequestContext crc, @QueryPara
}
}
+ @GET
+ @Path("/featureFlags")
+ public Response getFeatureFlags() {
+ Map<String, String> map = new TreeMap<>();
+ for (FeatureFlags flag : FeatureFlags.values()) {
+ map.put(flag.name(), flag.enabled() ? "enabled" : "disabled");
+ }
+ return ok(Json.createObjectBuilder(map));
+ }
+
+ @GET
+ @Path("/featureFlags/{flag}")
+ public Response getFeatureFlag(@PathParam("flag") String flagIn) {
+ try {
+ FeatureFlags flag = FeatureFlags.valueOf(flagIn);
+ JsonObjectBuilder job = Json.createObjectBuilder();
+ job.add("enabled", flag.enabled());
+ return ok(job);
+ } catch (IllegalArgumentException ex) {
+ return error(Status.NOT_FOUND, "Feature flag not found. Try listing all feature flags.");
+ }
+ }
+
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
index ec51bb2c27f..034ba4536a1 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
@@ -98,9 +98,12 @@
import java.util.stream.Collectors;
import static edu.harvard.iq.dataverse.api.ApiConstants.*;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import static edu.harvard.iq.dataverse.util.json.JsonPrinter.*;
import static edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder.jsonObjectBuilder;
import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST;
+import static jakarta.ws.rs.core.Response.Status.NOT_FOUND;
@Path("datasets")
public class Datasets extends AbstractApiBean {
@@ -186,6 +189,9 @@ public class Datasets extends AbstractApiBean {
@Inject
DatasetVersionFilesServiceBean datasetVersionFilesServiceBean;
+ @Inject
+ DatasetTypeServiceBean datasetTypeSvc;
+
/**
* Used to consolidate the way we parse and handle dataset versions.
* @param
@@ -5070,4 +5076,130 @@ public Response resetPidGenerator(@Context ContainerRequestContext crc, @PathPar
return ok("Pid Generator reset to default: " + dataset.getEffectivePidGenerator().getId());
}
+ @GET
+ @Path("datasetTypes")
+ public Response getDatasetTypes() {
+ JsonArrayBuilder jab = Json.createArrayBuilder();
+ List<DatasetType> datasetTypes = datasetTypeSvc.listAll();
+ for (DatasetType datasetType : datasetTypes) {
+ JsonObjectBuilder job = Json.createObjectBuilder();
+ job.add("id", datasetType.getId());
+ job.add("name", datasetType.getName());
+ jab.add(job);
+ }
+ return ok(jab.build());
+ }
+
+ @GET
+ @Path("datasetTypes/{idOrName}")
+ public Response getDatasetTypes(@PathParam("idOrName") String idOrName) {
+ DatasetType datasetType = null;
+ if (StringUtils.isNumeric(idOrName)) {
+ try {
+ long id = Long.parseLong(idOrName);
+ datasetType = datasetTypeSvc.getById(id);
+ } catch (NumberFormatException ex) {
+ return error(NOT_FOUND, "Could not find a dataset type with id " + idOrName);
+ }
+ } else {
+ datasetType = datasetTypeSvc.getByName(idOrName);
+ }
+ if (datasetType != null) {
+ return ok(datasetType.toJson());
+ } else {
+ return error(NOT_FOUND, "Could not find a dataset type with name " + idOrName);
+ }
+ }
+
+ @POST
+ @AuthRequired
+ @Path("datasetTypes")
+ public Response addDatasetType(@Context ContainerRequestContext crc, String jsonIn) {
+ AuthenticatedUser user;
+ try {
+ user = getRequestAuthenticatedUserOrDie(crc);
+ } catch (WrappedResponse ex) {
+ return error(Response.Status.BAD_REQUEST, "Authentication is required.");
+ }
+ if (!user.isSuperuser()) {
+ return error(Response.Status.FORBIDDEN, "Superusers only.");
+ }
+
+ if (jsonIn == null || jsonIn.isEmpty()) {
+ return error(BAD_REQUEST, "JSON input was null or empty!");
+ }
+
+ String nameIn = null;
+ try {
+ JsonObject jsonObject = JsonUtil.getJsonObject(jsonIn);
+ nameIn = jsonObject.getString("name", null);
+ } catch (JsonParsingException ex) {
+ return error(BAD_REQUEST, "Problem parsing supplied JSON: " + ex.getLocalizedMessage());
+ }
+ if (nameIn == null) {
+ return error(BAD_REQUEST, "A name for the dataset type is required");
+ }
+ if (StringUtils.isNumeric(nameIn)) {
+ // getDatasetTypes supports id or name, so we don't want a name that looks like an id
+ return error(BAD_REQUEST, "The name of the type cannot be only digits.");
+ }
+
+ try {
+ DatasetType datasetType = new DatasetType();
+ datasetType.setName(nameIn);
+ DatasetType saved = datasetTypeSvc.save(datasetType);
+ return ok(saved.toJson());
+ } catch (WrappedResponse ex) {
+ return error(BAD_REQUEST, ex.getMessage());
+ }
+ }
+
+ @DELETE
+ @AuthRequired
+ @Path("datasetTypes/{id}")
+ public Response deleteDatasetType(@Context ContainerRequestContext crc, @PathParam("id") String doomed) {
+ AuthenticatedUser user;
+ try {
+ user = getRequestAuthenticatedUserOrDie(crc);
+ } catch (WrappedResponse ex) {
+ return error(Response.Status.BAD_REQUEST, "Authentication is required.");
+ }
+ if (!user.isSuperuser()) {
+ return error(Response.Status.FORBIDDEN, "Superusers only.");
+ }
+
+ if (doomed == null || doomed.isEmpty()) {
+ return error(BAD_REQUEST, "ID is required!");
+ }
+
+ long idToDelete;
+ try {
+ idToDelete = Long.parseLong(doomed);
+ } catch (NumberFormatException e) {
+ // Throwing IllegalArgumentException here would surface as a 500; return a 400 instead.
+ return error(BAD_REQUEST, "ID must be a number");
+ }
+
+ DatasetType datasetTypeToDelete = datasetTypeSvc.getById(idToDelete);
+ if (datasetTypeToDelete == null) {
+ return error(BAD_REQUEST, "Could not find dataset type with id " + idToDelete);
+ }
+
+ if (DatasetType.DEFAULT_DATASET_TYPE.equals(datasetTypeToDelete.getName())) {
+ return error(Status.FORBIDDEN, "You cannot delete the default dataset type: " + DatasetType.DEFAULT_DATASET_TYPE);
+ }
+
+ try {
+ int numDeleted = datasetTypeSvc.deleteById(idToDelete);
+ if (numDeleted == 1) {
+ return ok("deleted");
+ } else {
+ return error(BAD_REQUEST, "Something went wrong. Number of dataset types deleted: " + numDeleted);
+ }
+ } catch (WrappedResponse ex) {
+ return error(BAD_REQUEST, ex.getMessage());
+ }
+ }
+
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java b/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java
index 75e3456ab27..17e3086f184 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Dataverses.java
@@ -332,7 +332,7 @@ public Response createDatasetFromJsonLd(@Context ContainerRequestContext crc, St
Dataset ds = new Dataset();
ds.setOwner(owner);
- ds = JSONLDUtil.updateDatasetMDFromJsonLD(ds, jsonLDBody, metadataBlockSvc, datasetFieldSvc, false, false, licenseSvc);
+ ds = JSONLDUtil.updateDatasetMDFromJsonLD(ds, jsonLDBody, metadataBlockSvc, datasetFieldSvc, false, false, licenseSvc, datasetTypeSvc);
ds.setOwner(owner);
@@ -361,6 +361,8 @@ public Response createDatasetFromJsonLd(@Context ContainerRequestContext crc, St
} catch (WrappedResponse ex) {
return ex.getResponse();
+ } catch (Exception ex) {
+ return error(Status.BAD_REQUEST, ex.getLocalizedMessage());
}
}
@@ -513,7 +515,7 @@ public Response importDatasetDdi(@Context ContainerRequestContext crc, String xm
return ex.getResponse();
}
}
-
+
@POST
@AuthRequired
@Path("{identifier}/datasets/:startmigration")
@@ -529,7 +531,7 @@ public Response recreateDataset(@Context ContainerRequestContext crc, String jso
Dataset ds = new Dataset();
ds.setOwner(owner);
- ds = JSONLDUtil.updateDatasetMDFromJsonLD(ds, jsonLDBody, metadataBlockSvc, datasetFieldSvc, false, true, licenseSvc);
+ ds = JSONLDUtil.updateDatasetMDFromJsonLD(ds, jsonLDBody, metadataBlockSvc, datasetFieldSvc, false, true, licenseSvc, datasetTypeSvc);
//ToDo - verify PID is one Dataverse can manage (protocol/authority/shoulder match)
if (!PidUtil.getPidProvider(ds.getGlobalId().getProviderId()).canManagePID()) {
throw new BadRequestException(
@@ -572,6 +574,8 @@ private Dataset parseDataset(String datasetJson) throws WrappedResponse {
try {
return jsonParser().parseDataset(JsonUtil.getJsonObject(datasetJson));
} catch (JsonParsingException | JsonParseException jpe) {
+ String message = jpe.getLocalizedMessage();
+ logger.log(Level.SEVERE, "Error parsing dataset JSON. message: {0}", message);
logger.log(Level.SEVERE, "Error parsing dataset json. Json: {0}", datasetJson);
throw new WrappedResponse(error(Status.BAD_REQUEST, "Error parsing Json: " + jpe.getMessage()));
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportGenericServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportGenericServiceBean.java
index 6068ec45e4f..d32a548c8bf 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportGenericServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportGenericServiceBean.java
@@ -14,6 +14,7 @@
import edu.harvard.iq.dataverse.api.dto.*;
import edu.harvard.iq.dataverse.api.dto.FieldDTO;
import edu.harvard.iq.dataverse.api.dto.MetadataBlockDTO;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.license.LicenseServiceBean;
import edu.harvard.iq.dataverse.pidproviders.doi.AbstractDOIProvider;
import edu.harvard.iq.dataverse.pidproviders.handle.HandlePidProvider;
@@ -71,9 +72,13 @@ public class ImportGenericServiceBean {
@EJB
SettingsServiceBean settingsService;
+
@EJB
LicenseServiceBean licenseService;
+ @EJB
+ DatasetTypeServiceBean datasetTypeService;
+
@PersistenceContext(unitName = "VDCNet-ejbPU")
private EntityManager em;
@@ -110,7 +115,7 @@ public void importXML(String xmlToParse, String foreignFormat, DatasetVersion da
logger.fine(json);
JsonReader jsonReader = Json.createReader(new StringReader(json));
JsonObject obj = jsonReader.readObject();
- DatasetVersion dv = new JsonParser(datasetFieldSvc, blockService, settingsService, licenseService).parseDatasetVersion(obj, datasetVersion);
+ DatasetVersion dv = new JsonParser(datasetFieldSvc, blockService, settingsService, licenseService, datasetTypeService).parseDatasetVersion(obj, datasetVersion);
} catch (XMLStreamException ex) {
//Logger.getLogger("global").log(Level.SEVERE, null, ex);
throw new EJBException("ERROR occurred while parsing XML fragment ("+xmlToParse.substring(0, 64)+"...); ", ex);
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportServiceBean.java
index 39977190691..d2bba56f884 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/imports/ImportServiceBean.java
@@ -23,6 +23,7 @@
import edu.harvard.iq.dataverse.MetadataBlockServiceBean;
import edu.harvard.iq.dataverse.api.dto.DatasetDTO;
import edu.harvard.iq.dataverse.api.imports.ImportUtil.ImportType;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.exception.CommandException;
import edu.harvard.iq.dataverse.engine.command.impl.CreateDatasetVersionCommand;
@@ -104,8 +105,13 @@ public class ImportServiceBean {
@EJB
IndexServiceBean indexService;
+
@EJB
LicenseServiceBean licenseService;
+
+ @EJB
+ DatasetTypeServiceBean datasetTypeService;
+
/**
* This is just a convenience method, for testing migration. It creates
* a dummy dataverse with the directory name as dataverse name & alias.
@@ -264,7 +270,7 @@ public Dataset doImportHarvestedDataset(DataverseRequest dataverseRequest, Harve
JsonObject obj = JsonUtil.getJsonObject(json);
//and call parse Json to read it into a dataset
try {
- JsonParser parser = new JsonParser(datasetfieldService, metadataBlockService, settingsService, licenseService, harvestingClient);
+ JsonParser parser = new JsonParser(datasetfieldService, metadataBlockService, settingsService, licenseService, datasetTypeService, harvestingClient);
parser.setLenient(true);
Dataset ds = parser.parseDataset(obj);
@@ -417,7 +423,7 @@ public JsonObjectBuilder doImport(DataverseRequest dataverseRequest, Dataverse o
JsonObject obj = JsonUtil.getJsonObject(json);
//and call parse Json to read it into a dataset
try {
- JsonParser parser = new JsonParser(datasetfieldService, metadataBlockService, settingsService, licenseService);
+ JsonParser parser = new JsonParser(datasetfieldService, metadataBlockService, settingsService, licenseService, datasetTypeService);
parser.setLenient(!importType.equals(ImportType.NEW));
Dataset ds = parser.parseDataset(obj);
diff --git a/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetType.java b/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetType.java
new file mode 100644
index 00000000000..78bf232e1a6
--- /dev/null
+++ b/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetType.java
@@ -0,0 +1,70 @@
+package edu.harvard.iq.dataverse.dataset;
+
+import jakarta.json.Json;
+import jakarta.json.JsonObjectBuilder;
+import jakarta.persistence.Column;
+import jakarta.persistence.Entity;
+import jakarta.persistence.GeneratedValue;
+import jakarta.persistence.GenerationType;
+import jakarta.persistence.Id;
+import jakarta.persistence.NamedQueries;
+import jakarta.persistence.NamedQuery;
+import jakarta.persistence.Table;
+import jakarta.persistence.UniqueConstraint;
+import java.io.Serializable;
+
+@NamedQueries({
+ @NamedQuery(name = "DatasetType.findAll",
+ query = "SELECT d FROM DatasetType d"),
+ @NamedQuery(name = "DatasetType.findById",
+ query = "SELECT d FROM DatasetType d WHERE d.id=:id"),
+ @NamedQuery(name = "DatasetType.findByName",
+ query = "SELECT d FROM DatasetType d WHERE d.name=:name"),
+ @NamedQuery(name = "DatasetType.deleteById",
+ query = "DELETE FROM DatasetType d WHERE d.id=:id"),})
+@Entity
+@Table(uniqueConstraints = {
+ @UniqueConstraint(columnNames = "name"),}
+)
+
+public class DatasetType implements Serializable {
+
+ public static final String DATASET_TYPE_DATASET = "dataset";
+ public static final String DATASET_TYPE_SOFTWARE = "software";
+ public static final String DATASET_TYPE_WORKFLOW = "workflow";
+ public static final String DEFAULT_DATASET_TYPE = DATASET_TYPE_DATASET;
+
+ @Id
+ @GeneratedValue(strategy = GenerationType.IDENTITY)
+ private Long id;
+
+ // Any constraints? @Pattern regexp?
+ @Column(nullable = false)
+ private String name;
+
+ public DatasetType() {
+ }
+
+ public Long getId() {
+ return id;
+ }
+
+ public void setId(Long id) {
+ this.id = id;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public JsonObjectBuilder toJson() {
+ return Json.createObjectBuilder()
+ .add("id", getId())
+ .add("name", getName());
+ }
+
+}
diff --git a/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetTypeServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetTypeServiceBean.java
new file mode 100644
index 00000000000..832182f2a4a
--- /dev/null
+++ b/src/main/java/edu/harvard/iq/dataverse/dataset/DatasetTypeServiceBean.java
@@ -0,0 +1,79 @@
+package edu.harvard.iq.dataverse.dataset;
+
+import edu.harvard.iq.dataverse.actionlogging.ActionLogRecord;
+import edu.harvard.iq.dataverse.api.AbstractApiBean;
+import jakarta.ejb.Stateless;
+import jakarta.inject.Named;
+import jakarta.persistence.EntityManager;
+import jakarta.persistence.NoResultException;
+import jakarta.persistence.PersistenceContext;
+import jakarta.persistence.PersistenceException;
+import java.util.List;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+@Stateless
+@Named
+public class DatasetTypeServiceBean {
+
+ private static final Logger logger = Logger.getLogger(DatasetTypeServiceBean.class.getName());
+
+ @PersistenceContext
+ EntityManager em;
+
+ public List<DatasetType> listAll() {
+ return em.createNamedQuery("DatasetType.findAll", DatasetType.class).getResultList();
+ }
+
+ public DatasetType getById(long id) {
+ try {
+ return em.createNamedQuery("DatasetType.findById", DatasetType.class)
+ .setParameter("id", id)
+ .getSingleResult();
+ } catch (NoResultException noResultException) {
+ logger.log(Level.WARNING, "Couldn't find a dataset type with id " + id);
+ return null;
+ }
+ }
+
+ public DatasetType getByName(String name) {
+ try {
+ return em.createNamedQuery("DatasetType.findByName", DatasetType.class)
+ .setParameter("name", name)
+ .getSingleResult();
+ } catch (NoResultException noResultException) {
+ logger.log(Level.WARNING, "Couldn't find a dataset type named " + name);
+ return null;
+ }
+ }
+
+ public DatasetType save(DatasetType datasetType) throws AbstractApiBean.WrappedResponse {
+ if (datasetType.getId() != null) {
+ throw new AbstractApiBean.WrappedResponse(new IllegalArgumentException("There shouldn't be an ID in the request body"), null);
+ }
+ try {
+ em.persist(datasetType);
+ em.flush();
+ } catch (PersistenceException p) {
+ if (p.getMessage().contains("duplicate key")) {
+ throw new AbstractApiBean.WrappedResponse(new IllegalStateException("A dataset type with the same name is already present.", p), null);
+ } else {
+ throw p;
+ }
+ }
+ return datasetType;
+ }
+
+ public int deleteById(long id) throws AbstractApiBean.WrappedResponse {
+ try {
+ return em.createNamedQuery("DatasetType.deleteById").setParameter("id", id).executeUpdate();
+ } catch (PersistenceException p) {
+ if (p.getMessage().contains("violates foreign key constraint")) {
+ throw new AbstractApiBean.WrappedResponse(new IllegalStateException("Dataset type with id " + id + " is referenced and cannot be deleted.", p), null);
+ } else {
+ throw p;
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/CommandContext.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/CommandContext.java
index 96330271367..282cbb88988 100644
--- a/src/main/java/edu/harvard/iq/dataverse/engine/command/CommandContext.java
+++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/CommandContext.java
@@ -29,6 +29,7 @@
import edu.harvard.iq.dataverse.authorization.groups.impl.explicit.ExplicitGroupServiceBean;
import edu.harvard.iq.dataverse.confirmemail.ConfirmEmailServiceBean;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleServiceBean;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.DataverseEngine;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
import edu.harvard.iq.dataverse.pidproviders.PidProviderFactoryBean;
@@ -138,6 +139,8 @@ public interface CommandContext {
public MetadataBlockServiceBean metadataBlocks();
+ public DatasetTypeServiceBean datasetTypes();
+
public void beginCommandSequence();
public boolean completeCommandSequence(Command command);
diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractCreateDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractCreateDatasetCommand.java
index ab78a88c9a7..7b7c5fd0e93 100644
--- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractCreateDatasetCommand.java
+++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/AbstractCreateDatasetCommand.java
@@ -6,6 +6,7 @@
import edu.harvard.iq.dataverse.authorization.Permission;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.engine.command.RequiredPermissions;
@@ -119,7 +120,17 @@ public Dataset execute(CommandContext ctxt) throws CommandException {
// Attempt the registration if importing dataset through the API, or the app (but not harvest)
handlePid(theDataset, ctxt);
-
+
+ DatasetType existingDatasetType = theDataset.getDatasetType();
+ logger.fine("existing dataset type: " + existingDatasetType);
+ if (existingDatasetType == null) {
+ // A dataset type can be specified via API, for example. Fall back to the default when none was set.
+ theDataset.setDatasetType(ctxt.datasetTypes().getByName(DatasetType.DEFAULT_DATASET_TYPE));
+ }
+
ctxt.em().persist(theDataset);
postPersist(theDataset, ctxt);
diff --git a/src/main/java/edu/harvard/iq/dataverse/pidproviders/doi/XmlMetadataTemplate.java b/src/main/java/edu/harvard/iq/dataverse/pidproviders/doi/XmlMetadataTemplate.java
index 30e4dfd79cc..fb4e294d246 100644
--- a/src/main/java/edu/harvard/iq/dataverse/pidproviders/doi/XmlMetadataTemplate.java
+++ b/src/main/java/edu/harvard/iq/dataverse/pidproviders/doi/XmlMetadataTemplate.java
@@ -16,6 +16,7 @@
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetAuthor;
import edu.harvard.iq.dataverse.DvObject;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.pidproviders.AbstractPidProvider;
public class XmlMetadataTemplate {
@@ -43,6 +44,7 @@ public class XmlMetadataTemplate {
private String publisher;
private String publisherYear;
private List authors;
+ private String resourceTypeGeneral;
private String description;
private List contacts;
private List producers;
@@ -197,6 +199,22 @@ public String generateXML(DvObject dvObject) {
}
}
+ if (dvObject.isInstanceofDataset()) {
+ Dataset dataset = (Dataset) dvObject;
+ String datasetTypeName = dataset.getDatasetType().getName();
+ resourceTypeGeneral = switch (datasetTypeName) {
+ case DatasetType.DATASET_TYPE_DATASET -> "Dataset";
+ case DatasetType.DATASET_TYPE_SOFTWARE -> "Software";
+ case DatasetType.DATASET_TYPE_WORKFLOW -> "Workflow";
+ default -> "Dataset";
+ };
+ xmlMetadata = xmlMetadata.replace("${resourceTypeGeneral}", resourceTypeGeneral);
+ }
+
String relIdentifiers = generateRelatedIdentifiers(dvObject);
xmlMetadata = xmlMetadata.replace("${relatedIdentifiers}", relIdentifiers);
@@ -311,4 +329,12 @@ public void setPublisherYear(String publisherYear) {
this.publisherYear = publisherYear;
}
+ public String getResourceTypeGeneral() {
+ return resourceTypeGeneral;
+ }
+
+ public void setResourceTypeGeneral(String resourceTypeGeneral) {
+ this.resourceTypeGeneral = resourceTypeGeneral;
+ }
+
}
\ No newline at end of file
diff --git a/src/main/java/edu/harvard/iq/dataverse/search/IndexServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/search/IndexServiceBean.java
index c91eb0bfa7c..fd769846490 100644
--- a/src/main/java/edu/harvard/iq/dataverse/search/IndexServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/search/IndexServiceBean.java
@@ -8,6 +8,7 @@
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
import edu.harvard.iq.dataverse.dataaccess.DataAccessRequest;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
import edu.harvard.iq.dataverse.datavariable.VariableMetadata;
import edu.harvard.iq.dataverse.datavariable.VariableMetadataUtil;
@@ -1002,6 +1003,9 @@ public SolrInputDocuments toSolrDocs(IndexableDataset indexableDataset, Set<Long
diff --git a/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java
--- a/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/search/SearchServiceBean.java
List<String> facetLabelList = new ArrayList<>();
int numMetadataSources = 0;
int numLicenses = 0;
+ int numDatasetTypes = 0;
String metadataBlockName = "";
String datasetFieldName = "";
/**
@@ -768,6 +773,10 @@ public SolrQueryResponse search(
}
} else {
try {
+ // This is where facets are capitalized.
+ // This will be a problem for the API clients because they get back a string like this from the Search API...
+ // {"datasetType":{"friendly":"Dataset Type","labels":[{"Dataset":1},{"Software":1}]}}
+ // ... but they will need to use the lower case version (e.g. "software") to narrow results.
localefriendlyName = BundleUtil.getStringFromPropertyFile(facetFieldCount.getName(), "Bundle");
} catch (Exception e) {
localefriendlyName = facetFieldCount.getName();
@@ -789,6 +798,8 @@ public SolrQueryResponse search(
numMetadataSources++;
} else if (facetField.getName().equals(SearchFields.DATASET_LICENSE)) {
numLicenses++;
+ } else if (facetField.getName().equals(SearchFields.DATASET_TYPE)) {
+ numDatasetTypes++;
}
}
}
@@ -798,6 +809,9 @@ public SolrQueryResponse search(
if (numLicenses > 1) {
hideLicenseFacet = false;
}
+ if (numDatasetTypes > 1) {
+ hideDatasetTypeFacet = false;
+ }
facetCategory.setName(facetField.getName());
// hopefully people will never see the raw facetField.getName() because it may well have an _s at the end
facetCategory.setFriendlyName(facetField.getName());
@@ -878,6 +892,10 @@ public SolrQueryResponse search(
if (!hideLicenseFacet) {
facetCategoryList.add(facetCategory);
}
+ } else if (facetCategory.getName().equals(SearchFields.DATASET_TYPE)) {
+ if (!hideDatasetTypeFacet) {
+ facetCategoryList.add(facetCategory);
+ }
} else {
facetCategoryList.add(facetCategory);
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/search/SolrSearchResult.java b/src/main/java/edu/harvard/iq/dataverse/search/SolrSearchResult.java
index b40dcd69f3b..01cbf7c1055 100644
--- a/src/main/java/edu/harvard/iq/dataverse/search/SolrSearchResult.java
+++ b/src/main/java/edu/harvard/iq/dataverse/search/SolrSearchResult.java
@@ -26,135 +26,143 @@
import edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder;
public class SolrSearchResult {
- private static final Logger logger = Logger.getLogger(SolrSearchResult.class.getCanonicalName());
-
- private String id;
- private Long entityId;
- private DvObject entity;
- private String identifier;
- private String type;
- private String htmlUrl;
- private String persistentUrl;
- private String downloadUrl;
- private String apiUrl;
- /**
- * This is called "imageUrl" because it used to really be a URL. While performance improvements were being made in the 4.2 timeframe, we started
- * putting base64 representations of images in this String instead, which broke the Search API and probably things built on top of it such as MyData.
- * See "`image_url` from Search API results no longer yields a downloadable image" at https://github.com/IQSS/dataverse/issues/3616
- */
- private String imageUrl;
- private DatasetThumbnail datasetThumbnail;
- private String query;
- private String name;
- private String nameSort;
- private String status;
- private Date releaseOrCreateDate;
- private String dateToDisplayOnCard;
- private List<String> publicationStatuses = new ArrayList<>();
-
- /**
- * @todo: how important is it to differentiate between name and title?
- */
- private String title;
- private String descriptionNoSnippet;
- private List<String> datasetAuthors = new ArrayList<>();
- private String deaccessionReason;
- private List<Highlight> highlightsAsList = new ArrayList<>();
- private Map<SolrField, Highlight> highlightsMap;
- private Map<String, Highlight> highlightsAsMap;
-
- // parent can be dataverse or dataset, store the name and id
- /**
- * The "identifier" of a file's parent (a dataset) is a globalId (often a doi).
- */
- public static String PARENT_IDENTIFIER = "identifier";
- private Map<String, String> parent;
- private String dataverseAffiliation;
- private String citation;
- private String citationHtml;
- /**
- * Files and datasets might have a UNF. Dataverses don't.
- */
- private String unf;
- private String filetype;
- private String fileContentType;
- private Long fileSizeInBytes;
- /**
- * fileMD5 is here for legacy and backward-compatibility reasons. It might be deprecated some day in favor of "fileChecksumType" and
- * "fileChecksumValue"
- */
- private String fileMd5;
- private DataFile.ChecksumType fileChecksumType;
- private String fileChecksumValue;
- private String dataverseAlias;
- private String dataverseParentAlias;
+
+ private static final Logger logger = Logger.getLogger(SolrSearchResult.class.getCanonicalName());
+
+ private String id;
+ private Long entityId;
+ private DvObject entity;
+ private String identifier;
+ private String type;
+ private String htmlUrl;
+ private String persistentUrl;
+ private String downloadUrl;
+ private String apiUrl;
+ /**
+ * This is called "imageUrl" because it used to really be a URL. While
+ * performance improvements were being made in the 4.2 timeframe, we started
+ * putting base64 representations of images in this String instead, which
+ * broke the Search API and probably things built on top of it such as
+ * MyData. See "`image_url` from Search API results no longer yields a
+ * downloadable image" at https://github.com/IQSS/dataverse/issues/3616
+ */
+ private String imageUrl;
+ private DatasetThumbnail datasetThumbnail;
+ private String query;
+ private String name;
+ private String nameSort;
+ private String status;
+ private Date releaseOrCreateDate;
+ private String dateToDisplayOnCard;
+ private List<String> publicationStatuses = new ArrayList<>();
+
+ /**
+ * @todo: how important is it to differentiate between name and title?
+ */
+ private String title;
+ private String descriptionNoSnippet;
+ private List<String> datasetAuthors = new ArrayList<>();
+ private String deaccessionReason;
+ private List<Highlight> highlightsAsList = new ArrayList<>();
+ private Map<SolrField, Highlight> highlightsMap;
+ private Map<String, Highlight> highlightsAsMap;
+
+ // parent can be dataverse or dataset, store the name and id
+ /**
+ * The "identifier" of a file's parent (a dataset) is a globalId (often a
+ * doi).
+ */
+ public static String PARENT_IDENTIFIER = "identifier";
+ private Map<String, String> parent;
+ private String dataverseAffiliation;
+ private String citation;
+ private String citationHtml;
+ private String datasetType;
+ /**
+ * Files and datasets might have a UNF. Dataverses don't.
+ */
+ private String unf;
+ private String filetype;
+ private String fileContentType;
+ private Long fileSizeInBytes;
+ /**
+ * fileMD5 is here for legacy and backward-compatibility reasons. It might
+ * be deprecated some day in favor of "fileChecksumType" and
+ * "fileChecksumValue"
+ */
+ private String fileMd5;
+ private DataFile.ChecksumType fileChecksumType;
+ private String fileChecksumValue;
+ private String dataverseAlias;
+ private String dataverseParentAlias;
// private boolean statePublished;
- /**
- * @todo Investigate/remove this "unpublishedState" variable. For files that have been published along with a dataset it says "true", which makes no
- * sense.
- */
- private boolean publishedState = false;
- private boolean unpublishedState = false;
- private boolean draftState = false;
- private boolean inReviewState = false;
- private boolean deaccessionedState = false;
- private long datasetVersionId;
- private String versionNumberFriendly;
- // Determine if the search result is owned by any of the dvs in the tree of the DV displayed
- private boolean isInTree;
- private float score;
- private List<String> userRole;
- private boolean harvested = false;
- private String dvTree;
- private String harvestingDescription = null;
- private List<String> fileCategories = null;
- private List<String> tabularDataTags = null;
-
- private String identifierOfDataverse = null;
- private String nameOfDataverse = null;
-
- private String filePersistentId = null;
-
- private Long embargoEndDate;
-
- private Long retentionEndDate;
-
- private boolean datasetValid;
-
- public String getDvTree() {
- return dvTree;
- }
-
- public void setDvTree(String dvTree) {
- this.dvTree = dvTree;
- }
-
- public boolean isIsInTree() {
- return isInTree;
- }
-
- public void setIsInTree(boolean isInTree) {
- this.isInTree = isInTree;
- }
-
- public boolean isHarvested() {
- return harvested;
- }
-
- public void setHarvested(boolean harvested) {
- this.harvested = harvested;
- }
-
- public String getHarvestingDescription() {
- // if (this.isHarvested()) {
- return harvestingDescription;
- // }
- // return null;
- }
-
- public void setHarvestingDescription(String harvestingDescription) {
- this.harvestingDescription = harvestingDescription;
- }
+ /**
+ * @todo Investigate/remove this "unpublishedState" variable. For files that
+ * have been published along with a dataset it says "true", which makes no
+ * sense.
+ */
+ private boolean publishedState = false;
+ private boolean unpublishedState = false;
+ private boolean draftState = false;
+ private boolean inReviewState = false;
+ private boolean deaccessionedState = false;
+ private long datasetVersionId;
+ private String versionNumberFriendly;
+ // Determine if the search result is owned by any of the dvs in the tree of the DV displayed
+ private boolean isInTree;
+ private float score;
+ private List<String> userRole;
+ private boolean harvested = false;
+ private String dvTree;
+ private String harvestingDescription = null;
+ private List<String> fileCategories = null;
+ private List<String> tabularDataTags = null;
+
+ private String identifierOfDataverse = null;
+ private String nameOfDataverse = null;
+
+ private String filePersistentId = null;
+
+ private Long embargoEndDate;
+
+ private Long retentionEndDate;
+
+ private boolean datasetValid;
+
+ public String getDvTree() {
+ return dvTree;
+ }
+
+ public void setDvTree(String dvTree) {
+ this.dvTree = dvTree;
+ }
+
+ public boolean isIsInTree() {
+ return isInTree;
+ }
+
+ public void setIsInTree(boolean isInTree) {
+ this.isInTree = isInTree;
+ }
+
+ public boolean isHarvested() {
+ return harvested;
+ }
+
+ public void setHarvested(boolean harvested) {
+ this.harvested = harvested;
+ }
+
+ public String getHarvestingDescription() {
+ // if (this.isHarvested()) {
+ return harvestingDescription;
+ // }
+ // return null;
+ }
+
+ public void setHarvestingDescription(String harvestingDescription) {
+ this.harvestingDescription = harvestingDescription;
+ }
// public boolean isStatePublished() {
// return statePublished;
// }
@@ -162,1106 +170,1139 @@ public void setHarvestingDescription(String harvestingDescription) {
// this.statePublished = statePublished;
// }
- public boolean isPublishedState() {
- return publishedState;
- }
-
- public void setPublishedState(boolean publishedState) {
- this.publishedState = publishedState;
- }
-
- public boolean isUnpublishedState() {
- return unpublishedState;
- }
-
- public void setUnpublishedState(boolean unpublishedState) {
- this.unpublishedState = unpublishedState;
- }
-
- public void setPublicationStatuses(List<String> statuses) {
-
- if (statuses == null) {
- this.publicationStatuses = new ArrayList<>();
- return;
- }
- this.publicationStatuses = statuses;
-
- // set booleans for individual statuses
- //
- for (String status : this.publicationStatuses) {
-
- if (status.equals(IndexServiceBean.getUNPUBLISHED_STRING())) {
- this.setUnpublishedState(true);
-
- } else if (status.equals(IndexServiceBean.getPUBLISHED_STRING())) {
- this.setPublishedState(true);
-
- } else if (status.equals(IndexServiceBean.getDRAFT_STRING())) {
- this.setDraftState(true);
-
- } else if (status.equals(IndexServiceBean.getIN_REVIEW_STRING())) {
- this.setInReviewState(true);
-
- } else if (status.equals(IndexServiceBean.getDEACCESSIONED_STRING())) {
- this.setDeaccessionedState(true);
- }
- }
- } // setPublicationStatuses
-
- /**
- * Never return null, return an empty list instead
- *
- * @return
- */
- public List<String> getPublicationStatuses() {
-
- if (this.publicationStatuses == null) {
- this.publicationStatuses = new ArrayList<>();
- }
- return this.publicationStatuses;
- }
-
- public JsonArrayBuilder getPublicationStatusesAsJSON() {
-
- JsonArrayBuilder statuses = Json.createArrayBuilder();
- for (String status : this.getPublicationStatuses()) {
- statuses.add(status);
- }
- return statuses;
- }
-
- public boolean isDraftState() {
- return draftState;
- }
-
- public void setDraftState(boolean draftState) {
- this.draftState = draftState;
- }
-
- public boolean isInReviewState() {
- return inReviewState;
- }
-
- public void setInReviewState(boolean inReviewState) {
- this.inReviewState = inReviewState;
- }
-
- public boolean isDeaccessionedState() {
- return deaccessionedState;
- }
-
- public void setDeaccessionedState(boolean deaccessionedState) {
- this.deaccessionedState = deaccessionedState;
- }
-
- /**
- * @todo: used? remove
- */
- private List matchedFields;
-
- // External Status Label (enabled via AllowedCurationLabels setting)
- private String externalStatus;
-
- /**
- * @todo: remove name?
- */
- SolrSearchResult(String queryFromUser, String name) {
- this.query = queryFromUser;
+ public boolean isPublishedState() {
+ return publishedState;
+ }
+
+ public void setPublishedState(boolean publishedState) {
+ this.publishedState = publishedState;
+ }
+
+ public boolean isUnpublishedState() {
+ return unpublishedState;
+ }
+
+ public void setUnpublishedState(boolean unpublishedState) {
+ this.unpublishedState = unpublishedState;
+ }
+
+ public void setPublicationStatuses(List<String> statuses) {
+
+ if (statuses == null) {
+ this.publicationStatuses = new ArrayList<>();
+ return;
+ }
+ this.publicationStatuses = statuses;
+
+ // set booleans for individual statuses
+ //
+ for (String status : this.publicationStatuses) {
+
+ if (status.equals(IndexServiceBean.getUNPUBLISHED_STRING())) {
+ this.setUnpublishedState(true);
+
+ } else if (status.equals(IndexServiceBean.getPUBLISHED_STRING())) {
+ this.setPublishedState(true);
+
+ } else if (status.equals(IndexServiceBean.getDRAFT_STRING())) {
+ this.setDraftState(true);
+
+ } else if (status.equals(IndexServiceBean.getIN_REVIEW_STRING())) {
+ this.setInReviewState(true);
+
+ } else if (status.equals(IndexServiceBean.getDEACCESSIONED_STRING())) {
+ this.setDeaccessionedState(true);
+ }
+ }
+ } // setPublicationStatuses
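+
+ // Usage sketch (illustrative only; "result" is a hypothetical
+ // SolrSearchResult instance and the status strings come from
+ // IndexServiceBean, as above):
+ //
+ //   result.setPublicationStatuses(List.of(
+ //           IndexServiceBean.getPUBLISHED_STRING(),
+ //           IndexServiceBean.getDRAFT_STRING()));
+ //   result.isPublishedState(); // true
+ //   result.isDraftState();     // true
+ //   result.isInReviewState();  // false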
+
+ /**
+ * Never returns null; returns an empty list instead.
+ *
+ * @return the publication statuses; never null, possibly empty
+ */
+ public List<String> getPublicationStatuses() {
+
+ if (this.publicationStatuses == null) {
+ this.publicationStatuses = new ArrayList<>();
+ }
+ return this.publicationStatuses;
+ }
+
+ public JsonArrayBuilder getPublicationStatusesAsJSON() {
+
+ JsonArrayBuilder statuses = Json.createArrayBuilder();
+ for (String status : this.getPublicationStatuses()) {
+ statuses.add(status);
+ }
+ return statuses;
+ }
+
+ public boolean isDraftState() {
+ return draftState;
+ }
+
+ public void setDraftState(boolean draftState) {
+ this.draftState = draftState;
+ }
+
+ public boolean isInReviewState() {
+ return inReviewState;
+ }
+
+ public void setInReviewState(boolean inReviewState) {
+ this.inReviewState = inReviewState;
+ }
+
+ public boolean isDeaccessionedState() {
+ return deaccessionedState;
+ }
+
+ public void setDeaccessionedState(boolean deaccessionedState) {
+ this.deaccessionedState = deaccessionedState;
+ }
+
+ /**
+ * @todo: used? remove
+ */
+ private List<String> matchedFields;
+
+ // External Status Label (enabled via AllowedCurationLabels setting)
+ private String externalStatus;
+
+ /**
+ * @todo: remove name?
+ */
+ SolrSearchResult(String queryFromUser, String name) {
+ this.query = queryFromUser;
// this.name = name;
- }
-
- public Map getHighlightsAsMap() {
- return highlightsAsMap;
- }
-
- public void setHighlightsAsMap(Map highlightsAsMap) {
- this.highlightsAsMap = highlightsAsMap;
- }
-
- public String getNameHighlightSnippet() {
- Highlight highlight = highlightsAsMap.get(SearchFields.NAME);
- if (highlight != null) {
- String firstSnippet = highlight.getSnippets().get(0);
- if (firstSnippet != null) {
- return firstSnippet;
- }
- }
- return null;
- }
-
- public String getDataverseAffiliationHighlightSnippet() {
- Highlight highlight = highlightsAsMap.get(SearchFields.AFFILIATION);
- if (highlight != null) {
- String firstSnippet = highlight.getSnippets().get(0);
- if (firstSnippet != null) {
- return firstSnippet;
- }
- }
- return null;
- }
-
- public String getFileTypeHighlightSnippet() {
- Highlight highlight = highlightsAsMap.get(SearchFields.FILE_TYPE_FRIENDLY);
- if (highlight != null) {
- String firstSnippet = highlight.getSnippets().get(0);
- if (firstSnippet != null) {
- return firstSnippet;
- }
- }
- return null;
- }
-
- public String getTitleHighlightSnippet() {
- /**
- * @todo: don't hard-code title, look it up properly... or start indexing titles as names: https://redmine.hmdc.harvard.edu/issues/3798#note-2
- */
- Highlight highlight = highlightsAsMap.get("title");
- if (highlight != null) {
- String firstSnippet = highlight.getSnippets().get(0);
- if (firstSnippet != null) {
- return firstSnippet;
- }
- }
- return null;
- }
-
- public List getDescriptionSnippets() {
- for (Map.Entry entry : highlightsMap.entrySet()) {
- SolrField solrField = entry.getKey();
- Highlight highlight = entry.getValue();
- logger.fine("SolrSearchResult class: " + solrField.getNameSearchable() + ":" + highlight.getSnippets());
- }
-
- Highlight highlight = highlightsAsMap.get(SearchFields.DESCRIPTION);
- if (type.equals("datasets")) {
- highlight = highlightsAsMap.get(SearchFields.DATASET_DESCRIPTION);
- }
- if (highlight != null) {
- return highlight.getSnippets();
- } else {
- return new ArrayList<>();
- }
- }
-
- public Map getHighlightsMap() {
- return highlightsMap;
- }
-
- public void setHighlightsMap(Map highlightsMap) {
- this.highlightsMap = highlightsMap;
- }
-
- public List getMatchedFields() {
- return matchedFields;
- }
-
- public void setMatchedFields(List matchedFields) {
- this.matchedFields = matchedFields;
- }
-
- @Override
- public String toString() {
- if (this.name != null) {
- return this.id + ":" + this.name + ":" + this.entityId;
- } else {
- return this.id + ":" + this.title + ":" + this.entityId;
- }
- }
-
- public JsonArrayBuilder getRelevance() {
- JsonArrayBuilder matchedFieldsArray = Json.createArrayBuilder();
- JsonObjectBuilder matchedFieldObject = Json.createObjectBuilder();
- for (Map.Entry entry : highlightsMap.entrySet()) {
- SolrField solrField = entry.getKey();
- Highlight snippets = entry.getValue();
- JsonArrayBuilder snippetArrayBuilder = Json.createArrayBuilder();
- JsonObjectBuilder matchedFieldDetails = Json.createObjectBuilder();
- for (String highlight : snippets.getSnippets()) {
- snippetArrayBuilder.add(highlight);
- }
- /**
- * @todo for the Search API, it might be nice to return offset numbers rather than html snippets surrounded by span tags or whatever.
- *
- * That's what the GitHub Search API does: "Requests can opt to receive those text fragments in the response, and every fragment is accompanied
- * by numeric offsets identifying the exact location of each matching search term." https://developer.github.com/v3/search/#text-match-metadata
- *
- * It's not clear if getting the offset values is possible with Solr, however:
- * stackoverflow.com/questions/13863118/can-solr-highlighting-also-indicate-the-position-or-offset-of-the-returned-fragments-within-the-original-field
- */
- matchedFieldDetails.add("snippets", snippetArrayBuilder);
- /**
- * @todo In addition to the name of the field used by Solr , it would be nice to show the "friendly" name of the field we show in the GUI.
- */
+ }
+
+ public Map<String, Highlight> getHighlightsAsMap() {
+ return highlightsAsMap;
+ }
+
+ public void setHighlightsAsMap(Map<String, Highlight> highlightsAsMap) {
+ this.highlightsAsMap = highlightsAsMap;
+ }
+
+ public String getNameHighlightSnippet() {
+ Highlight highlight = highlightsAsMap.get(SearchFields.NAME);
+ if (highlight != null) {
+ String firstSnippet = highlight.getSnippets().get(0);
+ if (firstSnippet != null) {
+ return firstSnippet;
+ }
+ }
+ return null;
+ }
+
+ public String getDataverseAffiliationHighlightSnippet() {
+ Highlight highlight = highlightsAsMap.get(SearchFields.AFFILIATION);
+ if (highlight != null) {
+ String firstSnippet = highlight.getSnippets().get(0);
+ if (firstSnippet != null) {
+ return firstSnippet;
+ }
+ }
+ return null;
+ }
+
+ public String getFileTypeHighlightSnippet() {
+ Highlight highlight = highlightsAsMap.get(SearchFields.FILE_TYPE_FRIENDLY);
+ if (highlight != null) {
+ String firstSnippet = highlight.getSnippets().get(0);
+ if (firstSnippet != null) {
+ return firstSnippet;
+ }
+ }
+ return null;
+ }
+
+ public String getTitleHighlightSnippet() {
+ /**
+ * @todo: don't hard-code title, look it up properly... or start
+ * indexing titles as names:
+ * https://redmine.hmdc.harvard.edu/issues/3798#note-2
+ */
+ Highlight highlight = highlightsAsMap.get("title");
+ if (highlight != null) {
+ String firstSnippet = highlight.getSnippets().get(0);
+ if (firstSnippet != null) {
+ return firstSnippet;
+ }
+ }
+ return null;
+ }
+
+ public List<String> getDescriptionSnippets() {
+ for (Map.Entry<SolrField, Highlight> entry : highlightsMap.entrySet()) {
+ SolrField solrField = entry.getKey();
+ Highlight highlight = entry.getValue();
+ logger.fine("SolrSearchResult class: " + solrField.getNameSearchable() + ":" + highlight.getSnippets());
+ }
+
+ Highlight highlight = highlightsAsMap.get(SearchFields.DESCRIPTION);
+ if (type.equals("datasets")) {
+ highlight = highlightsAsMap.get(SearchFields.DATASET_DESCRIPTION);
+ }
+ if (highlight != null) {
+ return highlight.getSnippets();
+ } else {
+ return new ArrayList<>();
+ }
+ }
+
+ public Map<SolrField, Highlight> getHighlightsMap() {
+ return highlightsMap;
+ }
+
+ public void setHighlightsMap(Map<SolrField, Highlight> highlightsMap) {
+ this.highlightsMap = highlightsMap;
+ }
+
+ public List<String> getMatchedFields() {
+ return matchedFields;
+ }
+
+ public void setMatchedFields(List<String> matchedFields) {
+ this.matchedFields = matchedFields;
+ }
+
+ @Override
+ public String toString() {
+ if (this.name != null) {
+ return this.id + ":" + this.name + ":" + this.entityId;
+ } else {
+ return this.id + ":" + this.title + ":" + this.entityId;
+ }
+ }
+
+ public JsonArrayBuilder getRelevance() {
+ JsonArrayBuilder matchedFieldsArray = Json.createArrayBuilder();
+ JsonObjectBuilder matchedFieldObject = Json.createObjectBuilder();
+ for (Map.Entry<SolrField, Highlight> entry : highlightsMap.entrySet()) {
+ SolrField solrField = entry.getKey();
+ Highlight snippets = entry.getValue();
+ JsonArrayBuilder snippetArrayBuilder = Json.createArrayBuilder();
+ JsonObjectBuilder matchedFieldDetails = Json.createObjectBuilder();
+ for (String highlight : snippets.getSnippets()) {
+ snippetArrayBuilder.add(highlight);
+ }
+ /**
+ * @todo for the Search API, it might be nice to return offset
+ * numbers rather than html snippets surrounded by span tags or
+ * whatever.
+ *
+ * That's what the GitHub Search API does: "Requests can opt to
+ * receive those text fragments in the response, and every fragment
+ * is accompanied by numeric offsets identifying the exact location
+ * of each matching search term."
+ * https://developer.github.com/v3/search/#text-match-metadata
+ *
+ * It's not clear if getting the offset values is possible with
+ * Solr, however:
+ * stackoverflow.com/questions/13863118/can-solr-highlighting-also-indicate-the-position-or-offset-of-the-returned-fragments-within-the-original-field
+ */
+ matchedFieldDetails.add("snippets", snippetArrayBuilder);
+ /**
+ * @todo In addition to the name of the field used by Solr , it
+ * would be nice to show the "friendly" name of the field we show in
+ * the GUI.
+ */
// matchedFieldDetails.add("friendly", "FIXME");
- matchedFieldObject.add(solrField.getNameSearchable(), matchedFieldDetails);
- matchedFieldsArray.add(matchedFieldObject);
- }
- return matchedFieldsArray;
- }
-
- /**
- * Add additional fields for the MyData page
- *
- * @return
- */
- public JsonObjectBuilder getJsonForMyData(boolean isValid) {
-
- JsonObjectBuilder myDataJson = json(true, true, true);// boolean showRelevance, boolean showEntityIds, boolean showApiUrls)
-
- myDataJson.add("publication_statuses", this.getPublicationStatusesAsJSON())
- .add("is_draft_state", this.isDraftState()).add("is_in_review_state", this.isInReviewState())
- .add("is_unpublished_state", this.isUnpublishedState()).add("is_published", this.isPublishedState())
- .add("is_deaccesioned", this.isDeaccessionedState())
- .add("is_valid", isValid)
- .add("date_to_display_on_card", getDateToDisplayOnCard());
-
- // Add is_deaccessioned attribute, even though MyData currently screens any deaccessioned info out
- //
- if ((this.isDeaccessionedState()) && (this.getPublicationStatuses().size() == 1)) {
- myDataJson.add("deaccesioned_is_only_pubstatus", true);
- }
-
- if ((this.getParent() != null) && (!this.getParent().isEmpty())) {
- // System.out.println("keys:" + parent.keySet().toString());
- if (this.entity != null && this.entity.isInstanceofDataFile()) {
- myDataJson.add("parentIdentifier", this.getParent().get(SolrSearchResult.PARENT_IDENTIFIER))
- .add("parentName", this.getParent().get("name"));
-
- } else {
- // for Dataverse and Dataset, get parent which is a Dataverse
- myDataJson.add("parentId", this.getParent().get("id")).add("parentName", this.getParent().get("name"));
- }
- }
-
- return myDataJson;
- } // getJsonForMydata
-
- public JsonObjectBuilder json(boolean showRelevance, boolean showEntityIds, boolean showApiUrls) {
- return json(showRelevance, showEntityIds, showApiUrls, null, null);
- }
-
- public JsonObjectBuilder json(boolean showRelevance, boolean showEntityIds, boolean showApiUrls, List metadataFields, Long datasetFileCount) {
- if (this.type == null) {
- return jsonObjectBuilder();
- }
-
- String displayName = null;
-
- String identifierLabel = null;
- String datasetCitation = null;
- String datasetName = null;
- String datasetId = null;
- String datasetPersistentId = null;
- String filePersistentId = null;
- String preferredUrl = null;
- String apiUrl = null;
- String publisherName = null;
-
- if (this.type.equals(SearchConstants.DATAVERSES)) {
- displayName = this.name;
- identifierLabel = "identifier";
- preferredUrl = getHtmlUrl();
- } else if (this.type.equals(SearchConstants.DATASETS)) {
- displayName = this.title;
- identifierLabel = "global_id";
- preferredUrl = getPersistentUrl();
- publisherName = this.parent.get("name");
- // if
- /**
- * @todo Should we show the name of the parent dataverse?
- */
- } else if (this.type.equals(SearchConstants.FILES)) {
- displayName = this.name;
- identifierLabel = "file_id";
- preferredUrl = getDownloadUrl();
- /**
- * @todo show more information for a file's parent, such as the title of the dataset it belongs to.
- */
- datasetCitation = parent.get("citation");
- datasetName = parent.get("name");
- datasetId = parent.get("id");
- datasetPersistentId = parent.get(SolrSearchResult.PARENT_IDENTIFIER);
- }
-
- // displayName = null; // testing NullSafeJsonBuilder
- // because we are using NullSafeJsonBuilder key/value pairs will be dropped if the value is null
- NullSafeJsonBuilder nullSafeJsonBuilder = jsonObjectBuilder().add("name", displayName)
- .add("type", getDisplayType(getType())).add("url", preferredUrl).add("image_url", getImageUrl())
- // .add("persistent_url", this.persistentUrl)
- // .add("download_url", this.downloadUrl)
- /**
- * @todo How much value is there in exposing the identifier for dataverses? For
- */
- .add(identifierLabel, this.identifier)
- /**
- * @todo Get dataset description from dsDescriptionValue. Also, is descriptionNoSnippet the right field to use generally?
- *
- * @todo What about the fact that datasets can now have multiple descriptions? Should we create an array called "additional_descriptions" that gets
- * populated if there is more than one dataset description?
- *
- * @todo Why aren't file descriptions ever null? They always have an empty string at least.
- */
- .add("description", this.descriptionNoSnippet)
- /**
- * @todo In the future we'd like to support non-public datasets per https://github.com/IQSS/dataverse/issues/1299 but for now we are only supporting
- * non-public searches.
- */
- .add("published_at", getDateTimePublished())
- /**
- * @todo Expose MIME Type: https://github.com/IQSS/dataverse/issues/1595
- */
- .add("file_type", this.filetype).add("file_content_type", this.fileContentType)
- .add("size_in_bytes", getFileSizeInBytes())
- /**
- * "md5" was the only possible value so it's hard-coded here but we might want to deprecate it someday since we now put the MD5 or SHA-1 in
- * "checksum".
- */
- .add("md5", getFileMd5())
- .add("checksum", JsonPrinter.getChecksumTypeAndValue(getFileChecksumType(), getFileChecksumValue()))
- .add("unf", getUnf()).add("file_persistent_id", this.filePersistentId).add("dataset_name", datasetName)
- .add("dataset_id", datasetId).add("publisher", publisherName)
- .add("dataset_persistent_id", datasetPersistentId).add("dataset_citation", datasetCitation)
- .add("deaccession_reason", this.deaccessionReason).add("citationHtml", this.citationHtml)
- .add("identifier_of_dataverse", this.identifierOfDataverse)
- .add("name_of_dataverse", this.nameOfDataverse).add("citation", this.citation);
- // Now that nullSafeJsonBuilder has been instatiated, check for null before adding to it!
- if (showRelevance) {
- nullSafeJsonBuilder.add("matches", getRelevance());
- nullSafeJsonBuilder.add("score", getScore());
- }
- if (showEntityIds) {
- if (this.entityId != null) {
- nullSafeJsonBuilder.add("entity_id", this.entityId);
- }
- }
- if (!getPublicationStatuses().isEmpty()) {
- nullSafeJsonBuilder.add("publicationStatuses", getPublicationStatusesAsJSON());
- }
-
- if (this.entity == null) {
-
- } else {
- if (this.entity.isInstanceofDataset()) {
- nullSafeJsonBuilder.add("storageIdentifier", this.entity.getStorageIdentifier());
- Dataset ds = (Dataset) this.entity;
- DatasetVersion dv = ds.getVersionFromId(this.datasetVersionId);
-
- if (!dv.getKeywords().isEmpty()) {
- JsonArrayBuilder keyWords = Json.createArrayBuilder();
- for (String keyword : dv.getKeywords()) {
- keyWords.add(keyword);
- }
- nullSafeJsonBuilder.add("keywords", keyWords);
- }
-
- JsonArrayBuilder subjects = Json.createArrayBuilder();
- for (String subject : dv.getDatasetSubjects()) {
- subjects.add(subject);
- }
- nullSafeJsonBuilder.add("subjects", subjects);
- nullSafeJsonBuilder.add("fileCount", datasetFileCount);
- nullSafeJsonBuilder.add("versionId", dv.getId());
- nullSafeJsonBuilder.add("versionState", dv.getVersionState().toString());
- if (this.isPublishedState()) {
- nullSafeJsonBuilder.add("majorVersion", dv.getVersionNumber());
- nullSafeJsonBuilder.add("minorVersion", dv.getMinorVersionNumber());
- }
-
- nullSafeJsonBuilder.add("createdAt", ds.getCreateDate());
- nullSafeJsonBuilder.add("updatedAt", ds.getModificationTime());
-
- if (!dv.getDatasetContacts().isEmpty()) {
- JsonArrayBuilder contacts = Json.createArrayBuilder();
- NullSafeJsonBuilder nullSafeJsonBuilderInner = jsonObjectBuilder();
- for (String contact[] : dv.getDatasetContacts(false)) {
- nullSafeJsonBuilderInner.add("name", contact[0]);
- nullSafeJsonBuilderInner.add("affiliation", contact[1]);
- contacts.add(nullSafeJsonBuilderInner);
- }
- nullSafeJsonBuilder.add("contacts", contacts);
- }
- if (!dv.getRelatedPublications().isEmpty()) {
- JsonArrayBuilder relPub = Json.createArrayBuilder();
- NullSafeJsonBuilder inner = jsonObjectBuilder();
- for (DatasetRelPublication dsRelPub : dv.getRelatedPublications()) {
- inner.add("title", dsRelPub.getTitle());
- inner.add("citation", dsRelPub.getText());
- inner.add("url", dsRelPub.getUrl());
- relPub.add(inner);
- }
- nullSafeJsonBuilder.add("publications", relPub);
- }
-
- if (!dv.getDatasetProducers().isEmpty()) {
- JsonArrayBuilder producers = Json.createArrayBuilder();
- for (String[] producer : dv.getDatasetProducers()) {
- producers.add(producer[0]);
- }
- nullSafeJsonBuilder.add("producers", producers);
- }
- if (!dv.getRelatedMaterial().isEmpty()) {
- JsonArrayBuilder relatedMaterials = Json.createArrayBuilder();
- for (String relatedMaterial : dv.getRelatedMaterial()) {
- relatedMaterials.add(relatedMaterial);
- }
- nullSafeJsonBuilder.add("relatedMaterial", relatedMaterials);
- }
-
- if (!dv.getGeographicCoverage().isEmpty()) {
- JsonArrayBuilder geoCov = Json.createArrayBuilder();
- NullSafeJsonBuilder inner = jsonObjectBuilder();
- for (String ind[] : dv.getGeographicCoverage()) {
- inner.add("country", ind[0]);
- inner.add("state", ind[1]);
- inner.add("city", ind[2]);
- inner.add("other", ind[3]);
- geoCov.add(inner);
- }
- nullSafeJsonBuilder.add("geographicCoverage", geoCov);
- }
- if (!dv.getDataSource().isEmpty()) {
- JsonArrayBuilder dataSources = Json.createArrayBuilder();
- for (String dsource : dv.getDataSource()) {
- dataSources.add(dsource);
- }
- nullSafeJsonBuilder.add("dataSources", dataSources);
- }
-
- if (CollectionUtils.isNotEmpty(metadataFields)) {
- // create metadata fields map names
- Map> metadataFieldMapNames = computeRequestedMetadataFieldMapNames(
- metadataFields);
-
- // add metadatafields objet to wrap all requeested fields
- NullSafeJsonBuilder metadataFieldBuilder = jsonObjectBuilder();
-
- Map> groupedFields = DatasetField
- .groupByBlock(dv.getFlatDatasetFields());
- json(metadataFieldMapNames, groupedFields, metadataFieldBuilder);
-
- nullSafeJsonBuilder.add("metadataBlocks", metadataFieldBuilder);
- }
- }
- }
-
- if (showApiUrls) {
- /**
- * @todo We should probably have a metadata_url or api_url concept enabled by default, not hidden behind an undocumented boolean. For datasets, this
- * would be http://example.com/api/datasets/10 or whatever (to get more detailed JSON), but right now this requires an API token. Discuss at
- * https://docs.google.com/document/d/1d8sT2GLSavgiAuMTVX8KzTCX0lROEET1edhvHHRDZOs/edit?usp=sharing";
- */
- if (getApiUrl() != null) {
- nullSafeJsonBuilder.add("api_url", getApiUrl());
- }
- }
- // NullSafeJsonBuilder is awesome but can't build null safe arrays. :(
- if (!datasetAuthors.isEmpty()) {
- JsonArrayBuilder authors = Json.createArrayBuilder();
- for (String datasetAuthor : datasetAuthors) {
- authors.add(datasetAuthor);
- }
- nullSafeJsonBuilder.add("authors", authors);
- }
- return nullSafeJsonBuilder;
- }
-
- private void json(Map> metadataFieldMapNames,
- Map> groupedFields, NullSafeJsonBuilder metadataFieldBuilder) {
- for (Map.Entry> metadataFieldNamesEntry : metadataFieldMapNames.entrySet()) {
- String metadataBlockName = metadataFieldNamesEntry.getKey();
- List metadataBlockFieldNames = metadataFieldNamesEntry.getValue();
- for (MetadataBlock metadataBlock : groupedFields.keySet()) {
- if (metadataBlockName.equals(metadataBlock.getName())) {
- // create metadataBlock object
- NullSafeJsonBuilder metadataBlockBuilder = jsonObjectBuilder();
- metadataBlockBuilder.add("displayName", metadataBlock.getDisplayName());
- JsonArrayBuilder fieldsArray = Json.createArrayBuilder();
-
- List datasetFields = groupedFields.get(metadataBlock);
- for (DatasetField datasetField : datasetFields) {
- if (metadataBlockFieldNames.contains("*")
- || metadataBlockFieldNames.contains(datasetField.getDatasetFieldType().getName())) {
- if (datasetField.getDatasetFieldType().isCompound() || !datasetField.getDatasetFieldType().isHasParent()) {
- JsonObject item = JsonPrinter.json(datasetField);
- if (item != null) {
- fieldsArray.add(item);
- }
- }
- }
- }
- // with a fields to hold all requested properties
- metadataBlockBuilder.add("fields", fieldsArray);
-
- metadataFieldBuilder.add(metadataBlock.getName(), metadataBlockBuilder);
- }
- }
- }
- }
-
- private Map> computeRequestedMetadataFieldMapNames(List metadataFields) {
- Map> metadataFieldMapNames = new HashMap<>();
- for (String metadataField : metadataFields) {
- String parts[] = metadataField.split(":");
- if (parts.length == 2) {
- List metadataFieldNames = metadataFieldMapNames.get(parts[0]);
- if (metadataFieldNames == null) {
- metadataFieldNames = new ArrayList<>();
- metadataFieldMapNames.put(parts[0], metadataFieldNames);
- }
- metadataFieldNames.add(parts[1]);
- }
- }
- return metadataFieldMapNames;
- }
-
- private String getDateTimePublished() {
- String datePublished = null;
- if (draftState == false) {
- datePublished = releaseOrCreateDate == null ? null : Util.getDateTimeFormat().format(releaseOrCreateDate);
- }
- return datePublished;
- }
-
- public String getId() {
- return id;
- }
-
- public void setId(String id) {
- this.id = id;
- }
-
- public Long getEntityId() {
- return entityId;
- }
-
- public void setEntityId(Long entityId) {
- this.entityId = entityId;
- }
-
- public DvObject getEntity() {
- return entity;
- }
-
- public void setEntity(DvObject entity) {
- this.entity = entity;
- }
-
- public String getIdentifier() {
- return identifier;
- }
-
- public void setIdentifier(String identifier) {
- this.identifier = identifier;
- }
-
- public String getType() {
- return type;
- }
-
- public void setType(String type) {
- this.type = type;
- }
-
- public String getHtmlUrl() {
- return htmlUrl;
- }
-
- public void setHtmlUrl(String htmlUrl) {
- this.htmlUrl = htmlUrl;
- }
-
- public String getPersistentUrl() {
- return persistentUrl;
- }
-
- public void setPersistentUrl(String persistentUrl) {
- this.persistentUrl = persistentUrl;
- }
-
- public String getDownloadUrl() {
- return downloadUrl;
- }
-
- public void setDownloadUrl(String downloadUrl) {
- this.downloadUrl = downloadUrl;
- }
-
- public String getApiUrl() {
- return apiUrl;
- }
-
- public void setApiUrl(String apiUrl) {
- this.apiUrl = apiUrl;
- }
-
- public String getImageUrl() {
- return imageUrl;
- }
-
- public void setImageUrl(String imageUrl) {
- this.imageUrl = imageUrl;
- }
-
- public DatasetThumbnail getDatasetThumbnail() {
- return datasetThumbnail;
- }
-
- public void setDatasetThumbnail(DatasetThumbnail datasetThumbnail) {
- this.datasetThumbnail = datasetThumbnail;
- }
-
- public String getQuery() {
- return query;
- }
-
- public void setQuery(String query) {
- this.query = query;
- }
-
- public String getName() {
- return name;
- }
-
- public void setName(String name) {
- this.name = name;
- }
-
- public String getTitle() {
- return title;
- }
-
- public void setTitle(String title) {
- this.title = title;
- }
-
- public String getDescriptionNoSnippet() {
- return descriptionNoSnippet;
- }
-
- public void setDescriptionNoSnippet(String descriptionNoSnippet) {
- this.descriptionNoSnippet = descriptionNoSnippet;
- }
-
- public List getDatasetAuthors() {
- return datasetAuthors;
- }
-
- public void setDatasetAuthors(List datasetAuthors) {
- this.datasetAuthors = datasetAuthors;
- }
-
- public String getDeaccessionReason() {
- return deaccessionReason;
- }
-
- public void setDeaccessionReason(String deaccessionReason) {
- this.deaccessionReason = deaccessionReason;
- }
-
- public List getHighlightsAsListOrig() {
- return highlightsAsList;
- }
-
- public List getHighlightsAsList() {
- List filtered = new ArrayList<>();
- for (Highlight highlight : highlightsAsList) {
- String field = highlight.getSolrField().getNameSearchable();
- /**
- * @todo don't hard code "title" here. And should we collapse name and title together anyway?
- */
- if (!field.equals(SearchFields.NAME) && !field.equals(SearchFields.DESCRIPTION)
- && !field.equals(SearchFields.DATASET_DESCRIPTION) && !field.equals(SearchFields.AFFILIATION)
- && !field.equals("title")) {
- filtered.add(highlight);
- }
- }
- return filtered;
- }
-
- public void setHighlightsAsList(List highlightsAsList) {
- this.highlightsAsList = highlightsAsList;
- }
-
- public List getFileCategories() {
- return fileCategories;
- }
-
- public void setFileCategories(List fileCategories) {
- this.fileCategories = fileCategories;
- }
-
- public List getTabularDataTags() {
- return tabularDataTags;
- }
-
- public void setTabularDataTags(List tabularDataTags) {
- this.tabularDataTags = tabularDataTags;
- }
-
- public Map getParent() {
- return parent;
- }
-
- public Long getParentIdAsLong() {
-
- if (this.getParent() == null) {
- return null;
- }
- if (!this.getParent().containsKey("id")) {
- return null;
- }
-
- String parentIdString = getParent().get("id");
- if (parentIdString == null) {
- return null;
- }
-
- try {
- return Long.parseLong(parentIdString);
- } catch (NumberFormatException ex) {
- return null;
- }
- }
-
- public void setParent(Map parent) {
- this.parent = parent;
- }
-
- public String getDataverseAffiliation() {
- return dataverseAffiliation;
- }
-
- public void setDataverseAffiliation(String dataverseAffiliation) {
- this.dataverseAffiliation = dataverseAffiliation;
- }
-
- public String getCitation() {
- return citation;
- }
-
- public void setCitation(String citation) {
- this.citation = citation;
- }
-
- public String getCitationHtml() {
- return citationHtml;
- }
-
- public void setCitationHtml(String citationHtml) {
- this.citationHtml = citationHtml;
- }
-
- public String getFiletype() {
- return filetype;
- }
-
- public void setFiletype(String filetype) {
- this.filetype = filetype;
- }
-
- public String getFileContentType() {
- return fileContentType;
- }
-
- public void setFileContentType(String fileContentType) {
- this.fileContentType = fileContentType;
- }
-
- public String getUnf() {
- return unf;
- }
-
- public void setUnf(String unf) {
- this.unf = unf;
- }
-
- public Long getFileSizeInBytes() {
- return fileSizeInBytes;
- }
-
- public void setFileSizeInBytes(Long fileSizeInBytes) {
- this.fileSizeInBytes = fileSizeInBytes;
- }
-
- public String getFileMd5() {
- if (DataFile.ChecksumType.MD5.equals(getFileChecksumType())) {
- return fileMd5;
- } else {
- return null;
- }
- }
-
- public void setFileMd5(String fileMd5) {
- this.fileMd5 = fileMd5;
- }
-
- public DataFile.ChecksumType getFileChecksumType() {
- return fileChecksumType;
- }
-
- public void setFileChecksumType(DataFile.ChecksumType fileChecksumType) {
- this.fileChecksumType = fileChecksumType;
- }
-
- public String getFileChecksumValue() {
- return fileChecksumValue;
- }
-
- public void setFileChecksumValue(String fileChecksumValue) {
- this.fileChecksumValue = fileChecksumValue;
- }
-
- public String getNameSort() {
- return nameSort;
- }
-
- public void setNameSort(String nameSort) {
- this.nameSort = nameSort;
- }
-
- public String getStatus() {
- return status;
- }
-
- void setStatus(String status) {
- this.status = status;
- }
-
- public Date getReleaseOrCreateDate() {
- return releaseOrCreateDate;
- }
-
- public void setReleaseOrCreateDate(Date releaseOrCreateDate) {
- this.releaseOrCreateDate = releaseOrCreateDate;
- }
-
- public String getDateToDisplayOnCard() {
- return DateUtil.formatDate(this.releaseOrCreateDate);
- }
-
- public long getDatasetVersionId() {
- return datasetVersionId;
- }
-
- public void setDatasetVersionId(long datasetVersionId) {
- this.datasetVersionId = datasetVersionId;
- }
-
- public String getVersionNumberFriendly() {
- return versionNumberFriendly;
- }
-
- public void setVersionNumberFriendly(String versionNumberFriendly) {
- this.versionNumberFriendly = versionNumberFriendly;
- }
-
- public String getDatasetUrl() {
- String failSafeUrl = "/dataset.xhtml?id=" + entityId + "&versionId=" + datasetVersionId;
- if (identifier != null) {
- /**
- * Unfortunately, colons in the globalId (doi:10...) are converted to %3A (doi%3A10...). To prevent this we switched many JSF tags to a plain "a" tag
- * with an href as suggested at http://stackoverflow.com/questions/24733959/houtputlink-value-escaped
- */
- String badString = "null";
- if (!identifier.contains(badString)) {
- if (entity != null && entity instanceof Dataset) {
- if (this.isHarvested() && ((Dataset) entity).getHarvestedFrom() != null) {
- String remoteArchiveUrl = ((Dataset) entity).getRemoteArchiveURL();
- if (remoteArchiveUrl != null) {
- return remoteArchiveUrl;
- }
- return null;
- }
- }
- if (isDraftState()) {
- return "/dataset.xhtml?persistentId=" + identifier + "&version=DRAFT";
- }
- return "/dataset.xhtml?persistentId=" + identifier;
- } else {
- logger.info("Dataset identifier/globalId contains \"" + badString
- + "\" perhaps due to https://github.com/IQSS/dataverse/issues/1147 . Fix data in database and reindex. Returning failsafe URL: "
- + failSafeUrl);
- return failSafeUrl;
- }
- } else {
- logger.info("Dataset identifier/globalId was null. Returning failsafe URL: " + failSafeUrl);
- return failSafeUrl;
- }
- }
-
- public String getFileParentIdentifier() {
- if (entity == null) {
- return null;
- }
- if (entity instanceof DataFile) {
- return parent.get(PARENT_IDENTIFIER); // Dataset globalID
- }
-
- return null;
- // if (entity)
- }
-
- public String getFilePersistentId() {
- return filePersistentId;
- }
-
- public void setFilePersistentId(String pid) {
- filePersistentId = pid;
- }
-
- public String getFileUrl() {
- // Nothing special needs to be done for harvested file URLs:
- // simply directing these to the local dataset.xhtml for this dataset
- // will take care of it - because DatasetPage will issue a redirect
- // to the remote archive URL.
- // This is true AS OF 4.2.4, FEB. 2016! - We'll probably want to make
- // .getRemoteArchiveURL() methods, both in DataFile and Dataset objects,
- // work again at some point in the future.
- /*
+ matchedFieldObject.add(solrField.getNameSearchable(), matchedFieldDetails);
+ matchedFieldsArray.add(matchedFieldObject);
+ }
+ return matchedFieldsArray;
+ }
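+
+ // Shape of the array this builds, sketched for a single matched field
+ // (field name and snippet markup illustrative; the markup comes from
+ // Solr highlighting):
+ //
+ //   [ { "dsDescriptionValue": { "snippets": ["... a <span>match</span> ..."] } } ]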
+
+ /**
+ * Adds additional fields used by the MyData page.
+ *
+ * @return a JSON object builder with the MyData-specific fields added
+ */
+ public JsonObjectBuilder getJsonForMyData(boolean isValid) {
+
+ JsonObjectBuilder myDataJson = json(true, true, true); // showRelevance, showEntityIds, showApiUrls
+
+ myDataJson.add("publication_statuses", this.getPublicationStatusesAsJSON())
+ .add("is_draft_state", this.isDraftState()).add("is_in_review_state", this.isInReviewState())
+ .add("is_unpublished_state", this.isUnpublishedState()).add("is_published", this.isPublishedState())
+ .add("is_deaccesioned", this.isDeaccessionedState())
+ .add("is_valid", isValid)
+ .add("date_to_display_on_card", getDateToDisplayOnCard());
+
+ // Add is_deaccessioned attribute, even though MyData currently screens any deaccessioned info out
+ //
+ if ((this.isDeaccessionedState()) && (this.getPublicationStatuses().size() == 1)) {
+ myDataJson.add("deaccesioned_is_only_pubstatus", true);
+ }
+
+ if ((this.getParent() != null) && (!this.getParent().isEmpty())) {
+ // System.out.println("keys:" + parent.keySet().toString());
+ if (this.entity != null && this.entity.isInstanceofDataFile()) {
+ myDataJson.add("parentIdentifier", this.getParent().get(SolrSearchResult.PARENT_IDENTIFIER))
+ .add("parentName", this.getParent().get("name"));
+
+ } else {
+ // for Dataverse and Dataset, get parent which is a Dataverse
+ myDataJson.add("parentId", this.getParent().get("id")).add("parentName", this.getParent().get("name"));
+ }
+ }
+
+ return myDataJson;
+ } // getJsonForMydata
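+
+ // Sketch of the extra keys this adds on top of json(true, true, true),
+ // with illustrative values:
+ //
+ //   "publication_statuses": ["Published"], "is_draft_state": false,
+ //   "is_in_review_state": false, "is_unpublished_state": false,
+ //   "is_published": true, "is_deaccesioned": false, "is_valid": true,
+ //   "date_to_display_on_card": "Feb 1, 2016",
+ //   "parentId": "1", "parentName": "Root"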
+
+ public JsonObjectBuilder json(boolean showRelevance, boolean showEntityIds, boolean showApiUrls) {
+ return json(showRelevance, showEntityIds, showApiUrls, null, null);
+ }
+
+ public JsonObjectBuilder json(boolean showRelevance, boolean showEntityIds, boolean showApiUrls, List<String> metadataFields, Long datasetFileCount) {
+ if (this.type == null) {
+ return jsonObjectBuilder();
+ }
+
+ String displayName = null;
+
+ String identifierLabel = null;
+ String datasetCitation = null;
+ String datasetName = null;
+ String datasetId = null;
+ String datasetPersistentId = null;
+ String filePersistentId = null;
+ String preferredUrl = null;
+ String apiUrl = null;
+ String publisherName = null;
+
+ if (this.type.equals(SearchConstants.DATAVERSES)) {
+ displayName = this.name;
+ identifierLabel = "identifier";
+ preferredUrl = getHtmlUrl();
+ } else if (this.type.equals(SearchConstants.DATASETS)) {
+ displayName = this.title;
+ identifierLabel = "global_id";
+ preferredUrl = getPersistentUrl();
+ publisherName = this.parent.get("name");
+ /**
+ * @todo Should we show the name of the parent dataverse?
+ */
+ } else if (this.type.equals(SearchConstants.FILES)) {
+ displayName = this.name;
+ identifierLabel = "file_id";
+ preferredUrl = getDownloadUrl();
+ /**
+ * @todo show more information for a file's parent, such as the
+ * title of the dataset it belongs to.
+ */
+ datasetCitation = parent.get("citation");
+ datasetName = parent.get("name");
+ datasetId = parent.get("id");
+ datasetPersistentId = parent.get(SolrSearchResult.PARENT_IDENTIFIER);
+ }
+
+ // displayName = null; // testing NullSafeJsonBuilder
+ // Because we are using NullSafeJsonBuilder, key/value pairs will be dropped if the value is null.
+ NullSafeJsonBuilder nullSafeJsonBuilder = jsonObjectBuilder().add("name", displayName)
+ .add("type", getDisplayType(getType())).add("url", preferredUrl).add("image_url", getImageUrl())
+ // .add("persistent_url", this.persistentUrl)
+ // .add("download_url", this.downloadUrl)
+ /**
+ * @todo How much value is there in exposing the identifier for
+ * dataverses? For
+ */
+ .add(identifierLabel, this.identifier)
+ /**
+ * @todo Get dataset description from dsDescriptionValue. Also,
+ * is descriptionNoSnippet the right field to use generally?
+ *
+ * @todo What about the fact that datasets can now have multiple
+ * descriptions? Should we create an array called
+ * "additional_descriptions" that gets populated if there is
+ * more than one dataset description?
+ *
+ * @todo Why aren't file descriptions ever null? They always
+ * have an empty string at least.
+ */
+ .add("description", this.descriptionNoSnippet)
+ /**
+ * @todo In the future we'd like to support non-public datasets
+ * per https://github.com/IQSS/dataverse/issues/1299 but for now
+ * we are only supporting non-public searches.
+ */
+ .add("published_at", getDateTimePublished())
+ /**
+ * @todo Expose MIME Type:
+ * https://github.com/IQSS/dataverse/issues/1595
+ */
+ .add("file_type", this.filetype).add("file_content_type", this.fileContentType)
+ .add("size_in_bytes", getFileSizeInBytes())
+ /**
+ * "md5" was the only possible value so it's hard-coded here but
+ * we might want to deprecate it someday since we now put the
+ * MD5 or SHA-1 in "checksum".
+ */
+ .add("md5", getFileMd5())
+ .add("checksum", JsonPrinter.getChecksumTypeAndValue(getFileChecksumType(), getFileChecksumValue()))
+ .add("unf", getUnf()).add("file_persistent_id", this.filePersistentId).add("dataset_name", datasetName)
+ .add("dataset_id", datasetId).add("publisher", publisherName)
+ .add("dataset_persistent_id", datasetPersistentId).add("dataset_citation", datasetCitation)
+ .add("deaccession_reason", this.deaccessionReason).add("citationHtml", this.citationHtml)
+ .add("identifier_of_dataverse", this.identifierOfDataverse)
+ .add("name_of_dataverse", this.nameOfDataverse).add("citation", this.citation);
+ // Now that nullSafeJsonBuilder has been instantiated, check for null before adding to it!
+ if (showRelevance) {
+ nullSafeJsonBuilder.add("matches", getRelevance());
+ nullSafeJsonBuilder.add("score", getScore());
+ }
+ if (showEntityIds) {
+ if (this.entityId != null) {
+ nullSafeJsonBuilder.add("entity_id", this.entityId);
+ }
+ }
+ if (!getPublicationStatuses().isEmpty()) {
+ nullSafeJsonBuilder.add("publicationStatuses", getPublicationStatusesAsJSON());
+ }
+
+ if (this.entity != null) {
+ if (this.entity.isInstanceofDataset()) {
+ nullSafeJsonBuilder.add("storageIdentifier", this.entity.getStorageIdentifier());
+ Dataset ds = (Dataset) this.entity;
+ DatasetVersion dv = ds.getVersionFromId(this.datasetVersionId);
+
+ if (!dv.getKeywords().isEmpty()) {
+ JsonArrayBuilder keyWords = Json.createArrayBuilder();
+ for (String keyword : dv.getKeywords()) {
+ keyWords.add(keyword);
+ }
+ nullSafeJsonBuilder.add("keywords", keyWords);
+ }
+
+ JsonArrayBuilder subjects = Json.createArrayBuilder();
+ for (String subject : dv.getDatasetSubjects()) {
+ subjects.add(subject);
+ }
+ nullSafeJsonBuilder.add("subjects", subjects);
+ nullSafeJsonBuilder.add("fileCount", datasetFileCount);
+ nullSafeJsonBuilder.add("versionId", dv.getId());
+ nullSafeJsonBuilder.add("versionState", dv.getVersionState().toString());
+ if (this.isPublishedState()) {
+ nullSafeJsonBuilder.add("majorVersion", dv.getVersionNumber());
+ nullSafeJsonBuilder.add("minorVersion", dv.getMinorVersionNumber());
+ }
+
+ nullSafeJsonBuilder.add("createdAt", ds.getCreateDate());
+ nullSafeJsonBuilder.add("updatedAt", ds.getModificationTime());
+
+ if (!dv.getDatasetContacts().isEmpty()) {
+ JsonArrayBuilder contacts = Json.createArrayBuilder();
+ NullSafeJsonBuilder nullSafeJsonBuilderInner = jsonObjectBuilder();
+ for (String[] contact : dv.getDatasetContacts(false)) {
+ nullSafeJsonBuilderInner.add("name", contact[0]);
+ nullSafeJsonBuilderInner.add("affiliation", contact[1]);
+ contacts.add(nullSafeJsonBuilderInner);
+ }
+ nullSafeJsonBuilder.add("contacts", contacts);
+ }
+ if (!dv.getRelatedPublications().isEmpty()) {
+ JsonArrayBuilder relPub = Json.createArrayBuilder();
+ NullSafeJsonBuilder inner = jsonObjectBuilder();
+ for (DatasetRelPublication dsRelPub : dv.getRelatedPublications()) {
+ inner.add("title", dsRelPub.getTitle());
+ inner.add("citation", dsRelPub.getText());
+ inner.add("url", dsRelPub.getUrl());
+ relPub.add(inner);
+ }
+ nullSafeJsonBuilder.add("publications", relPub);
+ }
+
+ if (!dv.getDatasetProducers().isEmpty()) {
+ JsonArrayBuilder producers = Json.createArrayBuilder();
+ for (String[] producer : dv.getDatasetProducers()) {
+ producers.add(producer[0]);
+ }
+ nullSafeJsonBuilder.add("producers", producers);
+ }
+ if (!dv.getRelatedMaterial().isEmpty()) {
+ JsonArrayBuilder relatedMaterials = Json.createArrayBuilder();
+ for (String relatedMaterial : dv.getRelatedMaterial()) {
+ relatedMaterials.add(relatedMaterial);
+ }
+ nullSafeJsonBuilder.add("relatedMaterial", relatedMaterials);
+ }
+
+ if (!dv.getGeographicCoverage().isEmpty()) {
+ JsonArrayBuilder geoCov = Json.createArrayBuilder();
+ NullSafeJsonBuilder inner = jsonObjectBuilder();
+ for (String[] ind : dv.getGeographicCoverage()) {
+ inner.add("country", ind[0]);
+ inner.add("state", ind[1]);
+ inner.add("city", ind[2]);
+ inner.add("other", ind[3]);
+ geoCov.add(inner);
+ }
+ nullSafeJsonBuilder.add("geographicCoverage", geoCov);
+ }
+ if (!dv.getDataSource().isEmpty()) {
+ JsonArrayBuilder dataSources = Json.createArrayBuilder();
+ for (String dsource : dv.getDataSource()) {
+ dataSources.add(dsource);
+ }
+ nullSafeJsonBuilder.add("dataSources", dataSources);
+ }
+
+ if (CollectionUtils.isNotEmpty(metadataFields)) {
+ // map each requested metadata block name to its list of field names
+ Map<String, List<String>> metadataFieldMapNames = computeRequestedMetadataFieldMapNames(
+ metadataFields);
+
+ // add a metadataFields object to wrap all requested fields
+ NullSafeJsonBuilder metadataFieldBuilder = jsonObjectBuilder();
+
+ Map<MetadataBlock, List<DatasetField>> groupedFields = DatasetField
+ .groupByBlock(dv.getFlatDatasetFields());
+ json(metadataFieldMapNames, groupedFields, metadataFieldBuilder);
+
+ nullSafeJsonBuilder.add("metadataBlocks", metadataFieldBuilder);
+ }
+ }
+ }
+
+ if (showApiUrls) {
+ /**
+ * @todo We should probably have a metadata_url or api_url concept
+ * enabled by default, not hidden behind an undocumented boolean.
+ * For datasets, this would be http://example.com/api/datasets/10 or
+ * whatever (to get more detailed JSON), but right now this requires
+ * an API token. Discuss at
+ * https://docs.google.com/document/d/1d8sT2GLSavgiAuMTVX8KzTCX0lROEET1edhvHHRDZOs/edit?usp=sharing";
+ */
+ if (getApiUrl() != null) {
+ nullSafeJsonBuilder.add("api_url", getApiUrl());
+ }
+ }
+ // NullSafeJsonBuilder is awesome but can't build null safe arrays. :(
+ if (!datasetAuthors.isEmpty()) {
+ JsonArrayBuilder authors = Json.createArrayBuilder();
+ for (String datasetAuthor : datasetAuthors) {
+ authors.add(datasetAuthor);
+ }
+ nullSafeJsonBuilder.add("authors", authors);
+ }
+ return nullSafeJsonBuilder;
+ }
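+
+ // Typical call, sketched ("result" and the field filter are
+ // hypothetical):
+ //
+ //   JsonObjectBuilder job = result.json(true, true, false,
+ //           List.of("citation:title"), 42L);
+ //
+ // This includes relevance and entity ids, omits api_url, and echoes
+ // only the citation block's "title" field under "metadataBlocks".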
+
+ private void json(Map<String, List<String>> metadataFieldMapNames,
+ Map<MetadataBlock, List<DatasetField>> groupedFields, NullSafeJsonBuilder metadataFieldBuilder) {
+ for (Map.Entry<String, List<String>> metadataFieldNamesEntry : metadataFieldMapNames.entrySet()) {
+ String metadataBlockName = metadataFieldNamesEntry.getKey();
+ List<String> metadataBlockFieldNames = metadataFieldNamesEntry.getValue();
+ for (MetadataBlock metadataBlock : groupedFields.keySet()) {
+ if (metadataBlockName.equals(metadataBlock.getName())) {
+ // create metadataBlock object
+ NullSafeJsonBuilder metadataBlockBuilder = jsonObjectBuilder();
+ metadataBlockBuilder.add("displayName", metadataBlock.getDisplayName());
+ JsonArrayBuilder fieldsArray = Json.createArrayBuilder();
+
+ List<DatasetField> datasetFields = groupedFields.get(metadataBlock);
+ for (DatasetField datasetField : datasetFields) {
+ if (metadataBlockFieldNames.contains("*")
+ || metadataBlockFieldNames.contains(datasetField.getDatasetFieldType().getName())) {
+ if (datasetField.getDatasetFieldType().isCompound() || !datasetField.getDatasetFieldType().isHasParent()) {
+ JsonObject item = JsonPrinter.json(datasetField);
+ if (item != null) {
+ fieldsArray.add(item);
+ }
+ }
+ }
+ }
+ // with a "fields" array to hold all requested properties
+ metadataBlockBuilder.add("fields", fieldsArray);
+
+ metadataFieldBuilder.add(metadataBlock.getName(), metadataBlockBuilder);
+ }
+ }
+ }
+ }
+
+ private Map<String, List<String>> computeRequestedMetadataFieldMapNames(List<String> metadataFields) {
+ Map<String, List<String>> metadataFieldMapNames = new HashMap<>();
+ for (String metadataField : metadataFields) {
+ String[] parts = metadataField.split(":");
+ if (parts.length == 2) {
+ List<String> metadataFieldNames = metadataFieldMapNames.get(parts[0]);
+ if (metadataFieldNames == null) {
+ metadataFieldNames = new ArrayList<>();
+ metadataFieldMapNames.put(parts[0], metadataFieldNames);
+ }
+ metadataFieldNames.add(parts[1]);
+ }
+ }
+ return metadataFieldMapNames;
+ }
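+
+ // Sketch of the parsing (illustrative input/output):
+ //
+ //   ["citation:title", "citation:author", "geospatial:*"]
+ //     -> { "citation": ["title", "author"], "geospatial": ["*"] }
+ //
+ // Entries that do not split into exactly two parts on ":" are skipped.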
+
+ private String getDateTimePublished() {
+ String datePublished = null;
+ if (!draftState) {
+ datePublished = releaseOrCreateDate == null ? null : Util.getDateTimeFormat().format(releaseOrCreateDate);
+ }
+ return datePublished;
+ }
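+
+ // Drafts have no publication date, so this returns null for them;
+ // otherwise the release-or-create date is formatted with
+ // Util.getDateTimeFormat().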
+
+ public String getId() {
+ return id;
+ }
+
+ public void setId(String id) {
+ this.id = id;
+ }
+
+ public Long getEntityId() {
+ return entityId;
+ }
+
+ public void setEntityId(Long entityId) {
+ this.entityId = entityId;
+ }
+
+ public DvObject getEntity() {
+ return entity;
+ }
+
+ public void setEntity(DvObject entity) {
+ this.entity = entity;
+ }
+
+ public String getIdentifier() {
+ return identifier;
+ }
+
+ public void setIdentifier(String identifier) {
+ this.identifier = identifier;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public String getHtmlUrl() {
+ return htmlUrl;
+ }
+
+ public void setHtmlUrl(String htmlUrl) {
+ this.htmlUrl = htmlUrl;
+ }
+
+ public String getPersistentUrl() {
+ return persistentUrl;
+ }
+
+ public void setPersistentUrl(String persistentUrl) {
+ this.persistentUrl = persistentUrl;
+ }
+
+ public String getDownloadUrl() {
+ return downloadUrl;
+ }
+
+ public void setDownloadUrl(String downloadUrl) {
+ this.downloadUrl = downloadUrl;
+ }
+
+ public String getApiUrl() {
+ return apiUrl;
+ }
+
+ public void setApiUrl(String apiUrl) {
+ this.apiUrl = apiUrl;
+ }
+
+ public String getImageUrl() {
+ return imageUrl;
+ }
+
+ public void setImageUrl(String imageUrl) {
+ this.imageUrl = imageUrl;
+ }
+
+ public DatasetThumbnail getDatasetThumbnail() {
+ return datasetThumbnail;
+ }
+
+ public void setDatasetThumbnail(DatasetThumbnail datasetThumbnail) {
+ this.datasetThumbnail = datasetThumbnail;
+ }
+
+ public String getQuery() {
+ return query;
+ }
+
+ public void setQuery(String query) {
+ this.query = query;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getTitle() {
+ return title;
+ }
+
+ public void setTitle(String title) {
+ this.title = title;
+ }
+
+ public String getDescriptionNoSnippet() {
+ return descriptionNoSnippet;
+ }
+
+ public void setDescriptionNoSnippet(String descriptionNoSnippet) {
+ this.descriptionNoSnippet = descriptionNoSnippet;
+ }
+
+ public List<String> getDatasetAuthors() {
+ return datasetAuthors;
+ }
+
+ public void setDatasetAuthors(List<String> datasetAuthors) {
+ this.datasetAuthors = datasetAuthors;
+ }
+
+ public String getDeaccessionReason() {
+ return deaccessionReason;
+ }
+
+ public void setDeaccessionReason(String deaccessionReason) {
+ this.deaccessionReason = deaccessionReason;
+ }
+
+ public List<Highlight> getHighlightsAsListOrig() {
+ return highlightsAsList;
+ }
+
+ public List<Highlight> getHighlightsAsList() {
+ List<Highlight> filtered = new ArrayList<>();
+ for (Highlight highlight : highlightsAsList) {
+ String field = highlight.getSolrField().getNameSearchable();
+ /**
+ * @todo don't hard code "title" here. And should we collapse name
+ * and title together anyway?
+ */
+ if (!field.equals(SearchFields.NAME) && !field.equals(SearchFields.DESCRIPTION)
+ && !field.equals(SearchFields.DATASET_DESCRIPTION) && !field.equals(SearchFields.AFFILIATION)
+ && !field.equals("title")) {
+ filtered.add(highlight);
+ }
+ }
+ return filtered;
+ }
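+
+ // Filters out highlights that are already rendered elsewhere on the
+ // result card (name, description, dataset description, affiliation,
+ // title); everything else is passed through.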
+
+ public void setHighlightsAsList(List<Highlight> highlightsAsList) {
+ this.highlightsAsList = highlightsAsList;
+ }
+
+ public List<String> getFileCategories() {
+ return fileCategories;
+ }
+
+ public void setFileCategories(List<String> fileCategories) {
+ this.fileCategories = fileCategories;
+ }
+
+ public List<String> getTabularDataTags() {
+ return tabularDataTags;
+ }
+
+ public void setTabularDataTags(List<String> tabularDataTags) {
+ this.tabularDataTags = tabularDataTags;
+ }
+
+ public Map<String, String> getParent() {
+ return parent;
+ }
+
+ public Long getParentIdAsLong() {
+
+ if (this.getParent() == null) {
+ return null;
+ }
+ if (!this.getParent().containsKey("id")) {
+ return null;
+ }
+
+ String parentIdString = getParent().get("id");
+ if (parentIdString == null) {
+ return null;
+ }
+
+ try {
+ return Long.parseLong(parentIdString);
+ } catch (NumberFormatException ex) {
+ return null;
+ }
+ }
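+
+ // Sketch (illustrative): a parent map of {"id": "7"} yields 7L; a
+ // missing, null, or non-numeric "id" yields null rather than throwing.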
+
+ public void setParent(Map<String, String> parent) {
+ this.parent = parent;
+ }
+
+ public String getDataverseAffiliation() {
+ return dataverseAffiliation;
+ }
+
+ public void setDataverseAffiliation(String dataverseAffiliation) {
+ this.dataverseAffiliation = dataverseAffiliation;
+ }
+
+ public String getCitation() {
+ return citation;
+ }
+
+ public void setCitation(String citation) {
+ this.citation = citation;
+ }
+
+ public String getCitationHtml() {
+ return citationHtml;
+ }
+
+ public void setCitationHtml(String citationHtml) {
+ this.citationHtml = citationHtml;
+ }
+
+ public String getDatasetType() {
+ return datasetType;
+ }
+
+ public void setDatasetType(String datasetType) {
+ this.datasetType = datasetType;
+ }
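+
+ // Accessors for the dataset type carried on the search result. Sketch
+ // (value illustrative):
+ //
+ //   result.setDatasetType("software");
+ //   result.getDatasetType(); // "software"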
+
+ public String getFiletype() {
+ return filetype;
+ }
+
+ public void setFiletype(String filetype) {
+ this.filetype = filetype;
+ }
+
+ public String getFileContentType() {
+ return fileContentType;
+ }
+
+ public void setFileContentType(String fileContentType) {
+ this.fileContentType = fileContentType;
+ }
+
+ public String getUnf() {
+ return unf;
+ }
+
+ public void setUnf(String unf) {
+ this.unf = unf;
+ }
+
+ public Long getFileSizeInBytes() {
+ return fileSizeInBytes;
+ }
+
+ public void setFileSizeInBytes(Long fileSizeInBytes) {
+ this.fileSizeInBytes = fileSizeInBytes;
+ }
+
+ public String getFileMd5() {
+ if (DataFile.ChecksumType.MD5.equals(getFileChecksumType())) {
+ return fileMd5;
+ } else {
+ return null;
+ }
+ }
+
+ public void setFileMd5(String fileMd5) {
+ this.fileMd5 = fileMd5;
+ }
+
+ public DataFile.ChecksumType getFileChecksumType() {
+ return fileChecksumType;
+ }
+
+ public void setFileChecksumType(DataFile.ChecksumType fileChecksumType) {
+ this.fileChecksumType = fileChecksumType;
+ }
+
+ public String getFileChecksumValue() {
+ return fileChecksumValue;
+ }
+
+ public void setFileChecksumValue(String fileChecksumValue) {
+ this.fileChecksumValue = fileChecksumValue;
+ }
+
+ public String getNameSort() {
+ return nameSort;
+ }
+
+ public void setNameSort(String nameSort) {
+ this.nameSort = nameSort;
+ }
+
+ public String getStatus() {
+ return status;
+ }
+
+ void setStatus(String status) {
+ this.status = status;
+ }
+
+ public Date getReleaseOrCreateDate() {
+ return releaseOrCreateDate;
+ }
+
+ public void setReleaseOrCreateDate(Date releaseOrCreateDate) {
+ this.releaseOrCreateDate = releaseOrCreateDate;
+ }
+
+ public String getDateToDisplayOnCard() {
+ return DateUtil.formatDate(this.releaseOrCreateDate);
+ }
+
+ public long getDatasetVersionId() {
+ return datasetVersionId;
+ }
+
+ public void setDatasetVersionId(long datasetVersionId) {
+ this.datasetVersionId = datasetVersionId;
+ }
+
+ public String getVersionNumberFriendly() {
+ return versionNumberFriendly;
+ }
+
+ public void setVersionNumberFriendly(String versionNumberFriendly) {
+ this.versionNumberFriendly = versionNumberFriendly;
+ }
+
+ public String getDatasetUrl() {
+ String failSafeUrl = "/dataset.xhtml?id=" + entityId + "&versionId=" + datasetVersionId;
+ if (identifier != null) {
+ /**
+ * Unfortunately, colons in the globalId (doi:10...) are converted
+ * to %3A (doi%3A10...). To prevent this we switched many JSF tags
+ * to a plain "a" tag with an href as suggested at
+ * http://stackoverflow.com/questions/24733959/houtputlink-value-escaped
+ */
+ String badString = "null";
+ if (!identifier.contains(badString)) {
+ if (entity != null && entity instanceof Dataset) {
+ if (this.isHarvested() && ((Dataset) entity).getHarvestedFrom() != null) {
+ String remoteArchiveUrl = ((Dataset) entity).getRemoteArchiveURL();
+ if (remoteArchiveUrl != null) {
+ return remoteArchiveUrl;
+ }
+ return null;
+ }
+ }
+ if (isDraftState()) {
+ return "/dataset.xhtml?persistentId=" + identifier + "&version=DRAFT";
+ }
+ return "/dataset.xhtml?persistentId=" + identifier;
+ } else {
+ logger.info("Dataset identifier/globalId contains \"" + badString
+ + "\" perhaps due to https://github.com/IQSS/dataverse/issues/1147 . Fix data in database and reindex. Returning failsafe URL: "
+ + failSafeUrl);
+ return failSafeUrl;
+ }
+ } else {
+ logger.info("Dataset identifier/globalId was null. Returning failsafe URL: " + failSafeUrl);
+ return failSafeUrl;
+ }
+ }
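+
+ // URLs this can resolve to, sketched (identifier illustrative):
+ //
+ //   draft:     /dataset.xhtml?persistentId=doi:10.5072/FK2/ABCDEF&version=DRAFT
+ //   published: /dataset.xhtml?persistentId=doi:10.5072/FK2/ABCDEF
+ //   harvested: the remote archive URL, when one is available
+ //   failsafe:  /dataset.xhtml?id=<entityId>&versionId=<datasetVersionId>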
+
+ public String getFileParentIdentifier() {
+ if (entity == null) {
+ return null;
+ }
+ if (entity instanceof DataFile) {
+ return parent.get(PARENT_IDENTIFIER); // Dataset globalID
+ }
+
+ return null;
+ // if (entity)
+ }
+
+ public String getFilePersistentId() {
+ return filePersistentId;
+ }
+
+ public void setFilePersistentId(String pid) {
+ filePersistentId = pid;
+ }
+
+ public String getFileUrl() {
+ // Nothing special needs to be done for harvested file URLs:
+ // simply directing these to the local dataset.xhtml for this dataset
+ // will take care of it - because DatasetPage will issue a redirect
+ // to the remote archive URL.
+ // This is true AS OF 4.2.4, FEB. 2016! - We'll probably want to make
+ // .getRemoteArchiveURL() methods, both in DataFile and Dataset objects,
+ // work again at some point in the future.
+ /*
* if (entity != null && entity instanceof DataFile && this.isHarvested()) { String remoteArchiveUrl = ((DataFile) entity).getRemoteArchiveURL(); if
* (remoteArchiveUrl != null) { return remoteArchiveUrl; } return null; }
- */
+ */
if (entity.getIdentifier() != null) {
GlobalId entityPid = entity.getGlobalId();
return "/file.xhtml?persistentId=" + ((entityPid != null) ? entityPid.asString() : null);
}
- return "/file.xhtml?fileId=" + entity.getId() + "&datasetVersionId=" + datasetVersionId;
+ return "/file.xhtml?fileId=" + entity.getId() + "&datasetVersionId=" + datasetVersionId;
- /*
+ /*
* if (parentDatasetGlobalId != null) { return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId; } else { return "/dataset.xhtml?id=" +
* parent.get(SearchFields.ID) + "&versionId=" + datasetVersionId; }
- */
- }
+ */
+ }
- public String getFileDatasetUrl() {
- // See the comment in the getFileUrl() method above. -- L.A. 4.2.4
- /*
+ public String getFileDatasetUrl() {
+ // See the comment in the getFileUrl() method above. -- L.A. 4.2.4
+ /*
* if (entity != null && entity instanceof DataFile && this.isHarvested()) { String remoteArchiveUrl = ((DataFile) entity).getRemoteArchiveURL(); if
* (remoteArchiveUrl != null) { return remoteArchiveUrl; } return null; }
- */
-
- String parentDatasetGlobalId = parent.get(PARENT_IDENTIFIER);
-
- if (parentDatasetGlobalId != null) {
- if (isDraftState()) {
- return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId + "&version=DRAFT";
- } else {
- return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId;
- }
- } else {
- return "/dataset.xhtml?id=" + parent.get(SearchFields.ID) + "&versionId=" + datasetVersionId;
- }
- }
-
- /**
- * @return the dataverseAlias
- */
- public String getDataverseAlias() {
- return dataverseAlias;
- }
-
- /**
- * @param dataverseAlias the dataverseAlias to set
- */
- public void setDataverseAlias(String dataverseAlias) {
- this.dataverseAlias = dataverseAlias;
- }
-
- /**
- * @return the dataverseParentAlias
- */
- public String getDataverseParentAlias() {
- return dataverseParentAlias;
- }
-
- /**
- * @param dataverseParentAlias the dataverseParentAlias to set
- */
- public void setDataverseParentAlias(String dataverseParentAlias) {
- this.dataverseParentAlias = dataverseParentAlias;
- }
-
- public float getScore() {
- return score;
- }
-
- public void setScore(float score) {
- this.score = score;
- }
-
- private String getDisplayType(String type) {
- if (type.equals(SearchConstants.DATAVERSES)) {
- return SearchConstants.DATAVERSE;
- } else if (type.equals(SearchConstants.DATASETS)) {
- return SearchConstants.DATASET;
- } else if (type.equals(SearchConstants.FILES)) {
- return SearchConstants.FILE;
- } else {
- return null;
- }
- }
-
- /*
+ */
+
+ String parentDatasetGlobalId = parent.get(PARENT_IDENTIFIER);
+
+ if (parentDatasetGlobalId != null) {
+ if (isDraftState()) {
+ return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId + "&version=DRAFT";
+ } else {
+ return "/dataset.xhtml?persistentId=" + parentDatasetGlobalId;
+ }
+ } else {
+ return "/dataset.xhtml?id=" + parent.get(SearchFields.ID) + "&versionId=" + datasetVersionId;
+ }
+ }
+
+ /**
+ * @return the dataverseAlias
+ */
+ public String getDataverseAlias() {
+ return dataverseAlias;
+ }
+
+ /**
+ * @param dataverseAlias the dataverseAlias to set
+ */
+ public void setDataverseAlias(String dataverseAlias) {
+ this.dataverseAlias = dataverseAlias;
+ }
+
+ /**
+ * @return the dataverseParentAlias
+ */
+ public String getDataverseParentAlias() {
+ return dataverseParentAlias;
+ }
+
+ /**
+ * @param dataverseParentAlias the dataverseParentAlias to set
+ */
+ public void setDataverseParentAlias(String dataverseParentAlias) {
+ this.dataverseParentAlias = dataverseParentAlias;
+ }
+
+ public float getScore() {
+ return score;
+ }
+
+ public void setScore(float score) {
+ this.score = score;
+ }
+
+ private String getDisplayType(String type) {
+ if (type.equals(SearchConstants.DATAVERSES)) {
+ return SearchConstants.DATAVERSE;
+ } else if (type.equals(SearchConstants.DATASETS)) {
+ return SearchConstants.DATASET;
+ } else if (type.equals(SearchConstants.FILES)) {
+ return SearchConstants.FILE;
+ } else {
+ return null;
+ }
+ }
+
+ /*
* public JsonArrayBuilder getUserRolesAsJson() {
*
* JsonArrayBuilder jsonRoleStrings = Json.createArrayBuilder(); for (String role : this.getUserRole()) { jsonRoleStrings.add(role); } return
* jsonRoleStrings; }
- */
- public List<String> getUserRole() {
- return userRole;
- }
+ */
+ public List<String> getUserRole() {
+ return userRole;
+ }
- public void setUserRole(List<String> userRole) {
- this.userRole = userRole;
- }
+ public void setUserRole(List<String> userRole) {
+ this.userRole = userRole;
+ }
- public String getIdentifierOfDataverse() {
- return identifierOfDataverse;
- }
+ public String getIdentifierOfDataverse() {
+ return identifierOfDataverse;
+ }
- public void setIdentifierOfDataverse(String id) {
- this.identifierOfDataverse = id;
- }
+ public void setIdentifierOfDataverse(String id) {
+ this.identifierOfDataverse = id;
+ }
- public String getNameOfDataverse() {
- return nameOfDataverse;
- }
+ public String getNameOfDataverse() {
+ return nameOfDataverse;
+ }
- public void setNameOfDataverse(String id) {
- this.nameOfDataverse = id;
- }
+ public void setNameOfDataverse(String id) {
+ this.nameOfDataverse = id;
+ }
- public String getExternalStatus() {
- return externalStatus;
- }
+ public String getExternalStatus() {
+ return externalStatus;
+ }
- public void setExternalStatus(String externalStatus) {
- this.externalStatus = externalStatus;
+ public void setExternalStatus(String externalStatus) {
+ this.externalStatus = externalStatus;
- }
+ }
- public Long getEmbargoEndDate() {
- return embargoEndDate;
- }
+ public Long getEmbargoEndDate() {
+ return embargoEndDate;
+ }
- public void setEmbargoEndDate(Long embargoEndDate) {
- this.embargoEndDate = embargoEndDate;
- }
+ public void setEmbargoEndDate(Long embargoEndDate) {
+ this.embargoEndDate = embargoEndDate;
+ }
- public Long getRetentionEndDate() {
- return retentionEndDate;
- }
+ public Long getRetentionEndDate() {
+ return retentionEndDate;
+ }
- public void setRetentionEndDate(Long retentionEndDate) {
- this.retentionEndDate = retentionEndDate;
- }
+ public void setRetentionEndDate(Long retentionEndDate) {
+ this.retentionEndDate = retentionEndDate;
+ }
- public void setDatasetValid(Boolean datasetValid) {
- this.datasetValid = datasetValid == null || Boolean.valueOf(datasetValid);
- }
+ public void setDatasetValid(Boolean datasetValid) {
+ this.datasetValid = datasetValid == null || Boolean.valueOf(datasetValid);
+ }
- public boolean isValid(Predicate<SolrSearchResult> canUpdateDataset) {
+ public boolean isValid(Predicate<SolrSearchResult> canUpdateDataset) {
if (this.datasetValid) {
return true;
}
@@ -1274,6 +1315,6 @@ public boolean isValid(Predicate<SolrSearchResult> canUpdateDataset) {
if (!JvmSettings.UI_SHOW_VALIDITY_LABEL_WHEN_PUBLISHED.lookupOptional(Boolean.class).orElse(true)) {
return true;
}
- return !canUpdateDataset.test(this);
+ return !canUpdateDataset.test(this);
}
}
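
The isValid() logic above controls whether a search card is flagged as having incomplete metadata: an invalid dataset is reported as invalid only when the validity label is enabled and the caller could actually fix the problem. A self-contained sketch of that decision with illustrative names (the real method also consults version state and the JvmSettings lookup shown in the hunk):

import java.util.function.Predicate;

class ValidityLabelSketch {
    // Stand-in for SolrSearchResult.isValid(): an invalid dataset is
    // surfaced as invalid only to users who can update it, and only
    // when showing the validity label is enabled at all.
    static boolean isValid(boolean datasetValid, boolean showLabel,
                           Predicate<Object> canUpdateDataset, Object searchResult) {
        if (datasetValid) {
            return true;
        }
        if (!showLabel) {
            return true;
        }
        return !canUpdateDataset.test(searchResult);
    }

    public static void main(String[] args) {
        Object card = new Object();
        System.out.println(isValid(false, true, r -> true, card));  // false: curators see the problem
        System.out.println(isValid(false, true, r -> false, card)); // true: guests do not
    }
}
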
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JSONLDUtil.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JSONLDUtil.java
index 52491a5a7e1..380cef6aa9d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JSONLDUtil.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JSONLDUtil.java
@@ -49,6 +49,8 @@
import com.apicatalog.jsonld.document.JsonDocument;
import edu.harvard.iq.dataverse.DatasetVersion.VersionState;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.license.License;
import edu.harvard.iq.dataverse.license.LicenseServiceBean;
import edu.harvard.iq.dataverse.pidproviders.PidProvider;
@@ -77,7 +79,7 @@ public static JsonObject getContext(Map<String, String> contextMap) {
public static Dataset updateDatasetMDFromJsonLD(Dataset ds, String jsonLDBody,
MetadataBlockServiceBean metadataBlockSvc, DatasetFieldServiceBean datasetFieldSvc, boolean append,
- boolean migrating, LicenseServiceBean licenseSvc) {
+ boolean migrating, LicenseServiceBean licenseSvc, DatasetTypeServiceBean datasetTypeSvc) {
DatasetVersion dsv = new DatasetVersion();
@@ -96,6 +98,14 @@ public static Dataset updateDatasetMDFromJsonLD(Dataset ds, String jsonLDBody,
//Store the metadatalanguage if sent - the caller needs to check whether it is allowed (as with any GlobalID)
ds.setMetadataLanguage(jsonld.getString(JsonLDTerm.schemaOrg("inLanguage").getUrl(),null));
+ String datasetTypeIn = jsonld.getString(JsonLDTerm.datasetType.getUrl(), DatasetType.DEFAULT_DATASET_TYPE);
+ DatasetType datasetType = datasetTypeSvc.getByName(datasetTypeIn);
+ if (datasetType != null) {
+ ds.setDatasetType(datasetType);
+ } else {
+ throw new BadRequestException("Invalid dataset type: " + datasetTypeIn);
+ }
+
dsv = updateDatasetVersionMDFromJsonLD(dsv, jsonld, metadataBlockSvc, datasetFieldSvc, append, migrating, licenseSvc);
dsv.setDataset(ds);
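
On the semantic (JSON-LD) API side, the block above reads the type from the request body and rejects unknown names with a Bad Request. A minimal illustrative payload, assuming JsonLDTerm.datasetType expands under the DVCore namespace (https://dataverse.org/schema/core#, see the term definition just below):

{
  "http://purl.org/dc/terms/title": "An Illustrative Software Deposit",
  "https://dataverse.org/schema/core#datasetType": "software"
}

Omitting the term entirely falls back to DatasetType.DEFAULT_DATASET_TYPE, so existing clients keep working unchanged.
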
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonLDTerm.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonLDTerm.java
index 3193f762538..3166fa9dbfa 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonLDTerm.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonLDTerm.java
@@ -52,6 +52,8 @@ public class JsonLDTerm {
public static JsonLDTerm fileCount = JsonLDTerm.DVCore("fileCount");
public static JsonLDTerm maxFileSize = JsonLDTerm.DVCore("maxFileSize");
+ public static JsonLDTerm datasetType = JsonLDTerm.DVCore("datasetType");
+
public JsonLDTerm(JsonLDNamespace namespace, String term) {
this.namespace = namespace;
this.term = term;
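
Because the term is declared through JsonLDTerm.DVCore(...), its full IRI can be obtained programmatically. A one-line sketch (the expanded namespace URL is an assumption, matching the payload example above):

// Expected to resolve to https://dataverse.org/schema/core#datasetType
String datasetTypeIri = JsonLDTerm.datasetType.getUrl();
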
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
index addccc93fe0..2f01c9bc2f2 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
@@ -24,6 +24,8 @@
import edu.harvard.iq.dataverse.authorization.groups.impl.ipaddress.ip.IpAddress;
import edu.harvard.iq.dataverse.authorization.groups.impl.ipaddress.ip.IpAddressRange;
import edu.harvard.iq.dataverse.authorization.groups.impl.maildomain.MailDomainGroup;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.datasetutility.OptionalFileParams;
import edu.harvard.iq.dataverse.harvest.client.HarvestingClient;
import edu.harvard.iq.dataverse.license.License;
@@ -68,6 +70,7 @@ public class JsonParser {
MetadataBlockServiceBean blockService;
SettingsServiceBean settingsService;
LicenseServiceBean licenseService;
+ DatasetTypeServiceBean datasetTypeService;
HarvestingClient harvestingClient = null;
boolean allowHarvestingMissingCVV = false;
@@ -83,15 +86,16 @@ public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceB
this.settingsService = settingsService;
}
- public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceBean blockService, SettingsServiceBean settingsService, LicenseServiceBean licenseService) {
- this(datasetFieldSvc, blockService, settingsService, licenseService, null);
+ public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceBean blockService, SettingsServiceBean settingsService, LicenseServiceBean licenseService, DatasetTypeServiceBean datasetTypeService) {
+ this(datasetFieldSvc, blockService, settingsService, licenseService, datasetTypeService, null);
}
- public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceBean blockService, SettingsServiceBean settingsService, LicenseServiceBean licenseService, HarvestingClient harvestingClient) {
+ public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceBean blockService, SettingsServiceBean settingsService, LicenseServiceBean licenseService, DatasetTypeServiceBean datasetTypeService, HarvestingClient harvestingClient) {
this.datasetFieldSvc = datasetFieldSvc;
this.blockService = blockService;
this.settingsService = settingsService;
this.licenseService = licenseService;
+ this.datasetTypeService = datasetTypeService;
this.harvestingClient = harvestingClient;
this.allowHarvestingMissingCVV = harvestingClient != null && harvestingClient.getAllowHarvestingMissingCVV();
}
@@ -328,7 +332,15 @@ public Dataset parseDataset(JsonObject obj) throws JsonParseException {
}else {
throw new JsonParseException("Specified metadatalanguage not allowed.");
}
-
+ String datasetTypeIn = obj.getString("datasetType", DatasetType.DEFAULT_DATASET_TYPE);
+ logger.fine("datasetTypeIn: " + datasetTypeIn);
+ DatasetType datasetType = datasetTypeService.getByName(datasetTypeIn);
+ if (datasetType != null) {
+ dataset.setDatasetType(datasetType);
+ } else {
+ throw new JsonParseException("Invalid dataset type: " + datasetTypeIn);
+ }
+
DatasetVersion dsv = new DatasetVersion();
dsv.setDataset(dataset);
dsv = parseDatasetVersion(obj.getJsonObject("datasetVersion"), dsv);
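
The native JSON path mirrors the JSON-LD one: a missing "datasetType" key means the default type, and an unknown name fails fast instead of being persisted. A hypothetical test-style sketch, assuming a parser wired with a mocked DatasetTypeServiceBean that only knows the default type (see the JsonParserTest changes further down for that wiring):

import edu.harvard.iq.dataverse.util.json.JsonParseException;
import edu.harvard.iq.dataverse.util.json.JsonParser;
import jakarta.json.Json;
import jakarta.json.JsonObject;
import static org.junit.jupiter.api.Assertions.assertThrows;

void rejectsUnknownType(JsonParser parser) {
    // getByName("no-such-type") returns null on the mocked service,
    // so parseDataset() should throw rather than store a bad type.
    JsonObject badType = Json.createObjectBuilder()
            .add("datasetType", "no-such-type")
            .build();
    assertThrows(JsonParseException.class, () -> parser.parseDataset(badType));
}
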
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
index adb7cf98975..34c8fc5c6a6 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
@@ -17,6 +17,7 @@
import edu.harvard.iq.dataverse.authorization.users.User;
import edu.harvard.iq.dataverse.branding.BrandingUtil;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.dataset.DatasetUtil;
import edu.harvard.iq.dataverse.datavariable.CategoryMetadata;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
@@ -55,7 +56,6 @@
import jakarta.ejb.Singleton;
import jakarta.json.JsonArray;
import jakarta.json.JsonObject;
-import java.math.BigDecimal;
/**
* Convert objects to Json.
@@ -406,6 +406,7 @@ public static JsonObjectBuilder json(Dataset ds, Boolean returnOwners) {
if (returnOwners){
bld.add("isPartOf", getOwnersFromDvObject(ds));
}
+ bld.add("datasetType", ds.getDatasetType().getName());
return bld;
}
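
With that single add(), every native API response that prints a dataset now carries the type at the top level of "data". A trimmed, illustrative response body (values invented):

{
  "id": 42,
  "identifier": "FK2/EXAMPLE",
  "persistentUrl": "https://doi.org/10.5072/FK2/EXAMPLE",
  "datasetType": "software"
}
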
diff --git a/src/main/java/propertyFiles/Bundle.properties b/src/main/java/propertyFiles/Bundle.properties
index c62620b43cf..6c771d8337b 100644
--- a/src/main/java/propertyFiles/Bundle.properties
+++ b/src/main/java/propertyFiles/Bundle.properties
@@ -3,7 +3,17 @@ newDataverse=New Dataverse
hostDataverse=Host Dataverse
dataverses=Dataverses
passwd=Password
+# BEGIN dataset types
+# `dataset=Dataset` has been here since 4.0 but now that we have dataset types,
+# we need to add the rest of the types here for two reasons. First, we want
+# translators to be able to translate these types. Second, in English it looks
+# weird to have only "Dataset" capitalized in the facet but not "software" and
+# "workflow". This capitalization (looking up here in the bundle) is done by
+# SearchServiceBean near the comment "This is where facets are capitalized".
dataset=Dataset
+software=Software
+workflow=Workflow
+# END dataset types
datasets=Datasets
newDataset=New Dataset
files=Files
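
The capitalization described in the comment above is an ordinary bundle lookup at facet-rendering time; the idea in one illustrative line (BundleUtil is the project's bundle helper):

String facetLabel = BundleUtil.getStringFromBundle("software"); // "Software"
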
diff --git a/src/main/java/propertyFiles/staticSearchFields.properties b/src/main/java/propertyFiles/staticSearchFields.properties
index 53d0080b87c..9a208e841d6 100644
--- a/src/main/java/propertyFiles/staticSearchFields.properties
+++ b/src/main/java/propertyFiles/staticSearchFields.properties
@@ -9,4 +9,5 @@ staticSearchFields.dvObjectType=Type
staticSearchFields.fileTag=File Tag
staticSearchFields.fileAccess=Access
staticSearchFields.publicationStatus=Publication Status
-staticSearchFields.subject_ss=Subject
\ No newline at end of file
+staticSearchFields.subject_ss=Subject
+staticSearchFields.datasetType=Dataset Type
diff --git a/src/main/resources/db/migration/V6.3.0.3.sql b/src/main/resources/db/migration/V6.3.0.3.sql
new file mode 100644
index 00000000000..ece87767bcb
--- /dev/null
+++ b/src/main/resources/db/migration/V6.3.0.3.sql
@@ -0,0 +1,30 @@
+-- Dataset types have been added. See #10517 and #10694
+--
+-- Insert the default dataset type: dataset (if not present).
+-- Inspired by https://stackoverflow.com/questions/4069718/postgres-insert-if-does-not-exist-already/13342031#13342031
+INSERT INTO datasettype
+ (name)
+SELECT 'dataset'
+WHERE
+ NOT EXISTS (
+ SELECT name FROM datasettype WHERE name = 'dataset'
+ );
+--
+-- Add the new column (if it doesn't exist).
+ALTER TABLE dataset ADD COLUMN IF NOT EXISTS datasettype_id bigint;
+--
+-- Add the foreign key.
+DO $$
+BEGIN
+ BEGIN
+ ALTER TABLE dataset ADD CONSTRAINT fk_dataset_datasettype_id FOREIGN KEY (datasettype_id) REFERENCES datasettype(id);
+ EXCEPTION
+ WHEN duplicate_object THEN RAISE NOTICE 'Table constraint fk_dataset_datasettype_id already exists';
+ END;
+END $$;
+--
+-- Give existing datasets a type of "dataset".
+UPDATE dataset SET datasettype_id = (SELECT id FROM datasettype WHERE name = 'dataset');
+--
+-- Make the column non-null.
+ALTER TABLE dataset ALTER COLUMN datasettype_id SET NOT NULL;
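
Because the backfill runs before the NOT NULL constraint is added, the script is safe on populated databases. A quick post-migration sanity check (illustrative queries):

-- Should return 0 now that the column is mandatory.
SELECT count(*) FROM dataset WHERE datasettype_id IS NULL;
-- Should list at least the default 'dataset' type.
SELECT id, name FROM datasettype;
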
diff --git a/src/main/resources/edu/harvard/iq/dataverse/pidproviders/doi/datacite_metadata_template.xml b/src/main/resources/edu/harvard/iq/dataverse/pidproviders/doi/datacite_metadata_template.xml
index abe7ce79972..150a098834e 100644
--- a/src/main/resources/edu/harvard/iq/dataverse/pidproviders/doi/datacite_metadata_template.xml
+++ b/src/main/resources/edu/harvard/iq/dataverse/pidproviders/doi/datacite_metadata_template.xml
@@ -9,7 +9,7 @@
<publisher>${publisher}</publisher>
<publicationYear>${publisherYear}</publicationYear>
- <resourceType resourceTypeGeneral="Dataset"/>
+ <resourceType resourceTypeGeneral="${datasetType}"/>
${relatedIdentifiers}
${description}
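
With the attribute templated, a published dataset's type flows into the DataCite kernel metadata. For a software dataset the rendered element would presumably look like this (illustrative, assuming the type name is capitalized to match DataCite's resourceTypeGeneral vocabulary):

<resourceType resourceTypeGeneral="Software"/>
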
diff --git a/src/test/java/edu/harvard/iq/dataverse/api/DatasetTypesIT.java b/src/test/java/edu/harvard/iq/dataverse/api/DatasetTypesIT.java
new file mode 100644
index 00000000000..1dec51cc3ef
--- /dev/null
+++ b/src/test/java/edu/harvard/iq/dataverse/api/DatasetTypesIT.java
@@ -0,0 +1,266 @@
+package edu.harvard.iq.dataverse.api;
+
+import edu.harvard.iq.dataverse.dataset.DatasetType;
+import io.restassured.RestAssured;
+import io.restassured.path.json.JsonPath;
+import io.restassured.response.Response;
+import jakarta.json.Json;
+import static jakarta.ws.rs.core.Response.Status.BAD_REQUEST;
+import static jakarta.ws.rs.core.Response.Status.CREATED;
+import static jakarta.ws.rs.core.Response.Status.FORBIDDEN;
+import static jakarta.ws.rs.core.Response.Status.OK;
+import java.util.UUID;
+import org.hamcrest.CoreMatchers;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+public class DatasetTypesIT {
+
+ @BeforeAll
+ public static void setUpClass() {
+ RestAssured.baseURI = UtilIT.getRestAssuredBaseUri();
+
+ Response getSoftwareType = UtilIT.getDatasetType(DatasetType.DATASET_TYPE_SOFTWARE);
+ getSoftwareType.prettyPrint();
+
+ String typeFound = JsonPath.from(getSoftwareType.getBody().asString()).getString("data.name");
+ System.out.println("type found: " + typeFound);
+ if (DatasetType.DATASET_TYPE_SOFTWARE.equals(typeFound)) {
+ return;
+ }
+
+ System.out.println("The \"software\" type wasn't found. Create it.");
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+ UtilIT.setSuperuserStatus(username, true).then().assertThat().statusCode(OK.getStatusCode());
+
+ String jsonIn = Json.createObjectBuilder().add("name", DatasetType.DATASET_TYPE_SOFTWARE).build().toString();
+
+ Response typeAdded = UtilIT.addDatasetType(jsonIn, apiToken);
+ typeAdded.prettyPrint();
+ typeAdded.then().assertThat().statusCode(OK.getStatusCode());
+ }
+
+ @Test
+ public void testCreateSoftwareDatasetNative() {
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+
+ Response createDataverse = UtilIT.createRandomDataverse(apiToken);
+ createDataverse.then().assertThat().statusCode(CREATED.getStatusCode());
+ String dataverseAlias = UtilIT.getAliasFromResponse(createDataverse);
+ Integer dataverseId = UtilIT.getDataverseIdFromResponse(createDataverse);
+
+ String jsonIn = UtilIT.getDatasetJson("doc/sphinx-guides/source/_static/api/dataset-create-software.json");
+
+ Response createSoftware = UtilIT.createDataset(dataverseAlias, jsonIn, apiToken);
+ createSoftware.prettyPrint();
+
+ createSoftware.then().assertThat().statusCode(CREATED.getStatusCode());
+
+ Integer datasetId = UtilIT.getDatasetIdFromResponse(createSoftware);
+ String datasetPid = JsonPath.from(createSoftware.getBody().asString()).getString("data.persistentId");
+
+ Response getDatasetJson = UtilIT.nativeGet(datasetId, apiToken);
+ getDatasetJson.prettyPrint();
+ getDatasetJson.then().assertThat().statusCode(OK.getStatusCode());
+ String datasetType = JsonPath.from(getDatasetJson.getBody().asString()).getString("data.datasetType");
+ System.out.println("datasetType: " + datasetType);
+ assertEquals("software", datasetType);
+
+ Response searchDraft = UtilIT.searchAndShowFacets("id:dataset_" + datasetId + "_draft", apiToken);
+ searchDraft.prettyPrint();
+ searchDraft.then().assertThat()
+ .body("data.total_count", CoreMatchers.is(1))
+ .body("data.count_in_response", CoreMatchers.is(1))
+ // No "Dataset Type" or count for "Software" because we hide the facet if there is only one type.
+ .body("data.facets[0].datasetType.friendly", CoreMatchers.nullValue())
+ .body("data.facets[0].datasetType.labels[0].Software", CoreMatchers.nullValue())
+ .statusCode(OK.getStatusCode());
+
+ UtilIT.publishDataverseViaNativeApi(dataverseAlias, apiToken).then().assertThat().statusCode(OK.getStatusCode());
+ UtilIT.publishDatasetViaNativeApi(datasetPid, "major", apiToken).then().assertThat().statusCode(OK.getStatusCode());
+
+ Response createDataset = UtilIT.createRandomDatasetViaNativeApi(dataverseAlias, apiToken);
+ createDataset.prettyPrint();
+ createDataset.then().assertThat().statusCode(CREATED.getStatusCode());
+
+ String dataset2Pid = JsonPath.from(createDataset.getBody().asString()).getString("data.persistentId");
+
+ UtilIT.publishDatasetViaNativeApi(dataset2Pid, "major", apiToken).then().assertThat().statusCode(OK.getStatusCode());
+
+ Response searchCollection = UtilIT.searchAndShowFacets("parentName:" + dataverseAlias, null);
+ searchCollection.prettyPrint();
+ searchCollection.then().assertThat()
+ .body("data.total_count", CoreMatchers.is(2))
+ .body("data.count_in_response", CoreMatchers.is(2))
+ .body("data.facets[0].datasetType.friendly", CoreMatchers.is("Dataset Type"))
+ .body("data.facets[0].datasetType.labels[0].Dataset", CoreMatchers.is(1))
+ .body("data.facets[0].datasetType.labels[1].Software", CoreMatchers.is(1))
+ .statusCode(OK.getStatusCode());
+
+// Response searchAsGuest = UtilIT.search(SearchFields.DATASET_TYPE + ":software", null);
+// searchAsGuest.prettyPrint();
+// searchAsGuest.then().assertThat()
+// .body("data.total_count", CoreMatchers.is(1))
+// .body("data.count_in_response", CoreMatchers.is(1))
+// .body("data.facets[0].datasetType.friendly", CoreMatchers.is("Dataset Type"))
+// .body("data.facets[0].datasetType.labels[0].software", CoreMatchers.is(1))
+// .statusCode(OK.getStatusCode());
+ }
+
+ @Test
+ public void testCreateDatasetSemantic() {
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+
+ Response createDataverse = UtilIT.createRandomDataverse(apiToken);
+ createDataverse.then().assertThat().statusCode(CREATED.getStatusCode());
+ String dataverseAlias = UtilIT.getAliasFromResponse(createDataverse);
+ Integer dataverseId = UtilIT.getDataverseIdFromResponse(createDataverse);
+
+ String jsonIn = UtilIT.getDatasetJson("doc/sphinx-guides/source/_static/api/dataset-create-software.jsonld");
+
+ Response createSoftware = UtilIT.createDatasetSemantic(dataverseAlias, jsonIn, apiToken);
+ createSoftware.prettyPrint();
+
+ createSoftware.then().assertThat().statusCode(CREATED.getStatusCode());
+
+ Integer datasetId = UtilIT.getDatasetIdFromResponse(createSoftware);
+ String datasetPid = JsonPath.from(createSoftware.getBody().asString()).getString("data.persistentId");
+
+ Response getDatasetJson = UtilIT.nativeGet(datasetId, apiToken);
+ getDatasetJson.prettyPrint();
+ getDatasetJson.then().assertThat().statusCode(OK.getStatusCode());
+ String datasetType = JsonPath.from(getDatasetJson.getBody().asString()).getString("data.datasetType");
+ System.out.println("datasetType: " + datasetType);
+
+ assertEquals("software", datasetType);
+
+ }
+
+ @Test
+ public void testImportJson() {
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+
+ UtilIT.setSuperuserStatus(username, true).then().assertThat().statusCode(OK.getStatusCode());
+
+ Response createDataverse = UtilIT.createRandomDataverse(apiToken);
+ createDataverse.then().assertThat().statusCode(CREATED.getStatusCode());
+ String dataverseAlias = UtilIT.getAliasFromResponse(createDataverse);
+ Integer dataverseId = UtilIT.getDataverseIdFromResponse(createDataverse);
+
+ String jsonIn = UtilIT.getDatasetJson("doc/sphinx-guides/source/_static/api/dataset-create-software.json");
+
+ String randomString = UtilIT.getRandomString(6);
+
+ Response importJson = UtilIT.importDatasetNativeJson(apiToken, dataverseAlias, jsonIn, "doi:10.5072/FK2/" + randomString, "no");
+ importJson.prettyPrint();
+
+ importJson.then().assertThat().statusCode(CREATED.getStatusCode());
+
+ Integer datasetId = JsonPath.from(importJson.getBody().asString()).getInt("data.id");
+ String datasetPid = JsonPath.from(importJson.getBody().asString()).getString("data.persistentId");
+
+ Response getDatasetJson = UtilIT.nativeGet(datasetId, apiToken);
+ getDatasetJson.prettyPrint();
+ getDatasetJson.then().assertThat().statusCode(OK.getStatusCode());
+ String datasetType = JsonPath.from(getDatasetJson.getBody().asString()).getString("data.datasetType");
+ System.out.println("datasetType: " + datasetType);
+ assertEquals("software", datasetType);
+
+ }
+
+ @Test
+ public void testGetDatasetTypes() {
+ Response getTypes = UtilIT.getDatasetTypes();
+ getTypes.prettyPrint();
+ getTypes.then().assertThat()
+ .statusCode(OK.getStatusCode())
+ // non-null because types were added by a Flyway script
+ .body("data", CoreMatchers.not(equalTo(null)));
+ }
+
+ @Test
+ public void testGetDefaultDatasetType() {
+ Response getType = UtilIT.getDatasetType(DatasetType.DEFAULT_DATASET_TYPE);
+ getType.prettyPrint();
+ getType.then().assertThat()
+ .statusCode(OK.getStatusCode())
+ .body("data.name", equalTo(DatasetType.DEFAULT_DATASET_TYPE));
+ }
+
+ @Test
+ public void testDeleteDefaultDatasetType() {
+ Response getType = UtilIT.getDatasetType(DatasetType.DEFAULT_DATASET_TYPE);
+ getType.prettyPrint();
+ getType.then().assertThat()
+ .statusCode(OK.getStatusCode())
+ .body("data.name", equalTo(DatasetType.DEFAULT_DATASET_TYPE));
+
+ Long doomed = JsonPath.from(getType.getBody().asString()).getLong("data.id");
+
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+ UtilIT.setSuperuserStatus(username, true).then().assertThat().statusCode(OK.getStatusCode());
+
+ Response deleteType = UtilIT.deleteDatasetTypes(doomed, apiToken);
+ deleteType.prettyPrint();
+ deleteType.then().assertThat()
+ .statusCode(FORBIDDEN.getStatusCode());
+ }
+
+ @Test
+ public void testAddAndDeleteDatasetType() {
+ Response createUser = UtilIT.createRandomUser();
+ createUser.then().assertThat().statusCode(OK.getStatusCode());
+ String username = UtilIT.getUsernameFromResponse(createUser);
+ String apiToken = UtilIT.getApiTokenFromResponse(createUser);
+ UtilIT.setSuperuserStatus(username, true).then().assertThat().statusCode(OK.getStatusCode());
+
+ Response badJson = UtilIT.addDatasetType("this isn't even JSON", apiToken);
+ badJson.prettyPrint();
+ badJson.then().assertThat().statusCode(BAD_REQUEST.getStatusCode());
+
+ String numbersOnlyIn = Json.createObjectBuilder().add("name", "12345").build().toString();
+ Response numbersOnly = UtilIT.addDatasetType(numbersOnlyIn, apiToken);
+ numbersOnly.prettyPrint();
+ numbersOnly.then().assertThat().statusCode(BAD_REQUEST.getStatusCode());
+
+ String randomName = UUID.randomUUID().toString().substring(0, 8);
+ String jsonIn = Json.createObjectBuilder().add("name", randomName).build().toString();
+
+ System.out.println("adding type with name " + randomName);
+ Response typeAdded = UtilIT.addDatasetType(jsonIn, apiToken);
+ typeAdded.prettyPrint();
+
+ typeAdded.then().assertThat().statusCode(OK.getStatusCode());
+
+ Long doomed = JsonPath.from(typeAdded.getBody().asString()).getLong("data.id");
+
+ System.out.println("doomed: " + doomed);
+ Response getTypeById = UtilIT.getDatasetType(doomed.toString());
+ getTypeById.prettyPrint();
+ getTypeById.then().assertThat().statusCode(OK.getStatusCode());
+
+ System.out.println("deleting type with id " + doomed);
+ Response typeDeleted = UtilIT.deleteDatasetTypes(doomed, apiToken);
+ typeDeleted.prettyPrint();
+ typeDeleted.then().assertThat().statusCode(OK.getStatusCode());
+
+ }
+
+}
diff --git a/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java b/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java
index 110dc7e570e..8b170ec5fce 100644
--- a/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java
+++ b/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java
@@ -46,6 +46,7 @@
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetFieldType;
import edu.harvard.iq.dataverse.DatasetFieldValue;
+import edu.harvard.iq.dataverse.settings.FeatureFlags;
import edu.harvard.iq.dataverse.util.StringUtil;
import java.util.Collections;
@@ -534,6 +535,15 @@ static Response createDataset(String dataverseAlias, String datasetJson, String
return createDatasetResponse;
}
+ static Response createDatasetSemantic(String dataverseAlias, String datasetJson, String apiToken) {
+ Response response = given()
+ .header(API_TOKEN_HTTP_HEADER, apiToken)
+ .body(datasetJson)
+ .contentType("application/ld+json")
+ .post("/api/dataverses/" + dataverseAlias + "/datasets");
+ return response;
+ }
+
static String getDatasetJson(String pathToJsonFile) {
File datasetVersionJson = new File(pathToJsonFile);
try {
@@ -2210,6 +2220,16 @@ public static Response setSetting(String settingKey, String value) {
return response;
}
+ static Response getFeatureFlags() {
+ Response response = given().when().get("/api/admin/featureFlags");
+ return response;
+ }
+
+ static Response getFeatureFlag(FeatureFlags featureFlag) {
+ Response response = given().when().get("/api/admin/featureFlags/" + featureFlag);
+ return response;
+ }
+
static Response getRoleAssignmentsOnDataverse(String dataverseAliasOrId, String apiToken) {
String url = "/api/dataverses/" + dataverseAliasOrId + "/assignments";
return given()
@@ -3579,6 +3599,31 @@ private static DatasetField constructPrimitive(String fieldName, String value) {
return field;
}
+ static Response importDatasetNativeJson(String apiToken, String dataverseAlias, String jsonString, String pid, String release) {
+
+ String postString = "/api/dataverses/" + dataverseAlias + "/datasets/:import";
+ if (pid != null || release != null) {
+ if (pid != null) {
+ postString = postString + "?pid=" + pid;
+ if (release != null && release.equals("yes")) {
+ postString = postString + "&release=" + release;
+ }
+ } else {
+ if (release != null && release.equals("yes")) {
+ postString = postString + "?release=" + release;
+ }
+ }
+ }
+ }
+
+ RequestSpecification importJson = given()
+ .header(API_TOKEN_HTTP_HEADER, apiToken)
+ .urlEncodingEnabled(false)
+ .body(jsonString)
+ .contentType("application/json");
+
+ return importJson.post(postString);
+ }
static Response importDatasetDDIViaNativeApi(String apiToken, String dataverseAlias, String xml, String pid, String release) {
@@ -4041,6 +4086,11 @@ static Response listDataverseFacets(String dataverseAlias, boolean returnDetails
.get("/api/dataverses/" + dataverseAlias + "/facets");
}
+ static Response listAllFacetableDatasetFields() {
+ return given()
+ .get("/api/datasetfields/facetables");
+ }
+
static Response listDataverseInputLevels(String dataverseAlias, String apiToken) {
return given()
.header(API_TOKEN_HTTP_HEADER, apiToken)
@@ -4048,8 +4098,30 @@ static Response listDataverseInputLevels(String dataverseAlias, String apiToken)
.get("/api/dataverses/" + dataverseAlias + "/inputLevels");
}
- static Response listAllFacetableDatasetFields() {
+ public static Response getDatasetTypes() {
+ Response response = given()
+ .get("/api/datasets/datasetTypes");
+ return response;
+ }
+
+ static Response getDatasetType(String idOrName) {
return given()
- .get("/api/datasetfields/facetables");
+ .get("/api/datasets/datasetTypes/" + idOrName);
}
+
+ static Response addDatasetType(String jsonIn, String apiToken) {
+ System.out.println("called addDatasetType...");
+ return given()
+ .header(API_TOKEN_HTTP_HEADER, apiToken)
+ .body(jsonIn)
+ .contentType(ContentType.JSON)
+ .post("/api/datasets/datasetTypes");
+ }
+
+ static Response deleteDatasetTypes(long doomed, String apiToken) {
+ return given()
+ .header(API_TOKEN_HTTP_HEADER, apiToken)
+ .delete("/api/datasets/datasetTypes/" + doomed);
+ }
+
}
diff --git a/src/test/java/edu/harvard/iq/dataverse/engine/TestCommandContext.java b/src/test/java/edu/harvard/iq/dataverse/engine/TestCommandContext.java
index f2c03adea20..b4b9c0d33f2 100644
--- a/src/test/java/edu/harvard/iq/dataverse/engine/TestCommandContext.java
+++ b/src/test/java/edu/harvard/iq/dataverse/engine/TestCommandContext.java
@@ -8,6 +8,7 @@
import edu.harvard.iq.dataverse.authorization.groups.impl.explicit.ExplicitGroupServiceBean;
import edu.harvard.iq.dataverse.confirmemail.ConfirmEmailServiceBean;
import edu.harvard.iq.dataverse.datacapturemodule.DataCaptureModuleServiceBean;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.command.Command;
import edu.harvard.iq.dataverse.engine.command.CommandContext;
import edu.harvard.iq.dataverse.ingest.IngestServiceBean;
@@ -234,7 +235,12 @@ public ActionLogServiceBean actionLog() {
public MetadataBlockServiceBean metadataBlocks() {
return null;
}
-
+
+ @Override
+ public DatasetTypeServiceBean datasetTypes() {
+ return null;
+ }
+
@Override
public StorageUseServiceBean storageUse() {
return null;
diff --git a/src/test/java/edu/harvard/iq/dataverse/export/SchemaDotOrgExporterTest.java b/src/test/java/edu/harvard/iq/dataverse/export/SchemaDotOrgExporterTest.java
index 2139589b4c3..9850e9d80e9 100644
--- a/src/test/java/edu/harvard/iq/dataverse/export/SchemaDotOrgExporterTest.java
+++ b/src/test/java/edu/harvard/iq/dataverse/export/SchemaDotOrgExporterTest.java
@@ -2,6 +2,7 @@
import edu.harvard.iq.dataverse.*;
import edu.harvard.iq.dataverse.branding.BrandingUtilTest;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import io.gdcc.spi.export.ExportDataProvider;
import io.gdcc.spi.export.XMLExporter;
import edu.harvard.iq.dataverse.license.License;
@@ -53,6 +54,7 @@ public class SchemaDotOrgExporterTest {
private static final MockDatasetFieldSvc datasetFieldTypeSvc = new MockDatasetFieldSvc();
private static final SettingsServiceBean settingsService = Mockito.mock(SettingsServiceBean.class);
private static final LicenseServiceBean licenseService = Mockito.mock(LicenseServiceBean.class);
+ private static final DatasetTypeServiceBean datasetTypeService = Mockito.mock(DatasetTypeServiceBean.class);
private static final SchemaDotOrgExporter schemaDotOrgExporter = new SchemaDotOrgExporter();
@BeforeAll
@@ -173,7 +175,7 @@ public void testExportDescriptionTruncation() throws JsonParseException, ParseEx
private JsonObject createExportFromJson(ExportDataProvider provider) throws JsonParseException, ParseException {
License license = new License("CC0 1.0", "You can copy, modify, distribute and perform the work, even for commercial purposes, all without asking permission.", URI.create("http://creativecommons.org/publicdomain/zero/1.0/"), URI.create("/resources/images/cc0.png"), true, 1l);
license.setDefault(true);
- JsonParser jsonParser = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService);
+ JsonParser jsonParser = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService, datasetTypeService);
DatasetVersion version = jsonParser.parseDatasetVersion(provider.getDatasetJson().getJsonObject("datasetVersion"));
version.setVersionState(DatasetVersion.VersionState.RELEASED);
SimpleDateFormat dateFmt = new SimpleDateFormat("yyyyMMdd");
diff --git a/src/test/java/edu/harvard/iq/dataverse/feedback/FeedbackUtilTest.java b/src/test/java/edu/harvard/iq/dataverse/feedback/FeedbackUtilTest.java
index 7c31db5bee2..072be13dcec 100644
--- a/src/test/java/edu/harvard/iq/dataverse/feedback/FeedbackUtilTest.java
+++ b/src/test/java/edu/harvard/iq/dataverse/feedback/FeedbackUtilTest.java
@@ -13,6 +13,7 @@
import edu.harvard.iq.dataverse.DvObject;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.authorization.users.AuthenticatedUser;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.license.LicenseServiceBean;
import edu.harvard.iq.dataverse.mocks.MockDatasetFieldSvc;
import edu.harvard.iq.dataverse.mocks.MocksFactory;
@@ -55,6 +56,7 @@ public class FeedbackUtilTest {
private static InternetAddress systemAddress;
private static final SettingsServiceBean settingsService = Mockito.mock(SettingsServiceBean.class);
private static final LicenseServiceBean licenseService = Mockito.mock(LicenseServiceBean.class);
+ private static final DatasetTypeServiceBean datasetTypeService = Mockito.mock(DatasetTypeServiceBean.class);
private static final String systemEmail = "support@librascholar.edu";
private static final boolean weKnowHowToCreateMockAuthenticatedUsers = false;
@@ -144,7 +146,7 @@ public static void setUpClass() throws IOException, JsonParseException, AddressE
JsonReader jsonReader1 = Json.createReader(new StringReader(datasetVersionAsJson));
JsonObject json1 = jsonReader1.readObject();
- JsonParser jsonParser = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService);
+ JsonParser jsonParser = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService, datasetTypeService);
dsVersion = jsonParser.parseDatasetVersion(json1.getJsonObject("datasetVersion"));
File datasetVersionJson2 = new File("tests/data/datasetContacts1.json");
@@ -153,14 +155,14 @@ public static void setUpClass() throws IOException, JsonParseException, AddressE
JsonReader jsonReader12 = Json.createReader(new StringReader(datasetVersionAsJson2));
JsonObject json12 = jsonReader12.readObject();
- JsonParser jsonParser2 = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService);
+ JsonParser jsonParser2 = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService, datasetTypeService);
dsVersion2 = jsonParser2.parseDatasetVersion(json12.getJsonObject("datasetVersion"));
File datasetVersionJsonNoContacts = new File("tests/data/datasetNoContacts.json");
String datasetVersionAsJsonNoContacts = new String(Files.readAllBytes(Paths.get(datasetVersionJsonNoContacts.getAbsolutePath())));
JsonReader jsonReaderNoContacts = Json.createReader(new StringReader(datasetVersionAsJsonNoContacts));
JsonObject jsonNoContacts = jsonReaderNoContacts.readObject();
- JsonParser jsonParserNoContacts = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService);
+ JsonParser jsonParserNoContacts = new JsonParser(datasetFieldTypeSvc, null, settingsService, licenseService, datasetTypeService);
dsVersionNoContacts = jsonParserNoContacts.parseDatasetVersion(jsonNoContacts.getJsonObject("datasetVersion"));
FeedbackUtil justForCodeCoverage = new FeedbackUtil();
diff --git a/src/test/java/edu/harvard/iq/dataverse/search/IndexServiceBeanTest.java b/src/test/java/edu/harvard/iq/dataverse/search/IndexServiceBeanTest.java
index c062f63e264..124ce19369c 100644
--- a/src/test/java/edu/harvard/iq/dataverse/search/IndexServiceBeanTest.java
+++ b/src/test/java/edu/harvard/iq/dataverse/search/IndexServiceBeanTest.java
@@ -3,6 +3,7 @@
import edu.harvard.iq.dataverse.*;
import edu.harvard.iq.dataverse.Dataverse.DataverseType;
import edu.harvard.iq.dataverse.branding.BrandingUtil;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
import edu.harvard.iq.dataverse.mocks.MocksFactory;
import edu.harvard.iq.dataverse.pidproviders.doi.AbstractDOIProvider;
import edu.harvard.iq.dataverse.settings.JvmSettings;
@@ -142,6 +143,9 @@ private IndexableDataset createIndexableDataset() {
datasetVersion.getDatasetFields().add(field);
final IndexableDataset indexableDataset = new IndexableDataset(datasetVersion);
indexableDataset.getDatasetVersion().getDataset().setOwner(dataverse);
+ DatasetType datasetType = new DatasetType();
+ datasetType.setName(DatasetType.DEFAULT_DATASET_TYPE);
+ indexableDataset.getDatasetVersion().getDataset().setDatasetType(datasetType);
return indexableDataset;
}
diff --git a/src/test/java/edu/harvard/iq/dataverse/util/json/JsonParserTest.java b/src/test/java/edu/harvard/iq/dataverse/util/json/JsonParserTest.java
index 972fc9c41cd..59e175f30c1 100644
--- a/src/test/java/edu/harvard/iq/dataverse/util/json/JsonParserTest.java
+++ b/src/test/java/edu/harvard/iq/dataverse/util/json/JsonParserTest.java
@@ -23,6 +23,8 @@
import edu.harvard.iq.dataverse.authorization.groups.impl.maildomain.MailDomainGroup;
import edu.harvard.iq.dataverse.authorization.groups.impl.maildomain.MailDomainGroupTest;
import edu.harvard.iq.dataverse.authorization.users.GuestUser;
+import edu.harvard.iq.dataverse.dataset.DatasetType;
+import edu.harvard.iq.dataverse.dataset.DatasetTypeServiceBean;
import edu.harvard.iq.dataverse.engine.command.DataverseRequest;
import edu.harvard.iq.dataverse.license.LicenseServiceBean;
import edu.harvard.iq.dataverse.mocks.MockDatasetFieldSvc;
@@ -72,6 +74,7 @@ public class JsonParserTest {
MockDatasetFieldSvc datasetFieldTypeSvc = null;
MockSettingsSvc settingsSvc = null;
LicenseServiceBean licenseService = Mockito.mock(LicenseServiceBean.class);
+ DatasetTypeServiceBean datasetTypeService = Mockito.mock(DatasetTypeServiceBean.class);
DatasetFieldType keywordType;
DatasetFieldType descriptionType;
DatasetFieldType subjectType;
@@ -124,7 +127,11 @@ public void setUp() {
}
compoundSingleType.setChildDatasetFieldTypes(childTypes);
settingsSvc = new MockSettingsSvc();
- sut = new JsonParser(datasetFieldTypeSvc, null, settingsSvc, licenseService);
+ DatasetType datasetType = new DatasetType();
+ datasetType.setName(DatasetType.DEFAULT_DATASET_TYPE);
+ datasetType.setId(1l);
+ Mockito.when(datasetTypeService.getByName(DatasetType.DEFAULT_DATASET_TYPE)).thenReturn(datasetType);
+ sut = new JsonParser(datasetFieldTypeSvc, null, settingsSvc, licenseService, datasetTypeService);
}
@Test
diff --git a/tests/integration-tests.txt b/tests/integration-tests.txt
index fc3fc9b4a3f..e1dad7a75b1 100644
--- a/tests/integration-tests.txt
+++ b/tests/integration-tests.txt
@@ -1 +1 @@
-DataversesIT,DatasetsIT,SwordIT,AdminIT,BuiltinUsersIT,UsersIT,UtilIT,ConfirmEmailIT,FileMetadataIT,FilesIT,SearchIT,InReviewWorkflowIT,HarvestingServerIT,HarvestingClientsIT,MoveIT,MakeDataCountApiIT,FileTypeDetectionIT,EditDDIIT,ExternalToolsIT,AccessIT,DuplicateFilesIT,DownloadFilesIT,LinkIT,DeleteUsersIT,DeactivateUsersIT,AuxiliaryFilesIT,InvalidCharactersIT,LicensesIT,NotificationsIT,BagIT,MetadataBlocksIT,NetcdfIT,SignpostingIT,FitsIT,LogoutIT,DataRetrieverApiIT,ProvIT,S3AccessIT,OpenApiIT,InfoIT,DatasetFieldsIT,SavedSearchIT
+DataversesIT,DatasetsIT,SwordIT,AdminIT,BuiltinUsersIT,UsersIT,UtilIT,ConfirmEmailIT,FileMetadataIT,FilesIT,SearchIT,InReviewWorkflowIT,HarvestingServerIT,HarvestingClientsIT,MoveIT,MakeDataCountApiIT,FileTypeDetectionIT,EditDDIIT,ExternalToolsIT,AccessIT,DuplicateFilesIT,DownloadFilesIT,LinkIT,DeleteUsersIT,DeactivateUsersIT,AuxiliaryFilesIT,InvalidCharactersIT,LicensesIT,NotificationsIT,BagIT,MetadataBlocksIT,NetcdfIT,SignpostingIT,FitsIT,LogoutIT,DataRetrieverApiIT,ProvIT,S3AccessIT,OpenApiIT,InfoIT,DatasetFieldsIT,SavedSearchIT,DatasetTypesIT
\ No newline at end of file