/**
 * Parses a JSON response to extract an entity document.
 * <p>
 * TODO This method currently contains code to work around Wikibase issue
 * https://phabricator.wikimedia.org/T73349. This should be removed once the
 * issue is fixed.
 *
 * @param entityNode
 *            the JSON node that should contain the entity document data
 * @return the entity document, or null if there were unrecoverable errors
 * @throws IOException
 * @throws JsonProcessingException
 */
private EntityDocument parseJsonResponse(JsonNode entityNode)
        throws JsonProcessingException, IOException {
    try {
        JacksonTermedStatementDocument ed = mapper.treeToValue(entityNode,
                JacksonTermedStatementDocument.class);
        ed.setSiteIri(this.siteIri);
        return ed;
    } catch (JsonProcessingException e) {
        logger.warn("Error when reading JSON for entity "
                + entityNode.path("id").asText("UNKNOWN") + ": "
                + e.toString()
                + "\nTrying to manually fix issue https://phabricator.wikimedia.org/T73349.");
        // The affected responses serialize empty term/claim containers as []
        // rather than {}; rewrite them so Jackson can map them to empty objects.
        String jsonString = entityNode.toString();
        jsonString = jsonString
                .replace("\"sitelinks\":[]", "\"sitelinks\":{}")
                .replace("\"labels\":[]", "\"labels\":{}")
                .replace("\"aliases\":[]", "\"aliases\":{}")
                .replace("\"claims\":[]", "\"claims\":{}")
                .replace("\"descriptions\":[]", "\"descriptions\":{}");
        ObjectReader documentReader = this.mapper
                .reader(JacksonTermedStatementDocument.class);
        JacksonTermedStatementDocument ed = documentReader
                .readValue(jsonString);
        ed.setSiteIri(this.siteIri);
        return ed;
    }
}
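/*
 * Illustrative sketch (not part of the original class): one way the T73349
 * workaround in parseJsonResponse() could be exercised. The JSON literal is a
 * hypothetical API fragment in which empty term and claim containers are
 * serialized as [] rather than {}; the method name exampleParseWithWorkaround
 * is invented for illustration.
 */
private EntityDocument exampleParseWithWorkaround() throws IOException {
    // Empty "labels", "descriptions", "aliases", "claims" and "sitelinks"
    // appear as arrays here, which makes Jackson's treeToValue() fail for
    // the map-typed fields of JacksonTermedStatementDocument.
    JsonNode brokenNode = mapper.readTree("{\"type\":\"item\",\"id\":\"Q42\","
            + "\"labels\":[],\"descriptions\":[],\"aliases\":[],"
            + "\"claims\":[],\"sitelinks\":[]}");
    // parseJsonResponse() first tries the normal path and then falls back to
    // the string-replacement workaround, still returning a usable document.
    return parseJsonResponse(brokenNode);
}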
@Override
public StatementsResultLRSResponse saveStatements(List<Statement> statements) {
    StatementsResultLRSResponse lrsResponse = new StatementsResultLRSResponse();

    if (statements.isEmpty()) {
        lrsResponse.setSuccess(true);
        return lrsResponse;
    }

    ArrayNode rootNode = Mapper.getInstance().createArrayNode();
    for (Statement statement : statements) {
        rootNode.add(statement.toJSONNode(getVersion()));
    }

    lrsResponse.setRequest(new HTTPRequest());
    lrsResponse.getRequest().setResource("statements");
    lrsResponse.getRequest().setMethod(HttpMethods.POST);
    lrsResponse.getRequest().setContentType("application/json");
    try {
        lrsResponse.getRequest().setContent(
                Mapper.getWriter(this.usePrettyJSON()).writeValueAsBytes(rootNode));
    } catch (JsonProcessingException ex) {
        lrsResponse.setErrMsg("Exception: " + ex.toString());
        return lrsResponse;
    }

    HTTPResponse response = makeSyncRequest(lrsResponse.getRequest());
    int status = response.getStatus();

    lrsResponse.setResponse(response);

    if (status == 200) {
        lrsResponse.setSuccess(true);
        lrsResponse.setContent(new StatementsResult());
        try {
            // The LRS answers with an array of statement IDs in the same order
            // as the submitted statements; copy each ID onto its statement.
            Iterator<JsonNode> it = Mapper.getInstance()
                    .readValue(response.getContent(), ArrayNode.class).elements();
            for (int i = 0; it.hasNext(); ++i) {
                lrsResponse.getContent().getStatements().add(statements.get(i));
                lrsResponse.getContent().getStatements().get(i)
                        .setId(UUID.fromString(it.next().textValue()));
            }
        } catch (Exception ex) {
            lrsResponse.setErrMsg("Exception: " + ex.toString());
            lrsResponse.setSuccess(false);
        }
    } else {
        lrsResponse.setSuccess(false);
    }

    return lrsResponse;
}
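/*
 * Illustrative usage sketch (not part of the original class): how a caller
 * might consume the result of saveStatements(). It assumes the usual getters
 * matching the setters used above (getSuccess, getErrMsg, Statement.getId);
 * the helper name exampleSaveStatements and the console output are
 * assumptions added for illustration.
 */
private void exampleSaveStatements(List<Statement> statements) {
    StatementsResultLRSResponse resp = saveStatements(statements);
    if (resp.getSuccess()) {
        // On HTTP 200 the LRS-assigned IDs have been copied back onto the
        // returned statements, in the same order as they were submitted.
        for (Statement saved : resp.getContent().getStatements()) {
            UUID assignedId = saved.getId();
            System.out.println("Stored statement with id " + assignedId);
        }
    } else {
        // Either the request body could not be serialized, the HTTP call
        // failed, or the LRS answered with a non-200 status.
        System.err.println("saveStatements failed: " + resp.getErrMsg());
    }
}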
/**
 * Creates a map of identifiers or page titles to documents retrieved via
 * the API. All parameters that accept lists expect the pipe character | to
 * be used as a separator, as created by
 * {@link ApiConnection#implodeObjects(Iterable)}. There is a limit on how
 * many entities can be retrieved in one request, usually 50 by default and
 * 500 for bots. This limit may also apply to the number of language codes
 * and sites used for filtering.
 * <p>
 * The method can fail in two ways. If errors occur (e.g., exceptions when
 * trying to access the Web API), then the errors will be logged and null
 * will be returned. If the request is made but the API returns errors,
 * then the errors will be logged and an empty map is returned.
 *
 * @param ids
 *            list of ids of entities for which data should be retrieved
 * @param sites
 *            site key (e.g. "enwiki"); used together with parameter
 *            "titles"; the API supports the use of many site keys with a
 *            single title, but this implementation does not support this
 *            (the resulting map will use title strings for keys)
 * @param titles
 *            list of titles of the page corresponding to the requested
 *            entities on the given site; use together with "sites", but
 *            only give one site for several titles or several sites for
 *            one title
 * @param props
 *            list of strings that specifies what kind of data should be
 *            retrieved for each entity; possible values include "info",
 *            "sitelinks", "sitelinks/urls", "aliases", "labels",
 *            "descriptions", "claims" (statements), "datatype"; additional
 *            filters may apply; defaults to
 *            "info|sitelinks|aliases|labels|descriptions|claims|datatype"
 * @param languages
 *            list of language codes to return labels, aliases or
 *            descriptions for; if omitted, data for all languages is
 *            returned
 * @param sitefilter
 *            list of site keys to return sitelinks for; if omitted, data
 *            for all sites is returned
 * @return map of document identifiers or titles to documents retrieved via
 *         the API URL, or null if there were errors
 * @throws MediaWikiApiErrorException
 *             if the API returns an error
 * @throws IllegalArgumentException
 *             if the given combination of parameters does not make sense
 */
public Map<String, EntityDocument> wbGetEntities(String ids, String sites,
        String titles, String props, String languages, String sitefilter)
        throws MediaWikiApiErrorException {

    Map<String, String> parameters = new HashMap<String, String>();
    parameters.put(ApiConnection.PARAM_ACTION, "wbgetentities");

    if (ids != null) {
        parameters.put("ids", ids);
        if (titles != null || sites != null) {
            throw new IllegalArgumentException(
                    "Cannot use parameters \"sites\" or \"titles\" when using ids to get entity data");
        }
    } else if (titles != null) {
        parameters.put("titles", titles);
        if (sites == null) {
            throw new IllegalArgumentException(
                    "Sites parameter is required when using titles parameter to get entity data.");
        }
        parameters.put("sites", sites);
    } else {
        throw new IllegalArgumentException(
                "Either ids, or titles and site must be specified for this action.");
    }

    if (props != null) {
        parameters.put("props", props);
    }
    if (languages != null) {
        parameters.put("languages", languages);
    }
    if (sitefilter != null) {
        parameters.put("sitefilter", sitefilter);
    }

    parameters.put(ApiConnection.PARAM_FORMAT, "json");

    try (InputStream response = this.connection.sendRequest("POST", parameters)) {
        JsonNode root = mapper.readTree(response);

        Map<String, EntityDocument> result = new HashMap<String, EntityDocument>();

        this.connection.checkErrors(root);
        this.connection.logWarnings(root);

        JsonNode entities = root.path("entities");
        for (JsonNode entityNode : entities) {
            if (!entityNode.has("missing")) {
                try {
                    JacksonTermedStatementDocument ed = mapper.treeToValue(
                            entityNode, JacksonTermedStatementDocument.class);
                    ed.setSiteIri(this.siteIri);
                    if (titles == null) {
                        // When fetching by ids, key the result by entity id.
                        result.put(ed.getEntityId().getId(), ed);
                    } else {
                        // When fetching by titles, key the result by page
                        // title; only item documents carry site links.
                        if (ed instanceof JacksonItemDocument
                                && ((JacksonItemDocument) ed).getSiteLinks()
                                        .containsKey(sites)) {
                            result.put(((JacksonItemDocument) ed).getSiteLinks()
                                    .get(sites).getPageTitle(), ed);
                        }
                    }
                } catch (JsonProcessingException e) {
                    logger.error("Error when reading JSON for entity "
                            + entityNode.path("id").asText("UNKNOWN") + ": "
                            + e.toString());
                }
            }
        }
        return result;
    } catch (IOException e) {
        logger.error("Could not retrieve data: " + e.toString());
        return null;
    }
}
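/*
 * Illustrative usage sketch (not part of the original class): two ways of
 * calling wbGetEntities(), by entity id and by site/title. The pipe-separated
 * parameter strings follow the convention documented above; the concrete ids
 * and the page title used here are merely examples, and the helper name
 * exampleGetEntities is invented for illustration.
 */
private void exampleGetEntities() throws MediaWikiApiErrorException {
    // Fetch two entities by id, restricted to English labels and sitelinks.
    Map<String, EntityDocument> byId = wbGetEntities("Q42|Q64", null, null,
            "labels|sitelinks", "en", null);
    if (byId != null) {
        // The result map is keyed by entity id.
        EntityDocument firstDocument = byId.get("Q42");
        logger.info("Fetched by id: " + firstDocument);
    }

    // Fetch one entity via its page title on a single site; the result map
    // is then keyed by page title rather than by entity id.
    Map<String, EntityDocument> byTitle = wbGetEntities(null, "enwiki",
            "Douglas Adams", null, null, null);
    if (byTitle != null) {
        EntityDocument titleDocument = byTitle.get("Douglas Adams");
        logger.info("Fetched by title: " + titleDocument);
    }
}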