/**
 * Runs the topic-based and keyword-based author-collaboration queries for a
 * venue and writes each result to a CSV file under {@code out/}.
 *
 * @param mgr     provides the database connection
 * @param venueId research domain (venue) the query is restricted to
 * @param stend   two-element array: stend[0] = earliest year, stend[1] = latest year
 * @throws SQLException if query execution fails
 * @throws IOException  if a CSV output file cannot be written
 */
private static void runTopicCollab(DataManager mgr, int venueId, int[] stend)
        throws SQLException, IOException {
    DBLPTopicAuthCollabQuery tquery2 = new DBLPTopicAuthCollabQuery();
    tquery2.setResearchDomain(venueId);
    tquery2.setEarliestLatestYear(stend[0], stend[1]);

    // Topic-based collaboration index.
    tquery2.setTopicBasedCollaboration();
    CsvWriter outfile = new CsvWriter("out/topiccollabindex.csv");
    try {
        tquery2.processQuery(mgr.getConnection(), outfile);
    } finally {
        // Close even when the query fails so the file handle is not leaked.
        outfile.close();
    }

    // Keyword-based collaboration index.
    tquery2.setKWBasedCollaboration();
    outfile = new CsvWriter("out/collabindex.csv");
    try {
        tquery2.processQuery(mgr.getConnection(), outfile);
    } finally {
        outfile.close();
    }
}
@Override public void createSchema(SimpleFeatureType featureType) throws IOException { List<String> header = new ArrayList<String>(); GeometryDescriptor geometryDescrptor = featureType.getGeometryDescriptor(); if (geometryDescrptor != null && CRS.equalsIgnoreMetadata( DefaultGeographicCRS.WGS84, geometryDescrptor.getCoordinateReferenceSystem()) && geometryDescrptor.getType().getBinding().isAssignableFrom(Point.class)) { header.add(this.latField); header.add(this.lngField); } else { throw new IOException( "Unable use '" + this.latField + "' / '" + this.lngField + "' to represent " + geometryDescrptor); } for (AttributeDescriptor descriptor : featureType.getAttributeDescriptors()) { if (descriptor instanceof GeometryDescriptor) continue; header.add(descriptor.getLocalName()); } // Write out header, producing an empty file of the correct type CsvWriter writer = new CsvWriter(new FileWriter(this.csvFileState.getFile()), ','); try { writer.writeRecord(header.toArray(new String[header.size()])); } finally { writer.close(); } }
/**
 * Adds the searched tweets to the CSV base file ({@code AppliSettings.filename}).
 * Tweets already present in the base (matched by id via {@code alreadyIn}) only
 * get their note updated; new tweets are appended as full records and added to
 * {@code baseTweets}. I/O failures are reported to stdout and swallowed.
 */
public void saveBase() {
    if (baseTweets == null) {
        baseTweets = new ArrayList<Tweet>();
    }
    CsvWriter csvOutput = null;
    try {
        // Open in append mode; ';' is the base file's delimiter.
        csvOutput = new CsvWriter(new FileWriter(AppliSettings.filename, true), ';');
        for (Tweet tweet : listCleanTweets) {
            int index = alreadyIn(tweet.getId());
            if (index == -1) {
                // Unknown tweet: append a full record and remember it in memory.
                csvOutput.write("" + tweet.getId());
                csvOutput.write(tweet.getUser());
                String text = cleanTweet(tweet.getTweet());
                csvOutput.write(text);
                csvOutput.write("" + tweet.getDate());
                csvOutput.write("" + tweet.getNote());
                csvOutput.endRecord();
                baseTweets.add(tweet);
            } else {
                // Known tweet: only its note may have changed.
                updateCSV(tweet.getNote(), index);
            }
        }
    } catch (IOException e) {
        System.out.println("saveBase");
        System.out.println(e.getMessage());
    } finally {
        // Close even on failure so the file handle is not leaked.
        if (csvOutput != null) {
            csvOutput.close();
        }
    }
}
public void exportarProspecto(String nombreArchivo) { String outputFile = nombreArchivo; // before we open the file check to see if it already exists boolean alreadyExists = new File(outputFile).exists(); try { // use FileWriter constructor that specifies open for appending CsvWriter csvOutput = new CsvWriter(new FileWriter(outputFile, true), ','); // if the file didn't already exist then we need to write out the header line if (!alreadyExists) { csvOutput.write("Dni"); csvOutput.write("Nombres"); csvOutput.write("Apellido_Paterno"); csvOutput.write("Apellido_Materno"); csvOutput.write("Telefono"); csvOutput.write("FechaContacto"); csvOutput.write("Correo"); csvOutput.write("Direccion"); csvOutput.write("Distrito"); csvOutput.write("Departamento"); csvOutput.endRecord(); } // else assume that the file already has the correct header line List<Prospecto> prospecto = new ArrayList<Prospecto>(); prospecto = devolverListaProspecto(); for (Prospecto prospectos : prospecto) { // write out a few records csvOutput.write(prospectos.getDni()); csvOutput.write(prospectos.getNombres()); csvOutput.write(prospectos.getApellido_Paterno()); csvOutput.write(prospectos.getApellido_Materno()); csvOutput.write(prospectos.getTelefono()); csvOutput.write(prospectos.getFechaContacto()); csvOutput.write(prospectos.getCorreo()); csvOutput.write(prospectos.getDireccion()); csvOutput.write(prospectos.getDistrito()); csvOutput.write(prospectos.getDepartamento()); csvOutput.endRecord(); } csvOutput.close(); } catch (IOException e) { e.printStackTrace(); } }
/**
 * Runs the topic half-life query (DBLPTopicQuery5: cited half-life and
 * prospective half-life) for a venue and writes the result to
 * {@code out/newtopichl.5.csv}.
 *
 * <p>Earlier experiments (topic distribution by paper/citation count,
 * author topic affinity, DBLPTopicQuery2) previously lived here as
 * commented-out code and have been removed; recover them from version
 * control if needed.
 *
 * @param mgr      provides the database connection
 * @param venueId  research domain (venue) the query is restricted to
 * @param stend    two-element array: stend[0] = earliest year, stend[1] = latest year
 * @param timestep number of years per analysis window
 * @throws SQLException if query execution fails
 * @throws IOException  if the CSV output file cannot be written
 */
private static void runTopicDistribution(DataManager mgr, int venueId, int[] stend, int timestep)
        throws SQLException, IOException {
    System.out.println(
            "Processing Topic Query#4- half-life calculation based on Cited Half-Life and prospective half-life");
    CsvWriter outfile = new CsvWriter("out/newtopichl.5.csv");
    try {
        DBLPTopicQuery5 query = new DBLPTopicQuery5();
        query.setResearchDomain(venueId);
        query.setEarliestLatestYear(stend[0], stend[1]);
        query.setTimeStep(timestep);
        query.processQuery(mgr.getConnection(), outfile);
    } finally {
        // Close even when the query fails so the file handle is not leaked.
        outfile.close();
    }
}
/**
 * Writes one CSV scan report per remote host to
 * {@code ./host/<time>_<ip>/<time>_<ip>_EPAV_Result.csv}, covering host
 * information, open ports, and detected vulnerabilities. A failure for one
 * host is logged and does not stop the remaining hosts.
 *
 * @param remoteHostList hosts to report on
 * @param currentTime    timestamp used in the per-host directory/file names
 */
public void createCSVResultFile(RemoteHostList remoteHostList, String currentTime) {
    for (RemoteHost rh : remoteHostList.getListOfRemoteHost()) {
        String outputCsvFile = "./host/" + currentTime + "_" + rh.getIP() + "/"
                + currentTime + "_" + rh.getIP() + "_EPAV_Result.csv";
        CsvWriter csvOutput = null;
        try {
            // Space-delimited report, opened in append mode.
            csvOutput = new CsvWriter(new FileWriter(outputCsvFile, true), ' ');
            writeHostInfoSection(csvOutput, rh);
            writePortSection(csvOutput, rh);
            writeVulnerabilitySection(csvOutput, rh);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Close even on failure so the file handle is not leaked.
            if (csvOutput != null) {
                csvOutput.close();
            }
        }
    }
}

/** Writes the host-information section (IP, MAC, OS). */
private void writeHostInfoSection(CsvWriter csvOutput, RemoteHost rh) throws IOException {
    csvOutput.write("Remote host infomation:"); // sic: kept byte-for-byte for output compatibility
    csvOutput.endRecord();
    csvOutput.endRecord();
    csvOutput.write("IP Address:");
    csvOutput.write(rh.getIP());
    csvOutput.endRecord();
    csvOutput.write("MAC Address:");
    csvOutput.write(rh.getMAC());
    csvOutput.endRecord();
    csvOutput.write("Operating system details:");
    csvOutput.write(rh.getOS());
    csvOutput.endRecord();
    csvOutput.endRecord();
}

/** Writes the port table: header row followed by one row per port. */
private void writePortSection(CsvWriter csvOutput, RemoteHost rh) throws IOException {
    csvOutput.write("List of Ports:");
    csvOutput.endRecord();
    csvOutput.write("Name");
    csvOutput.write("State");
    csvOutput.write("Service");
    csvOutput.write("Warning");
    csvOutput.write("Solution");
    csvOutput.endRecord();
    for (Port p : rh.getPorts().getListOfPort()) {
        csvOutput.write(p.getName());
        csvOutput.write(p.getState());
        csvOutput.write(p.getService());
        csvOutput.write(p.getWarning());
        csvOutput.write(p.getSolution());
        csvOutput.endRecord();
    }
    csvOutput.endRecord();
}

/** Writes the vulnerability table: header row followed by one row per finding. */
private void writeVulnerabilitySection(CsvWriter csvOutput, RemoteHost rh) throws IOException {
    csvOutput.write("List of Vulnerabilities:");
    csvOutput.endRecord();
    csvOutput.write("Name");
    csvOutput.write("State");
    csvOutput.write("Patches");
    csvOutput.endRecord();
    for (Vulnerability v : rh.getVulnerabilities().getListOfVulnerability()) {
        csvOutput.write(v.getName());
        csvOutput.write(v.getState());
        csvOutput.write(v.getPatchList().display());
        csvOutput.endRecord();
    }
}
public static void main(String argv[]) throws IOException { argv[0] = "-mis"; argv[1] = "/Users/Indri/Eclipse_workspace/GeoNames/cities1000.txt"; argv[2] = "/Users/Indri/Eclipse_workspace/GazIndex"; argv[3] = "SampleInput/jsonTweets.txt"; argv[4] = "-json"; argv[5] = "SampleOutput/jsonTweets.out.csv"; boolean misspell = argv[0].equals("-mis") ? true : false; String dicPath = argv[1]; // = "GeoNames/allCountries.txt";// gazetteer from geonames String indexPath = argv[2]; // index path String input = argv[3]; // = "tweet.csv";//to be determined.// test file path String type = argv[4]; // -json or -text String output = argv[5]; // = "output2.csv"; //output file path CollaborativeIndex ci = new CollaborativeIndex() .config("GazIndex/StringIndex", "GazIndex/InfoIndex", "mmap", "mmap") .open(); EnglishParser enparser = new EnglishParser("res/", ci, false); ContextDisamb c = new ContextDisamb(); LangDetector lang = new LangDetector("res/langdetect.profile"); BufferedReader reader = GetReader.getUTF8FileReader(argv[3]); CsvWriter writer = new CsvWriter(output, ',', Charset.forName("utf-8")); // write writer.writeRecord(new String[] {"SPANISH TWEETS", "LOCATIONS"}); String line = null; while ((line = reader.readLine()) != null) { line = line.trim(); if (line.length() == 0) continue; Tweet t = new Tweet(); String text = null; if (argv[4].equals("-text")) text = line; else try { text = (DataObjectFactory.createStatus(line.trim()).getText()); } catch (TwitterException e) { // TODO Auto-generated catch block System.err.println("JSON format corrupted, or no content."); continue; } t.setText(text); List<String> match = enparser.parse(t); // Generate Matches if (match == null || match.size() == 0) { /** write blank result and the line itself if no match found. 
*/ writer.writeRecord(new String[] {text, ""}); continue; } HashSet<String> reducedmatch = new HashSet<String>(); for (String s : match) reducedmatch.add(s.substring(3, s.length() - 3)); // Disambiguate topo HashMap<String, String[]> result = c.returnBestTopo(ci, reducedmatch); if (result == null) { System.out.println("No GPS for any location is found."); } else { System.out.println("The grounded location(s) are:"); String topoStr = ""; for (String topo : result.keySet()) topoStr += "[" + (topo + ": " + result.get(topo)[2] + " " + result.get(topo)[0] + " " + result.get(topo)[1]) + "] "; writer.writeRecord(new String[] {text, topoStr}); } } reader.close(); writer.close(); }