/**
 * Reads the next DISK_BUFFER tuples of the disk relation into bufferB,
 * timing the disk read when measurement is active.
 *
 * @throws java.io.IOException
 */
public void readDiskTuplesIntoBufferB() throws java.io.IOException {
    long startTime = 0, endTime = 0;
    int row = 0;
    try {
        startTime = System.nanoTime();
        rs = stmt.executeQuery("SELECT * FROM lookup_table_twomillion WHERE attr1 >= "
                + startRead + " AND attr1 < " + (startRead + DISK_BUFFER));
        endTime = System.nanoTime();
        if (measurementStart) {
            CIO[CIO_index++] = endTime - startTime; // cost of one disk-page read
        }
        while (rs.next()) {
            for (int col = 1; col <= 30; col++) {
                bufferB[row][col - 1] = rs.getInt(col);
            }
            row++;
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }
    startRead += DISK_BUFFER;
}
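/*
 * A minimal sketch of the same range scan using a PreparedStatement instead of
 * string concatenation, assuming the same table and column layout. The method
 * and parameter names here are illustrative and not part of the class above;
 * binding the bounds avoids rebuilding the SQL string on every page read.
 */
private static int fillBuffer(java.sql.Connection c, int[][] buffer, int start, int pageSize)
        throws java.sql.SQLException {
    String sql = "SELECT * FROM lookup_table_twomillion WHERE attr1 >= ? AND attr1 < ?";
    int row = 0;
    try (java.sql.PreparedStatement ps = c.prepareStatement(sql)) {
        ps.setInt(1, start);            // lower bound of the page
        ps.setInt(2, start + pageSize); // exclusive upper bound
        try (java.sql.ResultSet r = ps.executeQuery()) {
            while (r.next()) {
                for (int col = 1; col <= 30; col++) {
                    buffer[row][col - 1] = r.getInt(col);
                }
                row++;
            }
        }
    }
    return row; // number of tuples actually read
}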
/**
 * Removes the oldest stream tuples from the hash table, using the key
 * addresses dequeued from the queue, and counts them as unmatched.
 */
public void removeExpireTuples() {
    long startTime = 0, endTime = 0, CE_per_Iteration = 0;
    int[] w = (int[]) abq.poll(); // keys inserted STREAM_BUFFER tuples ago
    for (int i = 0; i < STREAM_BUFFER; i++) {
        startTime = System.nanoTime();
        if (mhm.containsKey(Integer.valueOf(get(w, i)))) {
            mhm.remove(Integer.valueOf(get(w, i)));
            unmatched++;
        }
        endTime = System.nanoTime();
        CE_per_Iteration += endTime - startTime;
    }
    CE[CE_index++] = CE_per_Iteration / DISK_BUFFER; // expiry cost amortized per disk tuple
}
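/*
 * A minimal sketch of the expiry pattern above, using standard collections
 * (ArrayDeque and HashMap) in place of the class fields abq and mhm. The
 * names here are illustrative: each int[] partition holds the keys of one
 * stream window, and the oldest partition's keys are evicted together.
 */
private static int expireOldest(java.util.Deque<int[]> partitions,
                                java.util.Map<Integer, Object> table) {
    int[] oldest = partitions.poll(); // oldest window's key addresses
    int removed = 0;
    if (oldest != null) {
        for (int key : oldest) {
            if (table.remove(key) != null) {
                removed++; // tuple left the join window unmatched
            }
        }
    }
    return removed;
}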
/**
 * Generates the input stream and loads it into the stream buffer. Before
 * loading the stream tuples into the hash table, it checks the status of the
 * queue: if the queue is already full, the oldest tuples are removed from the
 * hash table along with their pointer addresses in the queue. Finally, it
 * loads the newly scanned tuples into the hash table and records their
 * pointer addresses in the queue.
 */
public void inputStream() {
    long startTime = 0, endTime = 0, CA_per_Iteration = 0;
    int[] w = new int[STREAM_BUFFER];
    if (round2) {
        removeExpireTuples();
    }
    // busy-wait until the producer thread has filled the stream buffer
    while (streamBuffer.size() < STREAM_BUFFER);
    for (int i = 0; i < STREAM_BUFFER; i++) {
        startTime = System.nanoTime();
        set(w, streamBuffer.peek().attr1, i);
        mhm.put(Integer.valueOf(streamBuffer.peek().attr1), streamBuffer.poll());
        endTime = System.nanoTime();
        if (measurementStart) {
            CA_per_Iteration += endTime - startTime;
        }
    }
    System.out.println("Available stream size after: " + streamBuffer.size());
    abq.offer(w);
    if (measurementStart) {
        CA[CA_index++] = CA_per_Iteration / STREAM_BUFFER; // average insert cost
    }
}
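/*
 * A minimal sketch replacing the busy-wait above with a BlockingQueue,
 * assuming Integer stands in for a stream tuple keyed by attr1. The names
 * are illustrative, not part of the class above; take() blocks until the
 * producer thread has supplied a tuple, so no spin loop is needed.
 */
private static int[] drainWindow(java.util.concurrent.BlockingQueue<Integer> source,
                                 java.util.Map<Integer, Integer> table,
                                 int windowSize) throws InterruptedException {
    int[] keys = new int[windowSize];
    for (int i = 0; i < windowSize; i++) {
        Integer tuple = source.take(); // blocks instead of spinning
        keys[i] = tuple;
        table.put(tuple, tuple);       // insert into the join hash table
    }
    return keys; // enqueued for later expiry, as abq.offer(w) does above
}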
/**
 * Sequentially reads a fixed number of tuples from disk, one DISK_BUFFER page
 * at a time, and probes them against the hash table.
 *
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public void lookupMasterData() throws java.io.IOException, InterruptedException {
    long startTime = 0, endTime = 0, CH_per_Iteration = 0;
    int matched = 0, diskInputs = 0;
    conn = connectDB();
    mhm.clear();
    int index = 0;
    try {
        stmt = conn.createStatement();
        stmt.setFetchSize(DISK_BUFFER);
    } catch (SQLException exp) {
        exp.printStackTrace();
    }
    stream.start();
    // Thread.sleep(LATE_START);
    for (int round = 1; round <= 2; round++) {
        startRead = 1;
        if (round == 2) {
            System.out.println("Second round started");
            round2 = true;
        }
        for (int tuple = 0; tuple < R_SIZE; tuple++) {
            measurementStart = round2 && tuple >= MEASUREMENT_START && tuple < MEASUREMENT_STOP;
            if (tuple % DISK_BUFFER == 0) {
                readDiskTuplesIntoBufferB();
                inputStream();
                diskInputs++;
                if (measurementStart && matched != 0) {
                    CH[CH_index++] = CH_per_Iteration / matched; // average probe cost
                    CH_per_Iteration = 0;
                    matched = 0;
                }
                index = 0;
            }
            if (mhm.containsKey(bufferB[index][0])) {
                startTime = System.nanoTime();
                list = (ArrayList<MeshJoinObject>) mhm.get(bufferB[index][0]);
                endTime = System.nanoTime();
                if (measurementStart) {
                    CH_per_Iteration += endTime - startTime;
                    matched++;
                }
            }
            index++;
        }
    }
    stream.stop(); // deprecated API; see the interrupt-based sketch below
    closeConnection(conn);
    System.out.println("Hash tuples: " + mhm.size());
    System.out.println("Unmatched tuples: " + unmatched);
    System.out.println("Iterations required to bring R into disk_buffer = " + diskInputs);
    System.out.println("Stream backlog: " + streamBuffer.size());
}
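/*
 * Thread.stop() is deprecated (and throws on recent JDKs). A minimal sketch
 * of cooperative shutdown via interrupt(), assuming the producer pushes
 * tuples into a BlockingQueue; the Runnable here is illustrative and not the
 * class's actual stream thread.
 */
private static Thread startInterruptibleProducer(java.util.concurrent.BlockingQueue<Integer> out) {
    Thread t = new Thread(() -> {
        int next = 0;
        while (!Thread.currentThread().isInterrupted()) {
            try {
                out.put(next++); // blocks when the buffer is full
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the flag and exit
                return;
            }
        }
    });
    t.start();
    return t; // caller ends the stream with t.interrupt() instead of stop()
}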