@Override public void run() { synchronized (m_jobLock) { m_started = true; m_jobLock.notifyAll(); } /*if (true) return;*/ try { if (m_pendingMutations != null) { for (Triple<RowKeyType, ColumnKeyType, ValueType> data : m_buffer) { HColumnImpl<ColumnKeyType, ValueType> col = new HColumnImpl<ColumnKeyType, ValueType>( data.getSecond(), data.getThird(), data.getTime(), m_columnKeySerializer, m_valueSerializer); // if a TTL is set apply it to the column. This will // cause it to be removed after this number of seconds if (data.getTtl() != 0) { col.setTtl(data.getTtl()); } m_pendingMutations.addInsertion(data.getFirst(), m_cfName, col); } m_pendingMutations.execute(); } m_pendingMutations = null; } catch (Exception e) { logger.error("Error sending data to Cassandra (" + m_cfName + ")", e); m_maxBufferSize = m_maxBufferSize * 3 / 4; logger.error( "Reducing write buffer size to " + m_maxBufferSize + ". You need to increase your cassandra capacity or change the kairosdb.datastore.cassandra.write_buffer_max_size property."); } // If the batch failed we will retry it without changing the buffer size. while (m_pendingMutations != null) { try { Thread.sleep(100); } catch (InterruptedException ignored) { } try { m_pendingMutations.execute(); m_pendingMutations = null; } catch (Exception e) { logger.error("Error resending data", e); } } }
/**
 * Appends a column with the given name and value to this mock slice.
 *
 * @param name  column name
 * @param value column value as raw bytes
 * @return this instance, to allow call chaining
 */
public ColumnSliceMockImpl add(String name, byte[] value)
{
	HColumnImpl<String, byte[]> column =
			new HColumnImpl<String, byte[]>(StringSerializer.get(), BytesArraySerializer.get());
	column.setValue(value);
	column.setName(name);
	colList.add(column);
	return this;
}