/** Tests that setting the {@link Constants#USER_FILE_BUFFER_BYTES} runs correctly. */
@Test
public void variableUserFileBufferBytesNormalCheckTest() {
  // Seed a configuration whose buffer size is the largest value expressible in bytes.
  Properties props = new Properties();
  props.put(Constants.USER_FILE_BUFFER_BYTES, Integer.MAX_VALUE + "B");
  mCustomPropsConfiguration = new Configuration(props);
  Assert.assertEquals(Integer.MAX_VALUE,
      (int) mCustomPropsConfiguration.getBytes(Constants.USER_FILE_BUFFER_BYTES));

  // Overriding the value with a size suffix ("1GB") must also parse correctly.
  mCustomPropsConfiguration.set(Constants.USER_FILE_BUFFER_BYTES, "1GB");
  Assert.assertEquals(1073741824,
      (int) mCustomPropsConfiguration.getBytes(Constants.USER_FILE_BUFFER_BYTES));
}
/** Tests the default properties for the user. */
@Test
public void userDefaultTest() {
  // Failed space requests are retried a fixed number of times by default.
  int failedSpaceRequestLimits =
      sDefaultConfiguration.getInt(Constants.USER_FAILED_SPACE_REQUEST_LIMITS);
  Assert.assertEquals(3, failedSpaceRequestLimits);

  // Heartbeats default to one per second.
  int heartbeatIntervalMs = sDefaultConfiguration.getInt(Constants.USER_HEARTBEAT_INTERVAL_MS);
  Assert.assertEquals(Constants.SECOND_MS, heartbeatIntervalMs);

  // File buffer defaults to 1 MB; remote read buffer to 8 MB.
  long fileBufferBytes = sDefaultConfiguration.getBytes(Constants.USER_FILE_BUFFER_BYTES);
  Assert.assertEquals(Constants.MB, fileBufferBytes);
  long remoteReadBufferBytes =
      sDefaultConfiguration.getBytes(Constants.USER_BLOCK_REMOTE_READ_BUFFER_SIZE_BYTES);
  Assert.assertEquals(8 * Constants.MB, remoteReadBufferBytes);
}
/**
 * Creates a new instance of {@link FileSystemWorker}.
 *
 * @param blockWorker the block worker handle
 * @param workerId a reference to the id of this worker
 * @throws IOException if an I/O error occurs
 */
public DefaultFileSystemWorker(BlockWorker blockWorker, AtomicReference<Long> workerId)
    throws IOException {
  // Fixed pool of 3 daemon threads for this worker's heartbeat executors.
  super(Executors.newFixedThreadPool(
      3, ThreadFactoryUtils.build("file-system-worker-heartbeat-%d", true)));
  mWorkerId = workerId;
  mSessions = new Sessions();
  // Resolve the under file system from the configured UFS address.
  UnderFileSystem ufs = UnderFileSystem.get(Configuration.get(PropertyKey.UNDERFS_ADDRESS));
  // Persistence to the UFS is throttled by the configured rate limit (bytes/sec).
  mFileDataManager = new FileDataManager(Preconditions.checkNotNull(blockWorker), ufs,
      RateLimiter.create(Configuration.getBytes(PropertyKey.WORKER_FILE_PERSIST_RATE_LIMIT)));
  mUnderFileSystemManager = new UnderFileSystemManager();

  // Setup AbstractMasterClient
  mFileSystemMasterWorkerClient =
      new FileSystemMasterClient(NetworkAddressUtils.getConnectAddress(ServiceType.MASTER_RPC));

  // Setup session cleaner
  mSessionCleaner = new SessionCleaner(new SessionCleanupCallback() {
    /** Cleans up after sessions, to prevent zombie sessions holding ufs resources. */
    @Override
    public void cleanupSessions() {
      // Remove each timed-out session and release any UFS resources it still holds.
      for (long session : mSessions.getTimedOutSessions()) {
        mSessions.removeSession(session);
        mUnderFileSystemManager.cleanupSession(session);
      }
    }
  });

  mServiceHandler = new FileSystemWorkerClientServiceHandler(this);
}
/**
 * Constructs a new stream for reading a file from HDFS.
 *
 * @param uri the Alluxio file URI
 * @param conf Hadoop configuration
 * @param bufferSize the buffer size
 * @param stats filesystem statistics
 * @throws IOException if the underlying file does not exist or its stream cannot be created
 */
public HdfsFileInputStream(AlluxioURI uri, org.apache.hadoop.conf.Configuration conf,
    int bufferSize, org.apache.hadoop.fs.FileSystem.Statistics stats) throws IOException {
  LOG.debug("HdfsFileInputStream({}, {}, {}, {}, {})", uri, conf, bufferSize, stats);
  long bufferBytes = Configuration.getBytes(Constants.USER_FILE_BUFFER_BYTES);
  // FIX: allocate exactly the user-configured buffer size. The previous code multiplied the
  // checked cast by 4, allocating 4x USER_FILE_BUFFER_BYTES and risking silent int overflow
  // (the multiply happened after the overflow check in Ints.checkedCast).
  mBuffer = new byte[Ints.checkedCast(bufferBytes)];
  mCurrentPosition = 0;
  FileSystem fs = FileSystem.Factory.get();
  mHadoopConf = conf;
  mHadoopBufferSize = bufferSize;
  mStatistics = stats;
  try {
    mFileInfo = fs.getStatus(uri);
    mHdfsPath = new Path(mFileInfo.getUfsPath());
    mAlluxioFileInputStream = fs.openFile(uri, OpenFileOptions.defaults());
  } catch (FileDoesNotExistException e) {
    // NOTE(review): if getStatus throws, mHdfsPath is still null when this message is built —
    // confirm whether the message should use the URI only.
    throw new FileNotFoundException(
        ExceptionMessage.HDFS_FILE_NOT_FOUND.getMessage(mHdfsPath, uri));
  } catch (AlluxioException e) {
    throw new IOException(e);
  }
}
/** Tests the default properties for the worker. */
@Test
public void workerDefaultTest() {
  // Data folder and bind hosts.
  String stringVal = sDefaultConfiguration.get(Constants.WORKER_DATA_FOLDER);
  Assert.assertNotNull(stringVal);
  Assert.assertEquals("/alluxioworker/", stringVal);

  stringVal = sDefaultConfiguration.get(Constants.WORKER_BIND_HOST);
  Assert.assertNotNull(stringVal);
  Assert.assertEquals(NetworkAddressUtils.WILDCARD_ADDRESS, stringVal);

  // Default ports for RPC, data, and web services.
  int intVal = sDefaultConfiguration.getInt(Constants.WORKER_RPC_PORT);
  Assert.assertEquals(29998, intVal);

  stringVal = sDefaultConfiguration.get(Constants.WORKER_DATA_BIND_HOST);
  Assert.assertNotNull(stringVal);
  Assert.assertEquals(NetworkAddressUtils.WILDCARD_ADDRESS, stringVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_DATA_PORT);
  Assert.assertEquals(29999, intVal);

  stringVal = sDefaultConfiguration.get(Constants.WORKER_WEB_BIND_HOST);
  Assert.assertNotNull(stringVal);
  Assert.assertEquals(NetworkAddressUtils.WILDCARD_ADDRESS, stringVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_WEB_PORT);
  Assert.assertEquals(30000, intVal);

  // Heartbeat / timeout defaults.
  intVal = sDefaultConfiguration.getInt(Constants.WORKER_BLOCK_HEARTBEAT_TIMEOUT_MS);
  Assert.assertEquals(10 * Constants.SECOND_MS, intVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_BLOCK_HEARTBEAT_INTERVAL_MS);
  Assert.assertEquals(Constants.SECOND_MS, intVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_WORKER_BLOCK_THREADS_MIN);
  Assert.assertEquals(1, intVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_SESSION_TIMEOUT_MS);
  Assert.assertEquals(10 * Constants.SECOND_MS, intVal);

  // Netty threading and memory size defaults.
  intVal = sDefaultConfiguration.getInt(Constants.WORKER_NETWORK_NETTY_BOSS_THREADS);
  Assert.assertEquals(1, intVal);

  intVal = sDefaultConfiguration.getInt(Constants.WORKER_NETWORK_NETTY_WORKER_THREADS);
  Assert.assertEquals(0, intVal);

  long longVal = sDefaultConfiguration.getBytes(Constants.WORKER_MEMORY_SIZE);
  Assert.assertEquals(128 * Constants.MB, longVal);
}
/** Tests the default common properties. */
@Test
public void commonDefaultTest() {
  // Home directory and UFS address (derived from home).
  String alluxioHome = sDefaultConfiguration.get(Constants.HOME);
  Assert.assertNotNull(alluxioHome);
  Assert.assertEquals("/mnt/alluxio_default_home", alluxioHome);

  String ufsAddress = sDefaultConfiguration.get(Constants.UNDERFS_ADDRESS);
  Assert.assertNotNull(ufsAddress);
  Assert.assertEquals(alluxioHome + "/underFSStorage", ufsAddress);

  String propVal = sDefaultConfiguration.get(Constants.WEB_RESOURCES);
  Assert.assertNotNull(propVal);
  Assert.assertEquals(alluxioHome + "/core/server/src/main/webapp", propVal);

  // UFS implementation classes.
  propVal = sDefaultConfiguration.get(Constants.UNDERFS_HDFS_IMPL);
  Assert.assertNotNull(propVal);
  Assert.assertEquals("org.apache.hadoop.hdfs.DistributedFileSystem", propVal);

  propVal = sDefaultConfiguration.get(Constants.UNDERFS_HDFS_PREFIXS);
  Assert.assertNotNull(propVal);
  Assert.assertEquals(DEFAULT_HADOOP_UFS_PREFIX, propVal);

  propVal = sDefaultConfiguration.get(Constants.UNDERFS_GLUSTERFS_IMPL);
  Assert.assertNotNull(propVal);
  Assert.assertEquals("org.apache.hadoop.fs.glusterfs.GlusterFileSystem", propVal);

  // Feature flags default to off.
  boolean boolVal = sDefaultConfiguration.getBoolean(Constants.ZOOKEEPER_ENABLED);
  Assert.assertFalse(boolVal);

  boolVal = sDefaultConfiguration.getBoolean(Constants.IN_TEST_MODE);
  Assert.assertFalse(boolVal);

  // Numeric defaults.
  int resolutionTimeout =
      sDefaultConfiguration.getInt(Constants.NETWORK_HOST_RESOLUTION_TIMEOUT_MS);
  Assert.assertEquals(Constants.DEFAULT_HOST_RESOLUTION_TIMEOUT_MS, resolutionTimeout);

  long remoteReadBuffer =
      sDefaultConfiguration.getBytes(Constants.USER_BLOCK_REMOTE_READ_BUFFER_SIZE_BYTES);
  Assert.assertEquals(Constants.MB * 8, remoteReadBuffer);

  int maxTry = sDefaultConfiguration.getInt(Constants.ZOOKEEPER_LEADER_INQUIRY_RETRY_COUNT);
  Assert.assertEquals(10, maxTry);
}
/**
 * Gets the block size in bytes. There is no concept of a block in Swift and the maximum size of
 * one file is 4 GB. This method defaults to the default user block size in Alluxio.
 *
 * @param path the path to the object
 * @return the default Alluxio user block size
 * @throws IOException this implementation will not throw this exception, but subclasses may
 */
@Override
public long getBlockSizeByte(String path) throws IOException {
  // Swift has no native block concept, so report the configured Alluxio default.
  long defaultBlockSize = Configuration.getBytes(Constants.USER_BLOCK_SIZE_BYTES_DEFAULT);
  return defaultBlockSize;
}
/** @return the configured default Alluxio user block size in bytes */
@Override
public long getDefaultBlockSize() {
  long blockSizeBytes = Configuration.getBytes(PropertyKey.USER_BLOCK_SIZE_BYTES_DEFAULT);
  return blockSizeBytes;
}
/**
 * Handles resource offers from the Mesos master. For each offer: launches the Alluxio master
 * if it has not been launched yet and the offer has enough cpu/mem (and the required hostname
 * and ports); otherwise launches a worker on hosts that do not already run one; otherwise
 * declines the offer.
 *
 * @param driver the scheduler driver used to accept or decline offers
 * @param offers the list of resource offers received from Mesos
 */
@Override
public void resourceOffers(SchedulerDriver driver, List<Protos.Offer> offers) {
  // Required resources; memory is converted from bytes to MB (Mesos scalar unit).
  long masterCpu = Configuration.getInt(PropertyKey.INTEGRATION_MASTER_RESOURCE_CPU);
  long masterMem =
      Configuration.getBytes(PropertyKey.INTEGRATION_MASTER_RESOURCE_MEM) / Constants.MB;
  long workerCpu = Configuration.getInt(PropertyKey.INTEGRATION_WORKER_RESOURCE_CPU);
  long workerMem =
      Configuration.getBytes(PropertyKey.INTEGRATION_WORKER_RESOURCE_MEM) / Constants.MB;
  LOG.info(
      "Master launched {}, master count {}, "
          + "requested master cpu {} mem {} MB and required master hostname {}",
      mMasterLaunched, mMasterCount, masterCpu, masterMem, mRequiredMasterHostname);
  for (Protos.Offer offer : offers) {
    Protos.Offer.Operation.Launch.Builder launch = Protos.Offer.Operation.Launch.newBuilder();
    double offerCpu = 0;
    double offerMem = 0;
    // Aggregate the cpu and memory scalars advertised in this offer.
    for (Protos.Resource resource : offer.getResourcesList()) {
      if (resource.getName().equals(Constants.MESOS_RESOURCE_CPUS)) {
        offerCpu += resource.getScalar().getValue();
      } else if (resource.getName().equals(Constants.MESOS_RESOURCE_MEM)) {
        offerMem += resource.getScalar().getValue();
      } else {
        // Other resources are currently ignored.
      }
    }
    LOG.info(
        "Received offer {} on host {} with cpus {} and mem {} MB and hasMasterPorts {}",
        offer.getId().getValue(), offer.getHostname(), offerCpu, offerMem,
        OfferUtils.hasAvailableMasterPorts(offer));
    Protos.ExecutorInfo.Builder executorBuilder = Protos.ExecutorInfo.newBuilder();
    List<Protos.Resource> resources;
    if (!mMasterLaunched && offerCpu >= masterCpu && offerMem >= masterMem
        && mMasterCount
            < Configuration.getInt(PropertyKey.INTEGRATION_MESOS_ALLUXIO_MASTER_NODE_COUNT)
        && OfferUtils.hasAvailableMasterPorts(offer)
        && (mRequiredMasterHostname == null
            || mRequiredMasterHostname.equals(offer.getHostname()))) {
      // Offer is suitable for the Alluxio master.
      LOG.debug("Creating Alluxio Master executor");
      executorBuilder
          .setName("Alluxio Master Executor")
          .setSource("master")
          .setExecutorId(Protos.ExecutorID.newBuilder().setValue("master"))
          .addAllResources(getExecutorResources())
          .setCommand(
              Protos.CommandInfo.newBuilder()
                  .setValue(createStartAlluxioCommand("alluxio-master-mesos.sh"))
                  .addAllUris(getExecutorDependencyURIList())
                  .setEnvironment(
                      Protos.Environment.newBuilder()
                          .addVariables(
                              Protos.Environment.Variable.newBuilder()
                                  .setName("ALLUXIO_UNDERFS_ADDRESS")
                                  .setValue(Configuration.get(PropertyKey.UNDERFS_ADDRESS))
                                  .build())
                          .build()));
      // pre-build resource list here, then use it to build Protos.Task later.
      resources = getMasterRequiredResources(masterCpu, masterMem);
      mMasterHostname = offer.getHostname();
      mTaskName = Configuration.get(PropertyKey.INTEGRATION_MESOS_ALLUXIO_MASTER_NAME);
      mMasterCount++;
      mMasterTaskId = mLaunchedTasks;
    } else if (mMasterLaunched && !mWorkers.contains(offer.getHostname())
        && offerCpu >= workerCpu && offerMem >= workerMem
        && OfferUtils.hasAvailableWorkerPorts(offer)) {
      // Offer is suitable for a new worker on a host that does not already run one.
      LOG.debug("Creating Alluxio Worker executor");
      final String memSize = FormatUtils.getSizeFromBytes((long) workerMem * Constants.MB);
      executorBuilder
          .setName("Alluxio Worker Executor")
          .setSource("worker")
          .setExecutorId(Protos.ExecutorID.newBuilder().setValue("worker"))
          .addAllResources(getExecutorResources())
          .setCommand(
              Protos.CommandInfo.newBuilder()
                  .setValue(createStartAlluxioCommand("alluxio-worker-mesos.sh"))
                  .addAllUris(getExecutorDependencyURIList())
                  .setEnvironment(
                      Protos.Environment.newBuilder()
                          .addVariables(
                              Protos.Environment.Variable.newBuilder()
                                  .setName("ALLUXIO_MASTER_HOSTNAME")
                                  .setValue(mMasterHostname)
                                  .build())
                          .addVariables(
                              Protos.Environment.Variable.newBuilder()
                                  .setName("ALLUXIO_WORKER_MEMORY_SIZE")
                                  .setValue(memSize)
                                  .build())
                          .addVariables(
                              Protos.Environment.Variable.newBuilder()
                                  .setName("ALLUXIO_UNDERFS_ADDRESS")
                                  .setValue(Configuration.get(PropertyKey.UNDERFS_ADDRESS))
                                  .build())
                          .build()));
      // pre-build resource list here, then use it to build Protos.Task later.
      resources = getWorkerRequiredResources(workerCpu, workerMem);
      mWorkers.add(offer.getHostname());
      mTaskName = Configuration.get(PropertyKey.INTEGRATION_MESOS_ALLUXIO_WORKER_NAME);
    } else {
      // The resource offer cannot be used to start either master or a worker.
      LOG.info("Declining offer {}", offer.getId().getValue());
      driver.declineOffer(offer.getId());
      continue;
    }
    // Build and launch the task using the executor and resources selected above.
    Protos.TaskID taskId =
        Protos.TaskID.newBuilder().setValue(String.valueOf(mLaunchedTasks)).build();
    LOG.info("Launching task {} using offer {}", taskId.getValue(), offer.getId().getValue());
    Protos.TaskInfo task =
        Protos.TaskInfo.newBuilder()
            .setName(mTaskName)
            .setTaskId(taskId)
            .setSlaveId(offer.getSlaveId())
            .addAllResources(resources)
            .setExecutor(executorBuilder)
            .build();
    launch.addTaskInfos(Protos.TaskInfo.newBuilder(task));
    mLaunchedTasks++;

    // NOTE: We use the new API `acceptOffers` here to launch tasks.
    // The 'launchTasks' API will be deprecated.
    List<Protos.OfferID> offerIds = new ArrayList<Protos.OfferID>();
    offerIds.add(offer.getId());
    List<Protos.Offer.Operation> operations = new ArrayList<Protos.Offer.Operation>();
    Protos.Offer.Operation operation =
        Protos.Offer.Operation.newBuilder()
            .setType(Protos.Offer.Operation.Type.LAUNCH)
            .setLaunch(launch)
            .build();
    operations.add(operation);
    Protos.Filters filters = Protos.Filters.newBuilder().setRefuseSeconds(1).build();
    driver.acceptOffers(offerIds, operations, filters);
  }
}
/**
 * Initializes the internal buffer based on the user's specified size. Any reads above half this
 * size will not be buffered.
 *
 * @return a heap buffer of user configured size
 */
private ByteBuffer allocateBuffer() {
  long configuredBytes =
      Configuration.getBytes(Constants.USER_UFS_DELEGATION_READ_BUFFER_SIZE_BYTES);
  return ByteBuffer.allocate((int) configuredBytes);
}