/**
 * Connect timeout in milliseconds. Default 120 secs.
 *
 * Falls back to "spark.network.timeout" when "spark.shuffle.io.connectionTimeout"
 * is not set explicitly.
 */
public int connectionTimeoutMs() {
  // Cluster-wide network timeout is the fallback; parse it to seconds first.
  long fallbackSecs = JavaUtils.timeStringAsSec(conf.get("spark.network.timeout", "120s"));
  // Re-append the unit so the fallback round-trips through the same parser.
  String raw = conf.get("spark.shuffle.io.connectionTimeout", fallbackSecs + "s");
  long timeoutMs = JavaUtils.timeStringAsSec(raw) * 1000;
  return (int) timeoutMs;
}
/**
 * Send buffer size (SO_SNDBUF), in bytes. Defaults to -1 when unset.
 */
public int sendBuf() {
  final int unset = -1;
  return conf.getInt("spark.shuffle.io.sendBuffer", unset);
}
/**
 * Receive buffer size (SO_RCVBUF), in bytes. Defaults to -1 when unset.
 *
 * <p>Note: the optimal size for receive buffer and send buffer should be
 * latency * network_bandwidth. Assuming latency = 1ms and
 * network_bandwidth = 10Gbps, the buffer size should be ~ 1.25MB.
 */
public int receiveBuf() {
  final int unset = -1;
  return conf.getInt("spark.shuffle.io.receiveBuffer", unset);
}
/**
 * Number of threads used in the client thread pool.
 * Default to 0, which is 2x#cores.
 */
public int clientThreads() {
  final int useDefaultPoolSize = 0;
  return conf.getInt("spark.shuffle.io.clientThreads", useDefaultPoolSize);
}
/**
 * Number of threads used in the server thread pool.
 * Default to 0, which is 2x#cores.
 */
public int serverThreads() {
  final int useDefaultPoolSize = 0;
  return conf.getInt("spark.shuffle.io.serverThreads", useDefaultPoolSize);
}
/**
 * Requested maximum length of the queue of incoming connections.
 * Default -1 for no backlog.
 */
public int backLog() {
  final int noBacklog = -1;
  return conf.getInt("spark.shuffle.io.backLog", noBacklog);
}
/**
 * Number of concurrent connections between two nodes for fetching data.
 * Defaults to a single connection per peer.
 */
public int numConnectionsPerPeer() {
  final int singleConnection = 1;
  return conf.getInt("spark.shuffle.io.numConnectionsPerPeer", singleConnection);
}
/**
 * Minimum size of a block that we should start using memory map rather than
 * reading in through normal IO operations. This prevents Spark from memory
 * mapping very small blocks. In general, memory mapping has high overhead
 * for blocks close to or below the page size of the OS.
 */
public int memoryMapBytes() {
  // Default threshold: 2 MiB.
  final int defaultThresholdBytes = 2 * 1024 * 1024;
  return conf.getInt("spark.storage.memoryMapThreshold", defaultThresholdBytes);
}
/**
 * If true, we will prefer allocating off-heap byte buffers within Netty.
 * Enabled by default.
 */
public boolean preferDirectBufs() {
  return conf.getBoolean("spark.shuffle.io.preferDirectBufs", true);
}
/**
 * IO mode: nio or epoll.
 *
 * <p>The configured value is normalized to upper case. Uses {@code Locale.ROOT}
 * so the mapping is locale-independent: with the default (locale-sensitive)
 * {@code toUpperCase()}, a Turkish default locale would turn "nio" into
 * "NİO" (dotted capital I) and break downstream comparisons.
 */
public String ioMode() {
  return conf.get("spark.shuffle.io.mode", "NIO").toUpperCase(java.util.Locale.ROOT);
}
/**
 * Whether the server should enforce encryption on SASL-authenticated
 * connections. Disabled by default.
 */
public boolean saslServerAlwaysEncrypt() {
  return conf.getBoolean("spark.network.sasl.serverAlwaysEncrypt", false);
}
/**
 * Maximum number of bytes to be encrypted at a time when SASL encryption
 * is enabled. Defaults to 64k; the checked cast fails fast if a value is
 * configured beyond the int range.
 */
public int maxSaslEncryptedBlockSize() {
  String raw = conf.get("spark.network.sasl.maxEncryptedBlockSize", "64k");
  long blockSizeBytes = JavaUtils.byteStringAsBytes(raw);
  return Ints.checkedCast(blockSizeBytes);
}
/**
 * Maximum number of retries when binding to a port before giving up.
 * Defaults to 16 attempts.
 */
public int portMaxRetries() {
  final int defaultRetries = 16;
  return conf.getInt("spark.port.maxRetries", defaultRetries);
}
/**
 * Whether to initialize shuffle FileDescriptor lazily or not. If true, file
 * descriptors are created only when data is going to be transferred. This can
 * reduce the number of open files. Enabled by default.
 */
public boolean lazyFileDescriptor() {
  return conf.getBoolean("spark.shuffle.io.lazyFD", true);
}
/**
 * Timeout for a single round trip of SASL token exchange, in milliseconds.
 * Default 30 secs.
 */
public int saslRTTimeoutMs() {
  // Multiply in long before narrowing: the original `(int) sec * 1000` cast the
  // seconds to int BEFORE scaling (the cast binds tighter than `*`), which is
  // also inconsistent with connectionTimeoutMs(). Identical result for all sane
  // configured values.
  long timeoutMs = JavaUtils.timeStringAsSec(conf.get("spark.shuffle.sasl.timeout", "30s")) * 1000;
  return (int) timeoutMs;
}
/**
 * Max number of times we will try IO exceptions (such as connection timeouts)
 * per request. If set to 0, we will not do any retries.
 */
public int maxIORetries() {
  final int defaultRetries = 3;
  return conf.getInt("spark.shuffle.io.maxRetries", defaultRetries);
}
/**
 * Time (in milliseconds) that we will wait in order to perform a retry after
 * an IOException. Only relevant if maxIORetries &gt; 0. Default 5 secs.
 */
public int ioRetryWaitTimeMs() {
  // Multiply in long before narrowing: the original `(int) sec * 1000` cast the
  // seconds to int BEFORE scaling (the cast binds tighter than `*`), which is
  // also inconsistent with connectionTimeoutMs(). Identical result for all sane
  // configured values.
  long waitMs = JavaUtils.timeStringAsSec(conf.get("spark.shuffle.io.retryWait", "5s")) * 1000;
  return (int) waitMs;
}