Example #1
 @Override
 public void onReplicationEvent(WanReplicationEvent replicationEvent) {
   Object eventObject = replicationEvent.getEventObject();
   if (eventObject instanceof MapReplicationUpdate) {
     // WAN update event: merge the incoming entry into the local map
     // using the merge policy carried with the event.
     MapReplicationUpdate replicationUpdate = (MapReplicationUpdate) eventObject;
     EntryView entryView = replicationUpdate.getEntryView();
     MapMergePolicy mergePolicy = replicationUpdate.getMergePolicy();
     String mapName = replicationUpdate.getMapName();
     MapContainer mapContainer = getMapContainer(mapName);
     MergeOperation operation =
         new MergeOperation(
             mapName,
             toData(entryView.getKey(), mapContainer.getPartitioningStrategy()),
             entryView,
             mergePolicy);
     try {
       int partitionId = nodeEngine.getPartitionService().getPartitionId(entryView.getKey());
       Future f =
           nodeEngine
               .getOperationService()
               .invokeOnPartition(SERVICE_NAME, operation, partitionId);
       f.get();
     } catch (Throwable t) {
       throw ExceptionUtil.rethrow(t);
     }
   } else if (eventObject instanceof MapReplicationRemove) {
     // WAN remove event: delete the entry on the partition that owns the key.
     MapReplicationRemove replicationRemove = (MapReplicationRemove) eventObject;
     WanOriginatedDeleteOperation operation =
         new WanOriginatedDeleteOperation(
             replicationRemove.getMapName(), replicationRemove.getKey());
     try {
       int partitionId =
           nodeEngine.getPartitionService().getPartitionId(replicationRemove.getKey());
       Future f =
           nodeEngine
               .getOperationService()
               .invokeOnPartition(SERVICE_NAME, operation, partitionId);
       f.get();
     } catch (Throwable t) {
       throw ExceptionUtil.rethrow(t);
     }
   }
 }
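
A minimal configuration sketch for context, assuming Hazelcast's public 3.x API: a map is tied to a named wan-replication scheme through WanReplicationRef, which also names the merge policy that presumably arrives with each MapReplicationUpdate handled above. The scheme name "london-scheme" and the map name "ordersMap" are placeholders, and the referenced scheme is assumed to be defined elsewhere in the configuration.

import com.hazelcast.config.Config;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.WanReplicationRef;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class WanMapConfigSketch {
  public static void main(String[] args) {
    Config config = new Config();

    // "london-scheme" is a placeholder; a wan-replication scheme with this name
    // is assumed to be defined elsewhere in the configuration.
    WanReplicationRef wanRef = new WanReplicationRef();
    wanRef.setName("london-scheme");
    // merge policy resolved on the receiving member (see getMergePolicy in Example #2)
    wanRef.setMergePolicy("com.hazelcast.map.merge.PassThroughMergePolicy");

    MapConfig mapConfig = config.getMapConfig("ordersMap"); // placeholder map name
    mapConfig.setWanReplicationRef(wanRef);

    HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
  }
}
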
Example #2
  public MapMergePolicy getMergePolicy(String mergePolicyName) {
    MapMergePolicy mergePolicy = mergePolicyMap.get(mergePolicyName);
    if (mergePolicy == null && mergePolicyName != null) {
      try {
        // the configured name may be a custom merge-policy class name rather than
        // a registered policy name, so try to instantiate it
        mergePolicy =
            ClassLoaderUtil.newInstance(nodeEngine.getConfigClassLoader(), mergePolicyName);
        mergePolicyMap.put(mergePolicyName, mergePolicy);
      } catch (Exception e) {
        logger.severe(e);
        throw ExceptionUtil.rethrow(e);
      }
    }
    // fall back to the default merge policy
    if (mergePolicy == null) {
      return mergePolicyMap.get(MapConfig.DEFAULT_MAP_MERGE_POLICY);
    }
    return mergePolicy;
  }
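
As a usage sketch: the merge-policy name that reaches this lookup can be set per map through MapConfig. A built-in policy is referenced by its class name, while an unknown name is treated as a fully qualified custom class and falls through to the ClassLoaderUtil.newInstance branch above; com.example.MyMergePolicy below is hypothetical and assumed to be on the member's classpath.

import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.map.merge.HigherHitsMapMergePolicy;

public class MergePolicyConfigSketch {
  public static void main(String[] args) {
    Config config = new Config();

    // a built-in policy, referenced by class name ("sessions" is a placeholder map name)
    config.getMapConfig("sessions")
          .setMergePolicy(HigherHitsMapMergePolicy.class.getName());

    // a custom policy class (hypothetical); resolution falls through to
    // ClassLoaderUtil.newInstance in getMergePolicy above
    config.getMapConfig("orders")
          .setMergePolicy("com.example.MyMergePolicy");

    HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
  }
}
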
Example #3
 public Node(HazelcastInstanceImpl hazelcastInstance, Config config, NodeContext nodeContext) {
   this.hazelcastInstance = hazelcastInstance;
   this.threadGroup = hazelcastInstance.threadGroup;
   this.config = config;
   configClassLoader = config.getClassLoader();
   this.groupProperties = new GroupProperties(config);
   SerializationService ss;
   try {
     ss =
         new SerializationServiceBuilder()
             .setClassLoader(configClassLoader)
             .setConfig(config.getSerializationConfig())
             .setManagedContext(hazelcastInstance.managedContext)
             .setHazelcastInstance(hazelcastInstance)
             .build();
   } catch (Exception e) {
     throw ExceptionUtil.rethrow(e);
   }
   serializationService = (SerializationServiceImpl) ss;
   systemLogService = new SystemLogService(groupProperties.SYSTEM_LOG_ENABLED.getBoolean());
   final AddressPicker addressPicker = nodeContext.createAddressPicker(this);
   try {
     addressPicker.pickAddress();
   } catch (Throwable e) {
     throw ExceptionUtil.rethrow(e);
   }
   final ServerSocketChannel serverSocketChannel = addressPicker.getServerSocketChannel();
   address = addressPicker.getPublicAddress();
   localMember = new MemberImpl(address, true, UuidUtil.createMemberUuid(address));
   String loggingType = groupProperties.LOGGING_TYPE.getString();
   loggingService =
       new LoggingServiceImpl(
           systemLogService, config.getGroupConfig().getName(), loggingType, localMember);
   logger = loggingService.getLogger(Node.class.getName());
   initializer = NodeInitializerFactory.create(configClassLoader);
   try {
     initializer.beforeInitialize(this);
   } catch (Throwable e) {
     try {
       serverSocketChannel.close();
     } catch (Throwable ignored) {
     }
     throw ExceptionUtil.rethrow(e);
   }
   securityContext =
       config.getSecurityConfig().isEnabled() ? initializer.getSecurityContext() : null;
   nodeEngine = new NodeEngineImpl(this);
   clientEngine = new ClientEngineImpl(this);
   connectionManager = nodeContext.createConnectionManager(this, serverSocketChannel);
   partitionService = new PartitionServiceImpl(this);
   clusterService = new ClusterServiceImpl(this);
   textCommandService = new TextCommandServiceImpl(this);
   initializer.printNodeInfo(this);
   buildNumber = initializer.getBuildNumber();
   VersionCheck.check(this, initializer.getBuild(), initializer.getVersion());
   JoinConfig join = config.getNetworkConfig().getJoin();
   MulticastService mcService = null;
   try {
     // when multicast join is enabled, set up the multicast socket used for member discovery
     if (join.getMulticastConfig().isEnabled()) {
       MulticastConfig multicastConfig = join.getMulticastConfig();
       MulticastSocket multicastSocket = new MulticastSocket(null);
       multicastSocket.setReuseAddress(true);
       // bind to receive interface
       multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
       multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
       try {
         // set the send interface
         final Address bindAddress = addressPicker.getBindAddress();
         // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
         // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
         if (!bindAddress.getInetAddress().isLoopbackAddress()) {
           multicastSocket.setInterface(bindAddress.getInetAddress());
         }
       } catch (Exception e) {
         logger.warning(e);
       }
       multicastSocket.setReceiveBufferSize(64 * 1024);
       multicastSocket.setSendBufferSize(64 * 1024);
       // the multicast group can be overridden via the hazelcast.multicast.group system property
       String multicastGroup = System.getProperty("hazelcast.multicast.group");
       if (multicastGroup == null) {
         multicastGroup = multicastConfig.getMulticastGroup();
       }
       multicastConfig.setMulticastGroup(multicastGroup);
       multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
       multicastSocket.setSoTimeout(1000);
       mcService = new MulticastService(this, multicastSocket);
       mcService.addMulticastListener(new NodeMulticastListener(this));
     }
   } catch (Exception e) {
     logger.severe(e);
   }
   this.multicastService = mcService;
   initializeListeners(config);
   joiner = nodeContext.createJoiner(this);
 }
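
The multicast branch above takes its group, port, and TTL from the join configuration; a minimal sketch of setting those values through the public Config API (the group and port below are the usual defaults, used here as placeholders). Per the constructor, the group can also be overridden with the hazelcast.multicast.group system property.

import com.hazelcast.config.Config;
import com.hazelcast.config.JoinConfig;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;

public class MulticastJoinSketch {
  public static void main(String[] args) {
    Config config = new Config();
    JoinConfig join = config.getNetworkConfig().getJoin();

    join.getTcpIpConfig().setEnabled(false);
    join.getMulticastConfig()
        .setEnabled(true)
        .setMulticastGroup("224.2.2.3") // placeholder group
        .setMulticastPort(54327)        // placeholder port
        .setMulticastTimeToLive(32);

    // equivalent override per the constructor above:
    // -Dhazelcast.multicast.group=224.2.2.3

    HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
  }
}
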
Example #4
  public LocalMapStatsImpl createLocalMapStats(String mapName) {
    MapContainer mapContainer = getMapContainer(mapName);
    LocalMapStatsImpl localMapStats = getLocalMapStatsImpl(mapName);
    if (!mapContainer.getMapConfig().isStatisticsEnabled()) {
      return localMapStats;
    }

    long ownedEntryCount = 0;
    long backupEntryCount = 0;
    long dirtyCount = 0;
    long ownedEntryMemoryCost = 0;
    long backupEntryMemoryCost = 0;
    long hits = 0;
    long lockedEntryCount = 0;
    long heapCost = 0;

    int backupCount = mapContainer.getTotalBackupCount();
    ClusterService clusterService = nodeEngine.getClusterService();
    final InternalPartitionService partitionService = nodeEngine.getPartitionService();

    Address thisAddress = clusterService.getThisAddress();
    for (int partitionId = 0; partitionId < partitionService.getPartitionCount(); partitionId++) {
      InternalPartition partition = partitionService.getPartition(partitionId);
      Address owner = partition.getOwner();
      if (owner == null) {
        // no owner is assigned to this partition yet, so nothing is known about its data
        continue;
      }
      if (owner.equals(thisAddress)) {
        PartitionContainer partitionContainer = getPartitionContainer(partitionId);
        RecordStore recordStore = partitionContainer.getExistingRecordStore(mapName);

        // use 'getExistingRecordStore' instead of 'getRecordStore' so that gathering
        // statistics does not force the record store (and its map store) to load.
        if (recordStore != null) {
          heapCost += recordStore.getHeapCost();
          Map<Data, Record> records = recordStore.getReadonlyRecordMap();
          for (Record record : records.values()) {
            RecordStatistics stats = record.getStatistics();
            // aggregate owned-entry statistics for this record
            ownedEntryCount++;
            ownedEntryMemoryCost += record.getCost();
            localMapStats.setLastAccessTime(stats.getLastAccessTime());
            localMapStats.setLastUpdateTime(stats.getLastUpdateTime());
            hits += stats.getHits();
            if (recordStore.isLocked(record.getKey())) {
              lockedEntryCount++;
            }
          }
        }
      } else {
        for (int replica = 1; replica <= backupCount; replica++) {
          Address replicaAddress = partition.getReplicaAddress(replica);
          int tryCount = 30;
          // wait if the partition table is not updated yet
          while (replicaAddress == null
              && clusterService.getSize() > backupCount
              && tryCount-- > 0) {
            try {
              Thread.sleep(100);
            } catch (InterruptedException e) {
              throw ExceptionUtil.rethrow(e);
            }
            replicaAddress = partition.getReplicaAddress(replica);
          }

          if (replicaAddress != null && replicaAddress.equals(thisAddress)) {
            PartitionContainer partitionContainer = getPartitionContainer(partitionId);
            RecordStore recordStore = partitionContainer.getRecordStore(mapName);
            heapCost += recordStore.getHeapCost();

            Map<Data, Record> records = recordStore.getReadonlyRecordMap();
            for (Record record : records.values()) {
              backupEntryCount++;
              backupEntryMemoryCost += record.getCost();
            }
          } else if (replicaAddress == null && clusterService.getSize() > backupCount) {
            logger.warning("Partition: " + partition + ", replica: " + replica + " has no owner!");
          }
        }
      }
    }

    if (mapContainer.getMapStoreScheduler() != null) {
      dirtyCount = mapContainer.getMapStoreScheduler().size();
    }
    localMapStats.setBackupCount(backupCount);
    localMapStats.setDirtyEntryCount(zeroOrPositive(dirtyCount));
    localMapStats.setLockedEntryCount(zeroOrPositive(lockedEntryCount));
    localMapStats.setHits(zeroOrPositive(hits));
    localMapStats.setOwnedEntryCount(zeroOrPositive(ownedEntryCount));
    localMapStats.setBackupEntryCount(zeroOrPositive(backupEntryCount));
    localMapStats.setOwnedEntryMemoryCost(zeroOrPositive(ownedEntryMemoryCost));
    localMapStats.setBackupEntryMemoryCost(zeroOrPositive(backupEntryMemoryCost));
    // add near cache heap cost.
    heapCost += mapContainer.getNearCacheSizeEstimator().getSize();
    localMapStats.setHeapCost(heapCost);
    if (mapContainer.getMapConfig().isNearCacheEnabled()) {
      NearCacheStatsImpl nearCacheStats = getNearCache(mapName).getNearCacheStats();
      localMapStats.setNearCacheStats(nearCacheStats);
    }

    return localMapStats;
  }
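
These per-member statistics surface on the public API through IMap.getLocalMapStats(). A minimal sketch, assuming a placeholder map name, that exercises the path above; per-map statistics must be enabled in MapConfig (they are by default), otherwise createLocalMapStats returns early as shown.

import com.hazelcast.config.Config;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import com.hazelcast.core.IMap;
import com.hazelcast.monitor.LocalMapStats;

public class LocalMapStatsSketch {
  public static void main(String[] args) {
    Config config = new Config();
    // statistics are enabled by default; shown explicitly here ("ordersMap" is a placeholder)
    config.getMapConfig("ordersMap").setStatisticsEnabled(true);

    HazelcastInstance hz = Hazelcast.newHazelcastInstance(config);
    IMap<String, String> map = hz.getMap("ordersMap");
    map.put("k", "v");

    LocalMapStats stats = map.getLocalMapStats();
    System.out.println("owned entries:  " + stats.getOwnedEntryCount());
    System.out.println("backup entries: " + stats.getBackupEntryCount());
    System.out.println("hits:           " + stats.getHits());
    System.out.println("heap cost:      " + stats.getHeapCost());

    hz.shutdown();
  }
}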